From 838f1825e3eb8aea9603bd46a14b12fcdd837d3b Mon Sep 17 00:00:00 2001 From: ShrinidhiFCTS23 Date: Mon, 30 Jun 2025 11:59:41 +0530 Subject: [PATCH 01/36] added azure vpc 0.2 module --- .../network/azure_network/1.0/facets.yaml | 20 + .../network/azure_network/1.0/main.tf | 738 ++++++++++++++++++ .../network/azure_network/1.0/outputs.tf | 5 + .../network/azure_network/1.0/variables.tf | 26 + 4 files changed, 789 insertions(+) create mode 100644 modules/network/azure_vpc/network/azure_network/1.0/facets.yaml create mode 100644 modules/network/azure_vpc/network/azure_network/1.0/main.tf create mode 100644 modules/network/azure_vpc/network/azure_network/1.0/outputs.tf create mode 100644 modules/network/azure_vpc/network/azure_network/1.0/variables.tf diff --git a/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml b/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml new file mode 100644 index 000000000..07bae8e38 --- /dev/null +++ b/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml @@ -0,0 +1,20 @@ +intent: network +flavor: azure_network +version: "1.0" +clouds: [ azure ] +description: This is Network Module for Azure +spec: + title: Network Module for Azure + description: This is Network Module for Azure + type: object +outputs: + default: + type: "@outputs/network" +sample: + kind: network + flavor: azure_network + version: "1.0" + disabled: true + spec: + type: object + properties: \ No newline at end of file diff --git a/modules/network/azure_vpc/network/azure_network/1.0/main.tf b/modules/network/azure_vpc/network/azure_network/1.0/main.tf new file mode 100644 index 000000000..5f4376891 --- /dev/null +++ b/modules/network/azure_vpc/network/azure_network/1.0/main.tf @@ -0,0 +1,738 @@ +######################################################################### +# Terraform Module Structure # +# # +# ── Guidance for Code Generators / AI Tools ───────────────────────── # +# # +# • Keep this main.tf file **intentionally empty**. 
# +# It serves only as the module's entry point. # +# # +# • Create additional *.tf files that are **logically grouped** # +# according to the functionality and resources of the module. # +# # +# • Group related resources, data sources, locals, variables, and # +# outputs into separate files to improve clarity and maintainability. # +# # +# • Choose file names that clearly reflect the purpose of the contents. # +# # +# • Add new files as needed when new functionality areas are introduced,# +# instead of expanding existing files indefinitely. # +# # +# This ensures modules stay clean, scalable, and easy to navigate. # +######################################################################### + +# Local values for calculations +locals { + # Calculate subnet mask from IP count + subnet_mask_map = { + "256" = 24 # /24 = 256 IPs + "512" = 23 # /23 = 512 IPs + "1024" = 22 # /22 = 1024 IPs + "2048" = 21 # /21 = 2048 IPs + "4096" = 20 # /20 = 4096 IPs + "8192" = 19 # /19 = 8192 IPs + } + + # Use fixed CIDR allocation like the original (optional) + use_fixed_cidrs = lookup(var.instance.spec, "use_fixed_cidr_allocation", false) + + # Fixed CIDR allocation (similar to original logic) + fixed_private_subnets = local.use_fixed_cidrs ? [for i in range(4) : cidrsubnet(var.instance.spec.vnet_cidr, 4, i)] : [] + fixed_public_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 12), cidrsubnet(var.instance.spec.vnet_cidr, 4, 14), cidrsubnet(var.instance.spec.vnet_cidr, 4, 15)] : [] + fixed_database_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 4), cidrsubnet(var.instance.spec.vnet_cidr, 4, 5)] : [] + fixed_gateway_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 6)] : [] + fixed_cache_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 112)] : [] + fixed_functions_subnets = local.use_fixed_cidrs ? 
[cidrsubnet(var.instance.spec.vnet_cidr, 8, 113)] : [] + fixed_private_link_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 114)] : [] + + vnet_prefix_length = tonumber(split("/", var.instance.spec.vnet_cidr)[1]) + + public_subnet_newbits = local.subnet_mask_map[var.instance.spec.public_subnets.subnet_size] - local.vnet_prefix_length + private_subnet_newbits = local.subnet_mask_map[var.instance.spec.private_subnets.subnet_size] - local.vnet_prefix_length + database_subnet_newbits = local.subnet_mask_map[var.instance.spec.database_subnets.subnet_size] - local.vnet_prefix_length + + # Calculate total number of subnets needed (only for dynamic allocation) + public_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0 + private_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az : 0 + database_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az : 0 + + # Specialized subnets (always use fixed allocation for these) + gateway_subnets_enabled = lookup(var.instance.spec, "enable_gateway_subnet", false) + cache_subnets_enabled = lookup(var.instance.spec, "enable_cache_subnet", false) + functions_subnets_enabled = lookup(var.instance.spec, "enable_functions_subnet", false) + private_link_svc_enabled = lookup(var.instance.spec, "enable_private_link_service_subnet", false) + + # Create list of newbits for cidrsubnets function (dynamic allocation only) + subnet_newbits = !local.use_fixed_cidrs ? concat( + var.instance.spec.public_subnets.count_per_az > 0 ? 
[ + for i in range(local.public_total_subnets) : local.public_subnet_newbits + ] : [], + [for i in range(local.private_total_subnets) : local.private_subnet_newbits], + [for i in range(local.database_total_subnets) : local.database_subnet_newbits] + ) : [] + + # Generate all subnet CIDRs using cidrsubnets function - this prevents overlaps (dynamic allocation) + all_subnet_cidrs = !local.use_fixed_cidrs && length(local.subnet_newbits) > 0 ? cidrsubnets(var.instance.spec.vnet_cidr, local.subnet_newbits...) : [] + + # Extract subnet CIDRs by type (dynamic allocation) + public_subnet_cidrs = !local.use_fixed_cidrs && var.instance.spec.public_subnets.count_per_az > 0 ? slice( + local.all_subnet_cidrs, + 0, + local.public_total_subnets + ) : local.fixed_public_subnets + + private_subnet_cidrs = !local.use_fixed_cidrs ? slice( + local.all_subnet_cidrs, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets : 0, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets + ) : local.fixed_private_subnets + + database_subnet_cidrs = !local.use_fixed_cidrs ? slice( + local.all_subnet_cidrs, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets + local.database_total_subnets : local.private_total_subnets + local.database_total_subnets + ) : local.fixed_database_subnets + + # Create subnet mappings with AZ and CIDR + public_subnets = var.instance.spec.public_subnets.count_per_az > 0 ? ( + local.use_fixed_cidrs ? 
[ + for i, cidr in local.public_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.public_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.public_subnet_cidrs[az_index * var.instance.spec.public_subnets.count_per_az + subnet_index] + } + ] + ]) + ) : [] + + private_subnets = local.use_fixed_cidrs ? [ + for i, cidr in local.private_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.private_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.private_subnet_cidrs[az_index * var.instance.spec.private_subnets.count_per_az + subnet_index] + } + ] + ]) + + database_subnets = local.use_fixed_cidrs ? 
[ + for i, cidr in local.database_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.database_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.database_subnet_cidrs[az_index * var.instance.spec.database_subnets.count_per_az + subnet_index] + } + ] + ]) + + # Specialized subnets (always use fixed allocation) + gateway_subnets = local.gateway_subnets_enabled ? [ + for i, cidr in local.fixed_gateway_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + cache_subnets = local.cache_subnets_enabled ? [ + for i, cidr in local.fixed_cache_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + functions_subnets = local.functions_subnets_enabled ? [ + for i, cidr in local.fixed_functions_subnets : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + private_link_service_subnets = local.private_link_svc_enabled ? [ + for i, cidr in local.fixed_private_link_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + # Private endpoints configuration with defaults + private_endpoints = var.instance.spec.private_endpoints != null ? 
var.instance.spec.private_endpoints : { + enable_storage = true + enable_sql = true + enable_keyvault = true + enable_acr = true + enable_aks = false + enable_cosmos = false + enable_servicebus = false + enable_eventhub = false + enable_monitor = false + enable_cognitive = false + } + + # Resource naming prefix + name_prefix = "${var.environment.unique_name}-${var.instance_name}" + + # Common tags + common_tags = merge( + var.environment.cloud_tags, + lookup(var.instance.spec, "tags", {}), + { + Name = local.name_prefix + Environment = var.environment.name + } + ) +} + +# Resource Group +resource "azurerm_resource_group" "main" { + name = "${local.name_prefix}-rg" + location = var.instance.spec.region + + tags = local.common_tags + + lifecycle { + prevent_destroy = true + } +} + +# Virtual Network +resource "azurerm_virtual_network" "main" { + name = "${local.name_prefix}-vnet" + address_space = [var.instance.spec.vnet_cidr] + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags + + lifecycle { + prevent_destroy = true + } +} + +# Public Subnets +resource "azurerm_subnet" "public" { + for_each = var.instance.spec.public_subnets.count_per_az > 0 ? 
{ + for subnet in local.public_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } : {} + + name = "${local.name_prefix}-public-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Private Subnets +resource "azurerm_subnet" "private" { + for_each = { + for subnet in local.private_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-private-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Delegate subnet to specific services if needed + dynamic "delegation" { + for_each = var.instance.spec.enable_aks ? 
[1] : [] + content { + name = "aks-delegation" + service_delegation { + name = "Microsoft.ContainerService/managedClusters" + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + ] + } + } + } + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Database Subnets +resource "azurerm_subnet" "database" { + for_each = { + for subnet in local.database_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-database-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Enable private link endpoint policies + enforce_private_link_endpoint_network_policies = true + + # Delegate to SQL services + delegation { + name = "sql-delegation" + service_delegation { + name = "Microsoft.DBforMySQL/flexibleServers" + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action", + "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action" + ] + } + } + + lifecycle { + ignore_changes = [service_endpoints, delegation, name] + } +} + +# Gateway Subnets (for VPN/ExpressRoute gateways) +resource "azurerm_subnet" "gateway" { + for_each = { + for subnet in local.gateway_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-gateway-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Cache Subnets (for Redis and other caching services) +resource "azurerm_subnet" "cache" { + for_each = { + for subnet in 
local.cache_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-cache-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Functions Subnets (dedicated for Azure Functions) +resource "azurerm_subnet" "functions" { + for_each = { + for subnet in local.functions_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-functions-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Enable private link endpoint policies + enforce_private_link_endpoint_network_policies = true + + # Delegate to Azure Functions + delegation { + name = "functions-delegation" + service_delegation { + name = "Microsoft.Web/serverFarms" + actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"] + } + } + + lifecycle { + ignore_changes = [service_endpoints, delegation, name] + } +} + +# Private Link Service Subnets +resource "azurerm_subnet" "private_link_service" { + for_each = { + for subnet in local.private_link_service_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-pls-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Enable private link service policies (this is why we need a dedicated subnet) + enforce_private_link_service_network_policies = true + + lifecycle { + ignore_changes = [service_endpoints, name] + } +} + +# Public IP for NAT 
Gateway +resource "azurerm_public_ip" "nat_gateway" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? { + single = var.instance.spec.availability_zones[0] + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-pip-${each.key}" : "${local.name_prefix}-natgw-pip" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + allocation_method = "Static" + sku = "Standard" + zones = [each.key] + + tags = local.common_tags + + lifecycle { + ignore_changes = [name] + } +} + +# NAT Gateway +resource "azurerm_nat_gateway" "main" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? { + single = var.instance.spec.availability_zones[0] + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-${each.key}" : "${local.name_prefix}-natgw" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + sku_name = "Standard" + idle_timeout_in_minutes = 10 + zones = [each.key] + + tags = local.common_tags + + lifecycle { + ignore_changes = [name] + } +} + +# Associate Public IP with NAT Gateway +resource "azurerm_nat_gateway_public_ip_association" "main" { + for_each = azurerm_nat_gateway.main + + nat_gateway_id = each.value.id + public_ip_address_id = azurerm_public_ip.nat_gateway[each.key].id +} + +# Route Table for Public Subnets +resource "azurerm_route_table" "public" { + count = var.instance.spec.public_subnets.count_per_az > 0 ? 
1 : 0 + + name = "${local.name_prefix}-public-rt" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Public Subnets +resource "azurerm_subnet_route_table_association" "public" { + for_each = azurerm_subnet.public + + subnet_id = each.value.id + route_table_id = azurerm_route_table.public[0].id +} + +# Route Table for Private Subnets +resource "azurerm_route_table" "private" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? { + single = "single" + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-private-rt-${each.key}" : "${local.name_prefix}-private-rt" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Private Subnets +resource "azurerm_subnet_route_table_association" "private" { + for_each = azurerm_subnet.private + + subnet_id = each.value.id + route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id +} + +# Route Table for Database Subnets (isolated) +resource "azurerm_route_table" "database" { + for_each = { + for az in var.instance.spec.availability_zones : az => az + } + + name = "${local.name_prefix}-database-rt-${each.key}" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Database Subnets +resource "azurerm_subnet_route_table_association" "database" { + for_each = azurerm_subnet.database + + subnet_id = each.value.id + route_table_id = azurerm_route_table.database[split("-", each.key)[0]].id +} + +# Associate NAT Gateway with Private Route Tables +resource "azurerm_subnet_nat_gateway_association" "private" { + for_each = { + for k, v in azurerm_subnet.private : k => v + if var.instance.spec.public_subnets.count_per_az > 0 + } + + subnet_id = each.value.id + nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id +} + +# Associate NAT Gateway with Functions Subnets +resource "azurerm_subnet_nat_gateway_association" "functions" { + for_each = { + for k, v in azurerm_subnet.functions : k => v + if var.instance.spec.public_subnets.count_per_az > 0 + } + + subnet_id = each.value.id + nat_gateway_id = azurerm_nat_gateway.main["single"].id # Functions typically use single NAT Gateway +} + +# Network Security Group - Allow all within VNet (similar to original logic) +resource "azurerm_network_security_group" "allow_all_default" { + name = "${local.name_prefix}-allow-all-default-nsg" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + security_rule { + name = "AllowVnetInbound" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = var.instance.spec.vnet_cidr + destination_address_prefix = "*" + description = "Allowing connection from within vnet" + } + + tags = merge(local.common_tags, { + Terraform = "true" + }) + + lifecycle { + ignore_changes = [name] + } +} + +# Security Group for VPC Endpoints (keep existing for private endpoints) +resource "azurerm_network_security_group" "vpc_endpoints" { + count = anytrue([ + try(local.private_endpoints.enable_storage, false), + try(local.private_endpoints.enable_sql, false), + try(local.private_endpoints.enable_keyvault, false), + try(local.private_endpoints.enable_acr, false), + try(local.private_endpoints.enable_aks, false), + try(local.private_endpoints.enable_cosmos, false), + try(local.private_endpoints.enable_servicebus, false), + try(local.private_endpoints.enable_eventhub, false), + try(local.private_endpoints.enable_monitor, false), + try(local.private_endpoints.enable_cognitive, false) + ]) ? 
1 : 0 + + name = "${local.name_prefix}-private-endpoints-nsg" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + security_rule { + name = "AllowHTTPS" + priority = 1001 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = var.instance.spec.vnet_cidr + destination_address_prefix = "*" + } + + security_rule { + name = "AllowOutbound" + priority = 1001 + direction = "Outbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + tags = local.common_tags +} + +# Network Security Groups for Subnets - Apply the allow-all NSG to all subnets +resource "azurerm_subnet_network_security_group_association" "public" { + for_each = azurerm_subnet.public + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "private" { + for_each = azurerm_subnet.private + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "database" { + for_each = azurerm_subnet.database + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "gateway" { + for_each = azurerm_subnet.gateway + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "cache" { + for_each = azurerm_subnet.cache + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" 
"functions" { + for_each = azurerm_subnet.functions + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "private_link_service" { + for_each = azurerm_subnet.private_link_service + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +# Private DNS Zone for Private Endpoints +resource "azurerm_private_dns_zone" "private_endpoints" { + for_each = { + storage = try(local.private_endpoints.enable_storage, false) ? "privatelink.blob.core.windows.net" : null + sql = try(local.private_endpoints.enable_sql, false) ? "privatelink.database.windows.net" : null + keyvault = try(local.private_endpoints.enable_keyvault, false) ? "privatelink.vaultcore.azure.net" : null + acr = try(local.private_endpoints.enable_acr, false) ? "privatelink.azurecr.io" : null + cosmos = try(local.private_endpoints.enable_cosmos, false) ? "privatelink.documents.azure.com" : null + servicebus = try(local.private_endpoints.enable_servicebus, false) ? "privatelink.servicebus.windows.net" : null + eventhub = try(local.private_endpoints.enable_eventhub, false) ? "privatelink.servicebus.windows.net" : null + monitor = try(local.private_endpoints.enable_monitor, false) ? "privatelink.monitor.azure.com" : null + cognitive = try(local.private_endpoints.enable_cognitive, false) ? 
"privatelink.cognitiveservices.azure.com" : null + } + + name = each.value + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Link Private DNS Zone to VNet +resource "azurerm_private_dns_zone_virtual_network_link" "private_endpoints" { + for_each = azurerm_private_dns_zone.private_endpoints + + name = "${local.name_prefix}-${each.key}-dns-link" + resource_group_name = azurerm_resource_group.main.name + private_dns_zone_name = each.value.name + virtual_network_id = azurerm_virtual_network.main.id + registration_enabled = false + + tags = local.common_tags +} + +# Example Storage Account (for demonstration of private endpoint) +resource "azurerm_storage_account" "example" { + count = try(local.private_endpoints.enable_storage, false) ? 1 : 0 + + name = "${replace(local.name_prefix, "-", "")}stor" + resource_group_name = azurerm_resource_group.main.name + location = azurerm_resource_group.main.location + account_tier = "Standard" + account_replication_type = "LRS" + + # Disable public access + public_network_access_enabled = false + + tags = local.common_tags +} + +# Private Endpoint for Storage Account +resource "azurerm_private_endpoint" "storage" { + count = try(local.private_endpoints.enable_storage, false) ? 
1 : 0 + + name = "${local.name_prefix}-storage-pe" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + subnet_id = values(azurerm_subnet.private)[0].id + + private_service_connection { + name = "${local.name_prefix}-storage-psc" + private_connection_resource_id = azurerm_storage_account.example[0].id + subresource_names = ["blob"] + is_manual_connection = false + } + + private_dns_zone_group { + name = "storage-dns-zone-group" + private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["storage"].id] + } + + tags = local.common_tags +} \ No newline at end of file diff --git a/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf b/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf new file mode 100644 index 000000000..fe76d0638 --- /dev/null +++ b/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf @@ -0,0 +1,5 @@ +locals { + output_interfaces = {} + output_attributes = { + } +} \ No newline at end of file diff --git a/modules/network/azure_vpc/network/azure_network/1.0/variables.tf b/modules/network/azure_vpc/network/azure_network/1.0/variables.tf new file mode 100644 index 000000000..c508f14ab --- /dev/null +++ b/modules/network/azure_vpc/network/azure_network/1.0/variables.tf @@ -0,0 +1,26 @@ +variable "instance" { + description = "This is Network Module for Azure" + type = object({ + kind = string + flavor = string + version = string + spec = object({ + }) + }) +} +variable "instance_name" { + description = "The architectural name for the resource as added in the Facets blueprint designer." + type = string +} +variable "environment" { + description = "An object containing details about the environment." + type = object({ + name = string + unique_name = string + }) +} +variable "inputs" { + description = "A map of inputs requested by the module developer." 
+ type = object({ + }) +} \ No newline at end of file From a2bf8774b1f760680c42504f785a04ccb112c2e6 Mon Sep 17 00:00:00 2001 From: ShrinidhiFCTS23 Date: Mon, 30 Jun 2025 14:12:26 +0530 Subject: [PATCH 02/36] added outputs and facets yaml for azure vpc --- .../network/azure_network/1.0/facets.yaml | 292 +++++++++++++++++- .../network/azure_network/1.0/outputs.tf | 23 +- 2 files changed, 306 insertions(+), 9 deletions(-) diff --git a/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml b/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml index 07bae8e38..e48b7c4a3 100644 --- a/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml +++ b/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml @@ -1,20 +1,296 @@ intent: network flavor: azure_network version: "1.0" -clouds: [ azure ] -description: This is Network Module for Azure +description: Creates an Azure Virtual Network with configurable public subnets, private subnets, database subnets, and specialized subnets across availability zones +clouds: +- azure spec: - title: Network Module for Azure - description: This is Network Module for Azure type: object + properties: + vnet_cidr: + type: string + title: VNet CIDR Block + description: CIDR block for the Virtual Network (e.g., 10.0.0.0/16) + pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$ + x-ui-overrides-only: true + x-ui-error-message: CIDR must be a valid IP block (e.g., 10.0.0.0/16) + x-ui-placeholder: 10.0.0.0/16 + + region: + type: string + title: Azure Region + description: Azure region where the VNet will be created + x-ui-overrides-only: true + + availability_zones: + type: array + title: Availability Zones + description: List of availability zones to use for subnets (e.g., ["1", "2", "3"]) + x-ui-overrides-only: true + items: + type: string + minItems: 1 + maxItems: 3 + + use_fixed_cidr_allocation: + type: boolean + title: Use Fixed CIDR Allocation + description: Use predefined CIDR ranges instead of dynamic 
allocation + default: false + + public_subnets: + type: object + title: Public Subnets Configuration + properties: + count_per_az: + type: number + title: Public Subnets per AZ + description: Number of public subnets to create in each availability zone + minimum: 0 + maximum: 3 + default: 1 + subnet_size: + type: string + title: Public Subnet Size + description: Number of IP addresses in each public subnet + enum: + - '256' + - '512' + - '1024' + - '2048' + - '4096' + - '8192' + default: '256' + required: + - count_per_az + - subnet_size + + private_subnets: + type: object + title: Private Subnets Configuration + properties: + count_per_az: + type: number + title: Private Subnets per AZ + description: Number of private subnets to create in each availability zone + minimum: 1 + maximum: 3 + default: 1 + subnet_size: + type: string + title: Private Subnet Size + description: Number of IP addresses in each private subnet + enum: + - '256' + - '512' + - '1024' + - '2048' + - '4096' + - '8192' + default: '1024' + required: + - count_per_az + - subnet_size + + database_subnets: + type: object + title: Database Subnets Configuration + description: Configure dedicated subnets for database resources + properties: + count_per_az: + type: number + title: Database Subnets per AZ + description: Number of database subnets to create in each availability zone + minimum: 0 + maximum: 3 + default: 1 + subnet_size: + type: string + title: Database Subnet Size + description: Number of IP addresses in each database subnet + enum: + - '256' + - '512' + - '1024' + - '2048' + - '4096' + - '8192' + default: '256' + required: + - count_per_az + - subnet_size + + # Specialized Subnets + enable_gateway_subnet: + type: boolean + title: Enable Gateway Subnet + description: Create subnet for VPN/ExpressRoute gateways + default: false + + enable_cache_subnet: + type: boolean + title: Enable Cache Subnet + description: Create subnet for Redis and other caching services + default: false + + 
enable_functions_subnet: + type: boolean + title: Enable Functions Subnet + description: Create subnet for Azure Functions with VNet integration + default: false + + enable_private_link_service_subnet: + type: boolean + title: Enable Private Link Service Subnet + description: Create subnet for hosting Private Link Services + default: false + + # AKS Integration + enable_aks: + type: boolean + title: Enable AKS Integration + description: Configure subnets for Azure Kubernetes Service integration + default: false + + nat_gateway: + type: object + title: NAT Gateway Configuration + properties: + strategy: + type: string + title: NAT Gateway Strategy + description: Choose whether to create one NAT Gateway or one per availability zone + enum: + - single + - per_az + default: single + required: + - strategy + + private_endpoints: + type: object + title: Private Endpoints Configuration + description: Configure private endpoints for Azure services to improve security and reduce data transfer costs + x-ui-toggle: true + properties: + enable_storage: + type: boolean + title: Enable Storage Private Endpoint + description: Create private endpoint for Azure Storage (Blob) + default: true + enable_sql: + type: boolean + title: Enable SQL Private Endpoint + description: Create private endpoint for Azure SQL Database + default: true + enable_keyvault: + type: boolean + title: Enable Key Vault Private Endpoint + description: Create private endpoint for Azure Key Vault + default: true + enable_acr: + type: boolean + title: Enable ACR Private Endpoint + description: Create private endpoint for Azure Container Registry + default: true + enable_aks: + type: boolean + title: Enable AKS Private Endpoint + description: Create private endpoint for Azure Kubernetes Service + default: false + enable_cosmos: + type: boolean + title: Enable Cosmos DB Private Endpoint + description: Create private endpoint for Azure Cosmos DB + default: false + enable_servicebus: + type: boolean + title: Enable 
Service Bus Private Endpoint + description: Create private endpoint for Azure Service Bus + default: false + enable_eventhub: + type: boolean + title: Enable Event Hub Private Endpoint + description: Create private endpoint for Azure Event Hub + default: false + enable_monitor: + type: boolean + title: Enable Monitor Private Endpoint + description: Create private endpoint for Azure Monitor + default: false + enable_cognitive: + type: boolean + title: Enable Cognitive Services Private Endpoint + description: Create private endpoint for Azure Cognitive Services + default: false + + tags: + type: object + title: Additional Tags + description: Optional additional tags to apply to all VNet resources. These will be merged with the standard environment tags. + x-ui-yaml-editor: true + + required: + - vnet_cidr + - region + - availability_zones + - public_subnets + - private_subnets + - database_subnets + - nat_gateway +inputs: + cloud_account: + type: '@outputs/cloud_account' + displayName: Cloud Account + description: The Azure Cloud Account where the VNet will be created + optional: false + providers: + - azure + - azurerm outputs: default: - type: "@outputs/network" + type: '@facets/azure-vnet-details' + sample: kind: network flavor: azure_network version: "1.0" - disabled: true spec: - type: object - properties: \ No newline at end of file + vnet_cidr: 10.0.0.0/16 + region: centralindia + availability_zones: ["1", "2", "3"] + use_fixed_cidr_allocation: false + public_subnets: + count_per_az: 1 + subnet_size: '256' + private_subnets: + count_per_az: 1 + subnet_size: '1024' + database_subnets: + count_per_az: 1 + subnet_size: '256' + enable_gateway_subnet: false + enable_cache_subnet: false + enable_functions_subnet: false + enable_private_link_service_subnet: false + enable_aks: false + nat_gateway: + strategy: single + private_endpoints: + enable_storage: true + enable_sql: true + enable_keyvault: true + enable_acr: true + enable_aks: false + enable_cosmos: false + 
enable_servicebus: false + enable_eventhub: false + enable_monitor: false + enable_cognitive: false + tags: + Environment: production + Project: main-infrastructure + +iac: + validated_files: + - variables.tf \ No newline at end of file diff --git a/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf b/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf index fe76d0638..44872663a 100644 --- a/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf +++ b/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf @@ -1,5 +1,26 @@ locals { - output_interfaces = {} output_attributes = { + resource_group_id = azurerm_resource_group.main.id + resource_group_name = azurerm_resource_group.main.name + vnet_id = azurerm_virtual_network.main.id + vnet_name = azurerm_virtual_network.main.name + vnet_cidr_block = var.instance.spec.vnet_cidr + location = azurerm_resource_group.main.location + availability_zones = var.instance.spec.availability_zones + nat_gateway_ids = values(azurerm_nat_gateway.main)[*].id + nat_gateway_public_ip_ids = values(azurerm_public_ip.nat_gateway)[*].id + public_subnet_ids = values(azurerm_subnet.public)[*].id + private_subnet_ids = values(azurerm_subnet.private)[*].id + database_subnet_ids = values(azurerm_subnet.database)[*].id + gateway_subnet_ids = values(azurerm_subnet.gateway)[*].id + cache_subnet_ids = values(azurerm_subnet.cache)[*].id + functions_subnet_ids = values(azurerm_subnet.functions)[*].id + private_link_service_subnet_ids = values(azurerm_subnet.private_link_service)[*].id + default_security_group_id = azurerm_network_security_group.allow_all_default.id + private_endpoints_security_group_id = try(azurerm_network_security_group.vpc_endpoints[0].id, null) + storage_private_endpoint_id = try(azurerm_private_endpoint.storage[0].id, null) + storage_account_id = try(azurerm_storage_account.example[0].id, null) + } + output_interfaces = { } } \ No newline at end of file From 
0b255fa5781fb5588d8ac71680d05785be3fd718 Mon Sep 17 00:00:00 2001 From: ShrinidhiFCTS23 Date: Tue, 1 Jul 2025 21:39:05 +0530 Subject: [PATCH 03/36] added azure aks --- .../azure_aks/0.2/README.md | 0 .../azure_aks/0.2/facets.yaml | 542 ++++++++++++++++++ .../azure_aks/0.2/k8s_cluster/locals.tf | 75 +++ .../azure_aks/0.2/k8s_cluster/main.tf | 321 +++++++++++ .../azure_aks/0.2/k8s_cluster/outputs.tf | 41 ++ .../azure_aks/0.2/k8s_cluster/variables.tf | 65 +++ .../azure_aks/0.2/k8s_cluster/versions.tf | 26 + .../azure_aks/0.2/locals.tf | 90 +++ .../kubernetes_cluster/azure_aks/0.2/main.tf | 138 +++++ .../azure_aks/0.2/outputs.tf | 67 +++ .../azure_aks/0.2/variables.tf | 26 + .../network/azure_network/1.0/facets.yaml | 54 +- .../network/azure_network/1.0/main.tf | 110 ++-- .../network/azure_network/1.0/outputs.tf | 2 +- 14 files changed, 1464 insertions(+), 93 deletions(-) create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/README.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/facets.yaml create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/locals.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/outputs.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/variables.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/README.md b/modules/kubernetes_cluster/azure_aks/0.2/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml 
b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml new file mode 100644 index 000000000..8b0d9a126 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -0,0 +1,542 @@ +intent: kubernetes_cluster +flavor: azure_aks +alias-flavors: +- default +version: '0.3' +clouds: +- azure +title: AKS Cluster with Auto-Upgrade Support +description: A Kubernetes AKS cluster module with auto-upgrade enabled by default and + all necessary configurations preset. +allow_skipping_module_on_selective_release: false +spec: + type: object + x-ui-order: + - cluster + - auto_upgrade_settings + - node_pools + - secret_copier + - features + - tags + properties: + secret_copier: + type: object + title: Secret Copier + description: Configuration for the Secret Copier. + x-ui-toggle: false + properties: + disabled: + type: boolean + title: Disabled + description: Disable secret copier. + default: false + values: + type: object + title: Values + description: Values to pass to the secret copier. + default: {} + x-ui-yaml-editor: true + cluster: + type: object + title: Cluster + description: Configuration for the AKS cluster. + x-ui-toggle: false + properties: + kubernetes_version: + type: string + title: Kubernetes Version + description: Version of Kubernetes to use for the AKS cluster. + default: '1.31' + cluster_endpoint_public_access: + type: boolean + title: Cluster Endpoint Public Access + description: Whether the AKS public API server endpoint is enabled. + default: true + x-ui-overrides-only: true + cluster_endpoint_private_access: + type: boolean + title: Cluster Endpoint Private Access + description: Whether the AKS private API server endpoint is enabled. + default: false + x-ui-overrides-only: true + cluster_endpoint_public_access_cidrs: + type: array + title: Cluster Endpoint Public Access CIDRs + description: List of CIDR blocks which can access the AKS public + API server endpoint. 
+ default: + - 0.0.0.0/0 + x-ui-overrides-only: true + cluster_endpoint_private_access_cidrs: + type: array + title: Cluster Endpoint Private Access CIDRs + description: List of CIDR blocks which can access the AKS private + API server endpoint. + default: [] + x-ui-overrides-only: true + cluster_enabled_log_types: + type: array + title: Cluster Enabled Log Types + description: List of log types to enable for the AKS cluster. + default: [] + x-ui-overrides-only: true + items: + type: string + enum: + - api + - audit + - authenticator + - controllerManager + - scheduler + default_reclaim_policy: + type: string + title: Default Reclaim Policy + description: Default reclaim policy for the AKS cluster. + default: Delete + x-ui-overrides-only: true + enum: + - Delete + - Retain + sku_tier: + type: string + title: SKU Tier + description: SKU tier for the AKS cluster. + default: Free + enum: + - Free + - Standard + storage_account_last_access_time_enabled: + type: boolean + title: Storage Account Last Access Time Enabled + description: Enable last access time tracking for storage account. + default: true + x-ui-overrides-only: true + required: + - kubernetes_version + auto_upgrade_settings: + type: object + title: Auto-Upgrade Settings + description: Configuration for automatic cluster upgrades. + x-ui-toggle: false + properties: + enable_auto_upgrade: + type: boolean + title: Enable Auto-Upgrade + description: Enable automatic cluster upgrades. + default: true + automatic_channel_upgrade: + type: string + title: Automatic Channel Upgrade + description: Auto-upgrade channel for the cluster. + default: stable + enum: + - rapid + - regular + - stable + - patch + - node-image + - none + x-ui-visible-if: + field: spec.auto_upgrade_settings.enable_auto_upgrade + values: + - true + max_surge: + type: string + title: Max Surge + description: Maximum number of nodes that can be added during upgrade (number or percentage). 
+ default: '1' + pattern: ^([0-9]+%?|[0-9]+)$ + x-ui-error-message: Max surge must be a number or percentage (e.g., 1, 33%) + x-ui-visible-if: + field: spec.auto_upgrade_settings.enable_auto_upgrade + values: + - true + maintenance_window: + type: object + title: Maintenance Window + description: Maintenance window configuration for upgrades. + x-ui-toggle: false + x-ui-visible-if: + field: spec.auto_upgrade_settings.enable_auto_upgrade + values: + - true + properties: + is_disabled: + type: boolean + title: Disable Maintenance Window + description: Disable maintenance window (allow upgrades anytime). + default: true + day_of_week: + type: string + title: Day of Week + description: Day of week for maintenance. + default: SUN + enum: + - SUN + - MON + - TUE + - WED + - THU + - FRI + - SAT + x-ui-visible-if: + field: spec.auto_upgrade_settings.maintenance_window.is_disabled + values: + - false + start_time: + type: integer + title: Start Time + description: Start hour for maintenance window (24-hour format). + default: 2 + minimum: 0 + maximum: 23 + x-ui-visible-if: + field: spec.auto_upgrade_settings.maintenance_window.is_disabled + values: + - false + end_time: + type: integer + title: End Time + description: End hour for maintenance window (24-hour format). + default: 6 + minimum: 0 + maximum: 23 + x-ui-visible-if: + field: spec.auto_upgrade_settings.maintenance_window.is_disabled + values: + - false + tags: + type: object + title: Tags + description: Tags to apply to the AKS cluster. + x-ui-toggle: false + x-ui-yaml-editor: true + node_pools: + type: object + title: Node Pools + description: Configuration for managed node pools. + x-ui-toggle: false + properties: + system_np: + type: object + title: System Node Pool + description: Configuration for system node pool (required for AKS). + x-ui-toggle: false + properties: + enabled: + type: boolean + title: Enabled + description: Enable system node pool. 
+ default: true + readOnly: true + node_count: + type: integer + title: Node Count + description: Initial number of nodes. + default: 1 + minimum: 1 + maximum: 1000 + x-ui-visible-if: + field: spec.node_pools.system_np.enabled + values: + - true + instance_type: + type: string + title: Instance Type + description: Azure VM size for system nodes. + default: Standard_D2_v4 + x-ui-visible-if: + field: spec.node_pools.system_np.enabled + values: + - true + max_pods: + type: integer + title: Max Pods + description: Maximum pods per node. + default: 30 + minimum: 10 + maximum: 250 + x-ui-visible-if: + field: spec.node_pools.system_np.enabled + values: + - true + os_disk_size_gb: + type: integer + title: OS Disk Size (GB) + description: OS disk size in GB. + default: 50 + minimum: 30 + maximum: 2048 + x-ui-visible-if: + field: spec.node_pools.system_np.enabled + values: + - true + enable_auto_scaling: + type: boolean + title: Enable Auto Scaling + description: Enable auto-scaling for system node pool. + default: false + x-ui-visible-if: + field: spec.node_pools.system_np.enabled + values: + - true + default: + type: object + title: Default Node Pool + description: Configuration for default application node pool. + x-ui-toggle: false + properties: + enabled: + type: boolean + title: Enabled + description: Enable default node pool. + default: true + instance_type: + type: string + title: Instance Type + description: Azure VM size for default nodes. + default: Standard_D4s_v3 + x-ui-visible-if: + field: spec.node_pools.default.enabled + values: + - true + node_lifecycle_type: + type: string + title: Node Lifecycle Type + description: Node lifecycle type. + default: SPOT + enum: + - ON_DEMAND + - SPOT + x-ui-visible-if: + field: spec.node_pools.default.enabled + values: + - true + max_nodes: + type: integer + title: Maximum Nodes + description: Maximum number of nodes in the pool. 
+ default: 200 + minimum: 1 + maximum: 1000 + x-ui-visible-if: + field: spec.node_pools.default.enabled + values: + - true + root_disk_volume: + type: integer + title: Root Disk Volume (GB) + description: Root disk size in GB. + default: 100 + minimum: 30 + maximum: 2048 + x-ui-visible-if: + field: spec.node_pools.default.enabled + values: + - true + azure_disk_type: + type: string + title: Azure Disk Type + description: Azure disk type. + default: Managed + enum: + - Managed + - Ephemeral + x-ui-visible-if: + field: spec.node_pools.default.enabled + values: + - true + required: + - enabled + - node_lifecycle_type + facets_dedicated: + type: object + title: Facets Dedicated Node Pool + description: Configuration for Facets dedicated node pool. + x-ui-toggle: false + properties: + enable: + type: boolean + title: Enable + description: Enable Facets dedicated node pool. + default: true + instance_type: + type: string + title: Instance Type + description: Azure VM size for Facets dedicated nodes. + default: Standard_D4as_v5 + x-ui-visible-if: + field: spec.node_pools.facets_dedicated.enable + values: + - true + node_lifecycle_type: + type: string + title: Node Lifecycle Type + description: Node lifecycle type for Facets nodes. + default: SPOT + enum: + - ON_DEMAND + - SPOT + x-ui-visible-if: + field: spec.node_pools.facets_dedicated.enable + values: + - true + max_nodes: + type: integer + title: Maximum Nodes + description: Maximum number of Facets dedicated nodes. + default: 200 + minimum: 1 + maximum: 1000 + x-ui-visible-if: + field: spec.node_pools.facets_dedicated.enable + values: + - true + root_disk_volume: + type: integer + title: Root Disk Volume (GB) + description: Root disk size in GB for Facets nodes. 
+ default: 100 + minimum: 30 + maximum: 2048 + x-ui-visible-if: + field: spec.node_pools.facets_dedicated.enable + values: + - true + required: + - enable + - node_lifecycle_type + features: + type: object + title: Features + description: Additional cluster features. + x-ui-toggle: false + properties: + enable_agic: + type: boolean + title: Enable AGIC + description: Enable Application Gateway Ingress Controller. + default: false + enable_overprovisioner: + type: boolean + title: Enable Overprovisioner + description: Enable cluster overprovisioner for better resource management. + default: true + overprovisioner_replicas: + type: integer + title: Overprovisioner Replicas + description: Number of overprovisioner replicas. + default: 1 + minimum: 0 + maximum: 10 + x-ui-visible-if: + field: spec.features.enable_overprovisioner + values: + - true + required: + - cluster +inputs: + network_details: + type: '@facets/azure-network-details' + displayName: Network + default: + resource_type: network + resource_name: default + cloud_account: + type: '@outputs/cloud_account' + displayName: Cloud Account + description: The Azure Cloud Account where the AKS cluster will be created + optional: false + providers: + - azurerm +outputs: + default: + type: '@outputs/azure_aks' + title: Kubernetes Cluster Output + description: The output for the Kubernetes cluster + providers: + kubernetes: + source: hashicorp/kubernetes + version: 2.17.0 + attributes: + host: attributes.cluster.auth.host + token: attributes.cluster.auth.token + cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate + helm: + source: hashicorp/helm + version: 2.8.0 + attributes: + kubernetes: + host: attributes.cluster.auth.host + token: attributes.cluster.auth.token + cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate + kubernetes-alpha: + source: hashicorp/kubernetes-alpha + version: 0.6.0 + attributes: + host: attributes.cluster.auth.host + cluster_ca_certificate: 
attributes.cluster.auth.cluster_ca_certificate + token: attributes.cluster.auth.token + default_node_pool: + type: '@outputs/azure_aks_nodepool' + title: Default Node Pool + description: The default node pool for Azure AKS + dedicated_node_pool: + type: '@outputs/azure_aks_nodepool' + title: Dedicated Node Pool + description: The dedicated node pool for Azure AKS +sample: + kind: kubernetes_cluster + flavor: azure_aks + alias-flavors: + - default + version: '0.3' + metadata: + name: aks-cluster + spec: + cluster: + kubernetes_version: '1.31' + cluster_endpoint_public_access: true + cluster_endpoint_private_access: false + cluster_endpoint_public_access_cidrs: + - 0.0.0.0/0 + cluster_endpoint_private_access_cidrs: [] + sku_tier: Free + auto_upgrade_settings: + enable_auto_upgrade: true + automatic_channel_upgrade: stable + max_surge: '1' + maintenance_window: + is_disabled: true + day_of_week: SUN + start_time: 2 + end_time: 6 + secret_copier: + disabled: false + values: + resources: + requests: + cpu: 150m + memory: 256Mi + node_pools: + system_np: + enabled: true + node_count: 1 + instance_type: Standard_D2_v4 + max_pods: 30 + os_disk_size_gb: 50 + enable_auto_scaling: false + default: + enabled: true + instance_type: Standard_D4s_v3 + node_lifecycle_type: SPOT + max_nodes: 200 + root_disk_volume: 100 + azure_disk_type: Managed + facets_dedicated: + enable: true + instance_type: Standard_D4as_v5 + node_lifecycle_type: SPOT + max_nodes: 200 + root_disk_volume: 100 + tags: {} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf new file mode 100644 index 000000000..ae2dd7db9 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf @@ -0,0 +1,75 @@ +locals { + name = module.name.name + spec = lookup(var.instance, "spec", {}) + cluster = lookup(local.spec, "cluster", {}) + auto_upgrade_settings = lookup(local.spec, 
"auto_upgrade_settings", {}) + + # Cluster configuration + kubernetes_version = lookup(local.cluster, "kubernetes_version", "1.31") + automatic_channel_upgrade = lookup(local.auto_upgrade_settings, "automatic_channel_upgrade", "stable") + enable_auto_upgrade = lookup(local.auto_upgrade_settings, "enable_auto_upgrade", true) + max_surge = lookup(local.auto_upgrade_settings, "max_surge", "1") + + # Access configuration + cluster_endpoint_public_access = lookup(local.cluster, "cluster_endpoint_public_access", true) + cluster_endpoint_private_access = lookup(local.cluster, "cluster_endpoint_private_access", false) + cluster_endpoint_public_access_cidrs = lookup(local.cluster, "cluster_endpoint_public_access_cidrs", ["0.0.0.0/0"]) + cluster_endpoint_private_access_cidrs = lookup(local.cluster, "cluster_endpoint_private_access_cidrs", []) + + # Node pool configuration + node_pools = lookup(local.spec, "node_pools", {}) + system_np = lookup(local.node_pools, "system_np", {}) + default_np = lookup(local.node_pools, "default", {}) + facets_dedicated_np = lookup(local.node_pools, "facets_dedicated", {}) + enable_default_nodepool = lookup(local.system_np, "enabled", true) + + # Maintenance window configuration + maintenance_window = lookup(local.auto_upgrade_settings, "maintenance_window", {}) + maintenance_window_config = { + is_disabled = lookup(local.maintenance_window, "is_disabled", true) + day_of_week = lookup(local.maintenance_window, "day_of_week", "SUN") + start_time = lookup(local.maintenance_window, "start_time", 2) + end_time = lookup(local.maintenance_window, "end_time", 6) + } + + # Day abbreviation mapping + day_abbreviation_to_full_name = { + "SUN" = "Sunday" + "MON" = "Monday" + "TUE" = "Tuesday" + "WED" = "Wednesday" + "THU" = "Thursday" + "FRI" = "Friday" + "SAT" = "Saturday" + } + + # Calculate maintenance window hours + hours = range(0, 24) + maintenance_window_hours = ( + local.maintenance_window_config.start_time <= 
local.maintenance_window_config.end_time + ? slice(local.hours, local.maintenance_window_config.start_time, local.maintenance_window_config.end_time + 1) + : concat( + slice(local.hours, local.maintenance_window_config.start_time, 24), + slice(local.hours, 0, local.maintenance_window_config.end_time + 1) + ) + ) + + # Storage and networking + default_reclaim_policy = lookup(local.cluster, "default_reclaim_policy", "Delete") + cluster_enabled_log_types = lookup(local.cluster, "cluster_enabled_log_types", []) + sku_tier = lookup(local.cluster, "sku_tier", "Free") + + # Resource naming + aks_name = "${substr(var.cluster.name, 0, 45 - 11)}-${var.cluster.clusterCode}" + node_resource_group = "MC_${substr(var.cluster.name, 0, 53)}_${var.cluster.clusterCode}_node_res_grp" + + # Cloud tags + cloud_tags = { + facetscontrolplane = split(".", var.cc_metadata.cc_host)[0] + cluster = var.cluster.name + facetsclustername = var.cluster.name + facetsclusterid = var.cluster.id + } + + tags = merge(var.environment.cloud_tags, lookup(local.spec, "tags", {}), local.cloud_tags) +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf new file mode 100644 index 000000000..1d4c8a869 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf @@ -0,0 +1,321 @@ +module "name" { + source = "github.com/Facets-cloud/facets-utility-modules//name" + environment = var.environment + limit = 32 + resource_name = var.instance_name + resource_type = "kubernetes_cluster" + globally_unique = true +} + +data "azurerm_kubernetes_service_versions" "current" { + location = var.vpc_details.region + version_prefix = local.kubernetes_version + include_preview = false +} + +# SSH key for Linux nodes +resource "tls_private_key" "ssh" { + algorithm = "RSA" + rsa_bits = 2048 +} + +resource "local_file" "private_key" { + content = tls_private_key.ssh.private_key_pem + filename = 
"./private_ssh_key" +} + +# AKS Cluster +resource "azurerm_kubernetes_cluster" "aks_cluster" { + name = local.aks_name + location = var.region + resource_group_name = var.resource_group_name + node_resource_group = local.node_resource_group + dns_prefix = local.aks_name + kubernetes_version = local.kubernetes_version + automatic_channel_upgrade = local.enable_auto_upgrade ? local.automatic_channel_upgrade : null + role_based_access_control_enabled = true + sku_tier = local.sku_tier + + identity { + type = "SystemAssigned" + } + + default_node_pool { + name = "defaultnp" + node_count = lookup(local.system_np, "node_count", 1) + vm_size = lookup(local.system_np, "instance_type", "Standard_D2_v4") + type = "VirtualMachineScaleSets" + max_pods = lookup(local.system_np, "max_pods", 30) + os_disk_size_gb = lookup(local.system_np, "os_disk_size_gb", 50) + enable_auto_scaling = lookup(local.system_np, "enable_auto_scaling", false) + only_critical_addons_enabled = lookup(local.system_np, "enable_critical_addons", true) + enable_node_public_ip = lookup(local.system_np, "enable_node_public_ip", false) + vnet_subnet_id = var.k8s_subnets[0] + zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] + orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version + temporary_name_for_rotation = "tmpdefaultnp" + + dynamic "upgrade_settings" { + for_each = local.enable_auto_upgrade ? [1] : [] + content { + max_surge = local.max_surge + } + } + } + + network_profile { + network_plugin = "azure" + } + + linux_profile { + admin_username = "azureuser" + ssh_key { + key_data = replace(tls_private_key.ssh.public_key_openssh, "\n", "") + } + } + + dynamic "maintenance_window" { + for_each = local.maintenance_window_config.is_disabled == false ? 
[1] : [] + content { + allowed { + day = lookup(local.day_abbreviation_to_full_name, local.maintenance_window_config.day_of_week, "Sunday") + hours = local.maintenance_window_hours + } + } + } + + tags = local.tags + + lifecycle { + prevent_destroy = true + ignore_changes = [ + sku_tier, + role_based_access_control_enabled, + name, + dns_prefix, + node_resource_group, + public_network_access_enabled, + image_cleaner_enabled, + image_cleaner_interval_hours, + private_cluster_public_fqdn_enabled, + run_command_enabled, + workload_identity_enabled, + network_profile, + auto_scaler_profile, + identity, + default_node_pool + ] + } +} + +# Generate unique suffix for node pools +locals { + np_unique_seed = "${local.default_np}-${lookup(local.default_np, "root_disk_volume", 100)}" + np_unique_seed_md5 = md5(local.np_unique_seed) + np_suffix = substr(local.np_unique_seed_md5, 0, 3) +} + +# On-demand node pool +resource "azurerm_kubernetes_cluster_node_pool" "ondemand_node_pool" { + count = lookup(local.default_np, "node_lifecycle_type", "SPOT") == "ON_DEMAND" && local.enable_default_nodepool ? 1 : 0 + name = "ondemand${local.np_suffix}" + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id + vm_size = local.default_np.instance_type + node_count = 1 + enable_auto_scaling = true + mode = "System" + max_pods = 50 + min_count = 1 + max_count = lookup(local.default_np, "max_nodes", 200) + os_disk_size_gb = lookup(local.default_np, "root_disk_volume", 100) + os_disk_type = lookup(local.default_np, "azure_disk_type", "Managed") == "Ephemeral" ? "Ephemeral" : null + enable_node_public_ip = false + vnet_subnet_id = var.private_subnets[2] + zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] + tags = local.tags + + dynamic "upgrade_settings" { + for_each = local.enable_auto_upgrade ? 
[1] : [] + content { + max_surge = local.max_surge + } + } + + lifecycle { + ignore_changes = [node_count, zones, orchestrator_version, name] + prevent_destroy = true + } +} + +# Spot node pool +resource "azurerm_kubernetes_cluster_node_pool" "spot_node_pool" { + count = lookup(local.default_np, "node_lifecycle_type", "SPOT") == "SPOT" && local.enable_default_nodepool ? 1 : 0 + name = "spot${local.np_suffix}" + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id + vm_size = local.default_np.instance_type + node_count = 1 + priority = "Spot" + spot_max_price = -1 + eviction_policy = "Delete" + enable_auto_scaling = true + min_count = 1 + max_count = lookup(local.default_np, "max_nodes", 200) + mode = "User" + max_pods = 50 + os_disk_size_gb = lookup(local.default_np, "root_disk_volume", 100) + os_disk_type = lookup(local.default_np, "azure_disk_type", "Managed") == "Ephemeral" ? "Ephemeral" : null + enable_node_public_ip = false + vnet_subnet_id = var.private_subnets[0] + zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] + node_taints = ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"] + tags = local.tags + + node_labels = { + "kubernetes.azure.com/scalesetpriority" = "spot" + "ccLifecycle" = "spot" + } + + lifecycle { + ignore_changes = [node_count, zones, orchestrator_version, name, ultra_ssd_enabled, scale_down_mode] + prevent_destroy = true + } +} + +# Facets dedicated node pool +resource "azurerm_kubernetes_cluster_node_pool" "facets_dedicated_np" { + count = lookup(local.facets_dedicated_np, "enable", "true") ? 1 : 0 + name = "facets" + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id + vm_size = lookup(local.facets_dedicated_np, "instance_type", "standard_D4as_v5") + node_count = 1 + priority = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? "Spot" : "Regular" + spot_max_price = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? 
-1 : null + eviction_policy = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? "Delete" : null + enable_auto_scaling = true + min_count = 1 + max_count = lookup(local.facets_dedicated_np, "max_nodes", 200) + mode = "User" + max_pods = 50 + os_disk_size_gb = lookup(local.facets_dedicated_np, "root_disk_volume", 100) + enable_node_public_ip = false + vnet_subnet_id = var.private_subnets[1] + zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] + orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version + node_taints = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule", "facets.cloud/dedicated=true:NoSchedule"] : ["facets.cloud/dedicated=true:NoSchedule"] + tags = local.tags + + node_labels = merge({ + facets-node-type = "facets-dedicated" + }, lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? { + "kubernetes.azure.com/scalesetpriority" = "spot" + ccLifecycle = "spot" + } : {}) + + dynamic "upgrade_settings" { + for_each = local.enable_auto_upgrade && var.settings.FACETS_DEDICATED_NODE_LIFECYCLE_TYPE != "SPOT" ? 
[1] : [] + content { + max_surge = local.max_surge + } + } + + lifecycle { + ignore_changes = [node_count, zones, orchestrator_version, ultra_ssd_enabled] + prevent_destroy = true + } +} + +# Storage account +data "http" "whatismyip" { + url = "http://ipv4.icanhazip.com" +} + +resource "azurerm_storage_account" "storageacct" { + name = "${substr(replace(var.cluster.name, "-", ""), 0, 24 - 10)}${var.cluster.clusterCode}" + resource_group_name = var.resource_group_name + location = var.region + account_tier = "Standard" + account_replication_type = "LRS" + account_kind = "StorageV2" + access_tier = "Hot" + min_tls_version = "TLS1_2" + + network_rules { + default_action = "Deny" + ip_rules = ["${chomp(data.http.whatismyip.body)}"] + virtual_network_subnet_ids = concat(var.private_subnets, var.public_subnets) + } + + blob_properties { + last_access_time_enabled = lookup(local.spec, "storage_account_last_access_time_enabled", true) + } + + tags = local.tags + + lifecycle { + ignore_changes = [ + name, + nfsv3_enabled, + infrastructure_encryption_enabled, + queue_encryption_key_type, + table_encryption_key_type, + cross_tenant_replication_enabled, + default_to_oauth_authentication, + public_network_access_enabled, + sftp_enabled, + shared_access_key_enabled, + allow_nested_items_to_be_public + ] + } +} + +# Role assignments +resource "azurerm_role_assignment" "cluster_identity_role_assignment" { + scope = var.resource_group_name + role_definition_name = "Contributor" + principal_id = azurerm_kubernetes_cluster.aks_cluster.identity[0].principal_id +} + +# These providers need to be force replaced with empty object blocks to prevent Terraform from using default providers +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + } + kubernetes = { + source = "hashicorp/kubernetes" + } + helm = { + source = "hashicorp/helm" + } + null = { + source = "hashicorp/null" + } + tls = { + source = "hashicorp/tls" + } + local = { + source = "hashicorp/local" 
+ } + http = { + source = "hashicorp/http" + } + } +} + +provider "kubernetes" { + alias = "k8s" + host = azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host + client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) +} + +provider "helm" { + alias = "k8s" + kubernetes { + host = azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host + client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) + } +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf new file mode 100644 index 000000000..75ee76428 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf @@ -0,0 +1,41 @@ +output "k8s_details" { + value = { + auth = { + host = azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host + client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) + token = try(kubernetes_secret_v1.capillary-cloud-admin-token.data["token"], "na") + } + az_storage_account = azurerm_storage_account.storageacct.name + az_storage_account_id = azurerm_storage_account.storageacct.id + az_storage_account_key = azurerm_storage_account.storageacct.primary_access_key + + 
#registry_secrets = local.secret_list + #registry_secret_objects = [for i in local.secret_list : { name : i }] + node_group_iam_role_arn = "na" + cluster_id = var.cluster.name + + # stub_prometheus_dep = helm_release.prometheus-operator + #stub_ecr_token_refresh = null_resource.wait-for-ecr-token-patch + principalId = azurerm_kubernetes_cluster.aks_cluster.kubelet_identity[0].object_id + priority-class = kubernetes_priority_class.facets-critical.metadata[0].name + } +} +output "registry_secret_objects" { + value = module.ecr-token-registry.registry_secret_objects +} + +output "node_resource_group" { + value = azurerm_kubernetes_cluster.aks_cluster.node_resource_group +} + +output "aks_cluster_id" { + value = azurerm_kubernetes_cluster.aks_cluster.id +} + +output "cluster_auto_upgrade" { + value = { + max_surge = local.max_surge + } // "" for spot nodes +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf new file mode 100644 index 000000000..242ea6173 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf @@ -0,0 +1,65 @@ +variable "instance" { + description = "A Kubernetes EKS cluster module with auto mode enabled by default and all necessary configurations preset." + type = any +} + +variable "cluster" { + description = "cluster object configuration" + type = any + default = {} +} + +variable "cc_metadata" { + description = "cc_metadata object configuration" + type = any +} + +variable "instance_name" { + description = "The architectural name for the resource as added in the Facets blueprint designer." + type = string + +} + +variable "environment" { + description = "An object containing details about the environment." + type = object({ + name = string + unique_name = string + cloud_tags = map(string) + }) +} + +variable "vpc_id" { + description = "The VPC ID for the cluster." 
+ type = string +} + +variable "k8s_subnets" { + description = "The subnets for the cluster." + type = list(string) +} + +variable "region" { + description = "The AZURE region where the cluster is deployed." + type = string +} + +variable "resource_group_name" { + description = "value for the resource group name where the cluster is deployed." + type = string +} + +variable "azs" { + description = "The availability zones for the cluster." + type = list(string) +} + +variable "private_subnets" { + description = "value for the private subnets where the cluster is deployed." + type = list(string) +} + +variable "public_subnets" { + description = "value for the public subnets where the cluster is deployed." + type = list(string) +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf new file mode 100644 index 000000000..4a30b6d77 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + azurerm = { + source = "hashicorp/azurerm3" + } + helm = { + source = "hashicorp/helm" + } + http = { + source = "hashicorp/http" + } + kubernetes = { + source = "hashicorp/kubernetes" + } + local = { + source = "hashicorp/local" + } + tls = { + source = "hashicorp/tls" + } + } + required_version = ">= 0.13" +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf new file mode 100644 index 000000000..64324ae48 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf @@ -0,0 +1,90 @@ +locals { + name = module.name.name + metadata = lookup(var.instance, "metadata", {}) + spec = lookup(var.instance, "spec", {}) + cluster = lookup(local.spec, "cluster", {}) + node_pools = lookup(local.spec, "node_pools", {}) + default_node_pool = lookup(local.node_pools, "default", {}) + 
dedicated_node_pool = lookup(local.node_pools, "dedicated", {}) + default_reclaim_policy = lookup(local.cluster, "default_reclaim_policy", "Delete") + namespace = lookup(local.metadata, "namespace", "default") + user_supplied_helm_values = lookup(local.secret_copier, "values", {}) + secret_copier = lookup(local.spec, "secret-copier", {}) + + facets_default_node_pool = { + name = "default-node-pool" + node_class_name = "default" + labels = { + "managed-by" = "facets" + facets-node-type = "facets-default" + } + } + + facets_dedicated_node_pool = { + name = "dedicated-node-pool" + node_class_name = "default" + labels = { + managed-by = "facets" + facets-node-type = "facets-dedicated" + } + } + + cloud_tags = { + facetscontrolplane = split(".", var.cc_metadata.cc_host)[0] + cluster = var.cluster.name + facetsclustername = var.cluster.name + facetsclusterid = var.cluster.id + } + + # Storage class data for AKS + storage_class_data = { + apiVersion = "storage.k8s.io/v1" + kind = "StorageClass" + metadata = { + name = "aks-default-sc" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + provisioner = "disk.csi.azure.com" + parameters = { + storageaccounttype = "Premium_LRS" + kind = "Managed" + cachingmode = "ReadOnly" + } + allowVolumeExpansion = true + reclaimPolicy = local.default_reclaim_policy + volumeBindingMode = "Immediate" + } + + default_node_pool_data = { + apiVersion = "v1" + kind = "Node" + metadata = { + name = local.facets_default_node_pool.name + labels = local.facets_default_node_pool.labels + } + spec = { + # AKS node configuration would be handled by the cluster itself + # This is more for reference and labeling + } + } + + dedicated_node_pool_data = { + apiVersion = "v1" + kind = "Node" + metadata = { + name = local.facets_dedicated_node_pool.name + labels = local.facets_dedicated_node_pool.labels + } + spec = { + taints = [ + { + key = "facets.cloud/dedicated" + value = "true" + effect = "NoSchedule" + } + ] + } + } +} 
\ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf new file mode 100644 index 000000000..8e1a397d0 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -0,0 +1,138 @@ +module "name" { + source = "github.com/Facets-cloud/facets-utility-modules//name" + environment = var.environment + limit = 32 + resource_name = var.instance_name + resource_type = "kubernetes_cluster" + globally_unique = true +} + +module "k8s_cluster" { + source = "./k8s_cluster" + instance = var.instance + vpc_id = var.inputs.network_details.attributes.vpc_id + cc_metadata = var.cc_metadata + environment = var.environment + cluster = var.cluster + k8s_subnets = var.inputs.network_details.attributes.private_subnet_ids + instance_name = var.instance_name + region = var.inputs.network_details.attributes.region + azs = var.inputs.network_details.attributes.availability_zones + resource_group_name = var.inputs.network_details.attributes.resource_group_name + public_subnets = var.inputs.network_details.attributes.public_subnet_ids + private_subnets = var.inputs.network_details.attributes.private_subnet_ids + +} + +# Storage class for AKS +module "storage_class" { + depends_on = [module.k8s_cluster] + source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" + name = "aks-storage-class" + namespace = var.environment.namespace + release_name = "${local.name}-fc-storage-class" + data = local.storage_class_data + advanced_config = {} +} + +# Default node pool reference (for compatibility) +module "default_node_pool" { + depends_on = [module.k8s_cluster] + count = lookup(local.default_node_pool, "enabled", true) ? 
1 : 0 + source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" + name = "${local.name}-fc-default-np" + namespace = var.environment.namespace + release_name = "${local.name}-fc-default-np" + data = local.default_node_pool_data + advanced_config = {} +} + +# Dedicated node pool reference (for compatibility) +module "dedicated_node_pool" { + depends_on = [module.k8s_cluster] + count = lookup(local.dedicated_node_pool, "enabled", false) ? 1 : 0 + source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" + name = "${local.name}-fc-dedicated-np" + namespace = var.environment.namespace + release_name = "${local.name}-fc-dedicated-np" + data = local.dedicated_node_pool_data + advanced_config = {} +} + +provider "kubernetes" { + host = module.k8s_cluster.k8s_details.cluster.auth.host + client_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + client_key = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + cluster_ca_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) +} + +provider "helm" { + kubernetes { + host = module.k8s_cluster.k8s_details.cluster.auth.host + client_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + client_key = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + cluster_ca_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + } +} + +# Secret copier helm release +resource "helm_release" "secret-copier" { + depends_on = [module.k8s_cluster] + count = lookup(local.secret_copier, "disabled", false) ? 
0 : 1 + chart = lookup(local.secret_copier, "chart", "secret-copier") + namespace = lookup(local.secret_copier, "namespace", local.namespace) + name = lookup(local.secret_copier, "name", "facets-secret-copier") + repository = lookup(local.secret_copier, "repository", "https://facets-cloud.github.io/helm-charts") + version = lookup(local.secret_copier, "version", "1.0.2") + + values = [ + yamlencode( + { + resources = { + requests = { + cpu = "50m" + memory = "256Mi" + } + limits = { + cpu = "300m" + memory = "1000Mi" + } + } + } + ), + yamlencode(local.user_supplied_helm_values) + ] +} + +# Cluster overprovisioner for better resource management +resource "helm_release" "cluster-overprovisioner" { + depends_on = [module.k8s_cluster] + name = "${local.name}-cluster-overprovisioner" + repository = "https://charts.deliveryhero.io/" + chart = "cluster-overprovisioner" + version = "0.7.10" + wait = false + cleanup_on_fail = true + + values = [ + < Date: Tue, 1 Jul 2025 21:43:55 +0530 Subject: [PATCH 04/36] removed outputs for np --- modules/kubernetes_cluster/azure_aks/0.2/facets.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 8b0d9a126..878425adf 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -477,14 +477,6 @@ outputs: host: attributes.cluster.auth.host cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate token: attributes.cluster.auth.token - default_node_pool: - type: '@outputs/azure_aks_nodepool' - title: Default Node Pool - description: The default node pool for Azure AKS - dedicated_node_pool: - type: '@outputs/azure_aks_nodepool' - title: Dedicated Node Pool - description: The dedicated node pool for Azure AKS sample: kind: kubernetes_cluster flavor: azure_aks From 6418217abb33232ab9a73d4d2da45f8ca6bc8683 Mon Sep 17 00:00:00 2001 From: 
ShrinidhiFCTS23 Date: Thu, 3 Jul 2025 16:53:24 +0530 Subject: [PATCH 05/36] updated the cluster and network module --- .../azure_aks/0.2/facets.yaml | 25 +++++------ .../azure_aks/0.2/k8s_cluster/main.tf | 12 +++--- .../azure_aks/0.2/k8s_cluster/outputs.tf | 12 +++++- .../azure_aks/0.2/k8s_cluster/variables.tf | 7 +++ .../azure_aks/0.2/k8s_cluster/versions.tf | 26 ----------- .../azure_aks/0.2/locals.tf | 24 +++++------ .../kubernetes_cluster/azure_aks/0.2/main.tf | 28 ++++++------ .../azure_aks/0.2/outputs.tf | 4 +- .../network/azure_network/1.0/facets.yaml | 13 +++++- .../network/azure_network/1.0/main.tf | 43 +++++++++++-------- .../network/azure_network/1.0/variables.tf | 18 ++------ 11 files changed, 107 insertions(+), 105 deletions(-) delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 878425adf..9a995082f 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -1,13 +1,13 @@ intent: kubernetes_cluster -flavor: azure_aks +flavor: azure_aks_cluster alias-flavors: - default -version: '0.3' +version: '0.2' clouds: - azure title: AKS Cluster with Auto-Upgrade Support -description: A Kubernetes AKS cluster module with auto-upgrade enabled by default and - all necessary configurations preset. +description: A Kubernetes AKS cluster module with auto-upgrade enabled by default + and all necessary configurations preset. allow_skipping_module_on_selective_release: false spec: type: object @@ -62,16 +62,16 @@ spec: cluster_endpoint_public_access_cidrs: type: array title: Cluster Endpoint Public Access CIDRs - description: List of CIDR blocks which can access the AKS public - API server endpoint. + description: List of CIDR blocks which can access the AKS public API server + endpoint. 
default: - 0.0.0.0/0 x-ui-overrides-only: true cluster_endpoint_private_access_cidrs: type: array title: Cluster Endpoint Private Access CIDRs - description: List of CIDR blocks which can access the AKS private - API server endpoint. + description: List of CIDR blocks which can access the AKS private API server + endpoint. default: [] x-ui-overrides-only: true cluster_enabled_log_types: @@ -143,7 +143,8 @@ spec: max_surge: type: string title: Max Surge - description: Maximum number of nodes that can be added during upgrade (number or percentage). + description: Maximum number of nodes that can be added during upgrade (number + or percentage). default: '1' pattern: ^([0-9]+%?|[0-9]+)$ x-ui-error-message: Max surge must be a number or percentage (e.g., 1, 33%) @@ -479,10 +480,10 @@ outputs: token: attributes.cluster.auth.token sample: kind: kubernetes_cluster - flavor: azure_aks + flavor: azure_aks_cluster alias-flavors: - default - version: '0.3' + version: '0.2' metadata: name: aks-cluster spec: @@ -531,4 +532,4 @@ sample: node_lifecycle_type: SPOT max_nodes: 200 root_disk_volume: 100 - tags: {} \ No newline at end of file + tags: {} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf index 1d4c8a869..4ef473046 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf @@ -94,7 +94,7 @@ resource "azurerm_kubernetes_cluster" "aks_cluster" { name, dns_prefix, node_resource_group, - public_network_access_enabled, + image_cleaner_enabled, image_cleaner_interval_hours, private_cluster_public_fqdn_enabled, @@ -258,12 +258,12 @@ resource "azurerm_storage_account" "storageacct" { infrastructure_encryption_enabled, queue_encryption_key_type, table_encryption_key_type, - cross_tenant_replication_enabled, - default_to_oauth_authentication, - public_network_access_enabled, - sftp_enabled, + + + + 
shared_access_key_enabled, - allow_nested_items_to_be_public + ] } } diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf index 75ee76428..6c7ee445b 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf @@ -23,7 +23,17 @@ output "k8s_details" { } } output "registry_secret_objects" { - value = module.ecr-token-registry.registry_secret_objects + value = [] +} + +output "legacy_outputs" { + description = "Legacy outputs for backward compatibility" + value = { + cluster_name = azurerm_kubernetes_cluster.k8s.name + cluster_endpoint = azurerm_kubernetes_cluster.k8s.kube_config.0.host + cluster_ca_certificate = azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate + node_resource_group = azurerm_kubernetes_cluster.k8s.node_resource_group + } } output "node_resource_group" { diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf index 242ea6173..2b959f613 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf @@ -1,3 +1,10 @@ +variable "vpc_details" { + description = "VPC details including region and other network information" + type = object({ + region = string + }) +} + variable "instance" { description = "A Kubernetes EKS cluster module with auto mode enabled by default and all necessary configurations preset." 
type = any diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf deleted file mode 100644 index 4a30b6d77..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/versions.tf +++ /dev/null @@ -1,26 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - azurerm = { - source = "hashicorp/azurerm3" - } - helm = { - source = "hashicorp/helm" - } - http = { - source = "hashicorp/http" - } - kubernetes = { - source = "hashicorp/kubernetes" - } - local = { - source = "hashicorp/local" - } - tls = { - source = "hashicorp/tls" - } - } - required_version = ">= 0.13" -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf index 64324ae48..91658ce15 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf @@ -10,7 +10,7 @@ locals { namespace = lookup(local.metadata, "namespace", "default") user_supplied_helm_values = lookup(local.secret_copier, "values", {}) secret_copier = lookup(local.spec, "secret-copier", {}) - + facets_default_node_pool = { name = "default-node-pool" node_class_name = "default" @@ -19,7 +19,7 @@ locals { facets-node-type = "facets-default" } } - + facets_dedicated_node_pool = { name = "dedicated-node-pool" node_class_name = "default" @@ -28,14 +28,14 @@ locals { facets-node-type = "facets-dedicated" } } - + cloud_tags = { facetscontrolplane = split(".", var.cc_metadata.cc_host)[0] cluster = var.cluster.name facetsclustername = var.cluster.name facetsclusterid = var.cluster.id } - + # Storage class data for AKS storage_class_data = { apiVersion = "storage.k8s.io/v1" @@ -49,19 +49,19 @@ locals { provisioner = "disk.csi.azure.com" parameters = { storageaccounttype = "Premium_LRS" - kind = "Managed" - cachingmode = "ReadOnly" + kind = "Managed" + cachingmode = "ReadOnly" } allowVolumeExpansion = true 
- reclaimPolicy = local.default_reclaim_policy - volumeBindingMode = "Immediate" + reclaimPolicy = local.default_reclaim_policy + volumeBindingMode = "Immediate" } - + default_node_pool_data = { apiVersion = "v1" kind = "Node" metadata = { - name = local.facets_default_node_pool.name + name = local.facets_default_node_pool.name labels = local.facets_default_node_pool.labels } spec = { @@ -69,12 +69,12 @@ locals { # This is more for reference and labeling } } - + dedicated_node_pool_data = { apiVersion = "v1" kind = "Node" metadata = { - name = local.facets_dedicated_node_pool.name + name = local.facets_dedicated_node_pool.name labels = local.facets_dedicated_node_pool.labels } spec = { diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index 8e1a397d0..7a6c88683 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -8,19 +8,19 @@ module "name" { } module "k8s_cluster" { - source = "./k8s_cluster" - instance = var.instance - vpc_id = var.inputs.network_details.attributes.vpc_id - cc_metadata = var.cc_metadata - environment = var.environment - cluster = var.cluster - k8s_subnets = var.inputs.network_details.attributes.private_subnet_ids - instance_name = var.instance_name - region = var.inputs.network_details.attributes.region - azs = var.inputs.network_details.attributes.availability_zones + source = "./k8s_cluster" + instance = var.instance + vpc_id = var.inputs.network_details.attributes.vpc_id + cc_metadata = var.cc_metadata + environment = var.environment + cluster = var.cluster + k8s_subnets = var.inputs.network_details.attributes.private_subnet_ids + instance_name = var.instance_name + region = var.inputs.network_details.attributes.region + azs = var.inputs.network_details.attributes.availability_zones resource_group_name = var.inputs.network_details.attributes.resource_group_name - public_subnets = 
var.inputs.network_details.attributes.public_subnet_ids - private_subnets = var.inputs.network_details.attributes.private_subnet_ids + public_subnets = var.inputs.network_details.attributes.public_subnet_ids + private_subnets = var.inputs.network_details.attributes.private_subnet_ids } @@ -84,7 +84,7 @@ resource "helm_release" "secret-copier" { name = lookup(local.secret_copier, "name", "facets-secret-copier") repository = lookup(local.secret_copier, "repository", "https://facets-cloud.github.io/helm-charts") version = lookup(local.secret_copier, "version", "1.0.2") - + values = [ yamlencode( { @@ -113,7 +113,7 @@ resource "helm_release" "cluster-overprovisioner" { version = "0.7.10" wait = false cleanup_on_fail = true - + values = [ < lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true + } # Calculate subnet mask from IP count subnet_mask_map = { "256" = 24 # /24 = 256 IPs @@ -479,7 +496,7 @@ resource "azurerm_route_table" "private" { for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { for az in var.instance.spec.availability_zones : az => az } : var.instance.spec.public_subnets.count_per_az > 0 ? { - single = "single" + single = "1" } : {} name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-private-rt-${each.key}" : "${local.name_prefix}-private-rt" @@ -494,7 +511,7 @@ resource "azurerm_subnet_route_table_association" "private" { for_each = azurerm_subnet.private subnet_id = each.value.id - route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id + route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["1"].id } # Route Table for Database Subnets (isolated) @@ -526,7 +543,7 @@ resource "azurerm_subnet_nat_gateway_association" "private" { } subnet_id = each.value.id - nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id + nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["1"].id } # Associate NAT Gateway with Functions Subnets @@ -537,7 +554,7 @@ resource "azurerm_subnet_nat_gateway_association" "functions" { } subnet_id = each.value.id - nat_gateway_id = azurerm_nat_gateway.main["single"].id # Functions typically use single NAT Gateway + nat_gateway_id = azurerm_nat_gateway.main["1"].id # Functions typically use single NAT Gateway } # Network Security Group - Allow all within VNet (similar to original logic) @@ -667,21 +684,13 @@ resource "azurerm_subnet_network_security_group_association" "private_link_servi # Private DNS Zone for Private Endpoints resource "azurerm_private_dns_zone" "private_endpoints" { for_each = { - storage = try(local.private_endpoints.enable_storage, false) ? "privatelink.blob.core.windows.net" : null - sql = try(local.private_endpoints.enable_sql, false) ? "privatelink.database.windows.net" : null - keyvault = try(local.private_endpoints.enable_keyvault, false) ? "privatelink.vaultcore.azure.net" : null - acr = try(local.private_endpoints.enable_acr, false) ? "privatelink.azurecr.io" : null - cosmos = try(local.private_endpoints.enable_cosmos, false) ? "privatelink.documents.azure.com" : null - servicebus = try(local.private_endpoints.enable_servicebus, false) ? "privatelink.servicebus.windows.net" : null - eventhub = try(local.private_endpoints.enable_eventhub, false) ? 
"privatelink.servicebus.windows.net" : null - monitor = try(local.private_endpoints.enable_monitor, false) ? "privatelink.monitor.azure.com" : null - cognitive = try(local.private_endpoints.enable_cognitive, false) ? "privatelink.cognitiveservices.azure.com" : null + for k, v in var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true } name = each.value resource_group_name = azurerm_resource_group.main.name - tags = local.common_tags + tags = var.instance.spec.tags } # Link Private DNS Zone to VNet @@ -701,14 +710,14 @@ resource "azurerm_private_dns_zone_virtual_network_link" "private_endpoints" { resource "azurerm_storage_account" "example" { count = try(local.private_endpoints.enable_storage, false) ? 1 : 0 - name = "${replace(local.name_prefix, "-", "")}stor" + name = substr(replace(replace(lower(local.name_prefix), "-", ""), "_", ""), 0, 20) resource_group_name = azurerm_resource_group.main.name location = azurerm_resource_group.main.location account_tier = "Standard" account_replication_type = "LRS" # Disable public access - public_network_access_enabled = false + tags = local.common_tags } @@ -731,7 +740,7 @@ resource "azurerm_private_endpoint" "storage" { private_dns_zone_group { name = "storage-dns-zone-group" - private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["storage"].id] + private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["enable_storage"].id] } tags = local.common_tags diff --git a/modules/network/azure_vpc/network/azure_network/1.0/variables.tf b/modules/network/azure_vpc/network/azure_network/1.0/variables.tf index c508f14ab..8a299b587 100644 --- a/modules/network/azure_vpc/network/azure_network/1.0/variables.tf +++ b/modules/network/azure_vpc/network/azure_network/1.0/variables.tf @@ -1,12 +1,6 @@ variable "instance" { - description = "This is Network Module for Azure" - type = object({ - kind = string - flavor = string - version = string - spec = 
object({ - }) - }) + description = "The resource instance" + type = any } variable "instance_name" { description = "The architectural name for the resource as added in the Facets blueprint designer." @@ -14,13 +8,9 @@ variable "instance_name" { } variable "environment" { description = "An object containing details about the environment." - type = object({ - name = string - unique_name = string - }) + type = any } variable "inputs" { description = "A map of inputs requested by the module developer." - type = object({ - }) + type = any } \ No newline at end of file From a7057e5fa25b8f8c262ddf3b8dec0d8bdbad1a93 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 22 Jul 2025 17:17:48 +0530 Subject: [PATCH 06/36] refactored module --- .../azure_vpc/{network/azure_network/1.0 => 0.2}/facets.yaml | 0 .../network/azure_vpc/{network/azure_network/1.0 => 0.2}/main.tf | 0 .../azure_vpc/{network/azure_network/1.0 => 0.2}/outputs.tf | 0 .../azure_vpc/{network/azure_network/1.0 => 0.2}/variables.tf | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename modules/network/azure_vpc/{network/azure_network/1.0 => 0.2}/facets.yaml (100%) rename modules/network/azure_vpc/{network/azure_network/1.0 => 0.2}/main.tf (100%) rename modules/network/azure_vpc/{network/azure_network/1.0 => 0.2}/outputs.tf (100%) rename modules/network/azure_vpc/{network/azure_network/1.0 => 0.2}/variables.tf (100%) diff --git a/modules/network/azure_vpc/network/azure_network/1.0/facets.yaml b/modules/network/azure_vpc/0.2/facets.yaml similarity index 100% rename from modules/network/azure_vpc/network/azure_network/1.0/facets.yaml rename to modules/network/azure_vpc/0.2/facets.yaml diff --git a/modules/network/azure_vpc/network/azure_network/1.0/main.tf b/modules/network/azure_vpc/0.2/main.tf similarity index 100% rename from modules/network/azure_vpc/network/azure_network/1.0/main.tf rename to modules/network/azure_vpc/0.2/main.tf diff --git 
a/modules/network/azure_vpc/network/azure_network/1.0/outputs.tf b/modules/network/azure_vpc/0.2/outputs.tf similarity index 100% rename from modules/network/azure_vpc/network/azure_network/1.0/outputs.tf rename to modules/network/azure_vpc/0.2/outputs.tf diff --git a/modules/network/azure_vpc/network/azure_network/1.0/variables.tf b/modules/network/azure_vpc/0.2/variables.tf similarity index 100% rename from modules/network/azure_vpc/network/azure_network/1.0/variables.tf rename to modules/network/azure_vpc/0.2/variables.tf From d99f7a5fe5258912e40af0c36d76e75e58c2745f Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 22 Jul 2025 17:24:10 +0530 Subject: [PATCH 07/36] fixed the nat_gateway_id --- modules/network/azure_vpc/0.2/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/network/azure_vpc/0.2/main.tf b/modules/network/azure_vpc/0.2/main.tf index 21661bd29..20708d0b8 100644 --- a/modules/network/azure_vpc/0.2/main.tf +++ b/modules/network/azure_vpc/0.2/main.tf @@ -543,7 +543,7 @@ resource "azurerm_subnet_nat_gateway_association" "private" { } subnet_id = each.value.id - nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["1"].id + nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id } # Associate NAT Gateway with Functions Subnets From 9415a951b93cbb6c6d81cf5210a6923fde85d602 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Wed, 23 Jul 2025 15:40:48 +0530 Subject: [PATCH 08/36] azure network fixes --- modules/network/azure_vpc/0.2/facets.yaml | 4 ++-- modules/network/azure_vpc/0.2/main.tf | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/network/azure_vpc/0.2/facets.yaml b/modules/network/azure_vpc/0.2/facets.yaml index 129332a39..7ddffcb0d 100644 --- a/modules/network/azure_vpc/0.2/facets.yaml +++ b/modules/network/azure_vpc/0.2/facets.yaml @@ -1,6 +1,6 @@ intent: network flavor: azure_network -version: '1.0' +version: '0.2' description: Creates an Azure Virtual Network with configurable public subnets, private subnets, database subnets, and specialized subnets across availability zones clouds: @@ -243,7 +243,7 @@ outputs: sample: kind: network flavor: azure_network - version: '1.0' + version: '0.2' spec: vnet_cidr: 10.0.0.0/16 region: centralindia diff --git a/modules/network/azure_vpc/0.2/main.tf b/modules/network/azure_vpc/0.2/main.tf index 20708d0b8..bf35679bc 100644 --- a/modules/network/azure_vpc/0.2/main.tf +++ b/modules/network/azure_vpc/0.2/main.tf @@ -433,7 +433,7 @@ resource "azurerm_public_ip" "nat_gateway" { resource_group_name = azurerm_resource_group.main.name allocation_method = "Static" sku = "Standard" - zones = [each.key] + zones = [each.value] tags = local.common_tags @@ -455,7 +455,7 @@ resource "azurerm_nat_gateway" "main" { resource_group_name = azurerm_resource_group.main.name sku_name = "Standard" idle_timeout_in_minutes = 10 - zones = [each.key] + zones = [each.value] tags = local.common_tags @@ -511,7 +511,7 @@ resource "azurerm_subnet_route_table_association" "private" { for_each = azurerm_subnet.private subnet_id = each.value.id - route_table_id = var.instance.spec.nat_gateway.strategy == 
"per_az" ? azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["1"].id + route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id } # Route Table for Database Subnets (isolated) From df115b79bb2d8ad41680e4937689da0068775e4b Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:21:08 +0530 Subject: [PATCH 09/36] k8scluster module --- .../azure_aks/0.2/README.md | 53 +++ .../azure_aks/0.2/facets.yaml | 170 +--------- .../azure_aks/0.2/k8s_cluster/locals.tf | 75 ---- .../azure_aks/0.2/k8s_cluster/main.tf | 321 ------------------ .../azure_aks/0.2/k8s_cluster/outputs.tf | 51 --- .../azure_aks/0.2/k8s_cluster/variables.tf | 72 ---- .../azure_aks/0.2/locals.tf | 127 +++---- .../kubernetes_cluster/azure_aks/0.2/main.tf | 241 ++++++------- .../azure_aks/0.2/outputs.tf | 94 ++--- .../azure_aks/0.2/variables.tf | 186 +++++++++- 10 files changed, 440 insertions(+), 950 deletions(-) delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/README.md b/modules/kubernetes_cluster/azure_aks/0.2/README.md index e69de29bb..ac5107e02 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/README.md +++ b/modules/kubernetes_cluster/azure_aks/0.2/README.md @@ -0,0 +1,53 @@ +# Azure AKS Cluster Module v0.2 + +![Azure](https://img.shields.io/badge/cloud-azure-blue.svg) +![Terraform](https://img.shields.io/badge/terraform-1.5.7-623CE4.svg) + +## Overview + +This Terraform module creates a production-ready Azure Kubernetes Service (AKS) cluster with auto-upgrade capabilities and comprehensive monitoring. 
It uses the official Azure/aks/azurerm module version 10.2.0 to ensure reliability and access to the latest features. + +The module provides a simplified interface for developers while maintaining enterprise-grade security and operational features. + +## Environment as Dimension + +This module is environment-aware and supports different configurations per environment: + +- **Cluster endpoint access controls** can be customized per environment (public/private access, authorized IP ranges) +- **Auto-upgrade settings** including maintenance windows can vary by environment +- **Node pool configurations** can be scaled differently across environments +- **SKU tiers** can be adjusted based on environment requirements (Free for dev, Standard/Premium for production) +- **Tags** are automatically applied with environment-specific values + +## Resources Created + +This module creates the following Azure resources: + +- **AKS Cluster** - Managed Kubernetes cluster with specified version and configuration +- **System Node Pool** - Required node pool for system workloads with auto-scaling capability +- **Managed Identity** - System-assigned identity for cluster authentication +- **Network Configuration** - Integration with existing VNet and subnets +- **Log Analytics Integration** - Optional monitoring and logging setup +- **RBAC Configuration** - Azure AD integration with role-based access control +- **Auto-scaler Profile** - Cluster autoscaler configuration for optimal resource management + +## Security Considerations + +The module implements several security best practices: + +- **Azure AD Integration** - RBAC is enabled with Azure AD for authentication and authorization +- **Private Cluster Support** - Option to create private clusters with no public endpoint exposure +- **Network Policies** - Calico network policies are enabled for pod-to-pod communication control +- **Workload Identity** - Azure AD Workload Identity is enabled for secure pod identity +- **Local Account 
Disabled** - Local cluster accounts are disabled for better security posture +- **Authorized IP Ranges** - Configurable IP allowlists for API server access +- **Azure Policy Integration** - Built-in Azure Policy support for governance and compliance + +## Key Features + +- **Auto-upgrade Support** - Configurable automatic cluster and node upgrades with maintenance windows +- **High Availability** - Multi-zone deployment capability for production workloads +- **Monitoring Ready** - Built-in integration with Azure Monitor and Log Analytics +- **Enterprise Security** - Azure AD RBAC, Workload Identity, and network policies enabled +- **Cost Optimization** - Configurable SKU tiers and auto-scaling for cost management +- **Production Ready** - Based on the official Microsoft-maintained Terraform module diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 9a995082f..6237b7654 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -15,27 +15,9 @@ spec: - cluster - auto_upgrade_settings - node_pools - - secret_copier - features - tags properties: - secret_copier: - type: object - title: Secret Copier - description: Configuration for the Secret Copier. - x-ui-toggle: false - properties: - disabled: - type: boolean - title: Disabled - description: Disable secret copier. - default: false - values: - type: object - title: Values - description: Values to pass to the secret copier. - default: {} - x-ui-yaml-editor: true cluster: type: object title: Cluster @@ -281,132 +263,6 @@ spec: field: spec.node_pools.system_np.enabled values: - true - default: - type: object - title: Default Node Pool - description: Configuration for default application node pool. - x-ui-toggle: false - properties: - enabled: - type: boolean - title: Enabled - description: Enable default node pool. 
- default: true - instance_type: - type: string - title: Instance Type - description: Azure VM size for default nodes. - default: Standard_D4s_v3 - x-ui-visible-if: - field: spec.node_pools.default.enabled - values: - - true - node_lifecycle_type: - type: string - title: Node Lifecycle Type - description: Node lifecycle type. - default: SPOT - enum: - - ON_DEMAND - - SPOT - x-ui-visible-if: - field: spec.node_pools.default.enabled - values: - - true - max_nodes: - type: integer - title: Maximum Nodes - description: Maximum number of nodes in the pool. - default: 200 - minimum: 1 - maximum: 1000 - x-ui-visible-if: - field: spec.node_pools.default.enabled - values: - - true - root_disk_volume: - type: integer - title: Root Disk Volume (GB) - description: Root disk size in GB. - default: 100 - minimum: 30 - maximum: 2048 - x-ui-visible-if: - field: spec.node_pools.default.enabled - values: - - true - azure_disk_type: - type: string - title: Azure Disk Type - description: Azure disk type. - default: Managed - enum: - - Managed - - Ephemeral - x-ui-visible-if: - field: spec.node_pools.default.enabled - values: - - true - required: - - enabled - - node_lifecycle_type - facets_dedicated: - type: object - title: Facets Dedicated Node Pool - description: Configuration for Facets dedicated node pool. - x-ui-toggle: false - properties: - enable: - type: boolean - title: Enable - description: Enable Facets dedicated node pool. - default: true - instance_type: - type: string - title: Instance Type - description: Azure VM size for Facets dedicated nodes. - default: Standard_D4as_v5 - x-ui-visible-if: - field: spec.node_pools.facets_dedicated.enable - values: - - true - node_lifecycle_type: - type: string - title: Node Lifecycle Type - description: Node lifecycle type for Facets nodes. 
- default: SPOT - enum: - - ON_DEMAND - - SPOT - x-ui-visible-if: - field: spec.node_pools.facets_dedicated.enable - values: - - true - max_nodes: - type: integer - title: Maximum Nodes - description: Maximum number of Facets dedicated nodes. - default: 200 - minimum: 1 - maximum: 1000 - x-ui-visible-if: - field: spec.node_pools.facets_dedicated.enable - values: - - true - root_disk_volume: - type: integer - title: Root Disk Volume (GB) - description: Root disk size in GB for Facets nodes. - default: 100 - minimum: 30 - maximum: 2048 - x-ui-visible-if: - field: spec.node_pools.facets_dedicated.enable - values: - - true - required: - - enable - - node_lifecycle_type features: type: object title: Features @@ -504,13 +360,6 @@ sample: day_of_week: SUN start_time: 2 end_time: 6 - secret_copier: - disabled: false - values: - resources: - requests: - cpu: 150m - memory: 256Mi node_pools: system_np: enabled: true @@ -519,17 +368,10 @@ sample: max_pods: 30 os_disk_size_gb: 50 enable_auto_scaling: false - default: - enabled: true - instance_type: Standard_D4s_v3 - node_lifecycle_type: SPOT - max_nodes: 200 - root_disk_volume: 100 - azure_disk_type: Managed - facets_dedicated: - enable: true - instance_type: Standard_D4as_v5 - node_lifecycle_type: SPOT - max_nodes: 200 - root_disk_volume: 100 tags: {} +iac: + validated_files: + - main.tf + - variables.tf + - outputs.tf + - locals.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf deleted file mode 100644 index ae2dd7db9..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/locals.tf +++ /dev/null @@ -1,75 +0,0 @@ -locals { - name = module.name.name - spec = lookup(var.instance, "spec", {}) - cluster = lookup(local.spec, "cluster", {}) - auto_upgrade_settings = lookup(local.spec, "auto_upgrade_settings", {}) - - # Cluster configuration - kubernetes_version = lookup(local.cluster, "kubernetes_version", "1.31") - 
automatic_channel_upgrade = lookup(local.auto_upgrade_settings, "automatic_channel_upgrade", "stable") - enable_auto_upgrade = lookup(local.auto_upgrade_settings, "enable_auto_upgrade", true) - max_surge = lookup(local.auto_upgrade_settings, "max_surge", "1") - - # Access configuration - cluster_endpoint_public_access = lookup(local.cluster, "cluster_endpoint_public_access", true) - cluster_endpoint_private_access = lookup(local.cluster, "cluster_endpoint_private_access", false) - cluster_endpoint_public_access_cidrs = lookup(local.cluster, "cluster_endpoint_public_access_cidrs", ["0.0.0.0/0"]) - cluster_endpoint_private_access_cidrs = lookup(local.cluster, "cluster_endpoint_private_access_cidrs", []) - - # Node pool configuration - node_pools = lookup(local.spec, "node_pools", {}) - system_np = lookup(local.node_pools, "system_np", {}) - default_np = lookup(local.node_pools, "default", {}) - facets_dedicated_np = lookup(local.node_pools, "facets_dedicated", {}) - enable_default_nodepool = lookup(local.system_np, "enabled", true) - - # Maintenance window configuration - maintenance_window = lookup(local.auto_upgrade_settings, "maintenance_window", {}) - maintenance_window_config = { - is_disabled = lookup(local.maintenance_window, "is_disabled", true) - day_of_week = lookup(local.maintenance_window, "day_of_week", "SUN") - start_time = lookup(local.maintenance_window, "start_time", 2) - end_time = lookup(local.maintenance_window, "end_time", 6) - } - - # Day abbreviation mapping - day_abbreviation_to_full_name = { - "SUN" = "Sunday" - "MON" = "Monday" - "TUE" = "Tuesday" - "WED" = "Wednesday" - "THU" = "Thursday" - "FRI" = "Friday" - "SAT" = "Saturday" - } - - # Calculate maintenance window hours - hours = range(0, 24) - maintenance_window_hours = ( - local.maintenance_window_config.start_time <= local.maintenance_window_config.end_time - ? 
slice(local.hours, local.maintenance_window_config.start_time, local.maintenance_window_config.end_time + 1) - : concat( - slice(local.hours, local.maintenance_window_config.start_time, 24), - slice(local.hours, 0, local.maintenance_window_config.end_time + 1) - ) - ) - - # Storage and networking - default_reclaim_policy = lookup(local.cluster, "default_reclaim_policy", "Delete") - cluster_enabled_log_types = lookup(local.cluster, "cluster_enabled_log_types", []) - sku_tier = lookup(local.cluster, "sku_tier", "Free") - - # Resource naming - aks_name = "${substr(var.cluster.name, 0, 45 - 11)}-${var.cluster.clusterCode}" - node_resource_group = "MC_${substr(var.cluster.name, 0, 53)}_${var.cluster.clusterCode}_node_res_grp" - - # Cloud tags - cloud_tags = { - facetscontrolplane = split(".", var.cc_metadata.cc_host)[0] - cluster = var.cluster.name - facetsclustername = var.cluster.name - facetsclusterid = var.cluster.id - } - - tags = merge(var.environment.cloud_tags, lookup(local.spec, "tags", {}), local.cloud_tags) -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf deleted file mode 100644 index 4ef473046..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/main.tf +++ /dev/null @@ -1,321 +0,0 @@ -module "name" { - source = "github.com/Facets-cloud/facets-utility-modules//name" - environment = var.environment - limit = 32 - resource_name = var.instance_name - resource_type = "kubernetes_cluster" - globally_unique = true -} - -data "azurerm_kubernetes_service_versions" "current" { - location = var.vpc_details.region - version_prefix = local.kubernetes_version - include_preview = false -} - -# SSH key for Linux nodes -resource "tls_private_key" "ssh" { - algorithm = "RSA" - rsa_bits = 2048 -} - -resource "local_file" "private_key" { - content = tls_private_key.ssh.private_key_pem - filename = "./private_ssh_key" -} - -# AKS Cluster 
-resource "azurerm_kubernetes_cluster" "aks_cluster" { - name = local.aks_name - location = var.region - resource_group_name = var.resource_group_name - node_resource_group = local.node_resource_group - dns_prefix = local.aks_name - kubernetes_version = local.kubernetes_version - automatic_channel_upgrade = local.enable_auto_upgrade ? local.automatic_channel_upgrade : null - role_based_access_control_enabled = true - sku_tier = local.sku_tier - - identity { - type = "SystemAssigned" - } - - default_node_pool { - name = "defaultnp" - node_count = lookup(local.system_np, "node_count", 1) - vm_size = lookup(local.system_np, "instance_type", "Standard_D2_v4") - type = "VirtualMachineScaleSets" - max_pods = lookup(local.system_np, "max_pods", 30) - os_disk_size_gb = lookup(local.system_np, "os_disk_size_gb", 50) - enable_auto_scaling = lookup(local.system_np, "enable_auto_scaling", false) - only_critical_addons_enabled = lookup(local.system_np, "enable_critical_addons", true) - enable_node_public_ip = lookup(local.system_np, "enable_node_public_ip", false) - vnet_subnet_id = var.k8s_subnets[0] - zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] - orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version - temporary_name_for_rotation = "tmpdefaultnp" - - dynamic "upgrade_settings" { - for_each = local.enable_auto_upgrade ? [1] : [] - content { - max_surge = local.max_surge - } - } - } - - network_profile { - network_plugin = "azure" - } - - linux_profile { - admin_username = "azureuser" - ssh_key { - key_data = replace(tls_private_key.ssh.public_key_openssh, "\n", "") - } - } - - dynamic "maintenance_window" { - for_each = local.maintenance_window_config.is_disabled == false ? 
[1] : [] - content { - allowed { - day = lookup(local.day_abbreviation_to_full_name, local.maintenance_window_config.day_of_week, "Sunday") - hours = local.maintenance_window_hours - } - } - } - - tags = local.tags - - lifecycle { - prevent_destroy = true - ignore_changes = [ - sku_tier, - role_based_access_control_enabled, - name, - dns_prefix, - node_resource_group, - - image_cleaner_enabled, - image_cleaner_interval_hours, - private_cluster_public_fqdn_enabled, - run_command_enabled, - workload_identity_enabled, - network_profile, - auto_scaler_profile, - identity, - default_node_pool - ] - } -} - -# Generate unique suffix for node pools -locals { - np_unique_seed = "${local.default_np}-${lookup(local.default_np, "root_disk_volume", 100)}" - np_unique_seed_md5 = md5(local.np_unique_seed) - np_suffix = substr(local.np_unique_seed_md5, 0, 3) -} - -# On-demand node pool -resource "azurerm_kubernetes_cluster_node_pool" "ondemand_node_pool" { - count = lookup(local.default_np, "node_lifecycle_type", "SPOT") == "ON_DEMAND" && local.enable_default_nodepool ? 1 : 0 - name = "ondemand${local.np_suffix}" - kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id - vm_size = local.default_np.instance_type - node_count = 1 - enable_auto_scaling = true - mode = "System" - max_pods = 50 - min_count = 1 - max_count = lookup(local.default_np, "max_nodes", 200) - os_disk_size_gb = lookup(local.default_np, "root_disk_volume", 100) - os_disk_type = lookup(local.default_np, "azure_disk_type", "Managed") == "Ephemeral" ? "Ephemeral" : null - enable_node_public_ip = false - vnet_subnet_id = var.private_subnets[2] - zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] - tags = local.tags - - dynamic "upgrade_settings" { - for_each = local.enable_auto_upgrade ? 
[1] : [] - content { - max_surge = local.max_surge - } - } - - lifecycle { - ignore_changes = [node_count, zones, orchestrator_version, name] - prevent_destroy = true - } -} - -# Spot node pool -resource "azurerm_kubernetes_cluster_node_pool" "spot_node_pool" { - count = lookup(local.default_np, "node_lifecycle_type", "SPOT") == "SPOT" && local.enable_default_nodepool ? 1 : 0 - name = "spot${local.np_suffix}" - kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id - vm_size = local.default_np.instance_type - node_count = 1 - priority = "Spot" - spot_max_price = -1 - eviction_policy = "Delete" - enable_auto_scaling = true - min_count = 1 - max_count = lookup(local.default_np, "max_nodes", 200) - mode = "User" - max_pods = 50 - os_disk_size_gb = lookup(local.default_np, "root_disk_volume", 100) - os_disk_type = lookup(local.default_np, "azure_disk_type", "Managed") == "Ephemeral" ? "Ephemeral" : null - enable_node_public_ip = false - vnet_subnet_id = var.private_subnets[0] - zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] - node_taints = ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"] - tags = local.tags - - node_labels = { - "kubernetes.azure.com/scalesetpriority" = "spot" - "ccLifecycle" = "spot" - } - - lifecycle { - ignore_changes = [node_count, zones, orchestrator_version, name, ultra_ssd_enabled, scale_down_mode] - prevent_destroy = true - } -} - -# Facets dedicated node pool -resource "azurerm_kubernetes_cluster_node_pool" "facets_dedicated_np" { - count = lookup(local.facets_dedicated_np, "enable", "true") ? 1 : 0 - name = "facets" - kubernetes_cluster_id = azurerm_kubernetes_cluster.aks_cluster.id - vm_size = lookup(local.facets_dedicated_np, "instance_type", "standard_D4as_v5") - node_count = 1 - priority = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? "Spot" : "Regular" - spot_max_price = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? 
-1 : null - eviction_policy = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? "Delete" : null - enable_auto_scaling = true - min_count = 1 - max_count = lookup(local.facets_dedicated_np, "max_nodes", 200) - mode = "User" - max_pods = 50 - os_disk_size_gb = lookup(local.facets_dedicated_np, "root_disk_volume", 100) - enable_node_public_ip = false - vnet_subnet_id = var.private_subnets[1] - zones = length(compact(var.azs)) == 0 ? null : [var.azs[0]] - orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version - node_taints = lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule", "facets.cloud/dedicated=true:NoSchedule"] : ["facets.cloud/dedicated=true:NoSchedule"] - tags = local.tags - - node_labels = merge({ - facets-node-type = "facets-dedicated" - }, lookup(local.facets_dedicated_np, "node_lifecycle_type", "SPOT") == "SPOT" ? { - "kubernetes.azure.com/scalesetpriority" = "spot" - ccLifecycle = "spot" - } : {}) - - dynamic "upgrade_settings" { - for_each = local.enable_auto_upgrade && var.settings.FACETS_DEDICATED_NODE_LIFECYCLE_TYPE != "SPOT" ? 
[1] : [] - content { - max_surge = local.max_surge - } - } - - lifecycle { - ignore_changes = [node_count, zones, orchestrator_version, ultra_ssd_enabled] - prevent_destroy = true - } -} - -# Storage account -data "http" "whatismyip" { - url = "http://ipv4.icanhazip.com" -} - -resource "azurerm_storage_account" "storageacct" { - name = "${substr(replace(var.cluster.name, "-", ""), 0, 24 - 10)}${var.cluster.clusterCode}" - resource_group_name = var.resource_group_name - location = var.region - account_tier = "Standard" - account_replication_type = "LRS" - account_kind = "StorageV2" - access_tier = "Hot" - min_tls_version = "TLS1_2" - - network_rules { - default_action = "Deny" - ip_rules = ["${chomp(data.http.whatismyip.body)}"] - virtual_network_subnet_ids = concat(var.private_subnets, var.public_subnets) - } - - blob_properties { - last_access_time_enabled = lookup(local.spec, "storage_account_last_access_time_enabled", true) - } - - tags = local.tags - - lifecycle { - ignore_changes = [ - name, - nfsv3_enabled, - infrastructure_encryption_enabled, - queue_encryption_key_type, - table_encryption_key_type, - - - - - shared_access_key_enabled, - - ] - } -} - -# Role assignments -resource "azurerm_role_assignment" "cluster_identity_role_assignment" { - scope = var.resource_group_name - role_definition_name = "Contributor" - principal_id = azurerm_kubernetes_cluster.aks_cluster.identity[0].principal_id -} - -# These providers need to be force replaced with empty object blocks to prevent Terraform from using default providers -terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - } - kubernetes = { - source = "hashicorp/kubernetes" - } - helm = { - source = "hashicorp/helm" - } - null = { - source = "hashicorp/null" - } - tls = { - source = "hashicorp/tls" - } - local = { - source = "hashicorp/local" - } - http = { - source = "hashicorp/http" - } - } -} - -provider "kubernetes" { - alias = "k8s" - host = 
azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host - client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) - client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) - cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) -} - -provider "helm" { - alias = "k8s" - kubernetes { - host = azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host - client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) - client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) - cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) - } -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf deleted file mode 100644 index 6c7ee445b..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/outputs.tf +++ /dev/null @@ -1,51 +0,0 @@ -output "k8s_details" { - value = { - auth = { - host = azurerm_kubernetes_cluster.aks_cluster.kube_config[0].host - client_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_certificate) - client_key = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].client_key) - cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks_cluster.kube_config[0].cluster_ca_certificate) - token = try(kubernetes_secret_v1.capillary-cloud-admin-token.data["token"], "na") - } - az_storage_account = azurerm_storage_account.storageacct.name - az_storage_account_id = azurerm_storage_account.storageacct.id - az_storage_account_key = azurerm_storage_account.storageacct.primary_access_key - - #registry_secrets = local.secret_list - #registry_secret_objects = [for i in local.secret_list : { 
name : i }] - node_group_iam_role_arn = "na" - cluster_id = var.cluster.name - - # stub_prometheus_dep = helm_release.prometheus-operator - #stub_ecr_token_refresh = null_resource.wait-for-ecr-token-patch - principalId = azurerm_kubernetes_cluster.aks_cluster.kubelet_identity[0].object_id - priority-class = kubernetes_priority_class.facets-critical.metadata[0].name - } -} -output "registry_secret_objects" { - value = [] -} - -output "legacy_outputs" { - description = "Legacy outputs for backward compatibility" - value = { - cluster_name = azurerm_kubernetes_cluster.k8s.name - cluster_endpoint = azurerm_kubernetes_cluster.k8s.kube_config.0.host - cluster_ca_certificate = azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate - node_resource_group = azurerm_kubernetes_cluster.k8s.node_resource_group - } -} - -output "node_resource_group" { - value = azurerm_kubernetes_cluster.aks_cluster.node_resource_group -} - -output "aks_cluster_id" { - value = azurerm_kubernetes_cluster.aks_cluster.id -} - -output "cluster_auto_upgrade" { - value = { - max_surge = local.max_surge - } // "" for spot nodes -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf deleted file mode 100644 index 2b959f613..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8s_cluster/variables.tf +++ /dev/null @@ -1,72 +0,0 @@ -variable "vpc_details" { - description = "VPC details including region and other network information" - type = object({ - region = string - }) -} - -variable "instance" { - description = "A Kubernetes EKS cluster module with auto mode enabled by default and all necessary configurations preset." 
- type = any -} - -variable "cluster" { - description = "cluster object configuration" - type = any - default = {} -} - -variable "cc_metadata" { - description = "cc_metadata object configuration" - type = any -} - -variable "instance_name" { - description = "The architectural name for the resource as added in the Facets blueprint designer." - type = string - -} - -variable "environment" { - description = "An object containing details about the environment." - type = object({ - name = string - unique_name = string - cloud_tags = map(string) - }) -} - -variable "vpc_id" { - description = "The VPC ID for the cluster." - type = string -} - -variable "k8s_subnets" { - description = "The subnets for the cluster." - type = list(string) -} - -variable "region" { - description = "The AZURE region where the cluster is deployed." - type = string -} - -variable "resource_group_name" { - description = "value for the resource group name where the cluster is deployed." - type = string -} - -variable "azs" { - description = "The availability zones for the cluster." - type = list(string) -} - -variable "private_subnets" { - description = "value for the private subnets where the cluster is deployed." - type = list(string) -} - -variable "public_subnets" { - description = "value for the public subnets where the cluster is deployed." 
- type = list(string) -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf index 91658ce15..7db3ec326 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf @@ -1,90 +1,51 @@ locals { - name = module.name.name - metadata = lookup(var.instance, "metadata", {}) - spec = lookup(var.instance, "spec", {}) - cluster = lookup(local.spec, "cluster", {}) - node_pools = lookup(local.spec, "node_pools", {}) - default_node_pool = lookup(local.node_pools, "default", {}) - dedicated_node_pool = lookup(local.node_pools, "dedicated", {}) - default_reclaim_policy = lookup(local.cluster, "default_reclaim_policy", "Delete") - namespace = lookup(local.metadata, "namespace", "default") - user_supplied_helm_values = lookup(local.secret_copier, "values", {}) - secret_copier = lookup(local.spec, "secret-copier", {}) + # Generate cluster name + name = module.name.name - facets_default_node_pool = { - name = "default-node-pool" - node_class_name = "default" - labels = { - "managed-by" = "facets" - facets-node-type = "facets-default" - } - } + # Extract spec configurations + metadata = lookup(var.instance, "metadata", {}) + spec = lookup(var.instance, "spec", {}) - facets_dedicated_node_pool = { - name = "dedicated-node-pool" - node_class_name = "default" - labels = { - managed-by = "facets" - facets-node-type = "facets-dedicated" - } - } + # Cluster configuration + cluster_config = lookup(local.spec, "cluster", {}) - cloud_tags = { - facetscontrolplane = split(".", var.cc_metadata.cc_host)[0] - cluster = var.cluster.name - facetsclustername = var.cluster.name - facetsclusterid = var.cluster.id - } + # Node pools configuration + node_pools_config = lookup(local.spec, "node_pools", {}) + system_np_config = lookup(local.node_pools_config, "system_np", {}) - # Storage class data for AKS - storage_class_data = { - apiVersion = 
"storage.k8s.io/v1" - kind = "StorageClass" - metadata = { - name = "aks-default-sc" - annotations = { - "storageclass.kubernetes.io/is-default-class" = "true" - } - } - provisioner = "disk.csi.azure.com" - parameters = { - storageaccounttype = "Premium_LRS" - kind = "Managed" - cachingmode = "ReadOnly" - } - allowVolumeExpansion = true - reclaimPolicy = local.default_reclaim_policy - volumeBindingMode = "Immediate" - } + # Auto-upgrade configuration + auto_upgrade_config = lookup(local.spec, "auto_upgrade_settings", {}) + maintenance_window = lookup(local.auto_upgrade_config, "maintenance_window", {}) - default_node_pool_data = { - apiVersion = "v1" - kind = "Node" - metadata = { - name = local.facets_default_node_pool.name - labels = local.facets_default_node_pool.labels - } - spec = { - # AKS node configuration would be handled by the cluster itself - # This is more for reference and labeling - } - } + # Features configuration + features_config = lookup(local.spec, "features", {}) - dedicated_node_pool_data = { - apiVersion = "v1" - kind = "Node" - metadata = { - name = local.facets_dedicated_node_pool.name - labels = local.facets_dedicated_node_pool.labels - } - spec = { - taints = [ - { - key = "facets.cloud/dedicated" - value = "true" - effect = "NoSchedule" - } - ] - } - } -} \ No newline at end of file + # Tags configuration + tags_config = lookup(local.spec, "tags", {}) + + # Computed values for the cluster + kubernetes_version = lookup(local.cluster_config, "kubernetes_version", "1.31") + sku_tier = lookup(local.cluster_config, "sku_tier", "Free") + + # Node pool computed values + node_count = lookup(local.system_np_config, "node_count", 1) + instance_type = lookup(local.system_np_config, "instance_type", "Standard_D2_v4") + max_pods = lookup(local.system_np_config, "max_pods", 30) + os_disk_size_gb = lookup(local.system_np_config, "os_disk_size_gb", 50) + enable_auto_scaling = lookup(local.system_np_config, "enable_auto_scaling", false) + + # 
Auto-upgrade computed values + enable_auto_upgrade = lookup(local.auto_upgrade_config, "enable_auto_upgrade", true) + automatic_channel_upgrade = lookup(local.auto_upgrade_config, "automatic_channel_upgrade", "stable") + max_surge = lookup(local.auto_upgrade_config, "max_surge", "1") + + # Maintenance window computed values + maintenance_window_disabled = lookup(local.maintenance_window, "is_disabled", true) + maintenance_day_of_week = lookup(local.maintenance_window, "day_of_week", "SUN") + maintenance_start_time = lookup(local.maintenance_window, "start_time", 2) + maintenance_end_time = lookup(local.maintenance_window, "end_time", 6) + + # Network access computed values + cluster_endpoint_public_access = lookup(local.cluster_config, "cluster_endpoint_public_access", true) + cluster_endpoint_public_access_cidrs = lookup(local.cluster_config, "cluster_endpoint_public_access_cidrs", ["0.0.0.0/0"]) +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index 7a6c88683..c02f6e30f 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -1,138 +1,141 @@ +# Generate a unique name for the AKS cluster module "name" { source = "github.com/Facets-cloud/facets-utility-modules//name" environment = var.environment - limit = 32 + limit = 63 resource_name = var.instance_name resource_type = "kubernetes_cluster" globally_unique = true } -module "k8s_cluster" { - source = "./k8s_cluster" - instance = var.instance - vpc_id = var.inputs.network_details.attributes.vpc_id - cc_metadata = var.cc_metadata - environment = var.environment - cluster = var.cluster - k8s_subnets = var.inputs.network_details.attributes.private_subnet_ids - instance_name = var.instance_name - region = var.inputs.network_details.attributes.region - azs = var.inputs.network_details.attributes.availability_zones +# Create the AKS cluster using the official Azure module +module "k8scluster" 
{ + source = "Azure/aks/azurerm" + version = "10.2.0" + + # Required variables resource_group_name = var.inputs.network_details.attributes.resource_group_name - public_subnets = var.inputs.network_details.attributes.public_subnet_ids - private_subnets = var.inputs.network_details.attributes.private_subnet_ids + location = var.inputs.network_details.attributes.region -} + # Basic cluster configuration + cluster_name = local.name + prefix = "" -# Storage class for AKS -module "storage_class" { - depends_on = [module.k8s_cluster] - source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" - name = "aks-storage-class" - namespace = var.environment.namespace - release_name = "${local.name}-fc-storage-class" - data = local.storage_class_data - advanced_config = {} -} + # Kubernetes version + kubernetes_version = var.instance.spec.cluster.kubernetes_version -# Default node pool reference (for compatibility) -module "default_node_pool" { - depends_on = [module.k8s_cluster] - count = lookup(local.default_node_pool, "enabled", true) ? 1 : 0 - source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" - name = "${local.name}-fc-default-np" - namespace = var.environment.namespace - release_name = "${local.name}-fc-default-np" - data = local.default_node_pool_data - advanced_config = {} -} + # SKU tier + sku_tier = var.instance.spec.cluster.sku_tier -# Dedicated node pool reference (for compatibility) -module "dedicated_node_pool" { - depends_on = [module.k8s_cluster] - count = lookup(local.dedicated_node_pool, "enabled", false) ? 
1 : 0 - source = "github.com/Facets-cloud/facets-utility-modules//any-k8s-resource" - name = "${local.name}-fc-dedicated-np" - namespace = var.environment.namespace - release_name = "${local.name}-fc-dedicated-np" - data = local.dedicated_node_pool_data - advanced_config = {} -} + # Network configuration + network_plugin = "azure" + network_policy = "calico" + vnet_subnet = { + id = var.inputs.network_details.attributes.private_subnet_ids[0] + } + net_profile_service_cidr = "10.254.0.0/16" + net_profile_dns_service_ip = "10.254.0.254" -provider "kubernetes" { - host = module.k8s_cluster.k8s_details.cluster.auth.host - client_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) - client_key = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) - cluster_ca_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) -} + # Private cluster configuration + private_cluster_enabled = !var.instance.spec.cluster.cluster_endpoint_public_access + api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access ? var.instance.spec.cluster.cluster_endpoint_public_access_cidrs : null + + # Node pool configuration + agents_count = var.instance.spec.node_pools.system_np.node_count + agents_size = var.instance.spec.node_pools.system_np.instance_type + agents_max_pods = var.instance.spec.node_pools.system_np.max_pods + os_disk_size_gb = var.instance.spec.node_pools.system_np.os_disk_size_gb + agents_availability_zones = var.inputs.network_details.attributes.availability_zones + agents_pool_name = "system" + + # Auto-scaling configuration + enable_auto_scaling = var.instance.spec.node_pools.system_np.enable_auto_scaling + agents_min_count = var.instance.spec.node_pools.system_np.enable_auto_scaling ? var.instance.spec.node_pools.system_np.node_count : null + agents_max_count = var.instance.spec.node_pools.system_np.enable_auto_scaling ? 
10 : null + + # System node pool - mark it as system mode + only_critical_addons_enabled = true + + # Auto-upgrade configuration + automatic_channel_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade ? var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade : null -provider "helm" { - kubernetes { - host = module.k8s_cluster.k8s_details.cluster.auth.host - client_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) - client_key = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) - cluster_ca_certificate = base64decode(module.k8s_cluster.k8s_details.cluster.auth.cluster_ca_certificate) + # Maintenance window configuration + maintenance_window_auto_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && !var.instance.spec.auto_upgrade_settings.maintenance_window.is_disabled ? { + frequency = "Weekly" + interval = 1 + duration = var.instance.spec.auto_upgrade_settings.maintenance_window.end_time - var.instance.spec.auto_upgrade_settings.maintenance_window.start_time + day_of_week = lookup({ + "SUN" = "Sunday" + "MON" = "Monday" + "TUE" = "Tuesday" + "WED" = "Wednesday" + "THU" = "Thursday" + "FRI" = "Friday" + "SAT" = "Saturday" + }, var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week, "Sunday") + start_time = format("%02d:00", var.instance.spec.auto_upgrade_settings.maintenance_window.start_time) + utc_offset = "+00:00" + } : null + + # Node surge configuration for upgrades + agents_pool_max_surge = var.instance.spec.auto_upgrade_settings.max_surge + + # Enable Azure Policy + azure_policy_enabled = true + + # Enable workload identity and OIDC issuer + workload_identity_enabled = true + oidc_issuer_enabled = true + + # Enable monitoring if log analytics workspace is provided + log_analytics_workspace_enabled = var.inputs.network_details.attributes.log_analytics_workspace_id != null + log_analytics_workspace = 
var.inputs.network_details.attributes.log_analytics_workspace_id != null ? { + id = var.inputs.network_details.attributes.log_analytics_workspace_id + name = split("/", var.inputs.network_details.attributes.log_analytics_workspace_id)[8] + } : null + + # Auto-scaler profile configuration + auto_scaler_profile_enabled = var.instance.spec.node_pools.system_np.enable_auto_scaling + auto_scaler_profile_balance_similar_node_groups = false + auto_scaler_profile_expander = "random" + auto_scaler_profile_max_graceful_termination_sec = "600" + auto_scaler_profile_max_node_provisioning_time = "15m" + auto_scaler_profile_max_unready_nodes = 3 + auto_scaler_profile_max_unready_percentage = 45 + auto_scaler_profile_new_pod_scale_up_delay = "10s" + auto_scaler_profile_scale_down_delay_after_add = "10m" + auto_scaler_profile_scale_down_delay_after_delete = "10s" + auto_scaler_profile_scale_down_delay_after_failure = "3m" + auto_scaler_profile_scan_interval = "10s" + auto_scaler_profile_scale_down_unneeded = "10m" + auto_scaler_profile_scale_down_unready = "20m" + auto_scaler_profile_scale_down_utilization_threshold = "0.5" + auto_scaler_profile_empty_bulk_delete_max = 10 + auto_scaler_profile_skip_nodes_with_local_storage = true + auto_scaler_profile_skip_nodes_with_system_pods = true + + # Node labels for system node pool + agents_labels = { + "facets.cloud/node-type" = "system" + "managed-by" = "facets" } -} -# Secret copier helm release -resource "helm_release" "secret-copier" { - depends_on = [module.k8s_cluster] - count = lookup(local.secret_copier, "disabled", false) ? 
0 : 1 - chart = lookup(local.secret_copier, "chart", "secret-copier") - namespace = lookup(local.secret_copier, "namespace", local.namespace) - name = lookup(local.secret_copier, "name", "facets-secret-copier") - repository = lookup(local.secret_copier, "repository", "https://facets-cloud.github.io/helm-charts") - version = lookup(local.secret_copier, "version", "1.0.2") - - values = [ - yamlencode( - { - resources = { - requests = { - cpu = "50m" - memory = "256Mi" - } - limits = { - cpu = "300m" - memory = "1000Mi" - } - } - } - ), - yamlencode(local.user_supplied_helm_values) - ] + # Tags + tags = merge( + var.environment.cloud_tags, + var.instance.spec.tags != null ? var.instance.spec.tags : {} + ) + + # Disable http application routing + http_application_routing_enabled = false + + # Disable local accounts for better security + local_account_disabled = true + + # Enable RBAC with Azure AD + rbac_aad = true + rbac_aad_azure_rbac_enabled = true } -# Cluster overprovisioner for better resource management -resource "helm_release" "cluster-overprovisioner" { - depends_on = [module.k8s_cluster] - name = "${local.name}-cluster-overprovisioner" - repository = "https://charts.deliveryhero.io/" - chart = "cluster-overprovisioner" - version = "0.7.10" - wait = false - cleanup_on_fail = true - - values = [ - <= 1 && var.instance.spec.node_pools.system_np.node_count <= 1000 + error_message = "System node pool node_count must be between 1 and 1000." + } + + validation { + condition = var.instance.spec.node_pools.system_np.max_pods >= 10 && var.instance.spec.node_pools.system_np.max_pods <= 250 + error_message = "System node pool max_pods must be between 10 and 250." + } + + validation { + condition = var.instance.spec.node_pools.system_np.os_disk_size_gb >= 30 && var.instance.spec.node_pools.system_np.os_disk_size_gb <= 2048 + error_message = "System node pool os_disk_size_gb must be between 30 and 2048." 
+ } + + validation { + condition = can(regex("^([0-9]+%?|[0-9]+)$", var.instance.spec.auto_upgrade_settings.max_surge)) + error_message = "Max surge must be a number or percentage (e.g., 1, 33%)." + } + + validation { + condition = contains([ + "rapid", "regular", "stable", "patch", "node-image", "none" + ], var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade) + error_message = "Automatic channel upgrade must be one of: rapid, regular, stable, patch, node-image, none." + } + + validation { + condition = contains([ + "SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT" + ], var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week) + error_message = "Maintenance window day_of_week must be one of: SUN, MON, TUE, WED, THU, FRI, SAT." + } + + validation { + condition = ( + var.instance.spec.auto_upgrade_settings.maintenance_window.start_time >= 0 && + var.instance.spec.auto_upgrade_settings.maintenance_window.start_time <= 23 + ) + error_message = "Maintenance window start_time must be between 0 and 23." + } + + validation { + condition = ( + var.instance.spec.auto_upgrade_settings.maintenance_window.end_time >= 0 && + var.instance.spec.auto_upgrade_settings.maintenance_window.end_time <= 23 + ) + error_message = "Maintenance window end_time must be between 0 and 23." + } + + validation { + condition = ( + var.instance.spec.auto_upgrade_settings.maintenance_window.end_time > + var.instance.spec.auto_upgrade_settings.maintenance_window.start_time + ) + error_message = "Maintenance window end_time must be greater than start_time." + } } + variable "instance_name" { description = "The architectural name for the resource as added in the Facets blueprint designer." type = string + + validation { + condition = length(var.instance_name) > 0 && length(var.instance_name) <= 63 + error_message = "Instance name must be between 1 and 63 characters long." 
+ } + + validation { + condition = can(regex("^[a-z0-9-]+$", var.instance_name)) + error_message = "Instance name must contain only lowercase letters, numbers, and hyphens." + } } + variable "environment" { description = "An object containing details about the environment." type = object({ name = string unique_name = string + cloud_tags = optional(map(string), {}) }) + + validation { + condition = length(var.environment.name) > 0 + error_message = "Environment name cannot be empty." + } + + validation { + condition = length(var.environment.unique_name) > 0 + error_message = "Environment unique_name cannot be empty." + } } + variable "inputs" { description = "A map of inputs requested by the module developer." type = object({ + network_details = object({ + attributes = object({ + vpc_id = string + region = string + resource_group_name = string + availability_zones = list(string) + private_subnet_ids = list(string) + public_subnet_ids = list(string) + log_analytics_workspace_id = optional(string, null) + }) + }) + cloud_account = object({ + attributes = object({ + subscription_id = string + tenant_id = string + }) + }) }) -} \ No newline at end of file + + validation { + condition = length(var.inputs.network_details.attributes.vpc_id) > 0 + error_message = "VPC ID cannot be empty." + } + + validation { + condition = length(var.inputs.network_details.attributes.region) > 0 + error_message = "Region cannot be empty." + } + + validation { + condition = length(var.inputs.network_details.attributes.resource_group_name) > 0 + error_message = "Resource group name cannot be empty." + } + + validation { + condition = length(var.inputs.network_details.attributes.availability_zones) > 0 + error_message = "At least one availability zone must be specified." + } + + validation { + condition = length(var.inputs.network_details.attributes.private_subnet_ids) > 0 + error_message = "At least one private subnet ID must be specified." 
+ } + + validation { + condition = length(var.inputs.cloud_account.attributes.subscription_id) > 0 + error_message = "Azure subscription ID cannot be empty." + } + + validation { + condition = length(var.inputs.cloud_account.attributes.tenant_id) > 0 + error_message = "Azure tenant ID cannot be empty." + } +} From ee4e6071233add4744096914cbe733106cf60e00 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:23:58 +0530 Subject: [PATCH 10/36] removed gitignore --- .../0.2/k8scluster/.checkov_config.yaml | 30 + .../azure_aks/0.2/k8scluster/CHANGELOG-v4.md | 20 + .../azure_aks/0.2/k8scluster/CHANGELOG-v5.md | 31 + .../azure_aks/0.2/k8scluster/CHANGELOG-v6.md | 122 ++ .../azure_aks/0.2/k8scluster/CHANGELOG-v7.md | 93 + .../azure_aks/0.2/k8scluster/CHANGELOG-v8.md | 27 + .../azure_aks/0.2/k8scluster/CHANGELOG-v9.md | 76 + .../azure_aks/0.2/k8scluster/CHANGELOG.md | 5 + .../0.2/k8scluster/CODE_OF_CONDUCT.md | 5 + .../azure_aks/0.2/k8scluster/GNUmakefile | 4 + .../azure_aks/0.2/k8scluster/LICENSE | 21 + .../0.2/k8scluster/NoticeOnUpgradeTov10.0.md | 53 + .../0.2/k8scluster/NoticeOnUpgradeTov5.0.md | 93 + .../0.2/k8scluster/NoticeOnUpgradeTov6.0.md | 5 + .../0.2/k8scluster/NoticeOnUpgradeTov7.0.md | 52 + .../0.2/k8scluster/NoticeOnUpgradeTov8.0.md | 53 + .../0.2/k8scluster/NoticeOnUpgradeTov9.0.md | 9 + .../azure_aks/0.2/k8scluster/README.md | 490 +++++ .../azure_aks/0.2/k8scluster/SECURITY.md | 41 + .../0.2/k8scluster/extra_node_pool.tf | 317 ++++ .../k8scluster/extra_node_pool_override.tf | 17 + .../azure_aks/0.2/k8scluster/locals.tf | 74 + .../azure_aks/0.2/k8scluster/log_analytics.tf | 124 ++ .../azure_aks/0.2/k8scluster/main.tf | 741 ++++++++ .../azure_aks/0.2/k8scluster/main_override.tf | 6 + .../azure_aks/0.2/k8scluster/outputs.tf | 231 +++ .../0.2/k8scluster/role_assignments.tf | 126 ++ .../azure_aks/0.2/k8scluster/tfvmmakefile | 85 + .../azure_aks/0.2/k8scluster/variables.tf | 1601 +++++++++++++++++ .../azure_aks/0.2/k8scluster/versions.tf | 26 + 30 
files changed, 4578 insertions(+) create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf 
create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml new file mode 100644 index 000000000..b39c33402 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml @@ -0,0 +1,30 @@ +block-list-secret-scan: [] +branch: master +directory: + - ./ +download-external-modules: false +evaluate-variables: true +external-modules-download-path: .external_modules +framework: + - all +quiet: true +secrets-scan-file-type: [] +skip-check: + - CKV_GHA_3 + - CKV_AZURE_5 + - CKV_AZURE_6 + - CKV_AZURE_112 + - CKV_AZURE_115 + - CKV_AZURE_116 + - CKV_AZURE_168 + - CKV_AZURE_170 + - CKV_AZURE_139 + - CKV_AZURE_165 + - CKV_AZURE_166 + - CKV_AZURE_164 +skip-framework: + - dockerfile + - kubernetes +skip-path: + - test/vendor +summary-position: top diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md new file mode 100644 index 000000000..42433d0ea --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md @@ -0,0 +1,20 @@ +## 4.15.0 (May 06, 2022) + +ENHANCEMENTS: + +* Added output for `kube_admin_config_raw` 
([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146)) +* Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136)) + +BUG FIXES: + +## 4.16.0 (June 02, 2022) + +ENHANCEMENTS: + +* Added output for `addon_profile` ([#151](https://github.com/Azure/terraform-azurerm-aks/pull/151)) +* Adding Microsoft SECURITY.MD ([#167](https://github.com/Azure/terraform-azurerm-aks/pull/167)) +* Added variable `os_disk_type` for default node pools ([#169](https://github.com/Azure/terraform-azurerm-aks/pull/169)) + +BUG FIXES: + +* Trivial fix to the example in the README ([#166](https://github.com/Azure/terraform-azurerm-aks/pull/166)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md new file mode 100644 index 000000000..bda5b8027 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md @@ -0,0 +1,31 @@ +## 5.0.0 (July 14, 2022) + +ENHANCEMENTS: + +* Variable `enable_kube_dashboard` has been removed as [#181](https://github.com/Azure/terraform-azurerm-aks/issues/181) described. ([#187](https://github.com/Azure/terraform-azurerm-aks/pull/187)) +* Add new variable `location` so we can define location for the resources explicitly. ([#172](https://github.com/Azure/terraform-azurerm-aks/pull/172)) +* Bump AzureRM Provider version to 3.3.0. ([#157](https://github.com/Azure/terraform-azurerm-aks/pull/157)) +* Add new variable `private_dns_zone_id` to make argument `private_dns_zone_id` configurable. ([#174](https://github.com/Azure/terraform-azurerm-aks/pull/174)) +* Add new variable `open_service_mesh_enabled` to make argument `open_service_mesh_enabled` configurable. ([#132](https://github.com/Azure/terraform-azurerm-aks/pull/132)) +* Remove `addon_profile` in the outputs since the block has been removed from provider 3.x. 
Extract embedded blocks inside `addon_profile` block into standalone outputs. ([#188](https://github.com/Azure/terraform-azurerm-aks/pull/188)) +* Add `nullable = true` to some variables to simplify the conditional expressions. ([#193](https://github.com/Azure/terraform-azurerm-aks/pull/193)) +* Add new variable `oidc_issuer_enabled` to make argument `oidc_issuer_enabled` configurable. ([#205](https://github.com/Azure/terraform-azurerm-aks/pull/205) +* Add new output `oidc_issuer_url` to expose the created issuer URL from the module. [#206](https://github.com/Azure/terraform-azurerm-aks/pull/206)) +* Turn monitoring on in the test code. ([#201](https://github.com/Azure/terraform-azurerm-aks/pull/201)) +* Add new variables `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` to make arguments `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` configurable. ([#149](https://github.com/Azure/terraform-azurerm-aks/pull/149)) +* Remove `module.ssh-key` and moves resource `tls_private_key` inside the module to root directory, then outputs tls keys. ([#189](https://github.com/Azure/terraform-azurerm-aks/pull/189)) +* Add new variables `rbac_aad_azure_rbac_enabled` and `rbac_aad_tenant_id` to make arguments in `azure_active_directory_role_based_access_control` configurable. ([#199](https://github.com/Azure/terraform-azurerm-aks/pull/199)) +* Add `count` meta-argument to resource `tls_private_key` to avoid the unnecessary creation. ([#209](https://github.com/Azure/terraform-azurerm-aks/pull/209)) +* Add new variable `only_critical_addons_enabled` to make argument `only_critical_addons_enabled` in block `default_node_pool` configurable. ([#129](https://github.com/Azure/terraform-azurerm-aks/pull/129)) +* Add support for the argument `key_vault_secrets_provider`. ([#214](https://github.com/Azure/terraform-azurerm-aks/pull/214)) +* Provides a way to attach existing Log Analytics Workspace to AKS through Container Insights. 
([#213](https://github.com/Azure/terraform-azurerm-aks/pull/213)) +* Add new variable `local_account_disabled` to make argument `local_account_disabled` configurable. ([#218](https://github.com/Azure/terraform-azurerm-aks/pull/218)) +* Set argument `private_cluster_enabled` to `true` in the test code. ([#219](https://github.com/Azure/terraform-azurerm-aks/pull/219)) +* Add new variable `disk_encryption_set_id` to make argument `disk_encryption_set_id` configurable. Create resource `azurerm_disk_encryption_set` in the test code to turn disk encryption on for the cluster. ([#195](https://github.com/Azure/terraform-azurerm-aks/pull/195)) +* Add new variable `api_server_authorized_ip_ranges` to make argument `api_server_authorized_ip_ranges` configurable. ([#220](https://github.com/Azure/terraform-azurerm-aks/pull/220)) +* Rename output `system_assigned_identity` to `cluster_identity` since it could be user assigned identity. Remove the index inside output's value expression. ([#197](https://github.com/Azure/terraform-azurerm-aks/pull/197)) +* Rename `var.enable_azure_policy` to `var.azure_policy_enabled` to meet the naming convention. Set `azure_policy_enabled` to `true` in test fixture code. ([#203](https://github.com/Azure/terraform-azurerm-aks/pull/203)) + +BUG FIXES: + +* Change the incorrect description of variable `tags`. 
([#175](https://github.com/Azure/terraform-azurerm-aks/pull/175)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md new file mode 100644 index 000000000..ed1f9f094 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md @@ -0,0 +1,122 @@ +# Changelog + +## [Unreleased](https://github.com/Azure/terraform-azurerm-aks/tree/HEAD) + +**Merged pull requests:** + +- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) + +## [6.8.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.8.0) (2023-04-04) + +**Merged pull requests:** + +- Add support for `monitor_metrics` [\#341](https://github.com/Azure/terraform-azurerm-aks/pull/341) ([zioproto](https://github.com/zioproto)) +- Support setting os\_sku for default\_node\_pool [\#339](https://github.com/Azure/terraform-azurerm-aks/pull/339) ([mjeco](https://github.com/mjeco)) +- Upgrade required Terraform version [\#338](https://github.com/Azure/terraform-azurerm-aks/pull/338) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support `temporary_name_for_rotation` [\#334](https://github.com/Azure/terraform-azurerm-aks/pull/334) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.9.1 to 0.12.0 in /test [\#330](https://github.com/Azure/terraform-azurerm-aks/pull/330) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Fix example multiple\_node\_pools [\#328](https://github.com/Azure/terraform-azurerm-aks/pull/328) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add Network Contributor role assignments scoped to AKS nodepools subnets [\#327](https://github.com/Azure/terraform-azurerm-aks/pull/327) ([zioproto](https://github.com/zioproto)) +- Add support for extra node pools 
[\#323](https://github.com/Azure/terraform-azurerm-aks/pull/323) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `default_node_pool.kubelet_config` [\#322](https://github.com/Azure/terraform-azurerm-aks/pull/322) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for `public_network_access_enabled` [\#314](https://github.com/Azure/terraform-azurerm-aks/pull/314) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.7.1](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.1) (2023-03-06) + +**Merged pull requests:** + +- Fix \#316 `current client lacks permissions to read Key Rotation Policy` issue [\#317](https://github.com/Azure/terraform-azurerm-aks/pull/317) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.7.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.0) (2023-02-27) + +**Merged pull requests:** + +- Add support for `linux_os_config` [\#309](https://github.com/Azure/terraform-azurerm-aks/pull/309) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/gruntwork-io/terratest from 0.41.10 to 0.41.11 in /test [\#307](https://github.com/Azure/terraform-azurerm-aks/pull/307) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/Azure/terraform-module-test-helper from 0.8.1 to 0.9.1 in /test [\#306](https://github.com/Azure/terraform-azurerm-aks/pull/306) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump golang.org/x/net from 0.1.0 to 0.7.0 in /test [\#305](https://github.com/Azure/terraform-azurerm-aks/pull/305) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-getter from 1.6.1 to 1.7.0 in /test [\#304](https://github.com/Azure/terraform-azurerm-aks/pull/304) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-getter/v2 from 2.1.1 to 2.2.0 in /test [\#303](https://github.com/Azure/terraform-azurerm-aks/pull/303) ([dependabot[bot]](https://github.com/apps/dependabot)) +- fix: allow 
orchestrator\_version if auto-upgrade is 'patch' to allow default\_node\_pool upgrade [\#302](https://github.com/Azure/terraform-azurerm-aks/pull/302) ([aescrob](https://github.com/aescrob)) +- Add support for default node pool's `node_taints` [\#300](https://github.com/Azure/terraform-azurerm-aks/pull/300) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for acr attachment [\#298](https://github.com/Azure/terraform-azurerm-aks/pull/298) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `web_app_routing` [\#297](https://github.com/Azure/terraform-azurerm-aks/pull/297) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.7.1 to 0.8.1 in /test [\#295](https://github.com/Azure/terraform-azurerm-aks/pull/295) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [6.6.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.6.0) (2023-01-29) + +**Merged pull requests:** + +- Bump github.com/Azure/terraform-module-test-helper from 0.6.0 to 0.7.1 in /test [\#293](https://github.com/Azure/terraform-azurerm-aks/pull/293) ([dependabot[bot]](https://github.com/apps/dependabot)) +- identity type is either SystemAssigned or UserAssigned [\#292](https://github.com/Azure/terraform-azurerm-aks/pull/292) ([zioproto](https://github.com/zioproto)) +- Bump github.com/gruntwork-io/terratest from 0.41.7 to 0.41.9 in /test [\#290](https://github.com/Azure/terraform-azurerm-aks/pull/290) ([dependabot[bot]](https://github.com/apps/dependabot)) +- feat: Implement support for KMS arguments [\#288](https://github.com/Azure/terraform-azurerm-aks/pull/288) ([mkilchhofer](https://github.com/mkilchhofer)) +- feat: allow for configuring auto\_scaler\_profile [\#278](https://github.com/Azure/terraform-azurerm-aks/pull/278) ([DavidSpek](https://github.com/DavidSpek)) +- Azure AD RBAC enable/disable with variable rbac\_aad [\#269](https://github.com/Azure/terraform-azurerm-aks/pull/269) 
([zioproto](https://github.com/zioproto)) + +## [6.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.5.0) (2023-01-03) + +**Merged pull requests:** + +- Bump github.com/Azure/terraform-module-test-helper from 0.4.0 to 0.6.0 in /test [\#287](https://github.com/Azure/terraform-azurerm-aks/pull/287) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.41.6 to 0.41.7 in /test [\#286](https://github.com/Azure/terraform-azurerm-aks/pull/286) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add support for `scale_down_mode` [\#285](https://github.com/Azure/terraform-azurerm-aks/pull/285) ([lonegunmanb](https://github.com/lonegunmanb)) +- auto-upgrade: variable orchestrator\_version to null [\#283](https://github.com/Azure/terraform-azurerm-aks/pull/283) ([zioproto](https://github.com/zioproto)) + +## [6.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.4.0) (2022-12-26) + +**Merged pull requests:** + +- feat\(storage\_profile\): add support for CSI arguments [\#282](https://github.com/Azure/terraform-azurerm-aks/pull/282) ([aescrob](https://github.com/aescrob)) + +## [6.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.3.0) (2022-12-20) + +**Merged pull requests:** + +- feat: add var automatic\_channel\_upgrade [\#281](https://github.com/Azure/terraform-azurerm-aks/pull/281) ([the-technat](https://github.com/the-technat)) +- Upgrade `terraform-module-test-helper` lib so we can get rid of override file to execute version upgrade test [\#279](https://github.com/Azure/terraform-azurerm-aks/pull/279) ([lonegunmanb](https://github.com/lonegunmanb)) +- Added support for load\_balancer\_profile [\#277](https://github.com/Azure/terraform-azurerm-aks/pull/277) ([mazilu88](https://github.com/mazilu88)) +- Add auto changelog update to this repo. 
[\#275](https://github.com/Azure/terraform-azurerm-aks/pull/275) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump test helper version [\#273](https://github.com/Azure/terraform-azurerm-aks/pull/273) ([lonegunmanb](https://github.com/lonegunmanb)) +- Ignore `scripts` soft link [\#272](https://github.com/Azure/terraform-azurerm-aks/pull/272) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for pod subnet [\#271](https://github.com/Azure/terraform-azurerm-aks/pull/271) ([mr-onion-2](https://github.com/mr-onion-2)) + +## [6.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.2.0) (2022-10-18) + +**Merged pull requests:** + +- Add breaking change detect CI step. [\#268](https://github.com/Azure/terraform-azurerm-aks/pull/268) ([lonegunmanb](https://github.com/lonegunmanb)) +- Workload Identity support [\#266](https://github.com/Azure/terraform-azurerm-aks/pull/266) ([nlamirault](https://github.com/nlamirault)) +- Add unit test for complex local logic [\#264](https://github.com/Azure/terraform-azurerm-aks/pull/264) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.1.0) (2022-09-30) + +**Merged pull requests:** + +- Improve placeholders for visibility in the UX [\#262](https://github.com/Azure/terraform-azurerm-aks/pull/262) ([zioproto](https://github.com/zioproto)) +- align acc test in CI pipeline with local machine by running e2e test … [\#260](https://github.com/Azure/terraform-azurerm-aks/pull/260) ([lonegunmanb](https://github.com/lonegunmanb)) +- align pr-check with local machine by using docker command instead [\#259](https://github.com/Azure/terraform-azurerm-aks/pull/259) ([lonegunmanb](https://github.com/lonegunmanb)) +- bugfix: Make the Azure Defender clause robust against a non-existent … [\#258](https://github.com/Azure/terraform-azurerm-aks/pull/258) ([gzur](https://github.com/gzur)) +- Add support for `maintenance_window` 
[\#256](https://github.com/Azure/terraform-azurerm-aks/pull/256) ([lonegunmanb](https://github.com/lonegunmanb)) +- Updates terraform code to meet updated code style requirement [\#253](https://github.com/Azure/terraform-azurerm-aks/pull/253) ([lonegunmanb](https://github.com/lonegunmanb)) +- Output cluster's fqdn [\#251](https://github.com/Azure/terraform-azurerm-aks/pull/251) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix example path in readme file. [\#249](https://github.com/Azure/terraform-azurerm-aks/pull/249) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update azurerm provider's restriction. [\#248](https://github.com/Azure/terraform-azurerm-aks/pull/248) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for optional Ultra disks [\#245](https://github.com/Azure/terraform-azurerm-aks/pull/245) ([digiserg](https://github.com/digiserg)) +- add aci\_connector addon [\#230](https://github.com/Azure/terraform-azurerm-aks/pull/230) ([zioproto](https://github.com/zioproto)) + +## [6.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.0.0) (2022-09-13) + +**Merged pull requests:** + +- Add outputs for created Log Analytics workspace [\#243](https://github.com/Azure/terraform-azurerm-aks/pull/243) ([zioproto](https://github.com/zioproto)) +- Prepare v6.0 and new CI pipeline. 
[\#241](https://github.com/Azure/terraform-azurerm-aks/pull/241) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update hashicorp/terraform-provider-azurerm to version 3.21.0 \(fixes for AKS 1.24\) [\#238](https://github.com/Azure/terraform-azurerm-aks/pull/238) ([zioproto](https://github.com/zioproto)) +- Output Kubernetes Cluster Name [\#234](https://github.com/Azure/terraform-azurerm-aks/pull/234) ([vermacodes](https://github.com/vermacodes)) +- feat\(aks\): add microsoft defender support [\#232](https://github.com/Azure/terraform-azurerm-aks/pull/232) ([eyenx](https://github.com/eyenx)) +- fix: mark outputs as sensitive [\#231](https://github.com/Azure/terraform-azurerm-aks/pull/231) ([jvelasquez](https://github.com/jvelasquez)) +- Loose the restriction on tls provider's version to include major version greater than 3.0 [\#229](https://github.com/Azure/terraform-azurerm-aks/pull/229) ([lonegunmanb](https://github.com/lonegunmanb)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md new file mode 100644 index 000000000..67b2e2375 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md @@ -0,0 +1,93 @@ +# Changelog + +## [7.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.5.0) (2023-11-14) + +**Merged pull requests:** + +- Add support for `node_os_channel_upgrade` [\#474](https://github.com/Azure/terraform-azurerm-aks/pull/474) ([lonegunmanb](https://github.com/lonegunmanb)) +- use lowercase everywhere for network plugin mode overlay [\#472](https://github.com/Azure/terraform-azurerm-aks/pull/472) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.15.1-0.20230728050712-96e8615f5515 to 0.17.0 in /test 
[\#469](https://github.com/Azure/terraform-azurerm-aks/pull/469) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add support for `service_mesh_profile` block [\#468](https://github.com/Azure/terraform-azurerm-aks/pull/468) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for Image Cleaner [\#466](https://github.com/Azure/terraform-azurerm-aks/pull/466) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add `fips_enabled` support for `default_node_pool` block [\#464](https://github.com/Azure/terraform-azurerm-aks/pull/464) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add default empty list for `allowed` and `not_allowed` in `var.maintenance_window` [\#463](https://github.com/Azure/terraform-azurerm-aks/pull/463) ([lonegunmanb](https://github.com/lonegunmanb)) +- fix: correct wording of the doc [\#461](https://github.com/Azure/terraform-azurerm-aks/pull/461) ([meysam81](https://github.com/meysam81)) +- add run\_command\_enabled [\#452](https://github.com/Azure/terraform-azurerm-aks/pull/452) ([zioproto](https://github.com/zioproto)) +- add msi\_auth\_for\_monitoring\_enabled [\#446](https://github.com/Azure/terraform-azurerm-aks/pull/446) ([admincasper](https://github.com/admincasper)) +- Restore readme file by stop formatting markdown table [\#445](https://github.com/Azure/terraform-azurerm-aks/pull/445) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.4.0) (2023-09-18) + +**Merged pull requests:** + +- Support for creating nodepools from snapshots [\#442](https://github.com/Azure/terraform-azurerm-aks/pull/442) ([zioproto](https://github.com/zioproto)) +- Add multiple terraform-docs configs to generate a separated markdown document for input variables [\#441](https://github.com/Azure/terraform-azurerm-aks/pull/441) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `maintenance_window_node_os` block
[\#440](https://github.com/Azure/terraform-azurerm-aks/pull/440) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.3.2](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.2) (2023-09-07) + +**Merged pull requests:** + +- Hide input variables in readme to boost the rendering [\#437](https://github.com/Azure/terraform-azurerm-aks/pull/437) ([lonegunmanb](https://github.com/lonegunmanb)) +- Improve information to upgrade to 7.0 [\#432](https://github.com/Azure/terraform-azurerm-aks/pull/432) ([zioproto](https://github.com/zioproto)) +- Add confidential computing in aks module [\#423](https://github.com/Azure/terraform-azurerm-aks/pull/423) ([jiaweitao001](https://github.com/jiaweitao001)) + +## [7.3.1](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.1) (2023-08-10) + +**Merged pull requests:** + +- Bump k8s version in examples to pass e2e tests [\#422](https://github.com/Azure/terraform-azurerm-aks/pull/422) ([jiaweitao001](https://github.com/jiaweitao001)) + +## [7.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.0) (2023-08-03) + +**Merged pull requests:** + +- Add `location` and `resource_group_name` for `var.log_analytics_workspace` [\#412](https://github.com/Azure/terraform-azurerm-aks/pull/412) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix \#405 incorrect role assignment resource [\#410](https://github.com/Azure/terraform-azurerm-aks/pull/410) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.2.0) (2023-07-10) + +**Merged pull requests:** + +- Bump google.golang.org/grpc from 1.51.0 to 1.53.0 in /test [\#406](https://github.com/Azure/terraform-azurerm-aks/pull/406) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Support for Azure CNI Cilium [\#398](https://github.com/Azure/terraform-azurerm-aks/pull/398) ([JitseHijlkema](https://github.com/JitseHijlkema)) +- Use `lonegunmanb/public-ip/lonegunmanb` module to retrieve public ip
[\#396](https://github.com/Azure/terraform-azurerm-aks/pull/396) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix incorrect e2e test code so it could pass on our local machine [\#395](https://github.com/Azure/terraform-azurerm-aks/pull/395) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for Proximity placement group for default node pool [\#392](https://github.com/Azure/terraform-azurerm-aks/pull/392) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add upgrade\_settings block for default nodepool [\#391](https://github.com/Azure/terraform-azurerm-aks/pull/391) ([CiucurDaniel](https://github.com/CiucurDaniel)) +- Bump github.com/Azure/terraform-module-test-helper from 0.13.0 to 0.14.0 in /test [\#386](https://github.com/Azure/terraform-azurerm-aks/pull/386) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [7.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.1.0) (2023-06-07) + +**Merged pull requests:** + +- Deprecate `api_server_authorized_ip_ranges` by using `api_server_access_profile` block [\#381](https://github.com/Azure/terraform-azurerm-aks/pull/381) ([lonegunmanb](https://github.com/lonegunmanb)) +- `oidc_issuer_enabled` must be set to `true` to enable Azure AD Worklo… [\#377](https://github.com/Azure/terraform-azurerm-aks/pull/377) ([zioproto](https://github.com/zioproto)) +- assign network contributor role to control plane identity [\#369](https://github.com/Azure/terraform-azurerm-aks/pull/369) ([zioproto](https://github.com/zioproto)) +- Add tracing tag toggle variables [\#362](https://github.com/Azure/terraform-azurerm-aks/pull/362) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for Azure CNI Overlay [\#354](https://github.com/Azure/terraform-azurerm-aks/pull/354) ([zioproto](https://github.com/zioproto)) +- Make `var.prefix` optional [\#382](https://github.com/Azure/terraform-azurerm-aks/pull/382) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove constraint on `authorized_ip_ranges` when 
`public_network_access_enabled` is `true` [\#375](https://github.com/Azure/terraform-azurerm-aks/pull/375) ([lonegunmanb](https://github.com/lonegunmanb)) +- Filter null value out from `local.subnet_ids` [\#374](https://github.com/Azure/terraform-azurerm-aks/pull/374) ([lonegunmanb](https://github.com/lonegunmanb)) +- User `location` returned from data source for log analytics solution. [\#349](https://github.com/Azure/terraform-azurerm-aks/pull/349) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.0.0) (2023-05-18) + +**Merged pull requests:** + +- Upgrade notice for v7.0 [\#367](https://github.com/Azure/terraform-azurerm-aks/pull/367) ([lonegunmanb](https://github.com/lonegunmanb)) +- Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` [\#361](https://github.com/Azure/terraform-azurerm-aks/pull/361) ([lonegunmanb](https://github.com/lonegunmanb)) +- feat!: add create\_before\_destroy=true to node pools [\#357](https://github.com/Azure/terraform-azurerm-aks/pull/357) ([the-technat](https://github.com/the-technat)) +- Move breaking change details into separate docs. add notice on v7.0.0 [\#355](https://github.com/Azure/terraform-azurerm-aks/pull/355) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.12.0 to 0.13.0 in /test [\#352](https://github.com/Azure/terraform-azurerm-aks/pull/352) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Trivial: fix typo ingration -\> integration [\#351](https://github.com/Azure/terraform-azurerm-aks/pull/351) ([zioproto](https://github.com/zioproto)) +- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) +- \[Breaking\] Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard`. 
[\#346](https://github.com/Azure/terraform-azurerm-aks/pull/346) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] - Ignore changes on `kubernetes_version` from outside of Terraform [\#336](https://github.com/Azure/terraform-azurerm-aks/pull/336) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] - Fix \#315 by amending missing `linux_os_config` block [\#320](https://github.com/Azure/terraform-azurerm-aks/pull/320) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] Wrap `log_analytics_solution_id` to an object to fix \#263. [\#265](https://github.com/Azure/terraform-azurerm-aks/pull/265) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] Remove unused net\_profile\_docker\_bridge\_cidr [\#222](https://github.com/Azure/terraform-azurerm-aks/pull/222) ([zioproto](https://github.com/zioproto)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md new file mode 100644 index 000000000..2c035d842 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md @@ -0,0 +1,27 @@ +# Changelog + +**Merged pull requests:** + +- Add support for nodepool's `gpu_instance` [\#519](https://github.com/Azure/terraform-azurerm-aks/pull/519) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.17.0 to 0.18.0 in /test [\#516](https://github.com/Azure/terraform-azurerm-aks/pull/516) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add upgrade notice document [\#513](https://github.com/Azure/terraform-azurerm-aks/pull/513) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add retry when the ingress is not ready [\#510](https://github.com/Azure/terraform-azurerm-aks/pull/510) 
([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `support_plan` and `Premium` sku tier. [\#508](https://github.com/Azure/terraform-azurerm-aks/pull/508) ([ecklm](https://github.com/ecklm)) +- Refactor code, split monolith tf config into multiple files [\#494](https://github.com/Azure/terraform-azurerm-aks/pull/494) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove `var.http_application_routing_enabled` [\#493](https://github.com/Azure/terraform-azurerm-aks/pull/493) ([lonegunmanb](https://github.com/lonegunmanb)) +- feat\(`http_proxy_config`\): Add `http_proxy_config` [\#492](https://github.com/Azure/terraform-azurerm-aks/pull/492) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove `public_network_access_enabled` entirely [\#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) ([lonegunmanb](https://github.com/lonegunmanb)) +- Ignore deprecated attribute `public_network_access_enabled` [\#485](https://github.com/Azure/terraform-azurerm-aks/pull/485) ([ishuar](https://github.com/ishuar)) +- feat: enable precondition on `default_node_pool` for autoscaling with node pool type [\#484](https://github.com/Azure/terraform-azurerm-aks/pull/484) ([ishuar](https://github.com/ishuar)) +- Add web\_app\_routing\_identity block to outputs [\#481](https://github.com/Azure/terraform-azurerm-aks/pull/481) ([bonddim](https://github.com/bonddim)) +- Add support for `kubelet_identity` nested block [\#479](https://github.com/Azure/terraform-azurerm-aks/pull/479) ([lonegunmanb](https://github.com/lonegunmanb)) +- Prepare for v8.0 [\#462](https://github.com/Azure/terraform-azurerm-aks/pull/462) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove precondition on extra node pool which prevent using windows pool with overlay [\#512](https://github.com/Azure/terraform-azurerm-aks/pull/512) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `maintenance_window_auto_upgrade` 
[\#505](https://github.com/Azure/terraform-azurerm-aks/pull/505) ([skolobov](https://github.com/skolobov)) +- Let the users decide whether adding a random suffix in cluster and pool's name or not. [\#496](https://github.com/Azure/terraform-azurerm-aks/pull/496) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add role assignments for ingress application gateway and corresponding example [\#426](https://github.com/Azure/terraform-azurerm-aks/pull/426) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for workload\_autoscaler\_profile settings [\#404](https://github.com/Azure/terraform-azurerm-aks/pull/404) ([bonddim](https://github.com/bonddim)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md new file mode 100644 index 000000000..05e2d7539 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md @@ -0,0 +1,76 @@ +# Changelog + +## [9.4.1](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.1) (2025-02-05) + +**Merged pull requests:** + +- Revert changes of `9.4.0` [\#635](https://github.com/Azure/terraform-azurerm-aks/pull/635) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [9.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.0) (2025-02-05) + +**Merged pull requests:** + +- Bump azapi provider to \>=2.0, \< 3.0 [\#632](https://github.com/Azure/terraform-azurerm-aks/pull/632) ([zioproto](https://github.com/zioproto)) +- Dependabot 624 626 [\#627](https://github.com/Azure/terraform-azurerm-aks/pull/627) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.28.0 to 0.30.0 in /test [\#626](https://github.com/Azure/terraform-azurerm-aks/pull/626) 
([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.48.0 to 0.48.1 in /test [\#624](https://github.com/Azure/terraform-azurerm-aks/pull/624) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Dependabot changes from PR 609 619 620 [\#621](https://github.com/Azure/terraform-azurerm-aks/pull/621) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.27.0 to 0.28.0 in /test [\#620](https://github.com/Azure/terraform-azurerm-aks/pull/620) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.47.2 to 0.48.0 in /test [\#619](https://github.com/Azure/terraform-azurerm-aks/pull/619) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#616](https://github.com/Azure/terraform-azurerm-aks/pull/616) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#615](https://github.com/Azure/terraform-azurerm-aks/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /test [\#609](https://github.com/Azure/terraform-azurerm-aks/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [9.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.3.0) (2024-12-11) + +**Merged pull requests:** + +- Support of oms\_agent\_enabled add-on [\#613](https://github.com/Azure/terraform-azurerm-aks/pull/613) ([lonegunmanb](https://github.com/lonegunmanb)) +- Implement node\_network\_profile for default node pool [\#598](https://github.com/Azure/terraform-azurerm-aks/pull/598) ([zioproto](https://github.com/zioproto)) +- Bump examples to AKS 1.30 [\#595](https://github.com/Azure/terraform-azurerm-aks/pull/595) ([zioproto](https://github.com/zioproto)) +- Add `v4` sub-folder so this module could run with AzureRM provider both `v3` and `v4`. 
[\#594](https://github.com/Azure/terraform-azurerm-aks/pull/594) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [9.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.2.0) (2024-11-07) + +**Merged pull requests:** + +- Make the Azure Key Vault public because private Key Vault requires preview API [\#599](https://github.com/Azure/terraform-azurerm-aks/pull/599) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.25.0 to 0.26.0 in /test [\#593](https://github.com/Azure/terraform-azurerm-aks/pull/593) ([lonegunmanb](https://github.com/lonegunmanb)) +- Use oidc as authentication method [\#592](https://github.com/Azure/terraform-azurerm-aks/pull/592) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update README.md [\#589](https://github.com/Azure/terraform-azurerm-aks/pull/589) ([shailwx](https://github.com/shailwx)) +- Add `cost_analysis_enabled` option [\#583](https://github.com/Azure/terraform-azurerm-aks/pull/583) ([artificial-aidan](https://github.com/artificial-aidan)) +- Bump github.com/Azure/terraform-module-test-helper from 0.24.0 to 0.25.0 in /test [\#581](https://github.com/Azure/terraform-azurerm-aks/pull/581) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.46.15 to 0.47.0 in /test [\#579](https://github.com/Azure/terraform-azurerm-aks/pull/579) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/Azure/terraform-module-test-helper from 0.22.0 to 0.24.0 in /test [\#574](https://github.com/Azure/terraform-azurerm-aks/pull/574) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 in /test [\#562](https://github.com/Azure/terraform-azurerm-aks/pull/562) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [9.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.1.0) (2024-07-04) + +**Merged pull requests:** + +- Downgrade next major 
version back to v9 [\#577](https://github.com/Azure/terraform-azurerm-aks/pull/577) ([lonegunmanb](https://github.com/lonegunmanb)) +- Restore devcontainer [\#576](https://github.com/Azure/terraform-azurerm-aks/pull/576) ([zioproto](https://github.com/zioproto)) +- set drainTimeoutInMinutes default value to null [\#575](https://github.com/Azure/terraform-azurerm-aks/pull/575) ([zioproto](https://github.com/zioproto)) +- fix README.md format [\#570](https://github.com/Azure/terraform-azurerm-aks/pull/570) ([joaoestrela](https://github.com/joaoestrela)) +- Bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /test [\#569](https://github.com/Azure/terraform-azurerm-aks/pull/569) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Start new Changelog file for v10 [\#567](https://github.com/Azure/terraform-azurerm-aks/pull/567) ([zioproto](https://github.com/zioproto)) +- fixed inaccurate variable descriptions for azure cni in overlay mode [\#566](https://github.com/Azure/terraform-azurerm-aks/pull/566) ([Xelef2000](https://github.com/Xelef2000)) +- add drain\_timeout\_in\_minutes and node\_soak\_duration\_in\_minutes [\#564](https://github.com/Azure/terraform-azurerm-aks/pull/564) ([zioproto](https://github.com/zioproto)) + +## [9.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.0.0) (2024-06-07) + +**Merged pull requests:** + +- Compromise on e2e tests involving ingress, since it's not stable [\#558](https://github.com/Azure/terraform-azurerm-aks/pull/558) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add weekly-codeql action [\#555](https://github.com/Azure/terraform-azurerm-aks/pull/555) ([lonegunmanb](https://github.com/lonegunmanb)) +- Change default value for `var.agents_pool_max_surge` to 10% [\#554](https://github.com/Azure/terraform-azurerm-aks/pull/554) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update Microsoft.ContainerService managedClusters API version to 2024-02-01 
[\#552](https://github.com/Azure/terraform-azurerm-aks/pull/552) ([olofmattsson-inriver](https://github.com/olofmattsson-inriver)) +- Bump github.com/Azure/terraform-module-test-helper from 0.19.0 to 0.22.0 in /test [\#549](https://github.com/Azure/terraform-azurerm-aks/pull/549) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Amending log analytics attributes [\#548](https://github.com/Azure/terraform-azurerm-aks/pull/548) ([lonegunmanb](https://github.com/lonegunmanb)) +- bump k8s version for example since 1.26 has been deprecated [\#540](https://github.com/Azure/terraform-azurerm-aks/pull/540) ([lonegunmanb](https://github.com/lonegunmanb)) +- fix\(typo\): typo in output variable [\#537](https://github.com/Azure/terraform-azurerm-aks/pull/537) ([mbaykara](https://github.com/mbaykara)) +- Bump github.com/Azure/terraform-module-test-helper from 0.18.0 to 0.19.0 in /test [\#521](https://github.com/Azure/terraform-azurerm-aks/pull/521) ([dependabot[bot]](https://github.com/apps/dependabot)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md new file mode 100644 index 000000000..9996f9928 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +## Important Notice + +* fix: add back `private_cluster_enabled` variable by @tobiasehlert [#667](https://github.com/Azure/terraform-azurerm-aks/pull/667) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..af8b0207d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +This code of conduct 
outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. + +Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile new file mode 100644 index 000000000..3db7ccd9d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile @@ -0,0 +1,4 @@ +SHELL := /bin/bash + +$(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) +-include tfvmmakefile \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE new file mode 100644 index 000000000..21071075c --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md new file mode 100644 index 000000000..f611a6a75 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md @@ -0,0 +1,53 @@ +# Notice on Upgrade to v10.x + +## AzAPI provider version constraint has been updated to `>=2.0, < 3.0`. + +## [`var.web_app_routing` type change](https://github.com/Azure/terraform-azurerm-aks/pull/606) + +`var.web_app_routing.dns_zone_id` has been replaced by `var.web_app_routing.dns_zone_ids`. The new variable is a list of DNS zone IDs. This change allows for the specification of multiple DNS zones for routing. + +## [`data.azurerm_resource_group.main` in this module has been removed, `var.location` is a required variable now.](https://github.com/Azure/terraform-azurerm-aks/pull/644) + +## [Create log analytics workspace would also create required monitor data collection rule now](https://github.com/Azure/terraform-azurerm-aks/pull/623) + +The changes in this pull request introduce support for a Data Collection Rule (DCR) for Azure Monitor Container Insights in the Terraform module. + +## [`CHANGELOG.md` file is no longer maintained, please read release note in GitHub repository instead](https://github.com/Azure/terraform-azurerm-aks/pull/651) + +[New release notes](https://github.com/Azure/terraform-azurerm-aks/releases). 
+ +## [The following variables have been removed:](https://github.com/Azure/terraform-azurerm-aks/pull/652) + +* `agents_taints` +* `api_server_subnet_id` +* `private_cluster_enabled` +* `rbac_aad_client_app_id` +* `rbac_aad_managed` +* `rbac_aad_server_app_id` +* `rbac_aad_server_app_secret` + +## `var.pod_subnet_id` has been replaced by `var.pod_subnet.id` + +## `var.vnet_subnet_id` has been replaced by `var.vnet_subnet.id` + +## `var.node_pools.pod_subnet_id` has been replaced by `var.node_pools.pod_subnet.id` + +## `var.node_pools.vnet_subnet_id` has been replaced by `var.node_pools.vnet_subnet.id` + +## `azurerm_role_assignment.network_contributor` will be re-created + +Since `for_each`'s target has been changed from a set of string to a map of object to avoid "Known after apply" values in iterator, we have to re-create the `azurerm_role_assignment.network_contributor` resource. This will cause the role assignment to be removed and re-added, which may result in a brief period of time where the role assignment is not present. + +## When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself. + +## `var.client_secret` now is `sensitive` + +## New interval between cluster creation and kubernetes version upgrade + +New variable `interval_before_cluster_update` was added. Sometimes when we tried to update cluster's kubernetes version after cluster creation, we got the error `Operation is not allowed because there's an in progress update managed cluster operation on the managed cluster started`. A `time_sleep` was added to avoid such potential conflict. You can set this variable to `null` to bypass the sleep. + +## @zioproto is no longer a maintainer of this module + +For personal reasons, @zioproto is no longer a maintainer of this module. 
I want to express my sincere gratitude for his contributions and support over the years. His dedication and hard work are invaluable to this module. + +THANK YOU @zioproto ! diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md new file mode 100644 index 000000000..4f31d8157 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md @@ -0,0 +1,93 @@ +# Notice on Upgrade to v5.x + +V5.0.0 is a major version upgrade and a lot of breaking changes have been introduced. Extreme caution must be taken during the upgrade to avoid resource replacement and downtime by accident. + +Running the `terraform plan` first to inspect the plan is strongly advised. + +## Terraform and terraform-provider-azurerm version restrictions + +Now Terraform core's lowest version is v1.2.0 and terraform-provider-azurerm's lowest version is v3.21.0. + +## variable `user_assigned_identity_id` has been renamed. + +variable `user_assigned_identity_id` has been renamed to `identity_ids` and its type has been changed from `string` to `list(string)`. + +## `addon_profile` in outputs is no longer available.
+ +It has been broken into the following new outputs: + +* `aci_connector_linux` +* `aci_connector_linux_enabled` +* `azure_policy_enabled` +* `http_application_routing_enabled` +* `ingress_application_gateway` +* `ingress_application_gateway_enabled` +* `key_vault_secrets_provider` +* `key_vault_secrets_provider_enabled` +* `oms_agent` +* `oms_agent_enabled` +* `open_service_mesh_enabled` + +## The following variables have been renamed from `enable_xxx` to `xxx_enabled` + +* `enable_azure_policy` has been renamed to `azure_policy_enabled` +* `enable_http_application_routing` has been renamed to `http_application_routing_enabled` +* `enable_ingress_application_gateway` has been renamed to `ingress_application_gateway_enabled` +* `enable_log_analytics_workspace` has been renamed to `log_analytics_workspace_enabled` +* `enable_open_service_mesh` has been renamed to `open_service_mesh_enabled` +* `enable_role_based_access_control` has been renamed to `role_based_access_control_enabled` + +## `nullable = true` has been added to the following variables so setting them to `null` explicitly will use the default value + +* `log_analytics_workspace_enable` +* `os_disk_type` +* `private_cluster_enabled` +* `rbac_aad_managed` +* `rbac_aad_admin_group_object_ids` +* `network_policy` +* `enable_node_public_ip` + +## `var.admin_username`'s default value has been removed + +In v4.x `var.admin_username` has a default value `azureuser` and has been removed in V5.0.0. Since the `admin_username` argument in `linux_profile` block is a ForceNew argument, any value change to this argument will trigger a Kubernetes cluster replacement **SO THE EXTREME CAUTION MUST BE TAKEN**. The module's callers must set `var.admin_username` to `azureuser` explicitly if they didn't set it before. + +## `module.ssh-key` has been removed + +The file named `private_ssh_key` which contains the tls private key will be deleted since the `local_file` resource has been removed. 
Now the private key is exported via `generated_cluster_private_ssh_key` in output and the corresponding public key is exported via `generated_cluster_public_ssh_key` in output. + +A `moved` block has been added to relocate the existing `tls_private_key` resource to the new address. If the `var.admin_username` is not `null`, no action is needed. + +Resource `tls_private_key`'s creation now is conditional. Users may see the destruction of existing `tls_private_key` in the generated plan if `var.admin_username` is `null`. + +## `system_assigned_identity` in the output has been renamed to `cluster_identity` + +The `system_assigned_identity` was: + +```hcl +output "system_assigned_identity" { + value = azurerm_kubernetes_cluster.main.identity +} +``` + +Now it has been renamed to `cluster_identity`, and the block has been changed to: + +```hcl +output "cluster_identity" { + description = "The `azurerm_kubernetes_cluster`'s `identity` block." + value = try(azurerm_kubernetes_cluster.main.identity[0], null) +} +``` + +The callers who used to read the cluster's identity block need to remove the index in their expression, from `module.aks.system_assigned_identity[0]` to `module.aks.cluster_identity`. + +## The following outputs are now sensitive. 
All outputs referenced them must be declared as sensitive too + +* `client_certificate` +* `client_key` +* `cluster_ca_certificate` +* `generated_cluster_private_ssh_key` +* `host` +* `kube_admin_config_raw` +* `kube_config_raw` +* `password` +* `username` diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md new file mode 100644 index 000000000..e75b87ea3 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md @@ -0,0 +1,5 @@ +# Notice on Upgrade to v6.x + +We've added a CI pipeline for this module to speed up our code review and to enforce a high code quality standard, if you want to contribute by submitting a pull request, please read [Pre-Commit & Pr-Check & Test](#Pre-Commit--Pr-Check--Test) section, or your pull request might be rejected by CI pipeline. + +A pull request will be reviewed when it has passed Pre Pull Request Check in the pipeline, and will be merged when it has passed the acceptance tests. Once the ci Pipeline failed, please read the pipeline's output, thanks for your cooperation. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md new file mode 100644 index 000000000..e3c1f41a5 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md @@ -0,0 +1,52 @@ +# Notice on Upgrade to v7.x + +## Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard` + +AzureRM's minimum version is `>= 3.51, < 4.0` now. +[`var.sku_tier` cannot be set to `Paid` anymore](https://github.com/hashicorp/terraform-provider-azurerm/issues/20887), now possible values are `Free` and `Standard`. 
+ +## Ignore changes on `kubernetes_version` from outside of Terraform + +Related issue: #335 + +Two new resources would be created when upgrading from v6.x to v7.x: + +* `null_resource.kubernetes_version_keeper` +* `azapi_update_resource.aks_cluster_post_create` + +`azurerm_kubernetes_cluster.main` resource would ignore change on `kubernetes_version` from outside of Terraform in case AKS cluster's patch version has been upgraded automatically. +When you change `var.kubernetes_version`'s value, it would trigger a re-creation of `null_resource.kubernetes_version_keeper` and re-creation of `azapi_update_resource.aks_cluster_post_create`, which would upgrade the AKS cluster's `kubernetes_version`. + +`azapi` provider is required to be configured in your Terraform configuration. + +## Fix #315 by amending missing `linux_os_config` block + +In v6.0, `default_node_pool.linux_os_config` block won't be added to `azurerm_kubernetes_cluster.main` resource when `var.enable_auto_scaling` is `true`. This bug has been fixed in v7.0.0 so you might see a diff on `azurerm_kubernetes_cluster.main` resource. + +## Wrap `log_analytics_solution_id` to an object to fix #263. + +`var.log_analytics_solution_id` is now an object with `id` attribute. This change is to fix #263. + +## Remove unused net_profile_docker_bridge_cidr + +`var.net_profile_docker_bridge_cidr` has been [deprecated](https://github.com/hashicorp/terraform-provider-azurerm/issues/18119) and is not used in the module anymore and has been removed. + +## Add `create_before_destroy=true` to node pools #357 + +Now `azurerm_kubernetes_cluster_node_pool.node_pool` resource has `create_before_destroy=true` to avoid downtime when upgrading node pools. Users must be aware that there would be a "random" suffix added into pool's name, this suffix's length is `4`, so your previous node pool's name `nodepool1` would be `nodepool1xxxx`. 
This suffix is calculated from node pool's config, the same configuration would lead to the same suffix. You might need to shorten your node pool's name because of this new added suffix. + +To enable this feature, we've also added new `null_resource.pool_name_keeper` to track node pool's name in case you've changed the name. + +## Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` #361 + +As the [document](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#public_network_access_enabled) described: + +>When `public_network_access_enabled` is set to true, `0.0.0.0/32` must be added to `authorized_ip_ranges` in the `api_server_access_profile block`. + +We'll add `api_server_access_profile` nested block after AzureRM provider's v4.0, but starting from v7.0 we'll enforce such pre-condition check. + +## Add `depends_on` to `azurerm_kubernetes_cluster_node_pool` resources #418 + +If you have `azurerm_kubernetes_cluster_node_pool` resources not managed with this module (`var.nodepools`) you +must have an explicit `depends_on` on those resources to avoid conflicting nodepools operations. +See issue #418 for more details. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md new file mode 100644 index 000000000..96077ba1a --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md @@ -0,0 +1,53 @@ +# Notice on Upgrade to v8.x + +## New variable `cluster_name_random_suffix` + +1. A new variable `cluster_name_random_suffix` is added. This allows users to decide whether they want to add a random suffix to a cluster's name. This is particularly useful when Terraform needs to recreate a resource that cannot be updated in-place, as it avoids naming conflicts. 
Because of [#357](https://github.com/Azure/terraform-azurerm-aks/pull/357), now the `azurerm_kubernetes_cluster` resource is `create_before_destroy = true` now, we cannot turn this feature off. If you want to recreate this cluster by one apply without any trouble, please turn this random naming suffix on to avoid the naming conflict. + +2. The `create_before_destroy` attribute is added to the `node_pools` variable as an object field. This attribute determines whether a new node pool should be created before the old one is destroyed during updates. By default, it is set to `true`. + +3. The naming of extra node pools has been updated. Now, a random UUID is used as the seed for the random suffix in the name of the node pool, instead of the JSON-encoded value of the node pool. **This naming suffix only apply for extra node pools that create before destroy.** + +You're recommended to set `var.cluster_name_random_suffix` to `true` explicitly, and you'll see a random suffix in your cluster's name. If you don't like this suffix, please remember now a new cluster with the same name would be created before the old one has been deleted. If you do want to recreate the cluster, please run `terraform destroy` first. + +## Remove `var.http_application_routing_enabled` + +According to the [document](https://learn.microsoft.com/en-us/azure/aks/http-application-routing), HTTP application routing add-on for AKS has been retired so we have to remove this feature from this module. + +1. The variable `http_application_routing_enabled` has been removed from the module. This variable was previously used to enable HTTP Application Routing Addon. + +2. The `http_application_routing_enabled` output has been removed from `outputs.tf`. This output was previously used to display whether HTTP Application Routing was enabled. + +3. The `http_application_routing_enabled` attribute has been removed from the `azurerm_kubernetes_cluster` resource in `main.tf`. 
This attribute was previously used to enable HTTP Application Routing for the Kubernetes cluster. + +4. The `http_application_routing_enabled` attribute has been added to the `ignore_changes` lifecycle block of the `azurerm_kubernetes_cluster` resource in `main.tf`. This means changes to this attribute will not trigger the resource to be updated. + +These changes mean that users of this module will no longer be able to enable HTTP Application Routing through this module. + +The new feature for the Ingress in AKS is [Managed NGINX ingress with the application routing add-on](https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default), you can enable this with `var.web_app_routing`. + +Users who were using this feature, please read this [Migrate document](https://learn.microsoft.com/en-us/azure/aks/app-routing-migration). + +## Remove `public_network_access_enabled` entirely + +According to this [announcement](https://github.com/Azure/AKS/issues/3690), now public network access for AKS is no longer supported. + +The primary impact [#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) is the complete removal of the `public_network_access_enabled` variable from the module. + +1. The `public_network_access_enabled` variable has been removed from the `variables.tf` file. This means that the module no longer supports the configuration of public network access at the Kubernetes cluster level. + +2. The `public_network_access_enabled` variable has also been removed from the `main.tf` file and all example files (`application_gateway_ingress/main.tf`, `multiple_node_pools/main.tf`, `named_cluster/main.tf`, `startup/main.tf`, `with_acr/main.tf`, `without_monitor/main.tf`). This indicates that the module no longer uses this variable in the creation of the Azure Kubernetes Service (AKS) resource. + +3. The `public_network_access_enabled` has been added into `azurerm_kubernetes_cluster`'s `ignore_changes` list. 
Any change to this attribute won't trigger update. + +## Add role assignments for ingress application gateway + +The `variables.tf` file is updated with new variables related to the application gateway for ingress, including `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress`. + +The `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress` variables are used to configure the Application Gateway Ingress for the Azure Kubernetes Service (AKS) in the Terraform module. + +1. `brown_field_application_gateway_for_ingress`: This variable is used when you want to use an existing Application Gateway as the ingress for the AKS cluster. It is an object that contains the ID of the Application Gateway (`id`) and the ID of the Subnet (`subnet_id`) which the Application Gateway is connected to. If this variable is set, the module will not create a new Application Gateway and will use the existing one instead. + +2. `green_field_application_gateway_for_ingress`: This variable is used when you want the module to create a new Application Gateway for the AKS cluster. It is an object that contains the name of the Application Gateway to be used or created in the Nodepool Resource Group (`name`), the subnet CIDR to be used to create an Application Gateway (`subnet_cidr`), and the ID of the subnet on which to create an Application Gateway (`subnet_id`). If this variable is set, the module will create a new Application Gateway with the provided configuration. + +3. `create_role_assignments_for_application_gateway`: This is a boolean variable that determines whether to create the corresponding role assignments for the application gateway or not. By default, it is set to `true`. Role assignments are necessary for the Application Gateway to function correctly with the AKS cluster. 
If set to `true`, the module will create the necessary role assignments on the Application Gateway. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md new file mode 100644 index 000000000..9bd796e2d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md @@ -0,0 +1,9 @@ +# Notice on Upgrade to v9.x + +## New default value for variable `agents_pool_max_surge` + +variable `agents_pool_max_surge` now has default value `10%`. This change might cause configuration drift. If you want to keep the old value, please set it explicitly in your configuration. + +## API version for `azapi_update_resource` resource has been upgraded from `Microsoft.ContainerService/managedClusters@2023-01-02-preview` to `Microsoft.ContainerService/managedClusters@2024-02-01`. + +After a test, it won't affect the existing Terraform state and cause configuration drift. The upgrade is caused by the retirement of original API. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md new file mode 100644 index 000000000..e754e5a7f --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md @@ -0,0 +1,490 @@ +# terraform-azurerm-aks + +## Deploys a Kubernetes cluster (AKS) on Azure with monitoring support through Azure Log Analytics + +This Terraform module deploys a Kubernetes cluster on Azure using AKS (Azure Kubernetes Service) and adds support for monitoring with Log Analytics. + +-> **NOTE:** If you have not assigned `client_id` or `client_secret`, A `SystemAssigned` identity will be created. + +-> **NOTE:** If you're using AzureRM `v4`, you can use this module by setting `source` to `Azure/aks/azurerm//v4`. 
+ +## Notice on breaking changes + +Please be aware that major version(e.g., from 6.8.0 to 7.0.0) update contains breaking changes that may impact your infrastructure. It is crucial to review these changes with caution before proceeding with the upgrade. + +In most cases, you will need to adjust your Terraform code to accommodate the changes introduced in the new major version. We strongly recommend reviewing the changelog and migration guide to understand the modifications and ensure a smooth transition. + +To help you in this process, we have provided detailed documentation on the breaking changes, new features, and any deprecated functionalities. Please take the time to read through these resources to avoid any potential issues or disruptions to your infrastructure. + +* [Notice on Upgrade to v10.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov10.0.md) +* [Notice on Upgrade to v9.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov9.0.md) +* [Notice on Upgrade to v8.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov8.0.md) +* [Notice on Upgrade to v7.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov7.0.md) +* [Notice on Upgrade to v6.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov6.0.md) +* [Notice on Upgrade to v5.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov5.0.md) + +Remember, upgrading to a major version with breaking changes should be done carefully and thoroughly tested in your environment. If you have any questions or concerns, please don't hesitate to reach out to our support team for assistance. + +## Usage in Terraform 1.2.0 + +Please view folders in `examples`. + +The module supports some outputs that may be used to configure a kubernetes +provider after deploying an AKS cluster. 
+ +```hcl +provider "kubernetes" { + host = module.aks.host + client_certificate = base64decode(module.aks.client_certificate) + client_key = base64decode(module.aks.client_key) + cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate) +} +``` + +There're some examples in the examples folder. You can execute `terraform apply` command in `examples`'s sub folder to try the module. These examples are tested against every PR with the [E2E Test](#Pre-Commit--Pr-Check--Test). + +## Enable or disable tracing tags + +We're using [BridgeCrew Yor](https://github.com/bridgecrewio/yor) and [yorbox](https://github.com/lonegunmanb/yorbox) to help manage tags consistently across infrastructure as code (IaC) frameworks. In this module you might see tags like: + +```hcl +resource "azurerm_resource_group" "rg" { + location = "eastus" + name = random_pet.name + tags = merge(var.tags, (/**/ (var.tracing_tags_enabled ? { for k, v in /**/ { + avm_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" + avm_git_file = "main.tf" + avm_git_last_modified_at = "2023-05-05 08:57:54" + avm_git_org = "lonegunmanb" + avm_git_repo = "terraform-yor-tag-test-module" + avm_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" + } /**/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /**/)) +} +``` + +To enable tracing tags, set the variable to true: + +```hcl +module "example" { +source = "{module_source}" +... +tracing_tags_enabled = true +} +``` + +The `tracing_tags_enabled` is default to `false`. + +To customize the prefix for your tracing tags, set the `tracing_tags_prefix` variable value in your Terraform configuration: + +```hcl +module "example" { +source = "{module_source}" +... 
+tracing_tags_prefix = "custom_prefix_" +} +``` + +The actual applied tags would be: + +```text +{ +custom_prefix_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" +custom_prefix_git_file = "main.tf" +custom_prefix_git_last_modified_at = "2023-05-05 08:57:54" +custom_prefix_git_org = "lonegunmanb" +custom_prefix_git_repo = "terraform-yor-tag-test-module" +custom_prefix_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" +} +``` + +## Pre-Commit & Pr-Check & Test + +### Configurations + +- [Configure Terraform for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/terraform-install-configure) + +We assumed that you have setup service principal's credentials in your environment variables like below: + +```shell +export ARM_SUBSCRIPTION_ID="" +export ARM_TENANT_ID="" +export ARM_CLIENT_ID="" +export ARM_CLIENT_SECRET="" +``` + +On Windows Powershell: + +```shell +$env:ARM_SUBSCRIPTION_ID="" +$env:ARM_TENANT_ID="" +$env:ARM_CLIENT_ID="" +$env:ARM_CLIENT_SECRET="" +``` + +We provide a docker image to run the pre-commit checks and tests for you: `mcr.microsoft.com/azterraform:latest` + +To run the pre-commit task, we can run the following command: + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +In pre-commit task, we will: + +1. Run `terraform fmt -recursive` command for your Terraform code. +2. Run `terrafmt fmt -f` command for markdown files and go code files to ensure that the Terraform code embedded in these files are well formatted. +3. Run `go mod tidy` and `go mod vendor` for test folder to ensure that all the dependencies have been synced. +4. Run `gofmt` for all go code files. +5. Run `gofumpt` for all go code files. +6. Run `terraform-docs` on `README.md` file, then run `markdown-table-formatter` to format markdown tables in `README.md`. 
+ +Then we can run the pr-check task to check whether our code meets our pipeline's requirement (We strongly recommend you run the following command before you commit): + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +To run the e2e-test, we can run the following command: + +```text +docker run --rm -v $(pwd):/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: + +```text +docker run --rm -v ${pwd}:/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +To follow [**Ensure AKS uses disk encryption set**](https://docs.bridgecrew.io/docs/ensure-that-aks-uses-disk-encryption-set) policy we've used `azurerm_key_vault` in example codes, and to follow [**Key vault does not allow firewall rules settings**](https://docs.bridgecrew.io/docs/ensure-that-key-vault-allows-firewall-rules-settings) we've limited the ip cidr on its `network_acls`. 
By default we'll use the ip returned by `https://api.ipify.org?format=json` api as your public ip, but in case you need to use another cidr, you can set an environment variable like below: + +```text +docker run --rm -v $(pwd):/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: +```text +docker run --rm -v ${pwd}:/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +#### Prerequisites + +- [Docker](https://www.docker.com/community-edition#/download) + +## Authors + +Originally created by [Damien Caro](http://github.com/dcaro) and [Malte Lantin](http://github.com/n01d) + +## License + +[MIT](LICENSE) + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
+ +## Module Spec + +The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!** + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [azapi](#requirement\_azapi) | >=2.0, < 3.0 | +| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#requirement\_null) | >= 3.0 | +| [time](#requirement\_time) | >= 0.5 | +| [tls](#requirement\_tls) | >= 3.1 | + +## Providers + +| Name | Version | +|------|---------| +| [azapi](#provider\_azapi) | >=2.0, < 3.0 | +| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#provider\_null) | >= 3.0 | +| [time](#provider\_time) | >= 0.5 | +| [tls](#provider\_tls) | >= 3.1 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| 
[azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource | +| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource | +| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| 
[null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | +| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | +| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | +| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | +| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source | +| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no | +| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | 
(Optional) aci\_connector\_linux subnet name | `string` | `null` | no | +| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. | `number` | `2` | no | +| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no | +| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no | +| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no | +| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no | +| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no | +| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) |
list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
}))
| `[]` | no | +| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) |
list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
| `[]` | no | +| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no | +| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no | +| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no | +| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no | +| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no | +| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no | +| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no | +| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. 
Changing this forces some new resources to be created. | `map(string)` | `{}` | no | +| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no | +| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no | +| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no | +| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no | +| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no | +| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no | +| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no | +| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`. 
| `number` | `45` | no | +| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no | +| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. 
| `string` | `"0.5"` | no | +| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no | +| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no | +| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no | +| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that will be used as the cluster ingress.
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. |
object({
id = string
subnet_id = string
})
| `null` | no | +| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no | +| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no | +| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no | +| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. |
object({
sgx_quote_helper_enabled = bool
})
| `null` | no | +| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no | +| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no | +| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no | +| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no | +| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 |
object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
})
|
{
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
}
| no | +| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no | +| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no | +| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no | +| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no | +| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no | +| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. 
| `bool` | `false` | no | +| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. |
object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
})
| `null` | no | +| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. |
object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
})
| `null` | no | +| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no | +| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no | +| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no | +| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no | +| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no | +| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no | +| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no | +| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no | +| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. 
| `string` | `"Public"` | no | +| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. |
object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
})
| `null` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no | +| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no | +| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no | +| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. 
https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no | +| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no | +| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no | +| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes | +| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. |
object({
id = string
})
| `null` | no | +| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. |
object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
})
| `null` | no | +| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no | +| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no | +| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no | +| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no | +| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. |
object({
identity_ids = optional(set(string))
type = string
})
| `null` | no | +| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no | +| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no | +| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no | +| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no | +| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. |
object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
})
| `null` | no | +| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` |
[
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` |
[
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no | +| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` |
[
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no | +| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) |
object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
})
| `null` | no | +| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no | +| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. |
object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
})
| `null` | no | +| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no | +| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. 
| `list(string)` | `null` | no | +| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no | +| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created. | `string` | `null` | no | +| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no | +| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. 
| `string` | `null` | no | +| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. |
object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
})
| `null` | no | +| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no | +| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
})) |
map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
}))
| `{}` | no | +| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no | +| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no | +| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no | +| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no | +| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no | +| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no | +| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no | +| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. 
Changing this forces a new resource to be created. | `string` | `null` | no | +| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no | +| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no | +| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no | +| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no | +| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no | +| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no | +| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no | +| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no | +| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. 
If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no | +| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes | +| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no | +| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no | +| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no | +| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no | +| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no | +| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. |
object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
})
| `null` | no | +| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no | +| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no | +| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no | +| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no | +| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no | +| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no | +| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. 
| `string` | `"KubernetesOfficial"` | no | +| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no | +| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no | +| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no | +| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) |
object({
dns_zone_ids = list(string)
})
| `null` | no | +| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. |
object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
})
| `null` | no | +| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. | +| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? | +| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. | +| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. | +| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. | +| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. | +| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. 
| +| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) | +| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace | +| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. | +| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. | +| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
| +| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. | +| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. | +| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). | +| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. | +| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. | +| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. | +| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? | +| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. 
| +| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? | +| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. | +| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. | +| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. | +| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. | +| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block | +| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. | +| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. | +| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. | +| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. | +| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? 
|
+| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). |
+| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. |
+| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. |
+| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, its type is a list of objects. |
+
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md
new file mode 100644
index 000000000..869fdfe2b
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+ +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf new file mode 100644 index 000000000..7f368600b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf @@ -0,0 +1,317 @@ +moved { + from = azurerm_kubernetes_cluster_node_pool.node_pool + to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + for_each = local.node_pools_create_before_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + gpu_instance = each.value.gpu_instance + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + 
workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + application_security_group_ids = each.value.node_network_profile.application_security_group_ids + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] + + content { + outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + create_before_destroy = true + ignore_changes = [ + name + ] + replace_triggered_by = [ + null_resource.pool_name_keeper[each.key], + ] + + precondition { + condition = can(regex("[a-z0-9]{1,8}", each.value.name)) + error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" + } + precondition { + condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) + error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " + } + precondition { + condition = var.agents_type == "VirtualMachineScaleSets" + error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." 
+ } + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + for_each = local.node_pools_create_after_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = each.value.name + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? 
[] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] + + content { + outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + precondition { + condition = can(regex("[a-z0-9]{1,8}", each.value.name)) + error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" + } + precondition { + condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) + error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " + } + precondition { + condition = var.agents_type == "VirtualMachineScaleSets" + error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." + } + } +} + +resource "null_resource" "pool_name_keeper" { + for_each = var.node_pools + + triggers = { + pool_name = each.value.name + } + + lifecycle { + precondition { + condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids) + error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself." 
+ } + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf new file mode 100644 index 000000000..500f27ece --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf @@ -0,0 +1,17 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf new file mode 100644 index 000000000..2b69dfe13 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf @@ -0,0 +1,74 @@ +locals { + # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. + auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? 
var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete + # automatic upgrades are either: + # - null + # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null + # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null + automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : ( + (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || + (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) + ) + cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") + # Abstract the decision whether to create an Analytics Workspace or not. + create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null + create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null + default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) + # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 + existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) + existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] + existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) + existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) + # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 + existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) + existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) + existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) + existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) + ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress + # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. + # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled + # is set to `true`. + log_analytics_workspace = var.log_analytics_workspace_enabled ? ( + # The Log Analytics Workspace should be enabled: + var.log_analytics_workspace == null ? { + # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. + # Create an `azurerm_log_analytics_workspace` resource and use that. + id = local.azurerm_log_analytics_workspace_id + name = local.azurerm_log_analytics_workspace_name + location = local.azurerm_log_analytics_workspace_location + resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name + } : { + # `log_analytics_workspace` is supplied. Let's use that. 
+ id = var.log_analytics_workspace.id + name = var.log_analytics_workspace.name + location = var.log_analytics_workspace.location + # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 + resource_group_name = split("/", var.log_analytics_workspace.id)[4] + } + ) : null # Finally, the Log Analytics Workspace should be disabled. + node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } + node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } + private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) + query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) + subnet_ids = [for _, s in local.subnets : s.id] + subnets = merge({ for k, v in merge( + [ + for key, pool in var.node_pools : { + "${key}-vnet-subnet" : pool.vnet_subnet, + "${key}-pod-subnet" : pool.pod_subnet, + } + ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { + "vnet-subnet" : { + id = var.vnet_subnet.id + } + }) + # subnet_ids = for id in local.potential_subnet_ids : id if id != null + use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null + use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null + valid_private_dns_zone_regexs = [ + "private\\.[a-z0-9]+\\.azmk8s\\.io", + "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + ] +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf new file mode 100644 index 000000000..fe51625be --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf @@ -0,0 +1,124 @@ +resource "azurerm_log_analytics_workspace" "main" { + count = local.create_analytics_workspace ? 1 : 0 + + location = var.location + name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace") + resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name) + allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions + cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced + daily_quota_gb = var.log_analytics_workspace_daily_quota_gb + data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id + immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled + internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled + internet_query_enabled = var.log_analytics_workspace_internet_query_enabled + local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled + reservation_capacity_in_gb_per_day = 
var.log_analytics_workspace_reservation_capacity_in_gb_per_day
+ retention_in_days = var.log_retention_in_days
+ sku = var.log_analytics_workspace_sku
+ tags = var.tags
+
+ # Optional managed identity for the workspace; the block is omitted entirely when unset.
+ dynamic "identity" {
+ for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity]
+
+ content {
+ type = identity.value.type
+ identity_ids = identity.value.identity_ids
+ }
+ }
+
+ lifecycle {
+ # The name falls back to "${var.prefix}-workspace", so at least one of the two inputs must be usable.
+ precondition {
+ condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix))
+ error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`."
+ }
+ }
+}
+
+# Convenience handles on the module-managed workspace; each resolves to null when count = 0.
+locals {
+ azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null)
+ azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null)
+ azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null)
+ azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null)
+}
+
+# Looks up a caller-supplied workspace only when its location was not provided (see
+# local.query_datasource_for_log_analytics_workspace_location).
+# NOTE(review): `name` reads from var.log_analytics_workspace while `resource_group_name`
+# reads from local.log_analytics_workspace — confirm the two always agree.
+data "azurerm_log_analytics_workspace" "main" {
+ count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0
+
+ name = var.log_analytics_workspace.name
+ resource_group_name = local.log_analytics_workspace.resource_group_name
+}
+
+# ContainerInsights solution bound to whichever workspace local.log_analytics_workspace resolved to.
+resource "azurerm_log_analytics_solution" "main" {
+ count = local.create_analytics_solution ?
1 : 0
+
+ # Location falls back to the data-source lookup when the resolved workspace has no location.
+ location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null))
+ resource_group_name = local.log_analytics_workspace.resource_group_name
+ solution_name = "ContainerInsights"
+ workspace_name = local.log_analytics_workspace.name
+ workspace_resource_id = local.log_analytics_workspace.id
+ tags = var.tags
+
+ plan {
+ product = "OMSGallery/ContainerInsights"
+ publisher = "Microsoft"
+ }
+}
+
+# Best-effort DCR location: managed/supplied workspace location first, then the data-source lookup.
+locals {
+ dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null)
+}
+
+# Container Insights data collection rule: routes the ContainerInsights extension streams
+# and Microsoft-Syslog into the resolved Log Analytics workspace. Created only when the
+# module manages the workspace AND the OMS agent AND DCR creation are both enabled.
+resource "azurerm_monitor_data_collection_rule" "dcr" {
+ count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0
+
+ location = local.dcr_location
+ name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}"
+ resource_group_name = var.resource_group_name
+ description = "DCR for Azure Monitor Container Insights"
+ tags = var.tags
+
+ data_flow {
+ destinations = [local.log_analytics_workspace.name]
+ streams = var.monitor_data_collection_rule_extensions_streams
+ }
+ data_flow {
+ destinations = [local.log_analytics_workspace.name]
+ streams = ["Microsoft-Syslog"]
+ }
+ destinations {
+ log_analytics {
+ name = local.log_analytics_workspace.name
+ workspace_resource_id = local.log_analytics_workspace.id
+ }
+ }
+ data_sources {
+ extension {
+ extension_name = "ContainerInsights"
+ name = "ContainerInsightsExtension"
+ streams = var.monitor_data_collection_rule_extensions_streams
+ # Collection cadence / namespace filtering / ContainerLogV2 come from var.data_collection_settings.
+ extension_json = jsonencode({
+ "dataCollectionSettings" : {
+ interval = var.data_collection_settings.data_collection_interval
+ namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection
+ namespaces = var.data_collection_settings.namespaces_for_data_collection
+ enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled
+ }
+ }) + } + syslog { + facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities + log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels + name = "sysLogsDataSource" + streams = ["Microsoft-Syslog"] + } + } +} + +resource "azurerm_monitor_data_collection_rule_association" "dcra" { + count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 + + target_resource_id = azurerm_kubernetes_cluster.main.id + data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id + description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster." + name = "ContainerInsightsExtension" +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf new file mode 100644 index 000000000..0a8dc8e59 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf @@ -0,0 +1,741 @@ +moved { + from = module.ssh-key.tls_private_key.ssh + to = tls_private_key.ssh[0] +} + +resource "tls_private_key" "ssh" { + count = var.admin_username == null ? 0 : 1 + + algorithm = "RSA" + rsa_bits = 2048 +} + +resource "azurerm_kubernetes_cluster" "main" { + location = var.location + name = "${local.cluster_name}${var.cluster_name_random_suffix ? 
substr(md5(uuid()), 0, 4) : ""}"
+ # NOTE(review): the optional 4-char suffix comes from uuid() (non-deterministic per plan);
+ # `name` is listed in lifecycle.ignore_changes below, which should prevent perpetual diffs — confirm.
+ resource_group_name = var.resource_group_name
+ azure_policy_enabled = var.azure_policy_enabled
+ cost_analysis_enabled = var.cost_analysis_enabled
+ disk_encryption_set_id = var.disk_encryption_set_id
+ dns_prefix = var.prefix
+ dns_prefix_private_cluster = var.dns_prefix_private_cluster
+ image_cleaner_enabled = var.image_cleaner_enabled
+ image_cleaner_interval_hours = var.image_cleaner_interval_hours
+ kubernetes_version = var.kubernetes_version
+ local_account_disabled = var.local_account_disabled
+ node_resource_group = var.node_resource_group
+ oidc_issuer_enabled = var.oidc_issuer_enabled
+ open_service_mesh_enabled = var.open_service_mesh_enabled
+ private_cluster_enabled = var.private_cluster_enabled
+ private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
+ private_dns_zone_id = var.private_dns_zone_id
+ role_based_access_control_enabled = var.role_based_access_control_enabled
+ run_command_enabled = var.run_command_enabled
+ sku_tier = var.sku_tier
+ support_plan = var.support_plan
+ tags = var.tags
+ workload_identity_enabled = var.workload_identity_enabled
+
+ # Exactly one of the two default_node_pool variants below is rendered, selected by
+ # var.enable_auto_scaling: this one is the manually-scaled pool.
+ dynamic "default_node_pool" {
+ for_each = var.enable_auto_scaling == true ?
[] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = var.node_network_profile == null ? [] : [var.node_network_profile] + + content { + application_security_group_ids = node_network_profile.value.application_security_group_ids + node_public_ip_tags = node_network_profile.value.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "aci_connector_linux" { + for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] + + content { + subnet_name = var.aci_connector_linux_subnet_name + } + } + dynamic "api_server_access_profile" { + for_each = var.api_server_authorized_ip_ranges != null ? [ + "api_server_access_profile" + ] : [] + + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile_enabled ? 
["default_auto_scaler_profile"] : [] + + content { + balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups + empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max + expander = var.auto_scaler_profile_expander + max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec + max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time + max_unready_nodes = var.auto_scaler_profile_max_unready_nodes + max_unready_percentage = var.auto_scaler_profile_max_unready_percentage + new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay + scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add + scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete + scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure + scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded + scale_down_unready = var.auto_scaler_profile_scale_down_unready + scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold + scan_interval = var.auto_scaler_profile_scan_interval + skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage + skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : [] + + content { + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled + managed = true + tenant_id = var.rbac_aad_tenant_id + } + } + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? 
[] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + dynamic "http_proxy_config" { + for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"] + + content { + http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy) + https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy) + no_proxy = var.http_proxy_config.no_proxy + trusted_ca = var.http_proxy_config.trusted_ca + } + } + dynamic "identity" { + for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : [] + + content { + type = var.identity_type + identity_ids = var.identity_ids + } + } + dynamic "ingress_application_gateway" { + for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : [] + + content { + gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null) + gateway_name = try(var.green_field_application_gateway_for_ingress.name, null) + subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null) + subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null) + } + } + dynamic "key_management_service" { + for_each = var.kms_enabled ? ["key_management_service"] : [] + + content { + key_vault_key_id = var.kms_key_vault_key_id + key_vault_network_access = var.kms_key_vault_network_access + } + } + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] + + content { + secret_rotation_enabled = var.secret_rotation_enabled + secret_rotation_interval = var.secret_rotation_interval + } + } + dynamic "kubelet_identity" { + for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] + + content { + client_id = kubelet_identity.value.client_id + object_id = kubelet_identity.value.object_id + user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id + } + } + dynamic "linux_profile" { + for_each = var.admin_username == null ? [] : ["linux_profile"] + + content { + admin_username = var.admin_username + + ssh_key { + key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "") + } + } + } + dynamic "maintenance_window" { + for_each = var.maintenance_window != null ? ["maintenance_window"] : [] + + content { + dynamic "allowed" { + for_each = var.maintenance_window.allowed + + content { + day = allowed.value.day + hours = allowed.value.hours + } + } + dynamic "not_allowed" { + for_each = var.maintenance_window.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] + + content { + duration = maintenance_window_auto_upgrade.value.duration + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + start_date = maintenance_window_auto_upgrade.value.start_date + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + week_index = maintenance_window_auto_upgrade.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "microsoft_defender" { + for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + } + } + dynamic "monitor_metrics" { + for_each = var.monitor_metrics != null ? 
["monitor_metrics"] : [] + + content { + annotations_allowed = var.monitor_metrics.annotations_allowed + labels_allowed = var.monitor_metrics.labels_allowed + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + ebpf_data_plane = var.ebpf_data_plane + ip_versions = var.network_ip_versions + load_balancer_sku = var.load_balancer_sku + network_data_plane = var.network_data_plane + network_mode = var.network_mode + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + pod_cidrs = var.net_profile_pod_cidrs + service_cidr = var.net_profile_service_cidr + service_cidrs = var.net_profile_service_cidrs + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + dynamic "nat_gateway_profile" { + for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile] + + content { + idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes + managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count + } + } + } + dynamic "oms_agent" { + for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? 
["oms_agent"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled + } + } + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + + content { + mode = var.service_mesh_profile.mode + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + } + } + dynamic "service_principal" { + for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] + + content { + client_id = var.client_id + client_secret = var.client_secret + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile_blob_driver_enabled + disk_driver_enabled = var.storage_profile_disk_driver_enabled + disk_driver_version = var.storage_profile_disk_driver_version + file_driver_enabled = var.storage_profile_file_driver_enabled + snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled + } + } + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_ids + } + } + dynamic "workload_autoscaler_profile" { + for_each = var.workload_autoscaler_profile == null ? 
[] : [var.workload_autoscaler_profile] + + content { + keda_enabled = workload_autoscaler_profile.value.keda_enabled + vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled + } + } + + depends_on = [ + null_resource.pool_name_keeper, + ] + + lifecycle { + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. + name, + ] + replace_triggered_by = [ + null_resource.kubernetes_cluster_name_keeper.id + ] + + precondition { + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "") + error_message = "Either `client_id` and `client_secret` or `identity_type` must be set." + } + precondition { + # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128 + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0) + error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well." + } + precondition { + condition = var.identity_ids == null || var.client_id == "" + error_message = "Cannot set both `client_id` and `identity_ids`." + } + precondition { + condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium") + error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled." 
+ }
+ # Input validation: each precondition fails the plan early with an actionable message
+ # instead of surfacing an opaque Azure API error at apply time.
+ precondition {
+ condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled)
+ error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true."
+ }
+ precondition {
+ condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard")
+ error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`"
+ }
+ precondition {
+ condition = local.automatic_channel_upgrade_check
+ error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`."
+ }
+ precondition {
+ condition = !(var.kms_enabled && var.identity_type != "UserAssigned")
+ error_message = "KMS etcd encryption doesn't work with system-assigned managed identity."
+ }
+ precondition {
+ condition = !var.workload_identity_enabled || var.oidc_issuer_enabled
+ error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity"
+ }
+ precondition {
+ condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure"
+ error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure."
+ }
+ precondition {
+ condition = var.network_policy != "azure" || var.network_plugin == "azure"
+ error_message = "network_policy must be `azure` when network_plugin is `azure`"
+ }
+ precondition {
+ condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure"
+ error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure."
+ }
+ precondition {
+ condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null
+ error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified."
+ }
+ precondition {
+ condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster))
+ error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`."
+ }
+ precondition {
+ condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage"
+ error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`."
+ }
+ precondition {
+ condition = (var.kubelet_identity == null) || (
+ (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0)
+ error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set."
+ }
+ precondition {
+ condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets"
+ error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes."
+ }
+ precondition {
+ condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null
+ error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`."
+ }
+ precondition {
+ condition = var.prefix == null || var.dns_prefix_private_cluster == null
+ error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified."
+ }
+ precondition {
+ condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled
+ error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`."
+ }
+ precondition {
+ condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != ""
+ error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone"
+ }
+ precondition {
+ condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)]))
+ error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following formats: `privatelink.<region>.azmk8s.io`, `<subzone>.privatelink.<region>.azmk8s.io`, `private.<region>.azmk8s.io`, `<subzone>.private.<region>.azmk8s.io`"
+ }
+ }
+}
+
+resource "null_resource" "kubernetes_cluster_name_keeper" {
+ triggers = {
+ name = local.cluster_name
+ }
+}
+
+resource "null_resource" "kubernetes_version_keeper" {
+ triggers = {
+ version = var.kubernetes_version
+ }
+}
+
+resource "time_sleep" "interval_before_cluster_update" {
+ count = var.interval_before_cluster_update == null ?
0 : 1 + + create_duration = var.interval_before_cluster_update + + depends_on = [ + azurerm_kubernetes_cluster.main, + ] + + lifecycle { + replace_triggered_by = [ + null_resource.kubernetes_version_keeper.id, + ] + } +} + +resource "azapi_update_resource" "aks_cluster_post_create" { + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + kubernetesVersion = var.kubernetes_version + } + } + + depends_on = [ + time_sleep.interval_before_cluster_update, + ] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.kubernetes_version_keeper.id] + } +} + +resource "null_resource" "http_proxy_config_no_proxy_keeper" { + count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0 + + triggers = { + http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "") + } +} + +resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" { + count = can(var.http_proxy_config.no_proxy[0]) ? 
1 : 0 + + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + httpProxyConfig = { + noProxy = var.http_proxy_config.no_proxy + } + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id] + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf new file mode 100644 index 000000000..a1f537658 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf @@ -0,0 +1,6 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster" "main" { + automatic_channel_upgrade = var.automatic_channel_upgrade + node_os_channel_upgrade = var.node_os_channel_upgrade +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf new file mode 100644 index 000000000..e3d37ce76 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf @@ -0,0 +1,231 @@ +output "aci_connector_linux" { + description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource." + value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null) +} + +output "aci_connector_linux_enabled" { + description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?" + value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0]) +} + +output "admin_client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." 
+ sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "") +} + +output "admin_client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "") +} + +output "admin_cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "") +} + +output "admin_host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "") +} + +output "admin_password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "") +} + +output "admin_username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "") +} + +output "aks_id" { + description = "The `azurerm_kubernetes_cluster`'s id." + value = azurerm_kubernetes_cluster.main.id +} + +output "aks_name" { + description = "The `azurerm_kubernetes_cluster`'s name." 
+ value = azurerm_kubernetes_cluster.main.name +} + +output "azure_policy_enabled" { + description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)" + value = azurerm_kubernetes_cluster.main.azure_policy_enabled +} + +output "azurerm_log_analytics_workspace_id" { + description = "The id of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].id, null) +} + +output "azurerm_log_analytics_workspace_name" { + description = "The name of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].name, null) +} + +output "azurerm_log_analytics_workspace_primary_shared_key" { + description = "Specifies the workspace key of the log analytics workspace" + sensitive = true + value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null) +} + +output "client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate +} + +output "client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_key +} + +output "cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate +} + +output "cluster_fqdn" { + description = "The FQDN of the Azure Kubernetes Managed Cluster." + value = azurerm_kubernetes_cluster.main.fqdn +} + +output "cluster_identity" { + description = "The `azurerm_kubernetes_cluster`'s `identity` block." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.identity[0], null) +} + +output "cluster_portal_fqdn" { + description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.portal_fqdn +} + +output "cluster_private_fqdn" { + description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.private_fqdn +} + +output "generated_cluster_private_ssh_key" { + description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null +} + +output "generated_cluster_public_ssh_key" { + description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)." + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? 
(var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null +} + +output "host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].host +} + +output "http_application_routing_zone_name" { + description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing." + value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : "" +} + +output "ingress_application_gateway" { + description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block." + value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null) +} + +output "ingress_application_gateway_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?" + value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0]) +} + +output "key_vault_secrets_provider" { + description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block." + value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null) +} + +output "key_vault_secrets_provider_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?" + value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0]) +} + +output "kube_admin_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_admin_config_raw +} + +output "kube_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config_raw +} + +output "kubelet_identity" { + description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block." + value = azurerm_kubernetes_cluster.main.kubelet_identity +} + +output "location" { + description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created." + value = azurerm_kubernetes_cluster.main.location +} + +output "network_profile" { + description = "The `azurerm_kubernetes_cluster`'s `network_profile` block" + value = azurerm_kubernetes_cluster.main.network_profile +} + +output "node_resource_group" { + description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group +} + +output "node_resource_group_id" { + description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group_id +} + +output "oidc_issuer_url" { + description = "The OIDC issuer URL that is associated with the cluster." + value = azurerm_kubernetes_cluster.main.oidc_issuer_url +} + +output "oms_agent" { + description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument." + value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null) +} + +output "oms_agent_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?" 
+ value = can(azurerm_kubernetes_cluster.main.oms_agent[0]) +} + +output "open_service_mesh_enabled" { + description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." + value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled +} + +output "password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].password +} + +output "username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].username +} + +output "web_app_routing_identity" { + description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object." 
+ value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, []) +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf new file mode 100644 index 000000000..e9601eaf0 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf @@ -0,0 +1,126 @@ +resource "azurerm_role_assignment" "acr" { + for_each = var.attached_acr_id_map + + principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id + scope = each.value + role_definition_name = "AcrPull" + skip_service_principal_aad_check = true +} + +# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity +data "azurerm_user_assigned_identity" "cluster_identity" { + count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 + + name = split("/", var.identity_ids[0])[8] + resource_group_name = split("/", var.identity_ids[0])[4] +} + +# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) +# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets +# used by the system node pool and by any additional node pools. +# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites +# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites +# https://github.com/Azure/terraform-azurerm-aks/issues/178 +resource "azurerm_role_assignment" "network_contributor" { + for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? 
local.subnets : {} + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value.id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = length(var.network_contributor_role_assigned_subnet_ids) == 0 + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +resource "azurerm_role_assignment" "network_contributor_on_subnet" { + for_each = var.network_contributor_role_assigned_subnet_ids + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = !var.create_role_assignment_network_contributor + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +data "azurerm_client_config" "this" {} + +data "azurerm_virtual_network" "application_gateway_vnet" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_subnet_vnet_name + resource_group_name = local.existing_application_gateway_subnet_resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = data.azurerm_virtual_network.application_gateway_vnet[0].id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress + error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`." + } + } +} + +resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2)) + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null) + error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`." + } + } +} + +resource "azurerm_role_assignment" "existing_application_gateway_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = var.brown_field_application_gateway_for_ingress.id + role_definition_name = "Contributor" + + lifecycle { + precondition { + condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress + error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." + } + } +} + +data "azurerm_resource_group" "ingress_gw" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_resource_group_for_ingress +} + +data "azurerm_resource_group" "aks_rg" { + count = var.create_role_assignments_for_application_gateway ? 1 : 0 + + name = var.resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { + count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id + role_definition_name = "Reader" +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile new file mode 100644 index 000000000..7f28c53a5 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile @@ -0,0 +1,85 @@ +REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" + +fmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash + +fumpt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash + +gosec: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash + +tffmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash + +tffmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash + +tfvalidatecheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash + +terrafmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash + +gofmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash + +golint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash + +tflint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash + +lint: golint tflint gosec + +checkovcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash + +checkovplancheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash + +fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck + +pr-check: depscheck fmtcheck lint 
unit-test checkovcheck + +unit-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash + +e2e-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash + +version-upgrade-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash + +terrafmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash + +pre-commit: tffmt terrafmt depsensure fmt fumpt generate + +depsensure: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash + +depscheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash + +generate: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash + +gencheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash + +yor-tag: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash + +autofix: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash + +test: fmtcheck + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash + +build-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash + +.PHONY: fmt fmtcheck pr-check \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf new file mode 100644 index 000000000..c819f9b89 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf @@ -0,0 +1,1601 @@ +variable "location" { + type = string + description = "Location of cluster, if not defined it will be read from the resource-group" +} + +variable 
"resource_group_name" { + type = string + description = "The existing resource group name to use" +} + +variable "aci_connector_linux_enabled" { + type = bool + default = false + description = "Enable Virtual Node pool" +} + +variable "aci_connector_linux_subnet_name" { + type = string + default = null + description = "(Optional) aci_connector_linux subnet name" +} + +variable "admin_username" { + type = string + default = null + description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created." +} + +variable "agents_availability_zones" { + type = list(string) + default = null + description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." +} + +variable "agents_count" { + type = number + default = 2 + description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes." +} + +variable "agents_labels" { + type = map(string) + default = {} + description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." +} + +variable "agents_max_count" { + type = number + default = null + description = "Maximum number of nodes in a pool" +} + +variable "agents_max_pods" { + type = number + default = null + description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." 
+} + +variable "agents_min_count" { + type = number + default = null + description = "Minimum number of nodes in a pool" +} + +variable "agents_pool_drain_timeout_in_minutes" { + type = number + default = null + description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." +} + +variable "agents_pool_kubelet_configs" { + type = list(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool, true) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_line = optional(number) + pod_max_pid = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. 
+ topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_linux_os_configs" { + type = list(object({ + sysctl_configs = optional(list(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + 
net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })), []) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + sysctl_configs = optional(list(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. 
Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_core_somaxconn                 = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
+      net_core_wmem_default              = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_core_wmem_max                  = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_ipv4_ip_local_port_range_min   = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+      net_ipv4_ip_local_port_range_max   = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh1  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh2  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh3  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_fin_timeout           = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_keepalive_intvl       = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_keepalive_probes      = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. 
Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })), []) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. 
Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_max_surge" { + type = string + default = "10%" + description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." +} + +variable "agents_pool_name" { + type = string + default = "nodepool" + description = "The default Azure AKS agentpool (nodepool) name." + nullable = false +} + +variable "agents_pool_node_soak_duration_in_minutes" { + type = number + default = 0 + description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." +} + +variable "agents_proximity_placement_group_id" { + type = string + default = null + description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." +} + +variable "agents_size" { + type = string + default = "Standard_D2s_v3" + description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." +} + +variable "agents_tags" { + type = map(string) + default = {} + description = "(Optional) A mapping of tags to assign to the Node Pool." +} + +variable "agents_type" { + type = string + default = "VirtualMachineScaleSets" + description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets." +} + +variable "api_server_authorized_ip_ranges" { + type = set(string) + default = null + description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes." 
+} + +variable "attached_acr_id_map" { + type = map(string) + default = {} + description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created." + nullable = false +} + +variable "auto_scaler_profile_balance_similar_node_groups" { + type = bool + default = false + description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`." +} + +variable "auto_scaler_profile_empty_bulk_delete_max" { + type = number + default = 10 + description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`." +} + +variable "auto_scaler_profile_enabled" { + type = bool + default = false + description = "Enable configuring the auto scaler profile" + nullable = false +} + +variable "auto_scaler_profile_expander" { + type = string + default = "random" + description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`." + + validation { + condition = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander) + error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`." + } +} + +variable "auto_scaler_profile_max_graceful_termination_sec" { + type = string + default = "600" + description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`." +} + +variable "auto_scaler_profile_max_node_provisioning_time" { + type = string + default = "15m" + description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`." +} + +variable "auto_scaler_profile_max_unready_nodes" { + type = number + default = 3 + description = "Maximum Number of allowed unready nodes. Defaults to `3`." 
+} + +variable "auto_scaler_profile_max_unready_percentage" { + type = number + default = 45 + description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`." +} + +variable "auto_scaler_profile_new_pod_scale_up_delay" { + type = string + default = "10s" + description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`." +} + +variable "auto_scaler_profile_scale_down_delay_after_add" { + type = string + default = "10m" + description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_delay_after_delete" { + type = string + default = null + description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`." +} + +variable "auto_scaler_profile_scale_down_delay_after_failure" { + type = string + default = "3m" + description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`." +} + +variable "auto_scaler_profile_scale_down_unneeded" { + type = string + default = "10m" + description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_unready" { + type = string + default = "20m" + description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`." +} + +variable "auto_scaler_profile_scale_down_utilization_threshold" { + type = string + default = "0.5" + description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`." 
+} + +variable "auto_scaler_profile_scan_interval" { + type = string + default = "10s" + description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`." +} + +variable "auto_scaler_profile_skip_nodes_with_local_storage" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`." +} + +variable "auto_scaler_profile_skip_nodes_with_system_pods" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`." +} + +variable "automatic_channel_upgrade" { + type = string + default = null + description = <<-EOT + (Optional) Defines the automatic upgrade channel for the AKS cluster. + Possible values: + * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").** + * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.** + + By default, automatic upgrades are disabled. + More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster + EOT + + validation { + condition = var.automatic_channel_upgrade == null ? true : contains([ + "patch", "stable", "rapid", "node-image" + ], var.automatic_channel_upgrade) + error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`." + } +} + +variable "azure_policy_enabled" { + type = bool + default = false + description = "Enable Azure Policy Addon." 
+}
+
+variable "brown_field_application_gateway_for_ingress" {
+  type = object({
+    id        = string
+    subnet_id = string
+  })
+  default     = null
+  description = <<-EOT
+    [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
+    * `id` - (Required) The ID of the Application Gateway that will be used as cluster ingress.
+    * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`.
+  EOT
+}
+
+variable "client_id" {
+  type        = string
+  default     = ""
+  description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment"
+  nullable    = false
+}
+
+variable "client_secret" {
+  type        = string
+  default     = ""
+  description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment"
+  nullable    = false
+  sensitive   = true
+}
+
+variable "cluster_log_analytics_workspace_name" {
+  type        = string
+  default     = null
+  description = "(Optional) The name of the Analytics workspace"
+}
+
+variable "cluster_name" {
+  type        = string
+  default     = null
+  description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)"
+}
+
+variable "cluster_name_random_suffix" {
+  type        = bool
+  default     = false
+  description = "Whether to add a random suffix on AKS cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now (described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict."
+ nullable = false +} + +variable "confidential_computing" { + type = object({ + sgx_quote_helper_enabled = bool + }) + default = null + description = "(Optional) Enable Confidential Computing." +} + +variable "cost_analysis_enabled" { + type = bool + default = false + description = "(Optional) Enable Cost Analysis." +} + +variable "create_monitor_data_collection_rule" { + type = bool + default = true + description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`." + nullable = false +} + +variable "create_role_assignment_network_contributor" { + type = bool + default = false + description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster" + nullable = false +} + +variable "create_role_assignments_for_application_gateway" { + type = bool + default = true + description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`." + nullable = false +} + +variable "data_collection_settings" { + type = object({ + data_collection_interval = string + namespace_filtering_mode_for_data_collection = string + namespaces_for_data_collection = list(string) + container_log_v2_enabled = bool + }) + default = { + data_collection_interval = "1m" + namespace_filtering_mode_for_data_collection = "Off" + namespaces_for_data_collection = ["kube-system", "gatekeeper-system", "azure-arc"] + container_log_v2_enabled = true + } + description = <<-EOT + `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m. + `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection. + `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode. 
+ `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs. + See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 + EOT +} + +variable "default_node_pool_fips_enabled" { + type = bool + default = null + description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." +} + +variable "disk_encryption_set_id" { + type = string + default = null + description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created." +} + +variable "dns_prefix_private_cluster" { + type = string + default = null + description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created." +} + +variable "ebpf_data_plane" { + type = string + default = null + description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created." +} + +variable "enable_auto_scaling" { + type = bool + default = false + description = "Enable node pool autoscaling" +} + +variable "enable_host_encryption" { + type = bool + default = false + description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" +} + +variable "enable_node_public_ip" { + type = bool + default = false + description = "(Optional) Should nodes in this Node Pool have a Public IP Address? 
Defaults to false." +} + +variable "green_field_application_gateway_for_ingress" { + type = object({ + name = optional(string) + subnet_cidr = optional(string) + subnet_id = optional(string) + }) + default = null + description = <<-EOT + [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new) + * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. +EOT + + validation { + condition = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr))) + error_message = "One of `subnet_cidr` and `subnet_id` must be specified." + } +} + +variable "http_proxy_config" { + type = object({ + http_proxy = optional(string) + https_proxy = optional(string) + no_proxy = optional(list(string)) + trusted_ca = optional(string) + }) + default = null + description = <<-EOT + optional(object({ + http_proxy = (Optional) The proxy address to be used when communicating over HTTP. + https_proxy = (Optional) The proxy address to be used when communicating over HTTPS. + no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field. 
+ trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format. + })) + Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. +EOT + + validation { + condition = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)) + error_message = "`http_proxy` and `https_proxy` cannot be both empty." + } +} + +variable "identity_ids" { + type = list(string) + default = null + description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster." +} + +variable "identity_type" { + type = string + default = "SystemAssigned" + description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well." + + validation { + condition = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned" + error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`" + } +} + +variable "image_cleaner_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether Image Cleaner is enabled." +} + +variable "image_cleaner_interval_hours" { + type = number + default = 48 + description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`." +} + +variable "interval_before_cluster_update" { + type = string + default = "30s" + description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update." 
+} + +variable "key_vault_secrets_provider_enabled" { + type = bool + default = false + description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver" + nullable = false +} + +variable "kms_enabled" { + type = bool + default = false + description = "(Optional) Enable Azure KeyVault Key Management Service." + nullable = false +} + +variable "kms_key_vault_key_id" { + type = string + default = null + description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier." +} + +variable "kms_key_vault_network_access" { + type = string + default = "Public" + description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`." + + validation { + condition = contains(["Private", "Public"], var.kms_key_vault_network_access) + error_message = "Possible values are `Private` and `Public`" + } +} + +variable "kubelet_identity" { + type = object({ + client_id = optional(string) + object_id = optional(string) + user_assigned_identity_id = optional(string) + }) + default = null + description = <<-EOT + - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. 
+EOT +} + +variable "kubernetes_version" { + type = string + default = null + description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region" +} + +variable "load_balancer_profile_enabled" { + type = bool + default = false + description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`." + nullable = false +} + +variable "load_balancer_profile_idle_timeout_in_minutes" { + type = number + default = 30 + description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive." +} + +variable "load_balancer_profile_managed_outbound_ip_count" { + type = number + default = null + description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive" +} + +variable "load_balancer_profile_managed_outbound_ipv6_count" { + type = number + default = null + description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature" +} + +variable "load_balancer_profile_outbound_ip_address_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer." 
+} + +variable "load_balancer_profile_outbound_ip_prefix_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer." +} + +variable "load_balancer_profile_outbound_ports_allocated" { + type = number + default = 0 + description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`" +} + +variable "load_balancer_sku" { + type = string + default = "standard" + description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created." + + validation { + condition = contains(["basic", "standard"], var.load_balancer_sku) + error_message = "Possible values are `basic` and `standard`" + } +} + +variable "local_account_disabled" { + type = bool + default = null + description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information." +} + +variable "log_analytics_solution" { + type = object({ + id = string + }) + default = null + description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution." + + validation { + condition = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != "" + error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`." 
+ } +} + +variable "log_analytics_workspace" { + type = object({ + id = string + name = string + location = optional(string) + resource_group_name = optional(string) + }) + default = null + description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace." +} + +variable "log_analytics_workspace_allow_resource_only_permissions" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`." +} + +variable "log_analytics_workspace_cmk_for_query_forced" { + type = bool + default = null + description = "(Optional) Is Customer Managed Storage mandatory for query management?" +} + +variable "log_analytics_workspace_daily_quota_gb" { + type = number + default = null + description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted." +} + +variable "log_analytics_workspace_data_collection_rule_id" { + type = string + default = null + description = "(Optional) The ID of the Data Collection Rule to use for this workspace." +} + +variable "log_analytics_workspace_enabled" { + type = bool + default = true + description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard" + nullable = false +} + +variable "log_analytics_workspace_identity" { + type = object({ + identity_ids = optional(set(string)) + type = string + }) + default = null + description = <<-EOT + - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`. + - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. 
Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. +EOT +} + +variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" { + type = bool + default = null + description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days." +} + +variable "log_analytics_workspace_internet_ingestion_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_internet_query_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_local_authentication_disabled" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`." +} + +variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" { + type = number + default = null + description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`." +} + +variable "log_analytics_workspace_resource_group_name" { + type = string + default = null + description = "(Optional) Resource group name to create azurerm_log_analytics_solution." +} + +variable "log_analytics_workspace_sku" { + type = string + default = "PerGB2018" + description = "The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018"
+}
+
+variable "log_retention_in_days" {
+  type        = number
+  default     = 30
+  description = "The retention period for the logs in days"
+}
+
+variable "maintenance_window" {
+  type = object({
+    allowed = optional(list(object({
+      day   = string
+      hours = set(number)
+    })), [
+    ]),
+    not_allowed = optional(list(object({
+      end   = string
+      start = string
+    })), []),
+  })
+  default     = null
+  description = "(Optional) Maintenance configuration of the managed cluster."
+}
+
+variable "maintenance_window_auto_upgrade" {
+  type = object({
+    day_of_month = optional(number)
+    day_of_week  = optional(string)
+    duration     = number
+    frequency    = string
+    interval     = number
+    start_date   = optional(string)
+    start_time   = optional(string)
+    utc_offset   = optional(string)
+    week_index   = optional(string)
+    not_allowed = optional(set(object({
+      end   = string
+      start = string
+    })))
+  })
+  default     = null
+  description = <<-EOT
+    - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
+    - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
+    - `duration` - (Required) The duration of the window for maintenance to run in hours.
+    - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
+    - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
+    - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
+    - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. 
+    - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
+    - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
+
+    ---
+    `not_allowed` block supports the following:
+    - `end` - (Required) The end of a time span, formatted as an RFC3339 string.
+    - `start` - (Required) The start of a time span, formatted as an RFC3339 string.
+EOT
+}
+
+variable "maintenance_window_node_os" {
+  type = object({
+    day_of_month = optional(number)
+    day_of_week  = optional(string)
+    duration     = number
+    frequency    = string
+    interval     = number
+    start_date   = optional(string)
+    start_time   = optional(string)
+    utc_offset   = optional(string)
+    week_index   = optional(string)
+    not_allowed = optional(set(object({
+      end   = string
+      start = string
+    })))
+  })
+  default     = null
+  description = <<-EOT
+    - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
+    - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
+    - `duration` - (Required) The duration of the window for maintenance to run in hours.
+    - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
+    - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
+    - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
+    - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
+    - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
+    - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
+ + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "microsoft_defender_enabled" { + type = bool + default = false + description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`." + nullable = false +} + +variable "monitor_data_collection_rule_data_sources_syslog_facilities" { + type = list(string) + default = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"] + description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog" +} + +variable "monitor_data_collection_rule_data_sources_syslog_levels" { + type = list(string) + default = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"] + description = "List of syslog levels" +} + +variable "monitor_data_collection_rule_extensions_streams" { + type = list(any) + default = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"] + description = "An array of container insights table streams. 
See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr" +} + +variable "monitor_metrics" { + type = object({ + annotations_allowed = optional(string) + labels_allowed = optional(string) + }) + default = null + description = <<-EOT + (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster + object({ + annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric." + labels_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric." + }) +EOT +} + +variable "msi_auth_for_monitoring_enabled" { + type = bool + default = null + description = "(Optional) Is managed identity authentication for monitoring enabled?" +} + +variable "nat_gateway_profile" { + type = object({ + idle_timeout_in_minutes = optional(number) + managed_outbound_ip_count = optional(number) + }) + default = null + description = <<-EOT + `nat_gateway_profile` block supports the following: + - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`. + - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. +EOT +} + +variable "net_profile_dns_service_ip" { + type = string + default = null + description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created." 
+} + +variable "net_profile_outbound_type" { + type = string + default = "loadBalancer" + description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer." +} + +variable "net_profile_pod_cidr" { + type = string + default = null + description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created." +} + +variable "net_profile_pod_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidr" { + type = string + default = null + description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "network_contributor_role_assigned_subnet_ids" { + type = map(string) + default = {} + description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id" + nullable = false +} + +variable "network_data_plane" { + type = string + default = null + description = "(Optional) Specifies the data plane used for building the Kubernetes network. 
Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created." +} + +variable "network_ip_versions" { + type = list(string) + default = null + description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created." +} + +variable "network_mode" { + type = string + default = null + description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created." +} + +variable "network_plugin" { + type = string + default = "kubenet" + description = "Network plugin to use for networking." + nullable = false +} + +variable "network_plugin_mode" { + type = string + default = null + description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created." +} + +variable "network_policy" { + type = string + default = null + description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created." +} + +variable "node_network_profile" { + type = object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + }) + default = null + description = <<-EOT + - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. 
+ - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. +--- + An `allowed_host_ports` block supports the following: + - `port_start`: (Optional) Specifies the start of the port range. + - `port_end`: (Optional) Specifies the end of the port range. + - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. +EOT +} + +variable "node_os_channel_upgrade" { + type = string + default = null + description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." +} + +variable "node_pools" { + type = map(object({ + name = string + node_count = optional(number) + tags = optional(map(string)) + vm_size = string + host_group_id = optional(string) + capacity_reservation_group_id = optional(string) + custom_ca_trust_enabled = optional(bool) + enable_auto_scaling = optional(bool) + enable_host_encryption = optional(bool) + enable_node_public_ip = optional(bool) + eviction_policy = optional(string) + gpu_instance = optional(string) + kubelet_config = optional(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_files = optional(number) + pod_max_pid = optional(number) + })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + 
net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + fips_enabled = optional(bool) + kubelet_disk_type = optional(string) + max_count = optional(number) + max_pods = optional(number) + message_of_the_day = optional(string) + mode = optional(string, "User") + min_count = optional(number) + node_network_profile = optional(object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + })) + node_labels = optional(map(string)) + node_public_ip_prefix_id = optional(string) + node_taints = optional(list(string)) + orchestrator_version = optional(string) + os_disk_size_gb = optional(number) + os_disk_type = optional(string, "Managed") + os_sku = optional(string) + 
os_type = optional(string, "Linux") + pod_subnet = optional(object({ + id = string + }), null) + priority = optional(string, "Regular") + proximity_placement_group_id = optional(string) + spot_max_price = optional(number) + scale_down_mode = optional(string, "Delete") + snapshot_id = optional(string) + ultra_ssd_enabled = optional(bool) + vnet_subnet = optional(object({ + id = string + }), null) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = optional(string) + zones = optional(set(string)) + create_before_destroy = optional(bool, true) + })) + default = {} + description = <<-EOT + A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: + map(object({ + name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. + node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. + tags = (Optional) A mapping of tags to assign to the resource. 
At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
+    vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
+    host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
+    capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
+    custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
+    enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
+    enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
+    enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
+    eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
+ gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + kubelet_config = optional(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. + topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
+ })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. + net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. + net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + })) + fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
+    kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
+    max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
+    max_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool.
+    message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
+    mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
+    min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
+    node_network_profile = optional(object({
+      node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
+      application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
+      allowed_host_ports = optional(object({
+        port_start = (Optional) Specifies the start of the port range.
+        port_end = (Optional) Specifies the end of the port range.
+        protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
+ })) + })) + node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. + node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. + node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. + orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. + os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. 
+ os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. + pod_subnet = optional(object({ + id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + })) + priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. + proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). + spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. + scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. + snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. 
+    vnet_subnet = optional(object({
+      id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
+    }))
+    upgrade_settings = optional(object({
+      drain_timeout_in_minutes = number
+      node_soak_duration_in_minutes = number
+      max_surge = string
+    }))
+    windows_profile = optional(object({
+      outbound_nat_enabled = optional(bool, true)
+    }))
+    workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
+    zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
+    create_before_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
+  }))
+  EOT
+  nullable = false
+}
+
+variable "node_resource_group" {
+  type        = string
+  default     = null
+  description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created."
+}
+
+variable "oidc_issuer_enabled" {
+  type        = bool
+  default     = false
+  description = "Enable or Disable the OIDC issuer URL. Defaults to false."
+}
+
+variable "oms_agent_enabled" {
+  type        = bool
+  default     = true
+  description = "Enable OMS Agent Addon."
+  nullable    = false
+}
+
+variable "only_critical_addons_enabled" {
+  type        = bool
+  default     = null
+  description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. 
Changing this forces a new resource to be created." +} + +variable "open_service_mesh_enabled" { + type = bool + default = null + description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." +} + +variable "orchestrator_version" { + type = string + default = null + description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" +} + +variable "os_disk_size_gb" { + type = number + default = 50 + description = "Disk size of nodes in GBs." +} + +variable "os_disk_type" { + type = string + default = "Managed" + description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." + nullable = false +} + +variable "os_sku" { + type = string + default = null + description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." +} + +variable "pod_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "prefix" { + type = string + default = "" + description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. 
Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." +} + +variable "private_cluster_enabled" { + type = bool + default = false + description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet." +} + +variable "private_cluster_public_fqdn_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`." +} + +variable "private_dns_zone_id" { + type = string + default = null + description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created." +} + +variable "public_ssh_key" { + type = string + default = "" + description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created." +} + +variable "rbac_aad" { + type = bool + default = true + description = "(Optional) Is Azure Active Directory integration enabled?" + nullable = false +} + +variable "rbac_aad_admin_group_object_ids" { + type = list(string) + default = null + description = "Object ID of groups with admin access." +} + +variable "rbac_aad_azure_rbac_enabled" { + type = bool + default = null + description = "(Optional) Is Role Based Access Control based on Azure AD enabled?" +} + +variable "rbac_aad_tenant_id" { + type = string + default = null + description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used." +} + +variable "role_based_access_control_enabled" { + type = bool + default = false + description = "Enable Role Based Access Control." 
+ nullable = false +} + +variable "run_command_enabled" { + type = bool + default = true + description = "(Optional) Whether to enable run command for the cluster or not." +} + +variable "scale_down_mode" { + type = string + default = "Delete" + description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." +} + +variable "secret_rotation_enabled" { + type = bool + default = false + description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`" + nullable = false +} + +variable "secret_rotation_interval" { + type = string + default = "2m" + description = "The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m`" + nullable = false +} + +variable "service_mesh_profile" { + type = object({ + mode = string + internal_ingress_gateway_enabled = optional(bool, true) + external_ingress_gateway_enabled = optional(bool, true) + }) + default = null + description = <<-EOT + `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. + `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. + `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. + EOT +} + +variable "sku_tier" { + type = string + default = "Free" + description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`" + + validation { + condition = contains(["Free", "Standard", "Premium"], var.sku_tier) + error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0." 
+ } +} + +variable "snapshot_id" { + type = string + default = null + description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." +} + +variable "storage_profile_blob_driver_enabled" { + type = bool + default = false + description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`" +} + +variable "storage_profile_disk_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_disk_driver_version" { + type = string + default = "v1" + description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`." +} + +variable "storage_profile_enabled" { + type = bool + default = false + description = "Enable storage profile" + nullable = false +} + +variable "storage_profile_file_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the File CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_snapshot_controller_enabled" { + type = bool + default = true + description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`" +} + +variable "support_plan" { + type = string + default = "KubernetesOfficial" + description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." + + validation { + condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan) + error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`." 
+ } +} + +variable "tags" { + type = map(string) + default = {} + description = "Any tags that should be present on the AKS cluster resources" +} + +variable "temporary_name_for_rotation" { + type = string + default = null + description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" +} + +variable "ultra_ssd_enabled" { + type = bool + default = false + description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." +} + +variable "vnet_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "web_app_routing" { + type = object({ + dns_zone_ids = list(string) + }) + default = null + description = <<-EOT + object({ + dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list." + }) +EOT +} + +variable "workload_autoscaler_profile" { + type = object({ + keda_enabled = optional(bool, false) + vertical_pod_autoscaler_enabled = optional(bool, false) + }) + default = null + description = <<-EOT + `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads. + `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. +EOT +} + +variable "workload_identity_enabled" { + type = bool + default = false + description = "Enable or Disable Workload Identity. Defaults to false." 
+} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf new file mode 100644 index 000000000..7859b9fae --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + azapi = { + source = "Azure/azapi" + version = ">=2.0, < 3.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = ">= 3.107.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.5" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.1" + } + } +} From 6ed03aaae02d15afd0132f5861cdd5cd728077d8 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:35:37 +0530 Subject: [PATCH 11/36] remove remote k8smodule --- .../0.2/k8scluster/.checkov_config.yaml | 30 - .../azure_aks/0.2/k8scluster/CHANGELOG-v4.md | 20 - .../azure_aks/0.2/k8scluster/CHANGELOG-v5.md | 31 - .../azure_aks/0.2/k8scluster/CHANGELOG-v6.md | 122 -- .../azure_aks/0.2/k8scluster/CHANGELOG-v7.md | 93 - .../azure_aks/0.2/k8scluster/CHANGELOG-v8.md | 27 - .../azure_aks/0.2/k8scluster/CHANGELOG-v9.md | 76 - .../azure_aks/0.2/k8scluster/CHANGELOG.md | 5 - .../0.2/k8scluster/CODE_OF_CONDUCT.md | 5 - .../azure_aks/0.2/k8scluster/GNUmakefile | 4 - .../azure_aks/0.2/k8scluster/LICENSE | 21 - .../0.2/k8scluster/NoticeOnUpgradeTov10.0.md | 53 - .../0.2/k8scluster/NoticeOnUpgradeTov5.0.md | 93 - .../0.2/k8scluster/NoticeOnUpgradeTov6.0.md | 5 - .../0.2/k8scluster/NoticeOnUpgradeTov7.0.md | 52 - .../0.2/k8scluster/NoticeOnUpgradeTov8.0.md | 53 - .../0.2/k8scluster/NoticeOnUpgradeTov9.0.md | 9 - .../azure_aks/0.2/k8scluster/README.md | 490 ----- .../azure_aks/0.2/k8scluster/SECURITY.md | 41 - .../0.2/k8scluster/extra_node_pool.tf | 317 ---- .../k8scluster/extra_node_pool_override.tf | 17 - .../azure_aks/0.2/k8scluster/locals.tf | 74 - 
.../azure_aks/0.2/k8scluster/log_analytics.tf | 124 -- .../azure_aks/0.2/k8scluster/main.tf | 741 -------- .../azure_aks/0.2/k8scluster/main_override.tf | 6 - .../azure_aks/0.2/k8scluster/outputs.tf | 231 --- .../0.2/k8scluster/role_assignments.tf | 126 -- .../azure_aks/0.2/k8scluster/tfvmmakefile | 85 - .../azure_aks/0.2/k8scluster/variables.tf | 1601 ----------------- .../azure_aks/0.2/k8scluster/versions.tf | 26 - 30 files changed, 4578 deletions(-) delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md delete mode 
100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml deleted file mode 100644 index b39c33402..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -block-list-secret-scan: [] -branch: master -directory: - - ./ -download-external-modules: false -evaluate-variables: true -external-modules-download-path: .external_modules -framework: - - all -quiet: true -secrets-scan-file-type: [] -skip-check: - - CKV_GHA_3 - - CKV_AZURE_5 - - CKV_AZURE_6 - - CKV_AZURE_112 - - CKV_AZURE_115 - - CKV_AZURE_116 - - CKV_AZURE_168 - - CKV_AZURE_170 - - CKV_AZURE_139 - - CKV_AZURE_165 - - CKV_AZURE_166 - - CKV_AZURE_164 -skip-framework: - - dockerfile - - kubernetes -skip-path: - - test/vendor -summary-position: top diff --git 
a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md deleted file mode 100644 index 42433d0ea..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md +++ /dev/null @@ -1,20 +0,0 @@ -## 4.15.0 (May 06, 2022) - -ENHANCEMENTS: - -* Added output for `kube_admin_config_raw` ([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146)) -* Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136)) - -BUG FIXES: - -## 4.16.0 (June 02, 2022) - -ENHANCEMENTS: - -* Added output for `addon_profile` ([#151](https://github.com/Azure/terraform-azurerm-aks/pull/151)) -* Adding Microsoft SECURITY.MD ([#167](https://github.com/Azure/terraform-azurerm-aks/pull/167)) -* Added variable `os_disk_type` for default node pools ([#169](https://github.com/Azure/terraform-azurerm-aks/pull/169)) - -BUG FIXES: - -* Trivial fix to the example in the README ([#166](https://github.com/Azure/terraform-azurerm-aks/pull/166)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md deleted file mode 100644 index bda5b8027..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md +++ /dev/null @@ -1,31 +0,0 @@ -## 5.0.0 (July 14, 2022) - -ENHANCEMENTS: - -* Variable `enable_kube_dashboard` has been removed as [#181](https://github.com/Azure/terraform-azurerm-aks/issues/181) described. ([#187](https://github.com/Azure/terraform-azurerm-aks/pull/187)) -* Add new variable `location` so we can define location for the resources explicitly. ([#172](https://github.com/Azure/terraform-azurerm-aks/pull/172)) -* Bump AzureRM Provider version to 3.3.0. ([#157](https://github.com/Azure/terraform-azurerm-aks/pull/157)) -* Add new variable `private_dns_zone_id` to make argument `private_dns_zone_id` configurable. 
([#174](https://github.com/Azure/terraform-azurerm-aks/pull/174)) -* Add new variable `open_service_mesh_enabled` to make argument `open_service_mesh_enabled` configurable. ([#132](https://github.com/Azure/terraform-azurerm-aks/pull/132)) -* Remove `addon_profile` in the outputs since the block has been removed from provider 3.x. Extract embedded blocks inside `addon_profile` block into standalone outputs. ([#188](https://github.com/Azure/terraform-azurerm-aks/pull/188)) -* Add `nullable = true` to some variables to simplify the conditional expressions. ([#193](https://github.com/Azure/terraform-azurerm-aks/pull/193)) -* Add new variable `oidc_issuer_enabled` to make argument `oidc_issuer_enabled` configurable. ([#205](https://github.com/Azure/terraform-azurerm-aks/pull/205) -* Add new output `oidc_issuer_url` to expose the created issuer URL from the module. [#206](https://github.com/Azure/terraform-azurerm-aks/pull/206)) -* Turn monitoring on in the test code. ([#201](https://github.com/Azure/terraform-azurerm-aks/pull/201)) -* Add new variables `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` to make arguments `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` configurable. ([#149](https://github.com/Azure/terraform-azurerm-aks/pull/149)) -* Remove `module.ssh-key` and moves resource `tls_private_key` inside the module to root directory, then outputs tls keys. ([#189](https://github.com/Azure/terraform-azurerm-aks/pull/189)) -* Add new variables `rbac_aad_azure_rbac_enabled` and `rbac_aad_tenant_id` to make arguments in `azure_active_directory_role_based_access_control` configurable. ([#199](https://github.com/Azure/terraform-azurerm-aks/pull/199)) -* Add `count` meta-argument to resource `tls_private_key` to avoid the unnecessary creation. 
([#209](https://github.com/Azure/terraform-azurerm-aks/pull/209)) -* Add new variable `only_critical_addons_enabled` to make argument `only_critical_addons_enabled` in block `default_node_pool` configurable. ([#129](https://github.com/Azure/terraform-azurerm-aks/pull/129)) -* Add support for the argument `key_vault_secrets_provider`. ([#214](https://github.com/Azure/terraform-azurerm-aks/pull/214)) -* Provides a way to attach existing Log Analytics Workspace to AKS through Container Insights. ([#213](https://github.com/Azure/terraform-azurerm-aks/pull/213)) -* Add new variable `local_account_disabled` to make argument `local_account_disabled` configurable. ([#218](https://github.com/Azure/terraform-azurerm-aks/pull/218)) -* Set argument `private_cluster_enabled` to `true` in the test code. ([#219](https://github.com/Azure/terraform-azurerm-aks/pull/219)) -* Add new variable `disk_encryption_set_id` to make argument `disk_encryption_set_id` configurable. Create resource `azurerm_disk_encryption_set` in the test code to turn disk encryption on for the cluster. ([#195](https://github.com/Azure/terraform-azurerm-aks/pull/195)) -* Add new variable `api_server_authorized_ip_ranges` to make argument `api_server_authorized_ip_ranges` configurable. ([#220](https://github.com/Azure/terraform-azurerm-aks/pull/220)) -* Rename output `system_assigned_identity` to `cluster_identity` since it could be user assigned identity. Remove the index inside output's value expression. ([#197](https://github.com/Azure/terraform-azurerm-aks/pull/197)) -* Rename `var.enable_azure_policy` to `var.azure_policy_enabled` to meet the naming convention. Set `azure_policy_enabled` to `true` in test fixture code. ([#203](https://github.com/Azure/terraform-azurerm-aks/pull/203)) - -BUG FIXES: - -* Change the incorrect description of variable `tags`. 
([#175](https://github.com/Azure/terraform-azurerm-aks/pull/175)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md deleted file mode 100644 index ed1f9f094..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md +++ /dev/null @@ -1,122 +0,0 @@ -# Changelog - -## [Unreleased](https://github.com/Azure/terraform-azurerm-aks/tree/HEAD) - -**Merged pull requests:** - -- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) - -## [6.8.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.8.0) (2023-04-04) - -**Merged pull requests:** - -- Add support for `monitor_metrics` [\#341](https://github.com/Azure/terraform-azurerm-aks/pull/341) ([zioproto](https://github.com/zioproto)) -- Support setting os\_sku for default\_node\_pool [\#339](https://github.com/Azure/terraform-azurerm-aks/pull/339) ([mjeco](https://github.com/mjeco)) -- Upgrade required Terraform version [\#338](https://github.com/Azure/terraform-azurerm-aks/pull/338) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support `temporary_name_for_rotation` [\#334](https://github.com/Azure/terraform-azurerm-aks/pull/334) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.9.1 to 0.12.0 in /test [\#330](https://github.com/Azure/terraform-azurerm-aks/pull/330) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Fix example multiple\_node\_pools [\#328](https://github.com/Azure/terraform-azurerm-aks/pull/328) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add Network Contributor role assignments scoped to AKS nodepools subnets [\#327](https://github.com/Azure/terraform-azurerm-aks/pull/327) ([zioproto](https://github.com/zioproto)) -- Add support for extra node pools 
[\#323](https://github.com/Azure/terraform-azurerm-aks/pull/323) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `default_node_pool.kubelet_config` [\#322](https://github.com/Azure/terraform-azurerm-aks/pull/322) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for `public_network_access_enabled` [\#314](https://github.com/Azure/terraform-azurerm-aks/pull/314) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.7.1](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.1) (2023-03-06) - -**Merged pull requests:** - -- Fix \#316 `current client lacks permissions to read Key Rotation Policy` issue [\#317](https://github.com/Azure/terraform-azurerm-aks/pull/317) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.7.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.0) (2023-02-27) - -**Merged pull requests:** - -- Add support for `linux_os_config` [\#309](https://github.com/Azure/terraform-azurerm-aks/pull/309) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/gruntwork-io/terratest from 0.41.10 to 0.41.11 in /test [\#307](https://github.com/Azure/terraform-azurerm-aks/pull/307) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/Azure/terraform-module-test-helper from 0.8.1 to 0.9.1 in /test [\#306](https://github.com/Azure/terraform-azurerm-aks/pull/306) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump golang.org/x/net from 0.1.0 to 0.7.0 in /test [\#305](https://github.com/Azure/terraform-azurerm-aks/pull/305) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-getter from 1.6.1 to 1.7.0 in /test [\#304](https://github.com/Azure/terraform-azurerm-aks/pull/304) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-getter/v2 from 2.1.1 to 2.2.0 in /test [\#303](https://github.com/Azure/terraform-azurerm-aks/pull/303) ([dependabot[bot]](https://github.com/apps/dependabot)) -- fix: allow 
orchestrator\_version if auto-upgrade is 'patch' to allow default\_node\_pool upgrade [\#302](https://github.com/Azure/terraform-azurerm-aks/pull/302) ([aescrob](https://github.com/aescrob)) -- Add support for default node pool's `node_taints` [\#300](https://github.com/Azure/terraform-azurerm-aks/pull/300) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for acr attachment [\#298](https://github.com/Azure/terraform-azurerm-aks/pull/298) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `web_app_routing` [\#297](https://github.com/Azure/terraform-azurerm-aks/pull/297) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.7.1 to 0.8.1 in /test [\#295](https://github.com/Azure/terraform-azurerm-aks/pull/295) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [6.6.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.6.0) (2023-01-29) - -**Merged pull requests:** - -- Bump github.com/Azure/terraform-module-test-helper from 0.6.0 to 0.7.1 in /test [\#293](https://github.com/Azure/terraform-azurerm-aks/pull/293) ([dependabot[bot]](https://github.com/apps/dependabot)) -- identity type is either SystemAssigned or UserAssigned [\#292](https://github.com/Azure/terraform-azurerm-aks/pull/292) ([zioproto](https://github.com/zioproto)) -- Bump github.com/gruntwork-io/terratest from 0.41.7 to 0.41.9 in /test [\#290](https://github.com/Azure/terraform-azurerm-aks/pull/290) ([dependabot[bot]](https://github.com/apps/dependabot)) -- feat: Implement support for KMS arguments [\#288](https://github.com/Azure/terraform-azurerm-aks/pull/288) ([mkilchhofer](https://github.com/mkilchhofer)) -- feat: allow for configuring auto\_scaler\_profile [\#278](https://github.com/Azure/terraform-azurerm-aks/pull/278) ([DavidSpek](https://github.com/DavidSpek)) -- Azure AD RBAC enable/disable with variable rbac\_aad [\#269](https://github.com/Azure/terraform-azurerm-aks/pull/269) 
([zioproto](https://github.com/zioproto)) - -## [6.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.5.0) (2023-01-03) - -**Merged pull requests:** - -- Bump github.com/Azure/terraform-module-test-helper from 0.4.0 to 0.6.0 in /test [\#287](https://github.com/Azure/terraform-azurerm-aks/pull/287) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.41.6 to 0.41.7 in /test [\#286](https://github.com/Azure/terraform-azurerm-aks/pull/286) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add support for `scale_down_mode` [\#285](https://github.com/Azure/terraform-azurerm-aks/pull/285) ([lonegunmanb](https://github.com/lonegunmanb)) -- auto-upgrade: variable orchestrator\_version to null [\#283](https://github.com/Azure/terraform-azurerm-aks/pull/283) ([zioproto](https://github.com/zioproto)) - -## [6.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.4.0) (2022-12-26) - -**Merged pull requests:** - -- feat\(storage\_profile\): add support for CSI arguments [\#282](https://github.com/Azure/terraform-azurerm-aks/pull/282) ([aescrob](https://github.com/aescrob)) - -## [6.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.3.0) (2022-12-20) - -**Merged pull requests:** - -- feat: add var automatic\_channel\_upgrade [\#281](https://github.com/Azure/terraform-azurerm-aks/pull/281) ([the-technat](https://github.com/the-technat)) -- Upgrade `terraform-module-test-helper` lib so we can get rid of override file to execute version upgrade test [\#279](https://github.com/Azure/terraform-azurerm-aks/pull/279) ([lonegunmanb](https://github.com/lonegunmanb)) -- Added support for load\_balancer\_profile [\#277](https://github.com/Azure/terraform-azurerm-aks/pull/277) ([mazilu88](https://github.com/mazilu88)) -- Add auto changelog update to this repo. 
[\#275](https://github.com/Azure/terraform-azurerm-aks/pull/275) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump test helper version [\#273](https://github.com/Azure/terraform-azurerm-aks/pull/273) ([lonegunmanb](https://github.com/lonegunmanb)) -- Ignore `scripts` soft link [\#272](https://github.com/Azure/terraform-azurerm-aks/pull/272) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for pod subnet [\#271](https://github.com/Azure/terraform-azurerm-aks/pull/271) ([mr-onion-2](https://github.com/mr-onion-2)) - -## [6.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.2.0) (2022-10-18) - -**Merged pull requests:** - -- Add breaking change detect CI step. [\#268](https://github.com/Azure/terraform-azurerm-aks/pull/268) ([lonegunmanb](https://github.com/lonegunmanb)) -- Workload Identity support [\#266](https://github.com/Azure/terraform-azurerm-aks/pull/266) ([nlamirault](https://github.com/nlamirault)) -- Add unit test for complex local logic [\#264](https://github.com/Azure/terraform-azurerm-aks/pull/264) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.1.0) (2022-09-30) - -**Merged pull requests:** - -- Improve placeholders for visibility in the UX [\#262](https://github.com/Azure/terraform-azurerm-aks/pull/262) ([zioproto](https://github.com/zioproto)) -- align acc test in CI pipeline with local machine by running e2e test … [\#260](https://github.com/Azure/terraform-azurerm-aks/pull/260) ([lonegunmanb](https://github.com/lonegunmanb)) -- align pr-check with local machine by using docker command instead [\#259](https://github.com/Azure/terraform-azurerm-aks/pull/259) ([lonegunmanb](https://github.com/lonegunmanb)) -- bugfix: Make the Azure Defender clause robust against a non-existent … [\#258](https://github.com/Azure/terraform-azurerm-aks/pull/258) ([gzur](https://github.com/gzur)) -- Add support for `maintenance_window` 
[\#256](https://github.com/Azure/terraform-azurerm-aks/pull/256) ([lonegunmanb](https://github.com/lonegunmanb)) -- Updates terraform code to meet updated code style requirement [\#253](https://github.com/Azure/terraform-azurerm-aks/pull/253) ([lonegunmanb](https://github.com/lonegunmanb)) -- Output cluster's fqdn [\#251](https://github.com/Azure/terraform-azurerm-aks/pull/251) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix example path in readme file. [\#249](https://github.com/Azure/terraform-azurerm-aks/pull/249) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update azurerm provider's restriction. [\#248](https://github.com/Azure/terraform-azurerm-aks/pull/248) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for optional Ultra disks [\#245](https://github.com/Azure/terraform-azurerm-aks/pull/245) ([digiserg](https://github.com/digiserg)) -- add aci\_connector addon [\#230](https://github.com/Azure/terraform-azurerm-aks/pull/230) ([zioproto](https://github.com/zioproto)) - -## [6.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.0.0) (2022-09-13) - -**Merged pull requests:** - -- Add outputs for created Log Analytics workspace [\#243](https://github.com/Azure/terraform-azurerm-aks/pull/243) ([zioproto](https://github.com/zioproto)) -- Prepare v6.0 and new CI pipeline. 
[\#241](https://github.com/Azure/terraform-azurerm-aks/pull/241) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update hashicorp/terraform-provider-azurerm to version 3.21.0 \(fixes for AKS 1.24\) [\#238](https://github.com/Azure/terraform-azurerm-aks/pull/238) ([zioproto](https://github.com/zioproto)) -- Output Kubernetes Cluster Name [\#234](https://github.com/Azure/terraform-azurerm-aks/pull/234) ([vermacodes](https://github.com/vermacodes)) -- feat\(aks\): add microsoft defender support [\#232](https://github.com/Azure/terraform-azurerm-aks/pull/232) ([eyenx](https://github.com/eyenx)) -- fix: mark outputs as sensitive [\#231](https://github.com/Azure/terraform-azurerm-aks/pull/231) ([jvelasquez](https://github.com/jvelasquez)) -- Loose the restriction on tls provider's version to include major version greater than 3.0 [\#229](https://github.com/Azure/terraform-azurerm-aks/pull/229) ([lonegunmanb](https://github.com/lonegunmanb)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md deleted file mode 100644 index 67b2e2375..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md +++ /dev/null @@ -1,93 +0,0 @@ -# Changelog - -## [7.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.5.0) (2023-11-14) - -**Merged pull requests:** - -- Add support for `node_os_channel_upgrade` [\#474](https://github.com/Azure/terraform-azurerm-aks/pull/474) ([lonegunmanb](https://github.com/lonegunmanb)) -- use lowercase everywhere for network plugin mode overlay [\#472](https://github.com/Azure/terraform-azurerm-aks/pull/472) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.15.1-0.20230728050712-96e8615f5515 to 0.17.0 in /test 
[\#469](https://github.com/Azure/terraform-azurerm-aks/pull/469) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add support for `service_mesh_profile` block [\#468](https://github.com/Azure/terraform-azurerm-aks/pull/468) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for Image Cleaner [\#466](https://github.com/Azure/terraform-azurerm-aks/pull/466) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add `fips_enabled` support for `default_node_pool` block [\#464](https://github.com/Azure/terraform-azurerm-aks/pull/464) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add default empty list for `allowed` and `not_allowed` in `var.maintenance_window` [\#463](https://github.com/Azure/terraform-azurerm-aks/pull/463) ([lonegunmanb](https://github.com/lonegunmanb)) -- fix: correct wording of the doc [\#461](https://github.com/Azure/terraform-azurerm-aks/pull/461) ([meysam81](https://github.com/meysam81)) -- add run\_command\_enabled [\#452](https://github.com/Azure/terraform-azurerm-aks/pull/452) ([zioproto](https://github.com/zioproto)) -- add msi\_auth\_for\_monitoring\_enabled [\#446](https://github.com/Azure/terraform-azurerm-aks/pull/446) ([admincasper](https://github.com/admincasper)) -- Restore readme file by stop formatting markdown table [\#445](https://github.com/Azure/terraform-azurerm-aks/pull/445) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.4.0) (2023-09-18) - -**Merged pull requests:** - -- Support for creating nodepools from snapshots [\#442](https://github.com/Azure/terraform-azurerm-aks/pull/442) ([zioproto](https://github.com/zioproto)) -- Add multiple terraform-docs configs to generate a seperated markdown document for input variables [\#441](https://github.com/Azure/terraform-azurerm-aks/pull/441) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `maintenance_window_node_os` block 
[\#440](https://github.com/Azure/terraform-azurerm-aks/pull/440) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.3.2](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.2) (2023-09-07) - -**Merged pull requests:** - -- Hide input variables in readme to boost the rendering [\#437](https://github.com/Azure/terraform-azurerm-aks/pull/437) ([lonegunmanb](https://github.com/lonegunmanb)) -- Improve information to upgrade to 7.0 [\#432](https://github.com/Azure/terraform-azurerm-aks/pull/432) ([zioproto](https://github.com/zioproto)) -- Add confidential computing in aks module [\#423](https://github.com/Azure/terraform-azurerm-aks/pull/423) ([jiaweitao001](https://github.com/jiaweitao001)) - -## [7.3.1](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.1) (2023-08-10) - -**Merged pull requests:** - -- Bump k8s version in exmaples to pass e2e tests [\#422](https://github.com/Azure/terraform-azurerm-aks/pull/422) ([jiaweitao001](https://github.com/jiaweitao001)) - -## [7.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.0) (2023-08-03) - -**Merged pull requests:** - -- Add `location` and `resource_group_name` for `var.log_analytics_workspace` [\#412](https://github.com/Azure/terraform-azurerm-aks/pull/412) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix \#405 incorrect role assignment resource [\#410](https://github.com/Azure/terraform-azurerm-aks/pull/410) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.2.0) (2023-07-10) - -**Merged pull requests:** - -- Bump google.golang.org/grpc from 1.51.0 to 1.53.0 in /test [\#406](https://github.com/Azure/terraform-azurerm-aks/pull/406) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Support for Azure CNI Cilium [\#398](https://github.com/Azure/terraform-azurerm-aks/pull/398) ([JitseHijlkema](https://github.com/JitseHijlkema)) -- Use `lonegunmanb/public-ip/lonegunmanb` module to retrieve public ip 
[\#396](https://github.com/Azure/terraform-azurerm-aks/pull/396) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix incorrect e2e test code so it could pass on our local machine [\#395](https://github.com/Azure/terraform-azurerm-aks/pull/395) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for Proximity placement group for default node pool [\#392](https://github.com/Azure/terraform-azurerm-aks/pull/392) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add upgrade\_settings block for default nodepool [\#391](https://github.com/Azure/terraform-azurerm-aks/pull/391) ([CiucurDaniel](https://github.com/CiucurDaniel)) -- Bump github.com/Azure/terraform-module-test-helper from 0.13.0 to 0.14.0 in /test [\#386](https://github.com/Azure/terraform-azurerm-aks/pull/386) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [7.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.1.0) (2023-06-07) - -**Merged pull requests:** - -- Deprecate `api_server_authorized_ip_ranges` by using `api_server_access_profile` block [\#381](https://github.com/Azure/terraform-azurerm-aks/pull/381) ([lonegunmanb](https://github.com/lonegunmanb)) -- `oidc_issuer_enabled` must be set to `true` to enable Azure AD Worklo… [\#377](https://github.com/Azure/terraform-azurerm-aks/pull/377) ([zioproto](https://github.com/zioproto)) -- assign network contributor role to control plane identity [\#369](https://github.com/Azure/terraform-azurerm-aks/pull/369) ([zioproto](https://github.com/zioproto)) -- Add tracing tag toggle variables [\#362](https://github.com/Azure/terraform-azurerm-aks/pull/362) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for Azure CNI Overlay [\#354](https://github.com/Azure/terraform-azurerm-aks/pull/354) ([zioproto](https://github.com/zioproto)) -- Make `var.prefix` optional [\#382](https://github.com/Azure/terraform-azurerm-aks/pull/382) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove constraint on `authorized_ip_ranges` when 
`public_network_access_enabled` is `true` [\#375](https://github.com/Azure/terraform-azurerm-aks/pull/375) ([lonegunmanb](https://github.com/lonegunmanb)) -- Filter null value out from `local.subnet_ids` [\#374](https://github.com/Azure/terraform-azurerm-aks/pull/374) ([lonegunmanb](https://github.com/lonegunmanb)) -- User `location` returned from data source for log analytics solution. [\#349](https://github.com/Azure/terraform-azurerm-aks/pull/349) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.0.0) (2023-05-18) - -**Merged pull requests:** - -- Upgrade notice for v7.0 [\#367](https://github.com/Azure/terraform-azurerm-aks/pull/367) ([lonegunmanb](https://github.com/lonegunmanb)) -- Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` [\#361](https://github.com/Azure/terraform-azurerm-aks/pull/361) ([lonegunmanb](https://github.com/lonegunmanb)) -- feat!: add create\_before\_destroy=true to node pools [\#357](https://github.com/Azure/terraform-azurerm-aks/pull/357) ([the-technat](https://github.com/the-technat)) -- Move breaking change details into separate docs. add notice on v7.0.0 [\#355](https://github.com/Azure/terraform-azurerm-aks/pull/355) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.12.0 to 0.13.0 in /test [\#352](https://github.com/Azure/terraform-azurerm-aks/pull/352) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Trivial: fix typo ingration -\> integration [\#351](https://github.com/Azure/terraform-azurerm-aks/pull/351) ([zioproto](https://github.com/zioproto)) -- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) -- \[Breaking\] Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard`. 
[\#346](https://github.com/Azure/terraform-azurerm-aks/pull/346) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] - Ignore changes on `kubernetes_version` from outside of Terraform [\#336](https://github.com/Azure/terraform-azurerm-aks/pull/336) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] - Fix \#315 by amending missing `linux_os_config` block [\#320](https://github.com/Azure/terraform-azurerm-aks/pull/320) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] Wrap `log_analytics_solution_id` to an object to fix \#263. [\#265](https://github.com/Azure/terraform-azurerm-aks/pull/265) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] Remove unused net\_profile\_docker\_bridge\_cidr [\#222](https://github.com/Azure/terraform-azurerm-aks/pull/222) ([zioproto](https://github.com/zioproto)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md deleted file mode 100644 index 2c035d842..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md +++ /dev/null @@ -1,27 +0,0 @@ -# Changelog - -**Merged pull requests:** - -- Add support for nodepool's `gpu_instance` [\#519](https://github.com/Azure/terraform-azurerm-aks/pull/519) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.17.0 to 0.18.0 in /test [\#516](https://github.com/Azure/terraform-azurerm-aks/pull/516) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add upgrade notice document [\#513](https://github.com/Azure/terraform-azurerm-aks/pull/513) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add retry when the ingress is not ready [\#510](https://github.com/Azure/terraform-azurerm-aks/pull/510) 
([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `support_plan` and `Premium` sku tier. [\#508](https://github.com/Azure/terraform-azurerm-aks/pull/508) ([ecklm](https://github.com/ecklm)) -- Refactor code, split monolith tf config into multiple files [\#494](https://github.com/Azure/terraform-azurerm-aks/pull/494) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove `var.http_application_routing_enabled` [\#493](https://github.com/Azure/terraform-azurerm-aks/pull/493) ([lonegunmanb](https://github.com/lonegunmanb)) -- feat\(`http_proxy_config`\): Add `http_proxy_config` [\#492](https://github.com/Azure/terraform-azurerm-aks/pull/492) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove `public_network_access_enabled` entirely [\#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) ([lonegunmanb](https://github.com/lonegunmanb)) -- Ignore deprecated attribute `public_network_access_enabled` [\#485](https://github.com/Azure/terraform-azurerm-aks/pull/485) ([ishuar](https://github.com/ishuar)) -- feat: enable precondition on `default_node_pool` for autoscaling with node pool type [\#484](https://github.com/Azure/terraform-azurerm-aks/pull/484) ([ishuar](https://github.com/ishuar)) -- Add web\_app\_routing\_identity block to outputs [\#481](https://github.com/Azure/terraform-azurerm-aks/pull/481) ([bonddim](https://github.com/bonddim)) -- Add support for `kubelet_identity` nested block [\#479](https://github.com/Azure/terraform-azurerm-aks/pull/479) ([lonegunmanb](https://github.com/lonegunmanb)) -- Prepare for v8.0 [\#462](https://github.com/Azure/terraform-azurerm-aks/pull/462) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove precondition on extra node pool which prevent using windows pool with overlay [\#512](https://github.com/Azure/terraform-azurerm-aks/pull/512) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `maintenance_window_auto_upgrade` 
[\#505](https://github.com/Azure/terraform-azurerm-aks/pull/505) ([skolobov](https://github.com/skolobov)) -- Let the users decide whether adding a random suffix in cluster and pool's name or not. [\#496](https://github.com/Azure/terraform-azurerm-aks/pull/496) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add role assignments for ingress application gateway and corresponding example [\#426](https://github.com/Azure/terraform-azurerm-aks/pull/426) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for workload\_autoscaler\_profile settings [\#404](https://github.com/Azure/terraform-azurerm-aks/pull/404) ([bonddim](https://github.com/bonddim)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md deleted file mode 100644 index 05e2d7539..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md +++ /dev/null @@ -1,76 +0,0 @@ -# Changelog - -## [9.4.1](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.1) (2025-02-05) - -**Merged pull requests:** - -- Revert changes of `9.4.0` [\#635](https://github.com/Azure/terraform-azurerm-aks/pull/635) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [9.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.0) (2025-02-05) - -**Merged pull requests:** - -- Bump azapi provider to \>=2.0, \< 3.0 [\#632](https://github.com/Azure/terraform-azurerm-aks/pull/632) ([zioproto](https://github.com/zioproto)) -- Dependabot 624 626 [\#627](https://github.com/Azure/terraform-azurerm-aks/pull/627) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.28.0 to 0.30.0 in /test [\#626](https://github.com/Azure/terraform-azurerm-aks/pull/626) 
([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.48.0 to 0.48.1 in /test [\#624](https://github.com/Azure/terraform-azurerm-aks/pull/624) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Dependabot changes from PR 609 619 620 [\#621](https://github.com/Azure/terraform-azurerm-aks/pull/621) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.27.0 to 0.28.0 in /test [\#620](https://github.com/Azure/terraform-azurerm-aks/pull/620) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.47.2 to 0.48.0 in /test [\#619](https://github.com/Azure/terraform-azurerm-aks/pull/619) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#616](https://github.com/Azure/terraform-azurerm-aks/pull/616) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#615](https://github.com/Azure/terraform-azurerm-aks/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /test [\#609](https://github.com/Azure/terraform-azurerm-aks/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [9.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.3.0) (2024-12-11) - -**Merged pull requests:** - -- Support of oms\_agent\_enabled add-on [\#613](https://github.com/Azure/terraform-azurerm-aks/pull/613) ([lonegunmanb](https://github.com/lonegunmanb)) -- Implement node\_network\_profile for default node pool [\#598](https://github.com/Azure/terraform-azurerm-aks/pull/598) ([zioproto](https://github.com/zioproto)) -- Bump examples to AKS 1.30 [\#595](https://github.com/Azure/terraform-azurerm-aks/pull/595) ([zioproto](https://github.com/zioproto)) -- Add `v4` sub-folder so this module could run with AzureRM provider both `v3` and `v4`. 
[\#594](https://github.com/Azure/terraform-azurerm-aks/pull/594) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [9.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.2.0) (2024-11-07) - -**Merged pull requests:** - -- Make the Azure Key Vault public because private Key Vault requires preview API [\#599](https://github.com/Azure/terraform-azurerm-aks/pull/599) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.25.0 to 0.26.0 in /test [\#593](https://github.com/Azure/terraform-azurerm-aks/pull/593) ([lonegunmanb](https://github.com/lonegunmanb)) -- Use oidc as authentication method [\#592](https://github.com/Azure/terraform-azurerm-aks/pull/592) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update README.md [\#589](https://github.com/Azure/terraform-azurerm-aks/pull/589) ([shailwx](https://github.com/shailwx)) -- Add `cost_analysis_enabled` option [\#583](https://github.com/Azure/terraform-azurerm-aks/pull/583) ([artificial-aidan](https://github.com/artificial-aidan)) -- Bump github.com/Azure/terraform-module-test-helper from 0.24.0 to 0.25.0 in /test [\#581](https://github.com/Azure/terraform-azurerm-aks/pull/581) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.46.15 to 0.47.0 in /test [\#579](https://github.com/Azure/terraform-azurerm-aks/pull/579) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/Azure/terraform-module-test-helper from 0.22.0 to 0.24.0 in /test [\#574](https://github.com/Azure/terraform-azurerm-aks/pull/574) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 in /test [\#562](https://github.com/Azure/terraform-azurerm-aks/pull/562) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [9.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.1.0) (2024-07-04) - -**Merged pull requests:** - -- Downgrade next major 
version back to v9 [\#577](https://github.com/Azure/terraform-azurerm-aks/pull/577) ([lonegunmanb](https://github.com/lonegunmanb)) -- Restore devcontainer [\#576](https://github.com/Azure/terraform-azurerm-aks/pull/576) ([zioproto](https://github.com/zioproto)) -- set drainTimeoutInMinutes default value to null [\#575](https://github.com/Azure/terraform-azurerm-aks/pull/575) ([zioproto](https://github.com/zioproto)) -- fix README.md format [\#570](https://github.com/Azure/terraform-azurerm-aks/pull/570) ([joaoestrela](https://github.com/joaoestrela)) -- Bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /test [\#569](https://github.com/Azure/terraform-azurerm-aks/pull/569) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Start new Changelog file for v10 [\#567](https://github.com/Azure/terraform-azurerm-aks/pull/567) ([zioproto](https://github.com/zioproto)) -- fixed inaccurate variable descriptions for azure cni in overlay mode [\#566](https://github.com/Azure/terraform-azurerm-aks/pull/566) ([Xelef2000](https://github.com/Xelef2000)) -- add drain\_timeout\_in\_minutes and node\_soak\_duration\_in\_minutes [\#564](https://github.com/Azure/terraform-azurerm-aks/pull/564) ([zioproto](https://github.com/zioproto)) - -## [9.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.0.0) (2024-06-07) - -**Merged pull requests:** - -- Compromise on e2e tests involving ingress, since it's not stable [\#558](https://github.com/Azure/terraform-azurerm-aks/pull/558) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add weekly-codeql action [\#555](https://github.com/Azure/terraform-azurerm-aks/pull/555) ([lonegunmanb](https://github.com/lonegunmanb)) -- Change default value for `var.agents_pool_max_surge` to 10% [\#554](https://github.com/Azure/terraform-azurerm-aks/pull/554) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update Microsoft.ContainerService managedClusters API version to 2024-02-01 
[\#552](https://github.com/Azure/terraform-azurerm-aks/pull/552) ([olofmattsson-inriver](https://github.com/olofmattsson-inriver)) -- Bump github.com/Azure/terraform-module-test-helper from 0.19.0 to 0.22.0 in /test [\#549](https://github.com/Azure/terraform-azurerm-aks/pull/549) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Amending log analytics attributes [\#548](https://github.com/Azure/terraform-azurerm-aks/pull/548) ([lonegunmanb](https://github.com/lonegunmanb)) -- bump k8s version for example since 1.26 has been deprecated [\#540](https://github.com/Azure/terraform-azurerm-aks/pull/540) ([lonegunmanb](https://github.com/lonegunmanb)) -- fix\(typo\): typo in output variable [\#537](https://github.com/Azure/terraform-azurerm-aks/pull/537) ([mbaykara](https://github.com/mbaykara)) -- Bump github.com/Azure/terraform-module-test-helper from 0.18.0 to 0.19.0 in /test [\#521](https://github.com/Azure/terraform-azurerm-aks/pull/521) ([dependabot[bot]](https://github.com/apps/dependabot)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md deleted file mode 100644 index 9996f9928..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# Changelog - -## Important Notice - -* fix: add back `private_cluster_enabled` variable by @tobiasehlert [#667](https://github.com/Azure/terraform-azurerm-aks/pull/667) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md deleted file mode 100644 index af8b0207d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -This code of 
conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. - -Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile deleted file mode 100644 index 3db7ccd9d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile +++ /dev/null @@ -1,4 +0,0 @@ -SHELL := /bin/bash - -$(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) --include tfvmmakefile \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE deleted file mode 100644 index 21071075c..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md deleted file mode 100644 index f611a6a75..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md +++ /dev/null @@ -1,53 +0,0 @@ -# Notice on Upgrade to v10.x - -## AzAPI provider version constraint has been updated to `>=2.0, < 3.0`. - -## [`var.web_app_routing` type change](https://github.com/Azure/terraform-azurerm-aks/pull/606) - -`var.web_app_routing.dns_zone_id` has been replaced by `var.web_app_routing.dns_zone_ids`. The new variable is a list of DNS zone IDs. This change allows for the specification of multiple DNS zones for routing. - -## [`data.azurerm_resource_group.main` in this module has been removed, `var.location` is a required variable now.](https://github.com/Azure/terraform-azurerm-aks/pull/644) - -## [Create log analytics workspace would also create required monitor data collection rule now](https://github.com/Azure/terraform-azurerm-aks/pull/623) - -The changes in this pull request introduce support for a Data Collection Rule (DCR) for Azure Monitor Container Insights in the Terraform module. - -## [`CHANGELOG.md` file is no longer maintained, please read release note in GitHub repository instead](https://github.com/Azure/terraform-azurerm-aks/pull/651) - -[New release notes](https://github.com/Azure/terraform-azurerm-aks/releases). 
- -## [The following variables have been removed:](https://github.com/Azure/terraform-azurerm-aks/pull/652) - -* `agents_taints` -* `api_server_subnet_id` -* `private_cluster_enabled` -* `rbac_aad_client_app_id` -* `rbac_aad_managed` -* `rbac_aad_server_app_id` -* `rbac_aad_server_app_secret` - -## `var.pod_subnet_id` has been replaced by `var.pod_subnet.id` - -## `var.vnet_subnet_id` has been replaced by `var.vnet_subnet.id` - -## `var.node_pools.pod_subnet_id` has been replaced by `var.node_pools.pod_subnet.id` - -## `var.node_pools.vnet_subnet_id` has been replaced by `var.node_pools.vnet_subnet.id` - -## `azurerm_role_assignment.network_contributor` will be re-created - -Since `for_each`'s target has been changed from a set of string to a map of object to avoid "Known after apply" values in iterator, we have to re-create the `azurerm_role_assignment.network_contributor` resource. This will cause the role assignment to be removed and re-added, which may result in a brief period of time where the role assignment is not present. - -## When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself. - -## `var.client_secret` now is `sensitive` - -## New interval between cluster creation and kubernetes version upgrade - -New variable `interval_before_cluster_update` was added. Sometimes when we tried to update cluster's kubernetes version after cluster creation, we got the error `Operation is not allowed because there's an in progress update managed cluster operation on the managed cluster started`. A `time_sleep` was added to avoid such potential conflict. You can set this variable to `null` to bypass the sleep. - -## @zioproto is no longer a maintainer of this module - -For personal reasons, @zioproto is no longer a maintainer of this module. 
I want to express my sincere gratitude for his contributions and support over the years. His dedication and hard work are invaluable to this module. - -THANK YOU @zioproto ! diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md deleted file mode 100644 index 4f31d8157..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md +++ /dev/null @@ -1,93 +0,0 @@ -# Notice on Upgrade to v5.x - -V5.0.0 is a major version upgrade and a lot of breaking changes have been introduced. Extreme caution must be taken during the upgrade to avoid resource replacement and downtime by accident. - -Running the `terraform plan` first to inspect the plan is strongly advised. - -## Terraform and terraform-provider-azurerm version restrictions - -Now Terraform core's lowest version is v1.2.0 and terraform-provider-azurerm's lowest version is v3.21.0. - -## variable `user_assigned_identity_id` has been renamed. - -variable `user_assigned_identity_id` has been renamed to `identity_ids` and it's type has been changed from `string` to `list(string)`. - -## `addon_profile` in outputs is no longer available. 
- -It has been broken into the following new outputs: - -* `aci_connector_linux` -* `aci_connector_linux_enabled` -* `azure_policy_enabled` -* `http_application_routing_enabled` -* `ingress_application_gateway` -* `ingress_application_gateway_enabled` -* `key_vault_secrets_provider` -* `key_vault_secrets_provider_enabled` -* `oms_agent` -* `oms_agent_enabled` -* `open_service_mesh_enabled` - -## The following variables have been renamed from `enable_xxx` to `xxx_enabled` - -* `enable_azure_policy` has been renamed to `azure_policy_enabled` -* `enable_http_application_routing` has been renamed to `http_application_routing_enabled` -* `enable_ingress_application_gateway` has been renamed to `ingress_application_gateway_enabled` -* `enable_log_analytics_workspace` has been renamed to `log_analytics_workspace_enabled` -* `enable_open_service_mesh` has been renamed to `open_service_mesh_enabled` -* `enable_role_based_access_control` has been renamed to `role_based_access_control_enabled` - -## `nullable = true` has been added to the following variables so setting them to `null` explicitly will use the default value - -* `log_analytics_workspace_enable` -* `os_disk_type` -* `private_cluster_enabled` -* `rbac_aad_managed` -* `rbac_aad_admin_group_object_ids` -* `network_policy` -* `enable_node_public_ip` - -## `var.admin_username`'s default value has been removed - -In v4.x `var.admin_username` has a default value `azureuser` and has been removed in V5.0.0. Since the `admin_username` argument in `linux_profile` block is a ForceNew argument, any value change to this argument will trigger a Kubernetes cluster replacement **SO THE EXTREME CAUTION MUST BE TAKEN**. The module's callers must set `var.admin_username` to `azureuser` explicitly if they didn't set it before. - -## `module.ssh-key` has been removed - -The file named `private_ssh_key` which contains the tls private key will be deleted since the `local_file` resource has been removed. 
Now the private key is exported via `generated_cluster_private_ssh_key` in output and the corresponding public key is exported via `generated_cluster_public_ssh_key` in output. - -A `moved` block has been added to relocate the existing `tls_private_key` resource to the new address. If the `var.admin_username` is not `null`, no action is needed. - -Resource `tls_private_key`'s creation now is conditional. Users may see the destruction of existing `tls_private_key` in the generated plan if `var.admin_username` is `null`. - -## `system_assigned_identity` in the output has been renamed to `cluster_identity` - -The `system_assigned_identity` was: - -```hcl -output "system_assigned_identity" { - value = azurerm_kubernetes_cluster.main.identity -} -``` - -Now it has been renamed to `cluster_identity`, and the block has been changed to: - -```hcl -output "cluster_identity" { - description = "The `azurerm_kubernetes_cluster`'s `identity` block." - value = try(azurerm_kubernetes_cluster.main.identity[0], null) -} -``` - -The callers who used to read the cluster's identity block need to remove the index in their expression, from `module.aks.system_assigned_identity[0]` to `module.aks.cluster_identity`. - -## The following outputs are now sensitive. 
All outputs referenced them must be declared as sensitive too - -* `client_certificate` -* `client_key` -* `cluster_ca_certificate` -* `generated_cluster_private_ssh_key` -* `host` -* `kube_admin_config_raw` -* `kube_config_raw` -* `password` -* `username` diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md deleted file mode 100644 index e75b87ea3..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md +++ /dev/null @@ -1,5 +0,0 @@ -# Notice on Upgrade to v6.x - -We've added a CI pipeline for this module to speed up our code review and to enforce a high code quality standard, if you want to contribute by submitting a pull request, please read [Pre-Commit & Pr-Check & Test](#Pre-Commit--Pr-Check--Test) section, or your pull request might be rejected by CI pipeline. - -A pull request will be reviewed when it has passed Pre Pull Request Check in the pipeline, and will be merged when it has passed the acceptance tests. Once the ci Pipeline failed, please read the pipeline's output, thanks for your cooperation. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md deleted file mode 100644 index e3c1f41a5..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md +++ /dev/null @@ -1,52 +0,0 @@ -# Notice on Upgrade to v7.x - -## Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard` - -AzureRM's minimum version is `>= 3.51, < 4.0` now. -[`var.sku_tier` cannot be set to `Paid` anymore](https://github.com/hashicorp/terraform-provider-azurerm/issues/20887), now possible values are `Free` and `Standard`. 
- -## Ignore changes on `kubernetes_version` from outside of Terraform - -Related issue: #335 - -Two new resources would be created when upgrading from v6.x to v7.x: - -* `null_resource.kubernetes_version_keeper` -* `azapi_update_resource.aks_cluster_post_create` - -`azurerm_kubernetes_cluster.main` resource would ignore change on `kubernetes_version` from outside of Terraform in case AKS cluster's patch version has been upgraded automatically. -When you change `var.kubernetes_version`'s value, it would trigger a re-creation of `null_resource.kubernetes_version_keeper` and re-creation of `azapi_update_resource.aks_cluster_post_create`, which would upgrade the AKS cluster's `kubernetes_version`. - -`azapi` provider is required to be configured in your Terraform configuration. - -## Fix #315 by amending missing `linux_os_config` block - -In v6.0, `default_node_pool.linux_os_config` block won't be added to `azurerm_kubernetes_cluster.main` resource when `var.enable_auto_scaling` is `true`. This bug has been fixed in v7.0.0 so you might see a diff on `azurerm_kubernetes_cluster.main` resource. - -## Wrap `log_analytics_solution_id` to an object to fix #263. - -`var.log_analytics_solution_id` is now an object with `id` attribute. This change is to fix #263. - -## Remove unused net_profile_docker_bridge_cidr - -`var.net_profile_docker_bridge_cidr` has been [deprecated](https://github.com/hashicorp/terraform-provider-azurerm/issues/18119) and is not used in the module anymore and has been removed. - -## Add `create_before_destroy=true` to node pools #357 - -Now `azurerm_kubernetes_cluster_node_pool.node_pool` resource has `create_before_destroy=true` to avoid downtime when upgrading node pools. Users must be aware that there would be a "random" suffix added into pool's name, this suffix's length is `4`, so your previous node pool's name `nodepool1` would be `nodepool1xxxx`. 
This suffix is calculated from node pool's config, the same configuration would lead to the same suffix. You might need to shorten your node pool's name because of this new added suffix. - -To enable this feature, we've also added new `null_resource.pool_name_keeper` to track node pool's name in case you've changed the name. - -## Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` #361 - -As the [document](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#public_network_access_enabled) described: - ->When `public_network_access_enabled` is set to true, `0.0.0.0/32` must be added to `authorized_ip_ranges` in the `api_server_access_profile block`. - -We'll add `api_server_access_profile` nested block after AzureRM provider's v4.0, but starting from v7.0 we'll enforce such pre-condition check. - -## Add `depends_on` to `azurerm_kubernetes_cluster_node_pool` resources #418 - -If you have `azurerm_kubernetes_cluster_node_pool` resources not managed with this module (`var.nodepools`) you -must have an explicit `depends_on` on those resources to avoid conflicting nodepools operations. -See issue #418 for more details. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md deleted file mode 100644 index 96077ba1a..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md +++ /dev/null @@ -1,53 +0,0 @@ -# Notice on Upgrade to v8.x - -## New variable `cluster_name_random_suffix` - -1. A new variable `cluster_name_random_suffix` is added. This allows users to decide whether they want to add a random suffix to a cluster's name. This is particularly useful when Terraform needs to recreate a resource that cannot be updated in-place, as it avoids naming conflicts. 
Because of [#357](https://github.com/Azure/terraform-azurerm-aks/pull/357), now the `azurerm_kubernetes_cluster` resource is `create_before_destroy = true` now, we cannot turn this feature off. If you want to recreate this cluster by one apply without any trouble, please turn this random naming suffix on to avoid the naming conflict. - -2. The `create_before_destroy` attribute is added to the `node_pools` variable as an object field. This attribute determines whether a new node pool should be created before the old one is destroyed during updates. By default, it is set to `true`. - -3. The naming of extra node pools has been updated. Now, a random UUID is used as the seed for the random suffix in the name of the node pool, instead of the JSON-encoded value of the node pool. **This naming suffix only apply for extra node pools that create before destroy.** - -You're recommended to set `var.cluster_name_random_suffix` to `true` explicitly, and you'll see a random suffix in your cluster's name. If you don't like this suffix, please remember now a new cluster with the same name would be created before the old one has been deleted. If you do want to recreate the cluster, please run `terraform destroy` first. - -## Remove `var.http_application_routing_enabled` - -According to the [document](https://learn.microsoft.com/en-us/azure/aks/http-application-routing), HTTP application routing add-on for AKS has been retired so we have to remove this feature from this module. - -1. The variable `http_application_routing_enabled` has been removed from the module. This variable was previously used to enable HTTP Application Routing Addon. - -2. The `http_application_routing_enabled` output has been removed from `outputs.tf`. This output was previously used to display whether HTTP Application Routing was enabled. - -3. The `http_application_routing_enabled` attribute has been removed from the `azurerm_kubernetes_cluster` resource in `main.tf`. 
This attribute was previously used to enable HTTP Application Routing for the Kubernetes cluster. - -4. The `http_application_routing_enabled` attribute has been added to the `ignore_changes` lifecycle block of the `azurerm_kubernetes_cluster` resource in `main.tf`. This means changes to this attribute will not trigger the resource to be updated. - -These changes mean that users of this module will no longer be able to enable HTTP Application Routing through this module. - -The new feature for the Ingress in AKS is [Managed NGINX ingress with the application routing add-on](https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default), you can enable this with `var.web_app_routing`. - -Users who were using this feature, please read this [Migrate document](https://learn.microsoft.com/en-us/azure/aks/app-routing-migration). - -## Remove `public_network_access_enabled` entirely - -According to this [announcement](https://github.com/Azure/AKS/issues/3690), now public network access for AKS is no longer supported. - -The primary impact [#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) is the complete removal of the `public_network_access_enabled` variable from the module. - -1. The `public_network_access_enabled` variable has been removed from the `variables.tf` file. This means that the module no longer supports the configuration of public network access at the Kubernetes cluster level. - -2. The `public_network_access_enabled` variable has also been removed from the `main.tf` file and all example files (`application_gateway_ingress/main.tf`, `multiple_node_pools/main.tf`, `named_cluster/main.tf`, `startup/main.tf`, `with_acr/main.tf`, `without_monitor/main.tf`). This indicates that the module no longer uses this variable in the creation of the Azure Kubernetes Service (AKS) resource. - -3. The `public_network_access_enabled` has been added into `azurerm_kubernetes_cluster`'s `ignore_changes` list. 
Any change to this attribute won't trigger update. - -## Add role assignments for ingress application gateway - -The `variables.tf` file is updated with new variables related to the application gateway for ingress, including `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress`. - -The `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress` variables are used to configure the Application Gateway Ingress for the Azure Kubernetes Service (AKS) in the Terraform module. - -1. `brown_field_application_gateway_for_ingress`: This variable is used when you want to use an existing Application Gateway as the ingress for the AKS cluster. It is an object that contains the ID of the Application Gateway (`id`) and the ID of the Subnet (`subnet_id`) which the Application Gateway is connected to. If this variable is set, the module will not create a new Application Gateway and will use the existing one instead. - -2. `green_field_application_gateway_for_ingress`: This variable is used when you want the module to create a new Application Gateway for the AKS cluster. It is an object that contains the name of the Application Gateway to be used or created in the Nodepool Resource Group (`name`), the subnet CIDR to be used to create an Application Gateway (`subnet_cidr`), and the ID of the subnet on which to create an Application Gateway (`subnet_id`). If this variable is set, the module will create a new Application Gateway with the provided configuration. - -3. `create_role_assignments_for_application_gateway`: This is a boolean variable that determines whether to create the corresponding role assignments for the application gateway or not. By default, it is set to `true`. Role assignments are necessary for the Application Gateway to function correctly with the AKS cluster. 
If set to `true`, the module will create the necessary role assignments on the Application Gateway. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md deleted file mode 100644 index 9bd796e2d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md +++ /dev/null @@ -1,9 +0,0 @@ -# Notice on Upgrade to v9.x - -## New default value for variable `agents_pool_max_surge` - -variable `agents_pool_max_surge` now has default value `10%`. This change might cause configuration drift. If you want to keep the old value, please set it explicitly in your configuration. - -## API version for `azapi_update_resource` resource has been upgraded from `Microsoft.ContainerService/managedClusters@2023-01-02-preview` to `Microsoft.ContainerService/managedClusters@2024-02-01`. - -After a test, it won't affect the existing Terraform state and cause configuration drift. The upgrade is caused by the retirement of original API. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md deleted file mode 100644 index e754e5a7f..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md +++ /dev/null @@ -1,490 +0,0 @@ -# terraform-azurerm-aks - -## Deploys a Kubernetes cluster (AKS) on Azure with monitoring support through Azure Log Analytics - -This Terraform module deploys a Kubernetes cluster on Azure using AKS (Azure Kubernetes Service) and adds support for monitoring with Log Analytics. - --> **NOTE:** If you have not assigned `client_id` or `client_secret`, A `SystemAssigned` identity will be created. - --> **NOTE:** If you're using AzureRM `v4`, you can use this module by setting `source` to `Azure/aks/azurerm//v4`. 
- -## Notice on breaking changes - -Please be aware that major version(e.g., from 6.8.0 to 7.0.0) update contains breaking changes that may impact your infrastructure. It is crucial to review these changes with caution before proceeding with the upgrade. - -In most cases, you will need to adjust your Terraform code to accommodate the changes introduced in the new major version. We strongly recommend reviewing the changelog and migration guide to understand the modifications and ensure a smooth transition. - -To help you in this process, we have provided detailed documentation on the breaking changes, new features, and any deprecated functionalities. Please take the time to read through these resources to avoid any potential issues or disruptions to your infrastructure. - -* [Notice on Upgrade to v10.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov10.0.md) -* [Notice on Upgrade to v9.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov9.0.md) -* [Notice on Upgrade to v8.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov8.0.md) -* [Notice on Upgrade to v7.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov7.0.md) -* [Notice on Upgrade to v6.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov6.0.md) -* [Notice on Upgrade to v5.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov5.0.md) - -Remember, upgrading to a major version with breaking changes should be done carefully and thoroughly tested in your environment. If you have any questions or concerns, please don't hesitate to reach out to our support team for assistance. - -## Usage in Terraform 1.2.0 - -Please view folders in `examples`. - -The module supports some outputs that may be used to configure a kubernetes -provider after deploying an AKS cluster. 
- -```hcl -provider "kubernetes" { - host = module.aks.host - client_certificate = base64decode(module.aks.client_certificate) - client_key = base64decode(module.aks.client_key) - cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate) -} -``` - -There're some examples in the examples folder. You can execute `terraform apply` command in `examples`'s sub folder to try the module. These examples are tested against every PR with the [E2E Test](#Pre-Commit--Pr-Check--Test). - -## Enable or disable tracing tags - -We're using [BridgeCrew Yor](https://github.com/bridgecrewio/yor) and [yorbox](https://github.com/lonegunmanb/yorbox) to help manage tags consistently across infrastructure as code (IaC) frameworks. In this module you might see tags like: - -```hcl -resource "azurerm_resource_group" "rg" { - location = "eastus" - name = random_pet.name - tags = merge(var.tags, (/**/ (var.tracing_tags_enabled ? { for k, v in /**/ { - avm_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" - avm_git_file = "main.tf" - avm_git_last_modified_at = "2023-05-05 08:57:54" - avm_git_org = "lonegunmanb" - avm_git_repo = "terraform-yor-tag-test-module" - avm_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" - } /**/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /**/)) -} -``` - -To enable tracing tags, set the variable to true: - -```hcl -module "example" { -source = "{module_source}" -... -tracing_tags_enabled = true -} -``` - -The `tracing_tags_enabled` is default to `false`. - -To customize the prefix for your tracing tags, set the `tracing_tags_prefix` variable value in your Terraform configuration: - -```hcl -module "example" { -source = "{module_source}" -... 
-tracing_tags_prefix = "custom_prefix_" -} -``` - -The actual applied tags would be: - -```text -{ -custom_prefix_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" -custom_prefix_git_file = "main.tf" -custom_prefix_git_last_modified_at = "2023-05-05 08:57:54" -custom_prefix_git_org = "lonegunmanb" -custom_prefix_git_repo = "terraform-yor-tag-test-module" -custom_prefix_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" -} -``` - -## Pre-Commit & Pr-Check & Test - -### Configurations - -- [Configure Terraform for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/terraform-install-configure) - -We assumed that you have setup service principal's credentials in your environment variables like below: - -```shell -export ARM_SUBSCRIPTION_ID="" -export ARM_TENANT_ID="" -export ARM_CLIENT_ID="" -export ARM_CLIENT_SECRET="" -``` - -On Windows Powershell: - -```shell -$env:ARM_SUBSCRIPTION_ID="" -$env:ARM_TENANT_ID="" -$env:ARM_CLIENT_ID="" -$env:ARM_CLIENT_SECRET="" -``` - -We provide a docker image to run the pre-commit checks and tests for you: `mcr.microsoft.com/azterraform:latest` - -To run the pre-commit task, we can run the following command: - -```shell -$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit -``` - -On Windows Powershell: - -```shell -$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit -``` - -In pre-commit task, we will: - -1. Run `terraform fmt -recursive` command for your Terraform code. -2. Run `terrafmt fmt -f` command for markdown files and go code files to ensure that the Terraform code embedded in these files are well formatted. -3. Run `go mod tidy` and `go mod vendor` for test folder to ensure that all the dependencies have been synced. -4. Run `gofmt` for all go code files. -5. Run `gofumpt` for all go code files. -6. Run `terraform-docs` on `README.md` file, then run `markdown-table-formatter` to format markdown tables in `README.md`. 
- -Then we can run the pr-check task to check whether our code meets our pipeline's requirement(We strongly recommend you run the following command before you commit): - -```shell -$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pr-check -``` - -On Windows Powershell: - -```shell -$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pr-check -``` - -To run the e2e-test, we can run the following command: - -```text -docker run --rm -v $(pwd):/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -On Windows Powershell: - -```text -docker run --rm -v ${pwd}:/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -To follow [**Ensure AKS uses disk encryption set**](https://docs.bridgecrew.io/docs/ensure-that-aks-uses-disk-encryption-set) policy we've used `azurerm_key_vault` in example codes, and to follow [**Key vault does not allow firewall rules settings**](https://docs.bridgecrew.io/docs/ensure-that-key-vault-allows-firewall-rules-settings) we've limited the ip cidr on it's `network_acls`. 
By default we'll use the ip returned by `https://api.ipify.org?format=json` api as your public ip, but in case you need to use another cidr, you can set an environment variable like below: - -```text -docker run --rm -v $(pwd):/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -On Windows Powershell: -```text -docker run --rm -v ${pwd}:/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -#### Prerequisites - -- [Docker](https://www.docker.com/community-edition#/download) - -## Authors - -Originally created by [Damien Caro](http://github.com/dcaro) and [Malte Lantin](http://github.com/n01d) - -## License - -[MIT](LICENSE) - -# Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
- -## Module Spec - -The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!** - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.3 | -| [azapi](#requirement\_azapi) | >=2.0, < 3.0 | -| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 | -| [null](#requirement\_null) | >= 3.0 | -| [time](#requirement\_time) | >= 0.5 | -| [tls](#requirement\_tls) | >= 3.1 | - -## Providers - -| Name | Version | -|------|---------| -| [azapi](#provider\_azapi) | >=2.0, < 3.0 | -| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 | -| [null](#provider\_null) | >= 3.0 | -| [time](#provider\_time) | >= 0.5 | -| [tls](#provider\_tls) | >= 3.1 | - -## Modules - -No modules. - -## Resources - -| Name | Type | -|------|------| -| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | -| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | -| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | -| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | -| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | -| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | -| 
[azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | -| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource | -| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource | -| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| 
[null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | -| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | -| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | -| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | -| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | -| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | -| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source | -| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source | - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no | -| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | 
(Optional) aci\_connector\_linux subnet name | `string` | `null` | no | -| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. | `number` | `2` | no | -| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no | -| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no | -| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no | -| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no | -| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no | -| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) |
list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
}))
| `[]` | no | -| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) |
list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
| `[]` | no | -| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no | -| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no | -| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no | -| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no | -| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no | -| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no | -| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no | -| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no | -| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. 
Changing this forces some new resources to be created. | `map(string)` | `{}` | no | -| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no | -| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no | -| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no | -| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no | -| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no | -| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no | -| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no | -| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`. 
| `number` | `45` | no | -| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no | -| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no | -| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no | -| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. 
| `string` | `"0.5"` | no | -| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no | -| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no | -| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no | -| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no | -| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no | -| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. |
object({
id = string
subnet_id = string
})
| `null` | no | -| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no | -| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no | -| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no | -| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no | -| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no | -| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. |
object({
sgx_quote_helper_enabled = bool
})
| `null` | no | -| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no | -| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no | -| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no | -| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no | -| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 |
object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
})
|
{
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
}
| no | -| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no | -| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no | -| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no | -| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no | -| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no | -| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. 
| `bool` | `false` | no | -| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. |
object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
})
| `null` | no | -| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. |
object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
})
| `null` | no | -| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no | -| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no | -| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no | -| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no | -| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no | -| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no | -| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no | -| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no | -| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. 
| `string` | `"Public"` | no | -| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. |
object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
})
| `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | -| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no | -| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no | -| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no | -| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. 
https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no | -| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no | -| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no | -| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no | -| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no | -| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no | -| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes | -| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. |
object({
id = string
})
| `null` | no | -| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. |
object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
})
| `null` | no | -| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no | -| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no | -| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no | -| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no | -| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. |
object({
identity_ids = optional(set(string))
type = string
})
| `null` | no | -| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no | -| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no | -| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no | -| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no | -| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no | -| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. |
object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
})
| `null` | no | -| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | -| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` -
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | -| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no | -| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` |
[
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no | -| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` |
[
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no | -| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` |
[
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no | -| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) |
object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
})
| `null` | no | -| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no | -| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. |
object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
})
| `null` | no | -| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no | -| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. 
| `list(string)` | `null` | no | -| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no | -| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created. | `string` | `null` | no | -| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no | -| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. 
| `string` | `null` | no | -| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. |
object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
})
| `null` | no | -| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no | -| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflicts. Defaults to `true`.
})) |
map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
}))
| `{}` | no | -| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no | -| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no | -| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no | -| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no | -| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no | -| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | -| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no | -| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no | -| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. 
Changing this forces a new resource to be created. | `string` | `null` | no | -| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | -| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no | -| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no | -| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no | -| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no | -| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no | -| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no | -| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no | -| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no | -| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. 
If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no | -| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes | -| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no | -| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no | -| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no | -| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no | -| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no | -| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. |
object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
})
| `null` | no | -| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no | -| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no | -| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no | -| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no | -| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no | -| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no | -| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no | -| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no | -| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. 
| `string` | `"KubernetesOfficial"` | no | -| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no | -| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no | -| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no | -| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | -| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) |
object({
dns_zone_ids = list(string)
})
| `null` | no | -| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. |
object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
})
| `null` | no | -| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. | -| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? | -| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | -| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | -| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | -| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. | -| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. | -| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. | -| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. | -| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. 
| -| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) | -| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace | -| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace | -| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace | -| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | -| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | -| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | -| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. | -| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. | -| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
| -| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. | -| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. | -| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). | -| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. | -| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. | -| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. | -| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? | -| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. 
| -| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? | -| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. | -| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. | -| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. | -| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. | -| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block | -| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. | -| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. | -| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. | -| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. | -| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? 
| -| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | -| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. | -| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. | -| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object. | - diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md deleted file mode 100644 index 869fdfe2b..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 
- -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
- - diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf deleted file mode 100644 index 7f368600b..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf +++ /dev/null @@ -1,317 +0,0 @@ -moved { - from = azurerm_kubernetes_cluster_node_pool.node_pool - to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { - for_each = local.node_pools_create_before_destroy - - kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" - capacity_reservation_group_id = each.value.capacity_reservation_group_id - eviction_policy = each.value.eviction_policy - fips_enabled = each.value.fips_enabled - gpu_instance = each.value.gpu_instance - host_group_id = each.value.host_group_id - kubelet_disk_type = each.value.kubelet_disk_type - max_count = each.value.max_count - max_pods = each.value.max_pods - min_count = each.value.min_count - mode = each.value.mode - node_count = each.value.node_count - node_labels = each.value.node_labels - node_public_ip_prefix_id = each.value.node_public_ip_prefix_id - node_taints = each.value.node_taints - orchestrator_version = each.value.orchestrator_version - os_disk_size_gb = each.value.os_disk_size_gb - os_disk_type = each.value.os_disk_type - os_sku = each.value.os_sku - os_type = each.value.os_type - pod_subnet_id = try(each.value.pod_subnet.id, null) - priority = each.value.priority - proximity_placement_group_id = each.value.proximity_placement_group_id - scale_down_mode = each.value.scale_down_mode - snapshot_id = each.value.snapshot_id - spot_max_price = each.value.spot_max_price - tags = each.value.tags - ultra_ssd_enabled = each.value.ultra_ssd_enabled - vm_size = each.value.vm_size - vnet_subnet_id = try(each.value.vnet_subnet.id, null) - 
workload_runtime = each.value.workload_runtime - zones = each.value.zones - - dynamic "kubelet_config" { - for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"] - - content { - allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls - container_log_max_line = each.value.kubelet_config.container_log_max_files - container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb - cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled - cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period - cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy - image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold - image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold - pod_max_pid = each.value.kubelet_config.pod_max_pid - topology_manager_policy = each.value.kubelet_config.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] - - content { - swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb - transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag - transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] - - content { - fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr - fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max - fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches - fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open - kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max - net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog - net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max - net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default - net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max - net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn - net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default - net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max - net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max - vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count - vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness - vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] - - content { - application_security_group_ids = each.value.node_network_profile.application_security_group_ids - node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags - - dynamic "allowed_host_ports" { - for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports - - content { - port_end = allowed_host_ports.value.port_end - port_start = allowed_host_ports.value.port_start - protocol = allowed_host_ports.value.protocol - } - } - } - } - dynamic "upgrade_settings" { - for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] - - content { - max_surge = each.value.upgrade_settings.max_surge - drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes - node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes - } - } - dynamic "windows_profile" { - for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] - - content { - outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - create_before_destroy = true - ignore_changes = [ - name - ] - replace_triggered_by = [ - null_resource.pool_name_keeper[each.key], - ] - - precondition { - condition = can(regex("[a-z0-9]{1,8}", each.value.name)) - error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" - } - precondition { - condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) - error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " - } - precondition { - condition = var.agents_type == "VirtualMachineScaleSets" - error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." 
- } - } -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { - for_each = local.node_pools_create_after_destroy - - kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - name = each.value.name - capacity_reservation_group_id = each.value.capacity_reservation_group_id - eviction_policy = each.value.eviction_policy - fips_enabled = each.value.fips_enabled - host_group_id = each.value.host_group_id - kubelet_disk_type = each.value.kubelet_disk_type - max_count = each.value.max_count - max_pods = each.value.max_pods - min_count = each.value.min_count - mode = each.value.mode - node_count = each.value.node_count - node_labels = each.value.node_labels - node_public_ip_prefix_id = each.value.node_public_ip_prefix_id - node_taints = each.value.node_taints - orchestrator_version = each.value.orchestrator_version - os_disk_size_gb = each.value.os_disk_size_gb - os_disk_type = each.value.os_disk_type - os_sku = each.value.os_sku - os_type = each.value.os_type - pod_subnet_id = try(each.value.pod_subnet.id, null) - priority = each.value.priority - proximity_placement_group_id = each.value.proximity_placement_group_id - scale_down_mode = each.value.scale_down_mode - snapshot_id = each.value.snapshot_id - spot_max_price = each.value.spot_max_price - tags = each.value.tags - ultra_ssd_enabled = each.value.ultra_ssd_enabled - vm_size = each.value.vm_size - vnet_subnet_id = try(each.value.vnet_subnet.id, null) - workload_runtime = each.value.workload_runtime - zones = each.value.zones - - dynamic "kubelet_config" { - for_each = each.value.kubelet_config == null ? 
[] : ["kubelet_config"] - - content { - allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls - container_log_max_line = each.value.kubelet_config.container_log_max_files - container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb - cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled - cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period - cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy - image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold - image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold - pod_max_pid = each.value.kubelet_config.pod_max_pid - topology_manager_policy = each.value.kubelet_config.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] - - content { - swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb - transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag - transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] - - content { - fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr - fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max - fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches - fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open - kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max - net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog - net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max - net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default - net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max - net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn - net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default - net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max - net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max - vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count - vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness - vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] - - content { - node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags - } - } - dynamic "upgrade_settings" { - for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] - - content { - max_surge = each.value.upgrade_settings.max_surge - drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes - node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes - } - } - dynamic "windows_profile" { - for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] - - content { - outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - precondition { - condition = can(regex("[a-z0-9]{1,8}", each.value.name)) - error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" - } - precondition { - condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) - error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " - } - precondition { - condition = var.agents_type == "VirtualMachineScaleSets" - error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." - } - } -} - -resource "null_resource" "pool_name_keeper" { - for_each = var.node_pools - - triggers = { - pool_name = each.value.name - } - - lifecycle { - precondition { - condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids) - error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself." 
- } - } -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf deleted file mode 100644 index 500f27ece..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf +++ /dev/null @@ -1,17 +0,0 @@ -# tflint-ignore-file: azurerm_resource_tag - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip - message_of_the_day = each.value.message_of_the_day -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip - message_of_the_day = each.value.message_of_the_day -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf deleted file mode 100644 index 2b69dfe13..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf +++ /dev/null @@ -1,74 +0,0 @@ -locals { - # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. - auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? 
var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete - # automatic upgrades are either: - # - null - # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null - # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null - automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : ( - (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || - (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) - ) - cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") - # Abstract the decision whether to create an Analytics Workspace or not. - create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null - create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null - default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) - # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 - existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) - existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] - existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) - existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) - # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 - existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) - existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) - existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) - existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) - ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress - # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. - # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled - # is set to `true`. - log_analytics_workspace = var.log_analytics_workspace_enabled ? ( - # The Log Analytics Workspace should be enabled: - var.log_analytics_workspace == null ? { - # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. - # Create an `azurerm_log_analytics_workspace` resource and use that. - id = local.azurerm_log_analytics_workspace_id - name = local.azurerm_log_analytics_workspace_name - location = local.azurerm_log_analytics_workspace_location - resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name - } : { - # `log_analytics_workspace` is supplied. Let's use that. 
- id = var.log_analytics_workspace.id - name = var.log_analytics_workspace.name - location = var.log_analytics_workspace.location - # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 - resource_group_name = split("/", var.log_analytics_workspace.id)[4] - } - ) : null # Finally, the Log Analytics Workspace should be disabled. - node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } - node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } - private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) - query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) - subnet_ids = [for _, s in local.subnets : s.id] - subnets = merge({ for k, v in merge( - [ - for key, pool in var.node_pools : { - "${key}-vnet-subnet" : pool.vnet_subnet, - "${key}-pod-subnet" : pool.pod_subnet, - } - ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { - "vnet-subnet" : { - id = var.vnet_subnet.id - } - }) - # subnet_ids = for id in local.potential_subnet_ids : id if id != null - use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null - use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null - valid_private_dns_zone_regexs = [ - "private\\.[a-z0-9]+\\.azmk8s\\.io", - "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", - "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io", - "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io", - ] -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf deleted file mode 100644 index fe51625be..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf +++ /dev/null @@ -1,124 +0,0 @@ -resource "azurerm_log_analytics_workspace" "main" { - count = local.create_analytics_workspace ? 1 : 0 - - location = var.location - name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace") - resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name) - allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions - cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced - daily_quota_gb = var.log_analytics_workspace_daily_quota_gb - data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id - immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled - internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled - internet_query_enabled = var.log_analytics_workspace_internet_query_enabled - local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled - reservation_capacity_in_gb_per_day = 
var.log_analytics_workspace_reservation_capacity_in_gb_per_day - retention_in_days = var.log_retention_in_days - sku = var.log_analytics_workspace_sku - tags = var.tags - - dynamic "identity" { - for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity] - - content { - type = identity.value.type - identity_ids = identity.value.identity_ids - } - } - - lifecycle { - precondition { - condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix)) - error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`." - } - } -} - -locals { - azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null) - azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null) - azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null) - azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null) -} - -data "azurerm_log_analytics_workspace" "main" { - count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0 - - name = var.log_analytics_workspace.name - resource_group_name = local.log_analytics_workspace.resource_group_name -} - -resource "azurerm_log_analytics_solution" "main" { - count = local.create_analytics_solution ? 
1 : 0 - - location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null)) - resource_group_name = local.log_analytics_workspace.resource_group_name - solution_name = "ContainerInsights" - workspace_name = local.log_analytics_workspace.name - workspace_resource_id = local.log_analytics_workspace.id - tags = var.tags - - plan { - product = "OMSGallery/ContainerInsights" - publisher = "Microsoft" - } -} - -locals { - dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null) -} - -resource "azurerm_monitor_data_collection_rule" "dcr" { - count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 - - location = local.dcr_location - name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}" - resource_group_name = var.resource_group_name - description = "DCR for Azure Monitor Container Insights" - tags = var.tags - - data_flow { - destinations = [local.log_analytics_workspace.name] - streams = var.monitor_data_collection_rule_extensions_streams - } - data_flow { - destinations = [local.log_analytics_workspace.name] - streams = ["Microsoft-Syslog"] - } - destinations { - log_analytics { - name = local.log_analytics_workspace.name - workspace_resource_id = local.log_analytics_workspace.id - } - } - data_sources { - extension { - extension_name = "ContainerInsights" - name = "ContainerInsightsExtension" - streams = var.monitor_data_collection_rule_extensions_streams - extension_json = jsonencode({ - "dataCollectionSettings" : { - interval = var.data_collection_settings.data_collection_interval - namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection - namespaces = var.data_collection_settings.namespaces_for_data_collection - enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled - } 
- }) - } - syslog { - facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities - log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels - name = "sysLogsDataSource" - streams = ["Microsoft-Syslog"] - } - } -} - -resource "azurerm_monitor_data_collection_rule_association" "dcra" { - count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 - - target_resource_id = azurerm_kubernetes_cluster.main.id - data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id - description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster." - name = "ContainerInsightsExtension" -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf deleted file mode 100644 index 0a8dc8e59..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf +++ /dev/null @@ -1,741 +0,0 @@ -moved { - from = module.ssh-key.tls_private_key.ssh - to = tls_private_key.ssh[0] -} - -resource "tls_private_key" "ssh" { - count = var.admin_username == null ? 0 : 1 - - algorithm = "RSA" - rsa_bits = 2048 -} - -resource "azurerm_kubernetes_cluster" "main" { - location = var.location - name = "${local.cluster_name}${var.cluster_name_random_suffix ? 
substr(md5(uuid()), 0, 4) : ""}" - resource_group_name = var.resource_group_name - azure_policy_enabled = var.azure_policy_enabled - cost_analysis_enabled = var.cost_analysis_enabled - disk_encryption_set_id = var.disk_encryption_set_id - dns_prefix = var.prefix - dns_prefix_private_cluster = var.dns_prefix_private_cluster - image_cleaner_enabled = var.image_cleaner_enabled - image_cleaner_interval_hours = var.image_cleaner_interval_hours - kubernetes_version = var.kubernetes_version - local_account_disabled = var.local_account_disabled - node_resource_group = var.node_resource_group - oidc_issuer_enabled = var.oidc_issuer_enabled - open_service_mesh_enabled = var.open_service_mesh_enabled - private_cluster_enabled = var.private_cluster_enabled - private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled - private_dns_zone_id = var.private_dns_zone_id - role_based_access_control_enabled = var.role_based_access_control_enabled - run_command_enabled = var.run_command_enabled - sku_tier = var.sku_tier - support_plan = var.support_plan - tags = var.tags - workload_identity_enabled = var.workload_identity_enabled - - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] - - content { - name = var.agents_pool_name - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = null - max_pods = var.agents_max_pods - min_count = null - node_count = var.agents_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = try(var.pod_subnet.id, null) - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vm_size = var.agents_size - vnet_subnet_id = try(var.vnet_subnet.id, null) - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = var.node_network_profile == null ? [] : [var.node_network_profile] - - content { - application_security_group_ids = node_network_profile.value.application_security_group_ids - node_public_ip_tags = node_network_profile.value.node_public_ip_tags - - dynamic "allowed_host_ports" { - for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports - - content { - port_end = allowed_host_ports.value.port_end - port_start = allowed_host_ports.value.port_start - protocol = allowed_host_ports.value.protocol - } - } - } - } - dynamic "upgrade_settings" { - for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = var.agents_pool_max_surge - drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes - node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes - } - } - } - } - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] - - content { - name = var.agents_pool_name - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = var.agents_max_count - max_pods = var.agents_max_pods - min_count = var.agents_min_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = try(var.pod_subnet.id, null) - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vm_size = var.agents_size - vnet_subnet_id = try(var.vnet_subnet.id, null) - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "upgrade_settings" { - for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = var.agents_pool_max_surge - drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes - node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes - } - } - } - } - dynamic "aci_connector_linux" { - for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] - - content { - subnet_name = var.aci_connector_linux_subnet_name - } - } - dynamic "api_server_access_profile" { - for_each = var.api_server_authorized_ip_ranges != null ? [ - "api_server_access_profile" - ] : [] - - content { - authorized_ip_ranges = var.api_server_authorized_ip_ranges - } - } - dynamic "auto_scaler_profile" { - for_each = var.auto_scaler_profile_enabled ? 
["default_auto_scaler_profile"] : [] - - content { - balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups - empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max - expander = var.auto_scaler_profile_expander - max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec - max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time - max_unready_nodes = var.auto_scaler_profile_max_unready_nodes - max_unready_percentage = var.auto_scaler_profile_max_unready_percentage - new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay - scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add - scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete - scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure - scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded - scale_down_unready = var.auto_scaler_profile_scale_down_unready - scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold - scan_interval = var.auto_scaler_profile_scan_interval - skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage - skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods - } - } - dynamic "azure_active_directory_role_based_access_control" { - for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : [] - - content { - admin_group_object_ids = var.rbac_aad_admin_group_object_ids - azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled - managed = true - tenant_id = var.rbac_aad_tenant_id - } - } - dynamic "confidential_computing" { - for_each = var.confidential_computing == null ? 
[] : [var.confidential_computing] - - content { - sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled - } - } - dynamic "http_proxy_config" { - for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"] - - content { - http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy) - https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy) - no_proxy = var.http_proxy_config.no_proxy - trusted_ca = var.http_proxy_config.trusted_ca - } - } - dynamic "identity" { - for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : [] - - content { - type = var.identity_type - identity_ids = var.identity_ids - } - } - dynamic "ingress_application_gateway" { - for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : [] - - content { - gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null) - gateway_name = try(var.green_field_application_gateway_for_ingress.name, null) - subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null) - subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null) - } - } - dynamic "key_management_service" { - for_each = var.kms_enabled ? ["key_management_service"] : [] - - content { - key_vault_key_id = var.kms_key_vault_key_id - key_vault_network_access = var.kms_key_vault_network_access - } - } - dynamic "key_vault_secrets_provider" { - for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] - - content { - secret_rotation_enabled = var.secret_rotation_enabled - secret_rotation_interval = var.secret_rotation_interval - } - } - dynamic "kubelet_identity" { - for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] - - content { - client_id = kubelet_identity.value.client_id - object_id = kubelet_identity.value.object_id - user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id - } - } - dynamic "linux_profile" { - for_each = var.admin_username == null ? [] : ["linux_profile"] - - content { - admin_username = var.admin_username - - ssh_key { - key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "") - } - } - } - dynamic "maintenance_window" { - for_each = var.maintenance_window != null ? ["maintenance_window"] : [] - - content { - dynamic "allowed" { - for_each = var.maintenance_window.allowed - - content { - day = allowed.value.day - hours = allowed.value.hours - } - } - dynamic "not_allowed" { - for_each = var.maintenance_window.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "maintenance_window_auto_upgrade" { - for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] - - content { - duration = maintenance_window_auto_upgrade.value.duration - frequency = maintenance_window_auto_upgrade.value.frequency - interval = maintenance_window_auto_upgrade.value.interval - day_of_month = maintenance_window_auto_upgrade.value.day_of_month - day_of_week = maintenance_window_auto_upgrade.value.day_of_week - start_date = maintenance_window_auto_upgrade.value.start_date - start_time = maintenance_window_auto_upgrade.value.start_time - utc_offset = maintenance_window_auto_upgrade.value.utc_offset - week_index = maintenance_window_auto_upgrade.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "maintenance_window_node_os" { - for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] - - content { - duration = maintenance_window_node_os.value.duration - frequency = maintenance_window_node_os.value.frequency - interval = maintenance_window_node_os.value.interval - day_of_month = maintenance_window_node_os.value.day_of_month - day_of_week = maintenance_window_node_os.value.day_of_week - start_date = maintenance_window_node_os.value.start_date - start_time = maintenance_window_node_os.value.start_time - utc_offset = maintenance_window_node_os.value.utc_offset - week_index = maintenance_window_node_os.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "microsoft_defender" { - for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] - - content { - log_analytics_workspace_id = local.log_analytics_workspace.id - } - } - dynamic "monitor_metrics" { - for_each = var.monitor_metrics != null ? 
["monitor_metrics"] : [] - - content { - annotations_allowed = var.monitor_metrics.annotations_allowed - labels_allowed = var.monitor_metrics.labels_allowed - } - } - network_profile { - network_plugin = var.network_plugin - dns_service_ip = var.net_profile_dns_service_ip - ebpf_data_plane = var.ebpf_data_plane - ip_versions = var.network_ip_versions - load_balancer_sku = var.load_balancer_sku - network_data_plane = var.network_data_plane - network_mode = var.network_mode - network_plugin_mode = var.network_plugin_mode - network_policy = var.network_policy - outbound_type = var.net_profile_outbound_type - pod_cidr = var.net_profile_pod_cidr - pod_cidrs = var.net_profile_pod_cidrs - service_cidr = var.net_profile_service_cidr - service_cidrs = var.net_profile_service_cidrs - - dynamic "load_balancer_profile" { - for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ - "load_balancer_profile" - ] : [] - - content { - idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes - managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count - managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count - outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids - outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids - outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated - } - } - dynamic "nat_gateway_profile" { - for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile] - - content { - idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes - managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count - } - } - } - dynamic "oms_agent" { - for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? 
["oms_agent"] : [] - - content { - log_analytics_workspace_id = local.log_analytics_workspace.id - msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled - } - } - dynamic "service_mesh_profile" { - for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] - - content { - mode = var.service_mesh_profile.mode - external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled - internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled - } - } - dynamic "service_principal" { - for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] - - content { - client_id = var.client_id - client_secret = var.client_secret - } - } - dynamic "storage_profile" { - for_each = var.storage_profile_enabled ? ["storage_profile"] : [] - - content { - blob_driver_enabled = var.storage_profile_blob_driver_enabled - disk_driver_enabled = var.storage_profile_disk_driver_enabled - disk_driver_version = var.storage_profile_disk_driver_version - file_driver_enabled = var.storage_profile_file_driver_enabled - snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled - } - } - dynamic "web_app_routing" { - for_each = var.web_app_routing == null ? [] : ["web_app_routing"] - - content { - dns_zone_ids = var.web_app_routing.dns_zone_ids - } - } - dynamic "workload_autoscaler_profile" { - for_each = var.workload_autoscaler_profile == null ? 
[] : [var.workload_autoscaler_profile] - - content { - keda_enabled = workload_autoscaler_profile.value.keda_enabled - vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled - } - } - - depends_on = [ - null_resource.pool_name_keeper, - ] - - lifecycle { - ignore_changes = [ - http_application_routing_enabled, - http_proxy_config[0].no_proxy, - kubernetes_version, - # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. - name, - ] - replace_triggered_by = [ - null_resource.kubernetes_cluster_name_keeper.id - ] - - precondition { - condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "") - error_message = "Either `client_id` and `client_secret` or `identity_type` must be set." - } - precondition { - # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128 - condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0) - error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well." - } - precondition { - condition = var.identity_ids == null || var.client_id == "" - error_message = "Cannot set both `client_id` and `identity_ids`." - } - precondition { - condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium") - error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled." 
- } - precondition { - condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled) - error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true." - } - precondition { - condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard") - error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`" - } - precondition { - condition = local.automatic_channel_upgrade_check - error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`." - } - precondition { - condition = !(var.kms_enabled && var.identity_type != "UserAssigned") - error_message = "KMS etcd encryption doesn't work with system-assigned managed identity." - } - precondition { - condition = !var.workload_identity_enabled || var.oidc_issuer_enabled - error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity" - } - precondition { - condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure" - error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure." - } - precondition { - condition = var.network_policy != "azure" || var.network_plugin == "azure" - error_message = "network_policy must be `azure` when network_plugin is `azure`" - } - precondition { - condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure" - error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure." 
- } - precondition { - condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null - error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified." - } - precondition { - condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster)) - error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`." - } - precondition { - condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage" - error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`." - } - precondition { - condition = (var.kubelet_identity == null) || ( - (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0) - error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set." - } - precondition { - condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets" - error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes." - } - precondition { - condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null - error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`." - } - precondition { - condition = var.prefix == null || var.dns_prefix_private_cluster == null - error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." 
- } - precondition { - condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled - error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`." - } - precondition { - condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != "" - error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone" - } - precondition { - condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)])) - error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following format: `privatelink..azmk8s.io`, `.privatelink..azmk8s.io`, `private..azmk8s.io`, `.private..azmk8s.io`" - } - } -} - -resource "null_resource" "kubernetes_cluster_name_keeper" { - triggers = { - name = local.cluster_name - } -} - -resource "null_resource" "kubernetes_version_keeper" { - triggers = { - version = var.kubernetes_version - } -} - -resource "time_sleep" "interval_before_cluster_update" { - count = var.interval_before_cluster_update == null ? 
0 : 1 - - create_duration = var.interval_before_cluster_update - - depends_on = [ - azurerm_kubernetes_cluster.main, - ] - - lifecycle { - replace_triggered_by = [ - null_resource.kubernetes_version_keeper.id, - ] - } -} - -resource "azapi_update_resource" "aks_cluster_post_create" { - resource_id = azurerm_kubernetes_cluster.main.id - type = "Microsoft.ContainerService/managedClusters@2024-02-01" - body = { - properties = { - kubernetesVersion = var.kubernetes_version - } - } - - depends_on = [ - time_sleep.interval_before_cluster_update, - ] - - lifecycle { - ignore_changes = all - replace_triggered_by = [null_resource.kubernetes_version_keeper.id] - } -} - -resource "null_resource" "http_proxy_config_no_proxy_keeper" { - count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0 - - triggers = { - http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "") - } -} - -resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" { - count = can(var.http_proxy_config.no_proxy[0]) ? 
1 : 0 - - resource_id = azurerm_kubernetes_cluster.main.id - type = "Microsoft.ContainerService/managedClusters@2024-02-01" - body = { - properties = { - httpProxyConfig = { - noProxy = var.http_proxy_config.no_proxy - } - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - ignore_changes = all - replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id] - } -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf deleted file mode 100644 index a1f537658..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf +++ /dev/null @@ -1,6 +0,0 @@ -# tflint-ignore-file: azurerm_resource_tag - -resource "azurerm_kubernetes_cluster" "main" { - automatic_channel_upgrade = var.automatic_channel_upgrade - node_os_channel_upgrade = var.node_os_channel_upgrade -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf deleted file mode 100644 index e3d37ce76..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf +++ /dev/null @@ -1,231 +0,0 @@ -output "aci_connector_linux" { - description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource." - value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null) -} - -output "aci_connector_linux_enabled" { - description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?" - value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0]) -} - -output "admin_client_certificate" { - description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." 
- sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "") -} - -output "admin_client_key" { - description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "") -} - -output "admin_cluster_ca_certificate" { - description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "") -} - -output "admin_host" { - description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "") -} - -output "admin_password" { - description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "") -} - -output "admin_username" { - description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "") -} - -output "aks_id" { - description = "The `azurerm_kubernetes_cluster`'s id." - value = azurerm_kubernetes_cluster.main.id -} - -output "aks_name" { - description = "The `azurerm_kubernetes_cluster`'s name." 
- value = azurerm_kubernetes_cluster.main.name -} - -output "azure_policy_enabled" { - description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)" - value = azurerm_kubernetes_cluster.main.azure_policy_enabled -} - -output "azurerm_log_analytics_workspace_id" { - description = "The id of the created Log Analytics workspace" - value = try(azurerm_log_analytics_workspace.main[0].id, null) -} - -output "azurerm_log_analytics_workspace_name" { - description = "The name of the created Log Analytics workspace" - value = try(azurerm_log_analytics_workspace.main[0].name, null) -} - -output "azurerm_log_analytics_workspace_primary_shared_key" { - description = "Specifies the workspace key of the log analytics workspace" - sensitive = true - value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null) -} - -output "client_certificate" { - description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate -} - -output "client_key" { - description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].client_key -} - -output "cluster_ca_certificate" { - description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." 
- sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate -} - -output "cluster_fqdn" { - description = "The FQDN of the Azure Kubernetes Managed Cluster." - value = azurerm_kubernetes_cluster.main.fqdn -} - -output "cluster_identity" { - description = "The `azurerm_kubernetes_cluster`'s `identity` block." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.identity[0], null) -} - -output "cluster_portal_fqdn" { - description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.portal_fqdn -} - -output "cluster_private_fqdn" { - description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.private_fqdn -} - -output "generated_cluster_private_ssh_key" { - description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null -} - -output "generated_cluster_public_ssh_key" { - description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)." - value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? 
(var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null -} - -output "host" { - description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].host -} - -output "http_application_routing_zone_name" { - description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing." - value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : "" -} - -output "ingress_application_gateway" { - description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block." - value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null) -} - -output "ingress_application_gateway_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?" - value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0]) -} - -output "key_vault_secrets_provider" { - description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block." - value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null) -} - -output "key_vault_secrets_provider_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?" - value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0]) -} - -output "kube_admin_config_raw" { - description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled." 
- sensitive = true - value = azurerm_kubernetes_cluster.main.kube_admin_config_raw -} - -output "kube_config_raw" { - description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config_raw -} - -output "kubelet_identity" { - description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block." - value = azurerm_kubernetes_cluster.main.kubelet_identity -} - -output "location" { - description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created." - value = azurerm_kubernetes_cluster.main.location -} - -output "network_profile" { - description = "The `azurerm_kubernetes_cluster`'s `network_profile` block" - value = azurerm_kubernetes_cluster.main.network_profile -} - -output "node_resource_group" { - description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.node_resource_group -} - -output "node_resource_group_id" { - description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.node_resource_group_id -} - -output "oidc_issuer_url" { - description = "The OIDC issuer URL that is associated with the cluster." - value = azurerm_kubernetes_cluster.main.oidc_issuer_url -} - -output "oms_agent" { - description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument." - value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null) -} - -output "oms_agent_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?" 
- value = can(azurerm_kubernetes_cluster.main.oms_agent[0]) -} - -output "open_service_mesh_enabled" { - description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." - value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled -} - -output "password" { - description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].password -} - -output "username" { - description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].username -} - -output "web_app_routing_identity" { - description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object." 
- value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, []) -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf deleted file mode 100644 index e9601eaf0..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf +++ /dev/null @@ -1,126 +0,0 @@ -resource "azurerm_role_assignment" "acr" { - for_each = var.attached_acr_id_map - - principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id - scope = each.value - role_definition_name = "AcrPull" - skip_service_principal_aad_check = true -} - -# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity -data "azurerm_user_assigned_identity" "cluster_identity" { - count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 - - name = split("/", var.identity_ids[0])[8] - resource_group_name = split("/", var.identity_ids[0])[4] -} - -# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) -# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets -# used by the system node pool and by any additional node pools. -# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites -# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites -# https://github.com/Azure/terraform-azurerm-aks/issues/178 -resource "azurerm_role_assignment" "network_contributor" { - for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? 
local.subnets : {} - - principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) - scope = each.value.id - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = length(var.network_contributor_role_assigned_subnet_ids) == 0 - error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." - } - } -} - -resource "azurerm_role_assignment" "network_contributor_on_subnet" { - for_each = var.network_contributor_role_assigned_subnet_ids - - principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) - scope = each.value - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = !var.create_role_assignment_network_contributor - error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." - } - } -} - -data "azurerm_client_config" "this" {} - -data "azurerm_virtual_network" "application_gateway_vnet" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 - - name = local.existing_application_gateway_subnet_vnet_name - resource_group_name = local.existing_application_gateway_subnet_resource_group_name -} - -resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = data.azurerm_virtual_network.application_gateway_vnet[0].id - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress - error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`." - } - } -} - -resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2)) - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null) - error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`." - } - } -} - -resource "azurerm_role_assignment" "existing_application_gateway_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = var.brown_field_application_gateway_for_ingress.id - role_definition_name = "Contributor" - - lifecycle { - precondition { - condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress - error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." - } - } -} - -data "azurerm_resource_group" "ingress_gw" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 - - name = local.existing_application_gateway_resource_group_for_ingress -} - -data "azurerm_resource_group" "aks_rg" { - count = var.create_role_assignments_for_application_gateway ? 1 : 0 - - name = var.resource_group_name -} - -resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { - count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id - role_definition_name = "Reader" -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile deleted file mode 100644 index 7f28c53a5..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile +++ /dev/null @@ -1,85 +0,0 @@ -REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" - -fmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash - -fumpt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash - -gosec: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash - -tffmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash - -tffmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash - -tfvalidatecheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash - -terrafmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash - -gofmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash - -golint: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash - -tflint: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash - -lint: golint tflint gosec - -checkovcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash - -checkovplancheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash - -fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck - -pr-check: depscheck fmtcheck lint 
unit-test checkovcheck - -unit-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash - -e2e-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash - -version-upgrade-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash - -terrafmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash - -pre-commit: tffmt terrafmt depsensure fmt fumpt generate - -depsensure: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash - -depscheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash - -generate: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash - -gencheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash - -yor-tag: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash - -autofix: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash - -test: fmtcheck - @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash - @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash - -build-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash - -.PHONY: fmt fmtcheck pr-check \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf deleted file mode 100644 index c819f9b89..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf +++ /dev/null @@ -1,1601 +0,0 @@ -variable "location" { - type = string - description = "Location of cluster, if not defined it will be read from the resource-group" -} - -variable 
"resource_group_name" { - type = string - description = "The existing resource group name to use" -} - -variable "aci_connector_linux_enabled" { - type = bool - default = false - description = "Enable Virtual Node pool" -} - -variable "aci_connector_linux_subnet_name" { - type = string - default = null - description = "(Optional) aci_connector_linux subnet name" -} - -variable "admin_username" { - type = string - default = null - description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created." -} - -variable "agents_availability_zones" { - type = list(string) - default = null - description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." -} - -variable "agents_count" { - type = number - default = 2 - description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes." -} - -variable "agents_labels" { - type = map(string) - default = {} - description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." -} - -variable "agents_max_count" { - type = number - default = null - description = "Maximum number of nodes in a pool" -} - -variable "agents_max_pods" { - type = number - default = null - description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." 
-} - -variable "agents_min_count" { - type = number - default = null - description = "Minimum number of nodes in a pool" -} - -variable "agents_pool_drain_timeout_in_minutes" { - type = number - default = null - description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." -} - -variable "agents_pool_kubelet_configs" { - type = list(object({ - cpu_manager_policy = optional(string) - cpu_cfs_quota_enabled = optional(bool, true) - cpu_cfs_quota_period = optional(string) - image_gc_high_threshold = optional(number) - image_gc_low_threshold = optional(number) - topology_manager_policy = optional(string) - allowed_unsafe_sysctls = optional(set(string)) - container_log_max_size_mb = optional(number) - container_log_max_line = optional(number) - pod_max_pid = optional(number) - })) - default = [] - description = <<-EOT - list(object({ - cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. - cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. - cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. - image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. - image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. 
- topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. - allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. - container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. - container_log_max_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. - pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. - })) -EOT - nullable = false -} - -variable "agents_pool_linux_os_configs" { - type = list(object({ - sysctl_configs = optional(list(object({ - fs_aio_max_nr = optional(number) - fs_file_max = optional(number) - fs_inotify_max_user_watches = optional(number) - fs_nr_open = optional(number) - kernel_threads_max = optional(number) - net_core_netdev_max_backlog = optional(number) - net_core_optmem_max = optional(number) - net_core_rmem_default = optional(number) - net_core_rmem_max = optional(number) - net_core_somaxconn = optional(number) - net_core_wmem_default = optional(number) - net_core_wmem_max = optional(number) - net_ipv4_ip_local_port_range_min = optional(number) - net_ipv4_ip_local_port_range_max = optional(number) - net_ipv4_neigh_default_gc_thresh1 = optional(number) - net_ipv4_neigh_default_gc_thresh2 = optional(number) - net_ipv4_neigh_default_gc_thresh3 = optional(number) - net_ipv4_tcp_fin_timeout = optional(number) - net_ipv4_tcp_keepalive_intvl = optional(number) - net_ipv4_tcp_keepalive_probes = optional(number) - net_ipv4_tcp_keepalive_time = optional(number) - 
net_ipv4_tcp_max_syn_backlog = optional(number) - net_ipv4_tcp_max_tw_buckets = optional(number) - net_ipv4_tcp_tw_reuse = optional(bool) - net_netfilter_nf_conntrack_buckets = optional(number) - net_netfilter_nf_conntrack_max = optional(number) - vm_max_map_count = optional(number) - vm_swappiness = optional(number) - vm_vfs_cache_pressure = optional(number) - })), []) - transparent_huge_page_enabled = optional(string) - transparent_huge_page_defrag = optional(string) - swap_file_size_mb = optional(number) - })) - default = [] - description = <<-EOT - list(object({ - sysctl_configs = optional(list(object({ - fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. - fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. - fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. - fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. - kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. - net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. - net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. - net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. 
Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. - net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. - net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. 
Must be between `1` and `15`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. - net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. - vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. - vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. - vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. - })), []) - transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. - transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. 
Changing this forces a new resource to be created. - swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created. - })) -EOT - nullable = false -} - -variable "agents_pool_max_surge" { - type = string - default = "10%" - description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." -} - -variable "agents_pool_name" { - type = string - default = "nodepool" - description = "The default Azure AKS agentpool (nodepool) name." - nullable = false -} - -variable "agents_pool_node_soak_duration_in_minutes" { - type = number - default = 0 - description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." -} - -variable "agents_proximity_placement_group_id" { - type = string - default = null - description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." -} - -variable "agents_size" { - type = string - default = "Standard_D2s_v3" - description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." -} - -variable "agents_tags" { - type = map(string) - default = {} - description = "(Optional) A mapping of tags to assign to the Node Pool." -} - -variable "agents_type" { - type = string - default = "VirtualMachineScaleSets" - description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets." -} - -variable "api_server_authorized_ip_ranges" { - type = set(string) - default = null - description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes." 
-} - -variable "attached_acr_id_map" { - type = map(string) - default = {} - description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created." - nullable = false -} - -variable "auto_scaler_profile_balance_similar_node_groups" { - type = bool - default = false - description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`." -} - -variable "auto_scaler_profile_empty_bulk_delete_max" { - type = number - default = 10 - description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`." -} - -variable "auto_scaler_profile_enabled" { - type = bool - default = false - description = "Enable configuring the auto scaler profile" - nullable = false -} - -variable "auto_scaler_profile_expander" { - type = string - default = "random" - description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`." - - validation { - condition = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander) - error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`." - } -} - -variable "auto_scaler_profile_max_graceful_termination_sec" { - type = string - default = "600" - description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`." -} - -variable "auto_scaler_profile_max_node_provisioning_time" { - type = string - default = "15m" - description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`." -} - -variable "auto_scaler_profile_max_unready_nodes" { - type = number - default = 3 - description = "Maximum Number of allowed unready nodes. Defaults to `3`." 
-} - -variable "auto_scaler_profile_max_unready_percentage" { - type = number - default = 45 - description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`." -} - -variable "auto_scaler_profile_new_pod_scale_up_delay" { - type = string - default = "10s" - description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`." -} - -variable "auto_scaler_profile_scale_down_delay_after_add" { - type = string - default = "10m" - description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`." -} - -variable "auto_scaler_profile_scale_down_delay_after_delete" { - type = string - default = null - description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`." -} - -variable "auto_scaler_profile_scale_down_delay_after_failure" { - type = string - default = "3m" - description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`." -} - -variable "auto_scaler_profile_scale_down_unneeded" { - type = string - default = "10m" - description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`." -} - -variable "auto_scaler_profile_scale_down_unready" { - type = string - default = "20m" - description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`." -} - -variable "auto_scaler_profile_scale_down_utilization_threshold" { - type = string - default = "0.5" - description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`." 
-} - -variable "auto_scaler_profile_scan_interval" { - type = string - default = "10s" - description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`." -} - -variable "auto_scaler_profile_skip_nodes_with_local_storage" { - type = bool - default = true - description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`." -} - -variable "auto_scaler_profile_skip_nodes_with_system_pods" { - type = bool - default = true - description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`." -} - -variable "automatic_channel_upgrade" { - type = string - default = null - description = <<-EOT - (Optional) Defines the automatic upgrade channel for the AKS cluster. - Possible values: - * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").** - * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.** - - By default, automatic upgrades are disabled. - More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster - EOT - - validation { - condition = var.automatic_channel_upgrade == null ? true : contains([ - "patch", "stable", "rapid", "node-image" - ], var.automatic_channel_upgrade) - error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`." - } -} - -variable "azure_policy_enabled" { - type = bool - default = false - description = "Enable Azure Policy Addon." 
-} - -variable "brown_field_application_gateway_for_ingress" { - type = object({ - id = string - subnet_id = string - }) - default = null - description = <<-EOT - [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing) - * `id` - (Required) The ID of the Application Gateway that be used as cluster ingress. - * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. - EOT -} - -variable "client_id" { - type = string - default = "" - description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment" - nullable = false -} - -variable "client_secret" { - type = string - default = "" - description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment" - nullable = false - sensitive = true -} - -variable "cluster_log_analytics_workspace_name" { - type = string - default = null - description = "(Optional) The name of the Analytics workspace" -} - -variable "cluster_name" { - type = string - default = null - description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)" -} - -variable "cluster_name_random_suffix" { - type = bool - default = false - description = "Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict." 
- nullable = false -} - -variable "confidential_computing" { - type = object({ - sgx_quote_helper_enabled = bool - }) - default = null - description = "(Optional) Enable Confidential Computing." -} - -variable "cost_analysis_enabled" { - type = bool - default = false - description = "(Optional) Enable Cost Analysis." -} - -variable "create_monitor_data_collection_rule" { - type = bool - default = true - description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`." - nullable = false -} - -variable "create_role_assignment_network_contributor" { - type = bool - default = false - description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster" - nullable = false -} - -variable "create_role_assignments_for_application_gateway" { - type = bool - default = true - description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`." - nullable = false -} - -variable "data_collection_settings" { - type = object({ - data_collection_interval = string - namespace_filtering_mode_for_data_collection = string - namespaces_for_data_collection = list(string) - container_log_v2_enabled = bool - }) - default = { - data_collection_interval = "1m" - namespace_filtering_mode_for_data_collection = "Off" - namespaces_for_data_collection = ["kube-system", "gatekeeper-system", "azure-arc"] - container_log_v2_enabled = true - } - description = <<-EOT - `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m. - `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection. - `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode. 
- `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs. - See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 - EOT -} - -variable "default_node_pool_fips_enabled" { - type = bool - default = null - description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." -} - -variable "disk_encryption_set_id" { - type = string - default = null - description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created." -} - -variable "dns_prefix_private_cluster" { - type = string - default = null - description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created." -} - -variable "ebpf_data_plane" { - type = string - default = null - description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created." -} - -variable "enable_auto_scaling" { - type = bool - default = false - description = "Enable node pool autoscaling" -} - -variable "enable_host_encryption" { - type = bool - default = false - description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" -} - -variable "enable_node_public_ip" { - type = bool - default = false - description = "(Optional) Should nodes in this Node Pool have a Public IP Address? 
Defaults to false." -} - -variable "green_field_application_gateway_for_ingress" { - type = object({ - name = optional(string) - subnet_cidr = optional(string) - subnet_id = optional(string) - }) - default = null - description = <<-EOT - [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new) - * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. - * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. - * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. -EOT - - validation { - condition = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr))) - error_message = "One of `subnet_cidr` and `subnet_id` must be specified." - } -} - -variable "http_proxy_config" { - type = object({ - http_proxy = optional(string) - https_proxy = optional(string) - no_proxy = optional(list(string)) - trusted_ca = optional(string) - }) - default = null - description = <<-EOT - optional(object({ - http_proxy = (Optional) The proxy address to be used when communicating over HTTP. - https_proxy = (Optional) The proxy address to be used when communicating over HTTPS. - no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field. 
- trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format. - })) - Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. -EOT - - validation { - condition = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)) - error_message = "`http_proxy` and `https_proxy` cannot be both empty." - } -} - -variable "identity_ids" { - type = list(string) - default = null - description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster." -} - -variable "identity_type" { - type = string - default = "SystemAssigned" - description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well." - - validation { - condition = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned" - error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`" - } -} - -variable "image_cleaner_enabled" { - type = bool - default = false - description = "(Optional) Specifies whether Image Cleaner is enabled." -} - -variable "image_cleaner_interval_hours" { - type = number - default = 48 - description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`." -} - -variable "interval_before_cluster_update" { - type = string - default = "30s" - description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update." 
-} - -variable "key_vault_secrets_provider_enabled" { - type = bool - default = false - description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver" - nullable = false -} - -variable "kms_enabled" { - type = bool - default = false - description = "(Optional) Enable Azure KeyVault Key Management Service." - nullable = false -} - -variable "kms_key_vault_key_id" { - type = string - default = null - description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier." -} - -variable "kms_key_vault_network_access" { - type = string - default = "Public" - description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`." - - validation { - condition = contains(["Private", "Public"], var.kms_key_vault_network_access) - error_message = "Possible values are `Private` and `Public`" - } -} - -variable "kubelet_identity" { - type = object({ - client_id = optional(string) - object_id = optional(string) - user_assigned_identity_id = optional(string) - }) - default = null - description = <<-EOT - - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. - - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. - - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. 
-EOT -} - -variable "kubernetes_version" { - type = string - default = null - description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region" -} - -variable "load_balancer_profile_enabled" { - type = bool - default = false - description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`." - nullable = false -} - -variable "load_balancer_profile_idle_timeout_in_minutes" { - type = number - default = 30 - description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive." -} - -variable "load_balancer_profile_managed_outbound_ip_count" { - type = number - default = null - description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive" -} - -variable "load_balancer_profile_managed_outbound_ipv6_count" { - type = number - default = null - description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature" -} - -variable "load_balancer_profile_outbound_ip_address_ids" { - type = set(string) - default = null - description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer." 
-} - -variable "load_balancer_profile_outbound_ip_prefix_ids" { - type = set(string) - default = null - description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer." -} - -variable "load_balancer_profile_outbound_ports_allocated" { - type = number - default = 0 - description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`" -} - -variable "load_balancer_sku" { - type = string - default = "standard" - description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created." - - validation { - condition = contains(["basic", "standard"], var.load_balancer_sku) - error_message = "Possible values are `basic` and `standard`" - } -} - -variable "local_account_disabled" { - type = bool - default = null - description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information." -} - -variable "log_analytics_solution" { - type = object({ - id = string - }) - default = null - description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution." - - validation { - condition = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != "" - error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`." 
- } -} - -variable "log_analytics_workspace" { - type = object({ - id = string - name = string - location = optional(string) - resource_group_name = optional(string) - }) - default = null - description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace." -} - -variable "log_analytics_workspace_allow_resource_only_permissions" { - type = bool - default = null - description = "(Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`." -} - -variable "log_analytics_workspace_cmk_for_query_forced" { - type = bool - default = null - description = "(Optional) Is Customer Managed Storage mandatory for query management?" -} - -variable "log_analytics_workspace_daily_quota_gb" { - type = number - default = null - description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted." -} - -variable "log_analytics_workspace_data_collection_rule_id" { - type = string - default = null - description = "(Optional) The ID of the Data Collection Rule to use for this workspace." -} - -variable "log_analytics_workspace_enabled" { - type = bool - default = true - description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard" - nullable = false -} - -variable "log_analytics_workspace_identity" { - type = object({ - identity_ids = optional(set(string)) - type = string - }) - default = null - description = <<-EOT - - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`. - - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. 
Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. -EOT -} - -variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" { - type = bool - default = null - description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days." -} - -variable "log_analytics_workspace_internet_ingestion_enabled" { - type = bool - default = null - description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`." -} - -variable "log_analytics_workspace_internet_query_enabled" { - type = bool - default = null - description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`." -} - -variable "log_analytics_workspace_local_authentication_disabled" { - type = bool - default = null - description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`." -} - -variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" { - type = number - default = null - description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`." -} - -variable "log_analytics_workspace_resource_group_name" { - type = string - default = null - description = "(Optional) Resource group name to create azurerm_log_analytics_solution." -} - -variable "log_analytics_workspace_sku" { - type = string - default = "PerGB2018" - description = "The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018" -} - -variable "log_retention_in_days" { - type = number - default = 30 - description = "The retention period for the logs in days" -} - -variable "maintenance_window" { - type = object({ - allowed = optional(list(object({ - day = string - hours = set(number) - })), [ - ]), - not_allowed = optional(list(object({ - end = string - start = string - })), []), - }) - default = null - description = "(Optional) Maintenance configuration of the managed cluster." -} - -variable "maintenance_window_auto_upgrade" { - type = object({ - day_of_month = optional(number) - day_of_week = optional(string) - duration = number - frequency = string - interval = number - start_date = optional(string) - start_time = optional(string) - utc_offset = optional(string) - week_index = optional(string) - not_allowed = optional(set(object({ - end = string - start = string - }))) - }) - default = null - description = <<-EOT - - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). - - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thurday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. - - `duration` - (Required) The duration of the window for maintenance to run in hours. - - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. - - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. - - `start_date` - (Optional) The date on which the maintenance window begins to take effect. - - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. 
- - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. - - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. - - --- - `not_allowed` block supports the following: - - `end` - (Required) The end of a time span, formatted as an RFC3339 string. - - `start` - (Required) The start of a time span, formatted as an RFC3339 string. -EOT -} - -variable "maintenance_window_node_os" { - type = object({ - day_of_month = optional(number) - day_of_week = optional(string) - duration = number - frequency = string - interval = number - start_date = optional(string) - start_time = optional(string) - utc_offset = optional(string) - week_index = optional(string) - not_allowed = optional(set(object({ - end = string - start = string - }))) - }) - default = null - description = <<-EOT - - `day_of_month` - - - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thurday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. - - `duration` - (Required) The duration of the window for maintenance to run in hours. - - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. - - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. - - `start_date` - (Optional) The date on which the maintenance window begins to take effect. - - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. - - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. - - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. 
- - --- - `not_allowed` block supports the following: - - `end` - (Required) The end of a time span, formatted as an RFC3339 string. - - `start` - (Required) The start of a time span, formatted as an RFC3339 string. -EOT -} - -variable "microsoft_defender_enabled" { - type = bool - default = false - description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`." - nullable = false -} - -variable "monitor_data_collection_rule_data_sources_syslog_facilities" { - type = list(string) - default = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"] - description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog" -} - -variable "monitor_data_collection_rule_data_sources_syslog_levels" { - type = list(string) - default = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"] - description = "List of syslog levels" -} - -variable "monitor_data_collection_rule_extensions_streams" { - type = list(any) - default = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"] - description = "An array of container insights table streams. 
See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr" -} - -variable "monitor_metrics" { - type = object({ - annotations_allowed = optional(string) - labels_allowed = optional(string) - }) - default = null - description = <<-EOT - (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster - object({ - annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric." - labels_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric." - }) -EOT -} - -variable "msi_auth_for_monitoring_enabled" { - type = bool - default = null - description = "(Optional) Is managed identity authentication for monitoring enabled?" -} - -variable "nat_gateway_profile" { - type = object({ - idle_timeout_in_minutes = optional(number) - managed_outbound_ip_count = optional(number) - }) - default = null - description = <<-EOT - `nat_gateway_profile` block supports the following: - - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`. - - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. -EOT -} - -variable "net_profile_dns_service_ip" { - type = string - default = null - description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created." 
-} - -variable "net_profile_outbound_type" { - type = string - default = "loadBalancer" - description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer." -} - -variable "net_profile_pod_cidr" { - type = string - default = null - description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created." -} - -variable "net_profile_pod_cidrs" { - type = list(string) - default = null - description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." -} - -variable "net_profile_service_cidr" { - type = string - default = null - description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created." -} - -variable "net_profile_service_cidrs" { - type = list(string) - default = null - description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." -} - -variable "network_contributor_role_assigned_subnet_ids" { - type = map(string) - default = {} - description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id" - nullable = false -} - -variable "network_data_plane" { - type = string - default = null - description = "(Optional) Specifies the data plane used for building the Kubernetes network. 
Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created." -} - -variable "network_ip_versions" { - type = list(string) - default = null - description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created." -} - -variable "network_mode" { - type = string - default = null - description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created." -} - -variable "network_plugin" { - type = string - default = "kubenet" - description = "Network plugin to use for networking." - nullable = false -} - -variable "network_plugin_mode" { - type = string - default = null - description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created." -} - -variable "network_policy" { - type = string - default = null - description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created." -} - -variable "node_network_profile" { - type = object({ - node_public_ip_tags = optional(map(string)) - application_security_group_ids = optional(list(string)) - allowed_host_ports = optional(list(object({ - port_start = optional(number) - port_end = optional(number) - protocol = optional(string) - }))) - }) - default = null - description = <<-EOT - - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. 
- - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. ---- - An `allowed_host_ports` block supports the following: - - `port_start`: (Optional) Specifies the start of the port range. - - `port_end`: (Optional) Specifies the end of the port range. - - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. -EOT -} - -variable "node_os_channel_upgrade" { - type = string - default = null - description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." -} - -variable "node_pools" { - type = map(object({ - name = string - node_count = optional(number) - tags = optional(map(string)) - vm_size = string - host_group_id = optional(string) - capacity_reservation_group_id = optional(string) - custom_ca_trust_enabled = optional(bool) - enable_auto_scaling = optional(bool) - enable_host_encryption = optional(bool) - enable_node_public_ip = optional(bool) - eviction_policy = optional(string) - gpu_instance = optional(string) - kubelet_config = optional(object({ - cpu_manager_policy = optional(string) - cpu_cfs_quota_enabled = optional(bool) - cpu_cfs_quota_period = optional(string) - image_gc_high_threshold = optional(number) - image_gc_low_threshold = optional(number) - topology_manager_policy = optional(string) - allowed_unsafe_sysctls = optional(set(string)) - container_log_max_size_mb = optional(number) - container_log_max_files = optional(number) - pod_max_pid = optional(number) - })) - linux_os_config = optional(object({ - sysctl_config = optional(object({ - fs_aio_max_nr = optional(number) - fs_file_max = optional(number) - fs_inotify_max_user_watches = optional(number) - fs_nr_open = optional(number) - kernel_threads_max = optional(number) - net_core_netdev_max_backlog = optional(number) - net_core_optmem_max = optional(number) - 
net_core_rmem_default = optional(number) - net_core_rmem_max = optional(number) - net_core_somaxconn = optional(number) - net_core_wmem_default = optional(number) - net_core_wmem_max = optional(number) - net_ipv4_ip_local_port_range_min = optional(number) - net_ipv4_ip_local_port_range_max = optional(number) - net_ipv4_neigh_default_gc_thresh1 = optional(number) - net_ipv4_neigh_default_gc_thresh2 = optional(number) - net_ipv4_neigh_default_gc_thresh3 = optional(number) - net_ipv4_tcp_fin_timeout = optional(number) - net_ipv4_tcp_keepalive_intvl = optional(number) - net_ipv4_tcp_keepalive_probes = optional(number) - net_ipv4_tcp_keepalive_time = optional(number) - net_ipv4_tcp_max_syn_backlog = optional(number) - net_ipv4_tcp_max_tw_buckets = optional(number) - net_ipv4_tcp_tw_reuse = optional(bool) - net_netfilter_nf_conntrack_buckets = optional(number) - net_netfilter_nf_conntrack_max = optional(number) - vm_max_map_count = optional(number) - vm_swappiness = optional(number) - vm_vfs_cache_pressure = optional(number) - })) - transparent_huge_page_enabled = optional(string) - transparent_huge_page_defrag = optional(string) - swap_file_size_mb = optional(number) - })) - fips_enabled = optional(bool) - kubelet_disk_type = optional(string) - max_count = optional(number) - max_pods = optional(number) - message_of_the_day = optional(string) - mode = optional(string, "User") - min_count = optional(number) - node_network_profile = optional(object({ - node_public_ip_tags = optional(map(string)) - application_security_group_ids = optional(list(string)) - allowed_host_ports = optional(list(object({ - port_start = optional(number) - port_end = optional(number) - protocol = optional(string) - }))) - })) - node_labels = optional(map(string)) - node_public_ip_prefix_id = optional(string) - node_taints = optional(list(string)) - orchestrator_version = optional(string) - os_disk_size_gb = optional(number) - os_disk_type = optional(string, "Managed") - os_sku = optional(string) - 
os_type = optional(string, "Linux") - pod_subnet = optional(object({ - id = string - }), null) - priority = optional(string, "Regular") - proximity_placement_group_id = optional(string) - spot_max_price = optional(number) - scale_down_mode = optional(string, "Delete") - snapshot_id = optional(string) - ultra_ssd_enabled = optional(bool) - vnet_subnet = optional(object({ - id = string - }), null) - upgrade_settings = optional(object({ - drain_timeout_in_minutes = number - node_soak_duration_in_minutes = number - max_surge = string - })) - windows_profile = optional(object({ - outbound_nat_enabled = optional(bool, true) - })) - workload_runtime = optional(string) - zones = optional(set(string)) - create_before_destroy = optional(bool, true) - })) - default = {} - description = <<-EOT - A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: - map(object({ - name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. - node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. - tags = (Optional) A mapping of tags to assign to the resource. 
At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changess) until this is fixed in the AKS API. - vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. - host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. - capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. - custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information. - enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). - enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. - enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. - eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified. 
- gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. - kubelet_config = optional(object({ - cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. - cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. - cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. - image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. - image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. - topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. - allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. - container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. - container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. - pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
- })) - linux_os_config = optional(object({ - sysctl_config = optional(object({ - fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. - fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. - fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. - fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. - kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. - net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. - net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. - net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. - net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. - net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. - net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. - vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. - vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. - vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. - })) - transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. - transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. - swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. - })) - fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). - kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. - max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`. - max_pods = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. - message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. - mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. - min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. - node_network_profile = optional(object({ - node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. - application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. - allowed_host_ports = optional(object({ - port_start = (Optional) Specifies the start of the port range. - port_end = (Optional) Specifies the end of the port range. - protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. 
- })) - })) - node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. - node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. - node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. - orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. - os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. - os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. - os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. 
- os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. - pod_subnet = optional(object({ - id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. - })) - priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. - proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). - spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. - scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. - snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. - ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. 
- vnet_subnet = optional(object({ - id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet. - })) - upgrade_settings = optional(object({ - drain_timeout_in_minutes = number - node_soak_duration_in_minutes = number - max_surge = string - })) - windows_profile = optional(object({ - outbound_nat_enabled = optional(bool, true) - })) - workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) - zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. - create_before_destroy = (Optional) Create a new node pool before destroy the old one when Terraform must update an argument that cannot be updated in-place. Set this argument to `true` will add add a random suffix to pool's name to avoid conflict. Default to `true`. - })) - EOT - nullable = false -} - -variable "node_resource_group" { - type = string - default = null - description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created." -} - -variable "oidc_issuer_enabled" { - type = bool - default = false - description = "Enable or Disable the OIDC issuer URL. Defaults to false." -} - -variable "oms_agent_enabled" { - type = bool - default = true - description = "Enable OMS Agent Addon." - nullable = false -} - -variable "only_critical_addons_enabled" { - type = bool - default = null - description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. 
Changing this forces a new resource to be created." -} - -variable "open_service_mesh_enabled" { - type = bool - default = null - description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." -} - -variable "orchestrator_version" { - type = string - default = null - description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" -} - -variable "os_disk_size_gb" { - type = number - default = 50 - description = "Disk size of nodes in GBs." -} - -variable "os_disk_type" { - type = string - default = "Managed" - description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." - nullable = false -} - -variable "os_sku" { - type = string - default = null - description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." -} - -variable "pod_subnet" { - type = object({ - id = string - }) - default = null - description = <<-EOT - object({ - id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created. - }) -EOT -} - -variable "prefix" { - type = string - default = "" - description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. 
Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." -} - -variable "private_cluster_enabled" { - type = bool - default = false - description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet." -} - -variable "private_cluster_public_fqdn_enabled" { - type = bool - default = false - description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`." -} - -variable "private_dns_zone_id" { - type = string - default = null - description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created." -} - -variable "public_ssh_key" { - type = string - default = "" - description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created." -} - -variable "rbac_aad" { - type = bool - default = true - description = "(Optional) Is Azure Active Directory integration enabled?" - nullable = false -} - -variable "rbac_aad_admin_group_object_ids" { - type = list(string) - default = null - description = "Object ID of groups with admin access." -} - -variable "rbac_aad_azure_rbac_enabled" { - type = bool - default = null - description = "(Optional) Is Role Based Access Control based on Azure AD enabled?" -} - -variable "rbac_aad_tenant_id" { - type = string - default = null - description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used." -} - -variable "role_based_access_control_enabled" { - type = bool - default = false - description = "Enable Role Based Access Control." 
- nullable = false -} - -variable "run_command_enabled" { - type = bool - default = true - description = "(Optional) Whether to enable run command for the cluster or not." -} - -variable "scale_down_mode" { - type = string - default = "Delete" - description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." -} - -variable "secret_rotation_enabled" { - type = bool - default = false - description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`" - nullable = false -} - -variable "secret_rotation_interval" { - type = string - default = "2m" - description = "The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m`" - nullable = false -} - -variable "service_mesh_profile" { - type = object({ - mode = string - internal_ingress_gateway_enabled = optional(bool, true) - external_ingress_gateway_enabled = optional(bool, true) - }) - default = null - description = <<-EOT - `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. - `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. - `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. - EOT -} - -variable "sku_tier" { - type = string - default = "Free" - description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`" - - validation { - condition = contains(["Free", "Standard", "Premium"], var.sku_tier) - error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0." 
- } -} - -variable "snapshot_id" { - type = string - default = null - description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." -} - -variable "storage_profile_blob_driver_enabled" { - type = bool - default = false - description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`" -} - -variable "storage_profile_disk_driver_enabled" { - type = bool - default = true - description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`" -} - -variable "storage_profile_disk_driver_version" { - type = string - default = "v1" - description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`." -} - -variable "storage_profile_enabled" { - type = bool - default = false - description = "Enable storage profile" - nullable = false -} - -variable "storage_profile_file_driver_enabled" { - type = bool - default = true - description = "(Optional) Is the File CSI driver enabled? Defaults to `true`" -} - -variable "storage_profile_snapshot_controller_enabled" { - type = bool - default = true - description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`" -} - -variable "support_plan" { - type = string - default = "KubernetesOfficial" - description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." - - validation { - condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan) - error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`." 
- } -} - -variable "tags" { - type = map(string) - default = {} - description = "Any tags that should be present on the AKS cluster resources" -} - -variable "temporary_name_for_rotation" { - type = string - default = null - description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" -} - -variable "ultra_ssd_enabled" { - type = bool - default = false - description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." -} - -variable "vnet_subnet" { - type = object({ - id = string - }) - default = null - description = <<-EOT - object({ - id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. - }) -EOT -} - -variable "web_app_routing" { - type = object({ - dns_zone_ids = list(string) - }) - default = null - description = <<-EOT - object({ - dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list." - }) -EOT -} - -variable "workload_autoscaler_profile" { - type = object({ - keda_enabled = optional(bool, false) - vertical_pod_autoscaler_enabled = optional(bool, false) - }) - default = null - description = <<-EOT - `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads. - `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. -EOT -} - -variable "workload_identity_enabled" { - type = bool - default = false - description = "Enable or Disable Workload Identity. Defaults to false." 
-} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf deleted file mode 100644 index 7859b9fae..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf +++ /dev/null @@ -1,26 +0,0 @@ -terraform { - required_version = ">= 1.3" - - required_providers { - azapi = { - source = "Azure/azapi" - version = ">=2.0, < 3.0" - } - azurerm = { - source = "hashicorp/azurerm" - version = ">= 3.107.0" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" - } - time = { - source = "hashicorp/time" - version = ">= 0.5" - } - tls = { - source = "hashicorp/tls" - version = ">= 3.1" - } - } -} From 7fad074589a6a5a5a2df22f5459bad0bdf4a506e Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:39:18 +0530 Subject: [PATCH 12/36] added k8s module --- .../0.2/k8scluster/.checkov_config.yaml | 30 + .../azure_aks/0.2/k8scluster/CHANGELOG-v4.md | 20 + .../azure_aks/0.2/k8scluster/CHANGELOG-v5.md | 31 + .../azure_aks/0.2/k8scluster/CHANGELOG-v6.md | 122 ++ .../azure_aks/0.2/k8scluster/CHANGELOG-v7.md | 93 + .../azure_aks/0.2/k8scluster/CHANGELOG-v8.md | 27 + .../azure_aks/0.2/k8scluster/CHANGELOG-v9.md | 76 + .../azure_aks/0.2/k8scluster/CHANGELOG.md | 5 + .../0.2/k8scluster/CODE_OF_CONDUCT.md | 5 + .../azure_aks/0.2/k8scluster/GNUmakefile | 4 + .../azure_aks/0.2/k8scluster/LICENSE | 21 + .../0.2/k8scluster/NoticeOnUpgradeTov10.0.md | 53 + .../0.2/k8scluster/NoticeOnUpgradeTov5.0.md | 93 + .../0.2/k8scluster/NoticeOnUpgradeTov6.0.md | 5 + .../0.2/k8scluster/NoticeOnUpgradeTov7.0.md | 52 + .../0.2/k8scluster/NoticeOnUpgradeTov8.0.md | 53 + .../0.2/k8scluster/NoticeOnUpgradeTov9.0.md | 9 + .../azure_aks/0.2/k8scluster/README.md | 490 +++++ .../azure_aks/0.2/k8scluster/SECURITY.md | 41 + .../0.2/k8scluster/extra_node_pool.tf | 317 ++++ .../k8scluster/extra_node_pool_override.tf | 17 + .../azure_aks/0.2/k8scluster/locals.tf | 74 + 
.../azure_aks/0.2/k8scluster/log_analytics.tf | 124 ++ .../azure_aks/0.2/k8scluster/main.tf | 741 ++++++++ .../azure_aks/0.2/k8scluster/main_override.tf | 6 + .../azure_aks/0.2/k8scluster/outputs.tf | 231 +++ .../0.2/k8scluster/role_assignments.tf | 126 ++ .../azure_aks/0.2/k8scluster/tfvmmakefile | 85 + .../azure_aks/0.2/k8scluster/variables.tf | 1601 +++++++++++++++++ .../azure_aks/0.2/k8scluster/versions.tf | 26 + 30 files changed, 4578 insertions(+) create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md create mode 
100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml new file mode 100644 index 000000000..b39c33402 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml @@ -0,0 +1,30 @@ +block-list-secret-scan: [] +branch: master +directory: + - ./ +download-external-modules: false +evaluate-variables: true +external-modules-download-path: .external_modules +framework: + - all +quiet: true +secrets-scan-file-type: [] +skip-check: + - CKV_GHA_3 + - CKV_AZURE_5 + - CKV_AZURE_6 + - CKV_AZURE_112 + - CKV_AZURE_115 + - CKV_AZURE_116 + - CKV_AZURE_168 + - CKV_AZURE_170 + - CKV_AZURE_139 + - CKV_AZURE_165 + - CKV_AZURE_166 + - CKV_AZURE_164 +skip-framework: + - dockerfile + - kubernetes +skip-path: + - test/vendor +summary-position: top diff --git 
a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md new file mode 100644 index 000000000..42433d0ea --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md @@ -0,0 +1,20 @@ +## 4.15.0 (May 06, 2022) + +ENHANCEMENTS: + +* Added output for `kube_admin_config_raw` ([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146)) +* Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136)) + +BUG FIXES: + +## 4.16.0 (June 02, 2022) + +ENHANCEMENTS: + +* Added output for `addon_profile` ([#151](https://github.com/Azure/terraform-azurerm-aks/pull/151)) +* Adding Microsoft SECURITY.MD ([#167](https://github.com/Azure/terraform-azurerm-aks/pull/167)) +* Added variable `os_disk_type` for default node pools ([#169](https://github.com/Azure/terraform-azurerm-aks/pull/169)) + +BUG FIXES: + +* Trivial fix to the example in the README ([#166](https://github.com/Azure/terraform-azurerm-aks/pull/166)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md new file mode 100644 index 000000000..bda5b8027 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md @@ -0,0 +1,31 @@ +## 5.0.0 (July 14, 2022) + +ENHANCEMENTS: + +* Variable `enable_kube_dashboard` has been removed as [#181](https://github.com/Azure/terraform-azurerm-aks/issues/181) described. ([#187](https://github.com/Azure/terraform-azurerm-aks/pull/187)) +* Add new variable `location` so we can define location for the resources explicitly. ([#172](https://github.com/Azure/terraform-azurerm-aks/pull/172)) +* Bump AzureRM Provider version to 3.3.0. ([#157](https://github.com/Azure/terraform-azurerm-aks/pull/157)) +* Add new variable `private_dns_zone_id` to make argument `private_dns_zone_id` configurable. 
([#174](https://github.com/Azure/terraform-azurerm-aks/pull/174)) +* Add new variable `open_service_mesh_enabled` to make argument `open_service_mesh_enabled` configurable. ([#132](https://github.com/Azure/terraform-azurerm-aks/pull/132)) +* Remove `addon_profile` in the outputs since the block has been removed from provider 3.x. Extract embedded blocks inside `addon_profile` block into standalone outputs. ([#188](https://github.com/Azure/terraform-azurerm-aks/pull/188)) +* Add `nullable = true` to some variables to simplify the conditional expressions. ([#193](https://github.com/Azure/terraform-azurerm-aks/pull/193)) +* Add new variable `oidc_issuer_enabled` to make argument `oidc_issuer_enabled` configurable. ([#205](https://github.com/Azure/terraform-azurerm-aks/pull/205) +* Add new output `oidc_issuer_url` to expose the created issuer URL from the module. [#206](https://github.com/Azure/terraform-azurerm-aks/pull/206)) +* Turn monitoring on in the test code. ([#201](https://github.com/Azure/terraform-azurerm-aks/pull/201)) +* Add new variables `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` to make arguments `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` configurable. ([#149](https://github.com/Azure/terraform-azurerm-aks/pull/149)) +* Remove `module.ssh-key` and moves resource `tls_private_key` inside the module to root directory, then outputs tls keys. ([#189](https://github.com/Azure/terraform-azurerm-aks/pull/189)) +* Add new variables `rbac_aad_azure_rbac_enabled` and `rbac_aad_tenant_id` to make arguments in `azure_active_directory_role_based_access_control` configurable. ([#199](https://github.com/Azure/terraform-azurerm-aks/pull/199)) +* Add `count` meta-argument to resource `tls_private_key` to avoid the unnecessary creation. 
([#209](https://github.com/Azure/terraform-azurerm-aks/pull/209)) +* Add new variable `only_critical_addons_enabled` to make argument `only_critical_addons_enabled` in block `default_node_pool` configurable. ([#129](https://github.com/Azure/terraform-azurerm-aks/pull/129)) +* Add support for the argument `key_vault_secrets_provider`. ([#214](https://github.com/Azure/terraform-azurerm-aks/pull/214)) +* Provides a way to attach existing Log Analytics Workspace to AKS through Container Insights. ([#213](https://github.com/Azure/terraform-azurerm-aks/pull/213)) +* Add new variable `local_account_disabled` to make argument `local_account_disabled` configurable. ([#218](https://github.com/Azure/terraform-azurerm-aks/pull/218)) +* Set argument `private_cluster_enabled` to `true` in the test code. ([#219](https://github.com/Azure/terraform-azurerm-aks/pull/219)) +* Add new variable `disk_encryption_set_id` to make argument `disk_encryption_set_id` configurable. Create resource `azurerm_disk_encryption_set` in the test code to turn disk encryption on for the cluster. ([#195](https://github.com/Azure/terraform-azurerm-aks/pull/195)) +* Add new variable `api_server_authorized_ip_ranges` to make argument `api_server_authorized_ip_ranges` configurable. ([#220](https://github.com/Azure/terraform-azurerm-aks/pull/220)) +* Rename output `system_assigned_identity` to `cluster_identity` since it could be user assigned identity. Remove the index inside output's value expression. ([#197](https://github.com/Azure/terraform-azurerm-aks/pull/197)) +* Rename `var.enable_azure_policy` to `var.azure_policy_enabled` to meet the naming convention. Set `azure_policy_enabled` to `true` in test fixture code. ([#203](https://github.com/Azure/terraform-azurerm-aks/pull/203)) + +BUG FIXES: + +* Change the incorrect description of variable `tags`. 
([#175](https://github.com/Azure/terraform-azurerm-aks/pull/175)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md new file mode 100644 index 000000000..ed1f9f094 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md @@ -0,0 +1,122 @@ +# Changelog + +## [Unreleased](https://github.com/Azure/terraform-azurerm-aks/tree/HEAD) + +**Merged pull requests:** + +- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) + +## [6.8.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.8.0) (2023-04-04) + +**Merged pull requests:** + +- Add support for `monitor_metrics` [\#341](https://github.com/Azure/terraform-azurerm-aks/pull/341) ([zioproto](https://github.com/zioproto)) +- Support setting os\_sku for default\_node\_pool [\#339](https://github.com/Azure/terraform-azurerm-aks/pull/339) ([mjeco](https://github.com/mjeco)) +- Upgrade required Terraform version [\#338](https://github.com/Azure/terraform-azurerm-aks/pull/338) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support `temporary_name_for_rotation` [\#334](https://github.com/Azure/terraform-azurerm-aks/pull/334) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.9.1 to 0.12.0 in /test [\#330](https://github.com/Azure/terraform-azurerm-aks/pull/330) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Fix example multiple\_node\_pools [\#328](https://github.com/Azure/terraform-azurerm-aks/pull/328) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add Network Contributor role assignments scoped to AKS nodepools subnets [\#327](https://github.com/Azure/terraform-azurerm-aks/pull/327) ([zioproto](https://github.com/zioproto)) +- Add support for extra node pools 
[\#323](https://github.com/Azure/terraform-azurerm-aks/pull/323) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `default_node_pool.kubelet_config` [\#322](https://github.com/Azure/terraform-azurerm-aks/pull/322) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for `public_network_access_enabled` [\#314](https://github.com/Azure/terraform-azurerm-aks/pull/314) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.7.1](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.1) (2023-03-06) + +**Merged pull requests:** + +- Fix \#316 `current client lacks permissions to read Key Rotation Policy` issue [\#317](https://github.com/Azure/terraform-azurerm-aks/pull/317) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.7.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.0) (2023-02-27) + +**Merged pull requests:** + +- Add support for `linux_os_config` [\#309](https://github.com/Azure/terraform-azurerm-aks/pull/309) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/gruntwork-io/terratest from 0.41.10 to 0.41.11 in /test [\#307](https://github.com/Azure/terraform-azurerm-aks/pull/307) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/Azure/terraform-module-test-helper from 0.8.1 to 0.9.1 in /test [\#306](https://github.com/Azure/terraform-azurerm-aks/pull/306) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump golang.org/x/net from 0.1.0 to 0.7.0 in /test [\#305](https://github.com/Azure/terraform-azurerm-aks/pull/305) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-getter from 1.6.1 to 1.7.0 in /test [\#304](https://github.com/Azure/terraform-azurerm-aks/pull/304) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-getter/v2 from 2.1.1 to 2.2.0 in /test [\#303](https://github.com/Azure/terraform-azurerm-aks/pull/303) ([dependabot[bot]](https://github.com/apps/dependabot)) +- fix: allow 
orchestrator\_version if auto-upgrade is 'patch' to allow default\_node\_pool upgrade [\#302](https://github.com/Azure/terraform-azurerm-aks/pull/302) ([aescrob](https://github.com/aescrob)) +- Add support for default node pool's `node_taints` [\#300](https://github.com/Azure/terraform-azurerm-aks/pull/300) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for acr attachment [\#298](https://github.com/Azure/terraform-azurerm-aks/pull/298) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `web_app_routing` [\#297](https://github.com/Azure/terraform-azurerm-aks/pull/297) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.7.1 to 0.8.1 in /test [\#295](https://github.com/Azure/terraform-azurerm-aks/pull/295) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [6.6.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.6.0) (2023-01-29) + +**Merged pull requests:** + +- Bump github.com/Azure/terraform-module-test-helper from 0.6.0 to 0.7.1 in /test [\#293](https://github.com/Azure/terraform-azurerm-aks/pull/293) ([dependabot[bot]](https://github.com/apps/dependabot)) +- identity type is either SystemAssigned or UserAssigned [\#292](https://github.com/Azure/terraform-azurerm-aks/pull/292) ([zioproto](https://github.com/zioproto)) +- Bump github.com/gruntwork-io/terratest from 0.41.7 to 0.41.9 in /test [\#290](https://github.com/Azure/terraform-azurerm-aks/pull/290) ([dependabot[bot]](https://github.com/apps/dependabot)) +- feat: Implement support for KMS arguments [\#288](https://github.com/Azure/terraform-azurerm-aks/pull/288) ([mkilchhofer](https://github.com/mkilchhofer)) +- feat: allow for configuring auto\_scaler\_profile [\#278](https://github.com/Azure/terraform-azurerm-aks/pull/278) ([DavidSpek](https://github.com/DavidSpek)) +- Azure AD RBAC enable/disable with variable rbac\_aad [\#269](https://github.com/Azure/terraform-azurerm-aks/pull/269) 
([zioproto](https://github.com/zioproto)) + +## [6.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.5.0) (2023-01-03) + +**Merged pull requests:** + +- Bump github.com/Azure/terraform-module-test-helper from 0.4.0 to 0.6.0 in /test [\#287](https://github.com/Azure/terraform-azurerm-aks/pull/287) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.41.6 to 0.41.7 in /test [\#286](https://github.com/Azure/terraform-azurerm-aks/pull/286) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add support for `scale_down_mode` [\#285](https://github.com/Azure/terraform-azurerm-aks/pull/285) ([lonegunmanb](https://github.com/lonegunmanb)) +- auto-upgrade: variable orchestrator\_version to null [\#283](https://github.com/Azure/terraform-azurerm-aks/pull/283) ([zioproto](https://github.com/zioproto)) + +## [6.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.4.0) (2022-12-26) + +**Merged pull requests:** + +- feat\(storage\_profile\): add support for CSI arguments [\#282](https://github.com/Azure/terraform-azurerm-aks/pull/282) ([aescrob](https://github.com/aescrob)) + +## [6.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.3.0) (2022-12-20) + +**Merged pull requests:** + +- feat: add var automatic\_channel\_upgrade [\#281](https://github.com/Azure/terraform-azurerm-aks/pull/281) ([the-technat](https://github.com/the-technat)) +- Upgrade `terraform-module-test-helper` lib so we can get rid of override file to execute version upgrade test [\#279](https://github.com/Azure/terraform-azurerm-aks/pull/279) ([lonegunmanb](https://github.com/lonegunmanb)) +- Added support for load\_balancer\_profile [\#277](https://github.com/Azure/terraform-azurerm-aks/pull/277) ([mazilu88](https://github.com/mazilu88)) +- Add auto changelog update to this repo. 
[\#275](https://github.com/Azure/terraform-azurerm-aks/pull/275) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump test helper version [\#273](https://github.com/Azure/terraform-azurerm-aks/pull/273) ([lonegunmanb](https://github.com/lonegunmanb)) +- Ignore `scripts` soft link [\#272](https://github.com/Azure/terraform-azurerm-aks/pull/272) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for pod subnet [\#271](https://github.com/Azure/terraform-azurerm-aks/pull/271) ([mr-onion-2](https://github.com/mr-onion-2)) + +## [6.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.2.0) (2022-10-18) + +**Merged pull requests:** + +- Add breaking change detect CI step. [\#268](https://github.com/Azure/terraform-azurerm-aks/pull/268) ([lonegunmanb](https://github.com/lonegunmanb)) +- Workload Identity support [\#266](https://github.com/Azure/terraform-azurerm-aks/pull/266) ([nlamirault](https://github.com/nlamirault)) +- Add unit test for complex local logic [\#264](https://github.com/Azure/terraform-azurerm-aks/pull/264) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [6.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.1.0) (2022-09-30) + +**Merged pull requests:** + +- Improve placeholders for visibility in the UX [\#262](https://github.com/Azure/terraform-azurerm-aks/pull/262) ([zioproto](https://github.com/zioproto)) +- align acc test in CI pipeline with local machine by running e2e test … [\#260](https://github.com/Azure/terraform-azurerm-aks/pull/260) ([lonegunmanb](https://github.com/lonegunmanb)) +- align pr-check with local machine by using docker command instead [\#259](https://github.com/Azure/terraform-azurerm-aks/pull/259) ([lonegunmanb](https://github.com/lonegunmanb)) +- bugfix: Make the Azure Defender clause robust against a non-existent … [\#258](https://github.com/Azure/terraform-azurerm-aks/pull/258) ([gzur](https://github.com/gzur)) +- Add support for `maintenance_window` 
[\#256](https://github.com/Azure/terraform-azurerm-aks/pull/256) ([lonegunmanb](https://github.com/lonegunmanb)) +- Updates terraform code to meet updated code style requirement [\#253](https://github.com/Azure/terraform-azurerm-aks/pull/253) ([lonegunmanb](https://github.com/lonegunmanb)) +- Output cluster's fqdn [\#251](https://github.com/Azure/terraform-azurerm-aks/pull/251) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix example path in readme file. [\#249](https://github.com/Azure/terraform-azurerm-aks/pull/249) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update azurerm provider's restriction. [\#248](https://github.com/Azure/terraform-azurerm-aks/pull/248) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for optional Ultra disks [\#245](https://github.com/Azure/terraform-azurerm-aks/pull/245) ([digiserg](https://github.com/digiserg)) +- add aci\_connector addon [\#230](https://github.com/Azure/terraform-azurerm-aks/pull/230) ([zioproto](https://github.com/zioproto)) + +## [6.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.0.0) (2022-09-13) + +**Merged pull requests:** + +- Add outputs for created Log Analytics workspace [\#243](https://github.com/Azure/terraform-azurerm-aks/pull/243) ([zioproto](https://github.com/zioproto)) +- Prepare v6.0 and new CI pipeline. 
[\#241](https://github.com/Azure/terraform-azurerm-aks/pull/241) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update hashicorp/terraform-provider-azurerm to version 3.21.0 \(fixes for AKS 1.24\) [\#238](https://github.com/Azure/terraform-azurerm-aks/pull/238) ([zioproto](https://github.com/zioproto)) +- Output Kubernetes Cluster Name [\#234](https://github.com/Azure/terraform-azurerm-aks/pull/234) ([vermacodes](https://github.com/vermacodes)) +- feat\(aks\): add microsoft defender support [\#232](https://github.com/Azure/terraform-azurerm-aks/pull/232) ([eyenx](https://github.com/eyenx)) +- fix: mark outputs as sensitive [\#231](https://github.com/Azure/terraform-azurerm-aks/pull/231) ([jvelasquez](https://github.com/jvelasquez)) +- Loose the restriction on tls provider's version to include major version greater than 3.0 [\#229](https://github.com/Azure/terraform-azurerm-aks/pull/229) ([lonegunmanb](https://github.com/lonegunmanb)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md new file mode 100644 index 000000000..67b2e2375 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md @@ -0,0 +1,93 @@ +# Changelog + +## [7.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.5.0) (2023-11-14) + +**Merged pull requests:** + +- Add support for `node_os_channel_upgrade` [\#474](https://github.com/Azure/terraform-azurerm-aks/pull/474) ([lonegunmanb](https://github.com/lonegunmanb)) +- use lowercase everywhere for network plugin mode overlay [\#472](https://github.com/Azure/terraform-azurerm-aks/pull/472) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.15.1-0.20230728050712-96e8615f5515 to 0.17.0 in /test 
[\#469](https://github.com/Azure/terraform-azurerm-aks/pull/469) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add support for `service_mesh_profile` block [\#468](https://github.com/Azure/terraform-azurerm-aks/pull/468) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for Image Cleaner [\#466](https://github.com/Azure/terraform-azurerm-aks/pull/466) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add `fips_enabled` support for `default_node_pool` block [\#464](https://github.com/Azure/terraform-azurerm-aks/pull/464) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add default empty list for `allowed` and `not_allowed` in `var.maintenance_window` [\#463](https://github.com/Azure/terraform-azurerm-aks/pull/463) ([lonegunmanb](https://github.com/lonegunmanb)) +- fix: correct wording of the doc [\#461](https://github.com/Azure/terraform-azurerm-aks/pull/461) ([meysam81](https://github.com/meysam81)) +- add run\_command\_enabled [\#452](https://github.com/Azure/terraform-azurerm-aks/pull/452) ([zioproto](https://github.com/zioproto)) +- add msi\_auth\_for\_monitoring\_enabled [\#446](https://github.com/Azure/terraform-azurerm-aks/pull/446) ([admincasper](https://github.com/admincasper)) +- Restore readme file by stop formatting markdown table [\#445](https://github.com/Azure/terraform-azurerm-aks/pull/445) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.4.0) (2023-09-18) + +**Merged pull requests:** + +- Support for creating nodepools from snapshots [\#442](https://github.com/Azure/terraform-azurerm-aks/pull/442) ([zioproto](https://github.com/zioproto)) +- Add multiple terraform-docs configs to generate a seperated markdown document for input variables [\#441](https://github.com/Azure/terraform-azurerm-aks/pull/441) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `maintenance_window_node_os` block 
[\#440](https://github.com/Azure/terraform-azurerm-aks/pull/440) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.3.2](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.2) (2023-09-07) + +**Merged pull requests:** + +- Hide input variables in readme to boost the rendering [\#437](https://github.com/Azure/terraform-azurerm-aks/pull/437) ([lonegunmanb](https://github.com/lonegunmanb)) +- Improve information to upgrade to 7.0 [\#432](https://github.com/Azure/terraform-azurerm-aks/pull/432) ([zioproto](https://github.com/zioproto)) +- Add confidential computing in aks module [\#423](https://github.com/Azure/terraform-azurerm-aks/pull/423) ([jiaweitao001](https://github.com/jiaweitao001)) + +## [7.3.1](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.1) (2023-08-10) + +**Merged pull requests:** + +- Bump k8s version in exmaples to pass e2e tests [\#422](https://github.com/Azure/terraform-azurerm-aks/pull/422) ([jiaweitao001](https://github.com/jiaweitao001)) + +## [7.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.0) (2023-08-03) + +**Merged pull requests:** + +- Add `location` and `resource_group_name` for `var.log_analytics_workspace` [\#412](https://github.com/Azure/terraform-azurerm-aks/pull/412) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix \#405 incorrect role assignment resource [\#410](https://github.com/Azure/terraform-azurerm-aks/pull/410) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.2.0) (2023-07-10) + +**Merged pull requests:** + +- Bump google.golang.org/grpc from 1.51.0 to 1.53.0 in /test [\#406](https://github.com/Azure/terraform-azurerm-aks/pull/406) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Support for Azure CNI Cilium [\#398](https://github.com/Azure/terraform-azurerm-aks/pull/398) ([JitseHijlkema](https://github.com/JitseHijlkema)) +- Use `lonegunmanb/public-ip/lonegunmanb` module to retrieve public ip 
[\#396](https://github.com/Azure/terraform-azurerm-aks/pull/396) ([lonegunmanb](https://github.com/lonegunmanb)) +- Fix incorrect e2e test code so it could pass on our local machine [\#395](https://github.com/Azure/terraform-azurerm-aks/pull/395) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for Proximity placement group for default node pool [\#392](https://github.com/Azure/terraform-azurerm-aks/pull/392) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add upgrade\_settings block for default nodepool [\#391](https://github.com/Azure/terraform-azurerm-aks/pull/391) ([CiucurDaniel](https://github.com/CiucurDaniel)) +- Bump github.com/Azure/terraform-module-test-helper from 0.13.0 to 0.14.0 in /test [\#386](https://github.com/Azure/terraform-azurerm-aks/pull/386) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [7.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.1.0) (2023-06-07) + +**Merged pull requests:** + +- Deprecate `api_server_authorized_ip_ranges` by using `api_server_access_profile` block [\#381](https://github.com/Azure/terraform-azurerm-aks/pull/381) ([lonegunmanb](https://github.com/lonegunmanb)) +- `oidc_issuer_enabled` must be set to `true` to enable Azure AD Worklo… [\#377](https://github.com/Azure/terraform-azurerm-aks/pull/377) ([zioproto](https://github.com/zioproto)) +- assign network contributor role to control plane identity [\#369](https://github.com/Azure/terraform-azurerm-aks/pull/369) ([zioproto](https://github.com/zioproto)) +- Add tracing tag toggle variables [\#362](https://github.com/Azure/terraform-azurerm-aks/pull/362) ([lonegunmanb](https://github.com/lonegunmanb)) +- Support for Azure CNI Overlay [\#354](https://github.com/Azure/terraform-azurerm-aks/pull/354) ([zioproto](https://github.com/zioproto)) +- Make `var.prefix` optional [\#382](https://github.com/Azure/terraform-azurerm-aks/pull/382) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove constraint on `authorized_ip_ranges` when 
`public_network_access_enabled` is `true` [\#375](https://github.com/Azure/terraform-azurerm-aks/pull/375) ([lonegunmanb](https://github.com/lonegunmanb)) +- Filter null value out from `local.subnet_ids` [\#374](https://github.com/Azure/terraform-azurerm-aks/pull/374) ([lonegunmanb](https://github.com/lonegunmanb)) +- User `location` returned from data source for log analytics solution. [\#349](https://github.com/Azure/terraform-azurerm-aks/pull/349) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [7.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.0.0) (2023-05-18) + +**Merged pull requests:** + +- Upgrade notice for v7.0 [\#367](https://github.com/Azure/terraform-azurerm-aks/pull/367) ([lonegunmanb](https://github.com/lonegunmanb)) +- Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` [\#361](https://github.com/Azure/terraform-azurerm-aks/pull/361) ([lonegunmanb](https://github.com/lonegunmanb)) +- feat!: add create\_before\_destroy=true to node pools [\#357](https://github.com/Azure/terraform-azurerm-aks/pull/357) ([the-technat](https://github.com/the-technat)) +- Move breaking change details into separate docs. add notice on v7.0.0 [\#355](https://github.com/Azure/terraform-azurerm-aks/pull/355) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.12.0 to 0.13.0 in /test [\#352](https://github.com/Azure/terraform-azurerm-aks/pull/352) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Trivial: fix typo ingration -\> integration [\#351](https://github.com/Azure/terraform-azurerm-aks/pull/351) ([zioproto](https://github.com/zioproto)) +- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) +- \[Breaking\] Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard`. 
[\#346](https://github.com/Azure/terraform-azurerm-aks/pull/346) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] - Ignore changes on `kubernetes_version` from outside of Terraform [\#336](https://github.com/Azure/terraform-azurerm-aks/pull/336) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] - Fix \#315 by amending missing `linux_os_config` block [\#320](https://github.com/Azure/terraform-azurerm-aks/pull/320) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] Wrap `log_analytics_solution_id` to an object to fix \#263. [\#265](https://github.com/Azure/terraform-azurerm-aks/pull/265) ([lonegunmanb](https://github.com/lonegunmanb)) +- \[Breaking\] Remove unused net\_profile\_docker\_bridge\_cidr [\#222](https://github.com/Azure/terraform-azurerm-aks/pull/222) ([zioproto](https://github.com/zioproto)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md new file mode 100644 index 000000000..2c035d842 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md @@ -0,0 +1,27 @@ +# Changelog + +**Merged pull requests:** + +- Add support for nodepool's `gpu_instance` [\#519](https://github.com/Azure/terraform-azurerm-aks/pull/519) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump github.com/Azure/terraform-module-test-helper from 0.17.0 to 0.18.0 in /test [\#516](https://github.com/Azure/terraform-azurerm-aks/pull/516) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add upgrade notice document [\#513](https://github.com/Azure/terraform-azurerm-aks/pull/513) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add retry when the ingress is not ready [\#510](https://github.com/Azure/terraform-azurerm-aks/pull/510) 
([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `support_plan` and `Premium` sku tier. [\#508](https://github.com/Azure/terraform-azurerm-aks/pull/508) ([ecklm](https://github.com/ecklm)) +- Refactor code, split monolith tf config into multiple files [\#494](https://github.com/Azure/terraform-azurerm-aks/pull/494) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove `var.http_application_routing_enabled` [\#493](https://github.com/Azure/terraform-azurerm-aks/pull/493) ([lonegunmanb](https://github.com/lonegunmanb)) +- feat\(`http_proxy_config`\): Add `http_proxy_config` [\#492](https://github.com/Azure/terraform-azurerm-aks/pull/492) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove `public_network_access_enabled` entirely [\#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) ([lonegunmanb](https://github.com/lonegunmanb)) +- Ignore deprecated attribute `public_network_access_enabled` [\#485](https://github.com/Azure/terraform-azurerm-aks/pull/485) ([ishuar](https://github.com/ishuar)) +- feat: enable precondition on `default_node_pool` for autoscaling with node pool type [\#484](https://github.com/Azure/terraform-azurerm-aks/pull/484) ([ishuar](https://github.com/ishuar)) +- Add web\_app\_routing\_identity block to outputs [\#481](https://github.com/Azure/terraform-azurerm-aks/pull/481) ([bonddim](https://github.com/bonddim)) +- Add support for `kubelet_identity` nested block [\#479](https://github.com/Azure/terraform-azurerm-aks/pull/479) ([lonegunmanb](https://github.com/lonegunmanb)) +- Prepare for v8.0 [\#462](https://github.com/Azure/terraform-azurerm-aks/pull/462) ([lonegunmanb](https://github.com/lonegunmanb)) +- Remove precondition on extra node pool which prevent using windows pool with overlay [\#512](https://github.com/Azure/terraform-azurerm-aks/pull/512) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for `maintenance_window_auto_upgrade` 
[\#505](https://github.com/Azure/terraform-azurerm-aks/pull/505) ([skolobov](https://github.com/skolobov)) +- Let the users decide whether adding a random suffix in cluster and pool's name or not. [\#496](https://github.com/Azure/terraform-azurerm-aks/pull/496) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add role assignments for ingress application gateway and corresponding example [\#426](https://github.com/Azure/terraform-azurerm-aks/pull/426) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add support for workload\_autoscaler\_profile settings [\#404](https://github.com/Azure/terraform-azurerm-aks/pull/404) ([bonddim](https://github.com/bonddim)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md new file mode 100644 index 000000000..05e2d7539 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md @@ -0,0 +1,76 @@ +# Changelog + +## [9.4.1](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.1) (2025-02-05) + +**Merged pull requests:** + +- Revert changes of `9.4.0` [\#635](https://github.com/Azure/terraform-azurerm-aks/pull/635) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [9.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.0) (2025-02-05) + +**Merged pull requests:** + +- Bump azapi provider to \>=2.0, \< 3.0 [\#632](https://github.com/Azure/terraform-azurerm-aks/pull/632) ([zioproto](https://github.com/zioproto)) +- Dependabot 624 626 [\#627](https://github.com/Azure/terraform-azurerm-aks/pull/627) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.28.0 to 0.30.0 in /test [\#626](https://github.com/Azure/terraform-azurerm-aks/pull/626) 
([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.48.0 to 0.48.1 in /test [\#624](https://github.com/Azure/terraform-azurerm-aks/pull/624) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Dependabot changes from PR 609 619 620 [\#621](https://github.com/Azure/terraform-azurerm-aks/pull/621) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.27.0 to 0.28.0 in /test [\#620](https://github.com/Azure/terraform-azurerm-aks/pull/620) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.47.2 to 0.48.0 in /test [\#619](https://github.com/Azure/terraform-azurerm-aks/pull/619) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#616](https://github.com/Azure/terraform-azurerm-aks/pull/616) ([lonegunmanb](https://github.com/lonegunmanb)) +- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#615](https://github.com/Azure/terraform-azurerm-aks/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /test [\#609](https://github.com/Azure/terraform-azurerm-aks/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [9.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.3.0) (2024-12-11) + +**Merged pull requests:** + +- Support of oms\_agent\_enabled add-on [\#613](https://github.com/Azure/terraform-azurerm-aks/pull/613) ([lonegunmanb](https://github.com/lonegunmanb)) +- Implement node\_network\_profile for default node pool [\#598](https://github.com/Azure/terraform-azurerm-aks/pull/598) ([zioproto](https://github.com/zioproto)) +- Bump examples to AKS 1.30 [\#595](https://github.com/Azure/terraform-azurerm-aks/pull/595) ([zioproto](https://github.com/zioproto)) +- Add `v4` sub-folder so this module could run with AzureRM provider both `v3` and `v4`. 
[\#594](https://github.com/Azure/terraform-azurerm-aks/pull/594) ([lonegunmanb](https://github.com/lonegunmanb)) + +## [9.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.2.0) (2024-11-07) + +**Merged pull requests:** + +- Make the Azure Key Vault public because private Key Vault requires preview API [\#599](https://github.com/Azure/terraform-azurerm-aks/pull/599) ([zioproto](https://github.com/zioproto)) +- Bump github.com/Azure/terraform-module-test-helper from 0.25.0 to 0.26.0 in /test [\#593](https://github.com/Azure/terraform-azurerm-aks/pull/593) ([lonegunmanb](https://github.com/lonegunmanb)) +- Use oidc as authentication method [\#592](https://github.com/Azure/terraform-azurerm-aks/pull/592) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update README.md [\#589](https://github.com/Azure/terraform-azurerm-aks/pull/589) ([shailwx](https://github.com/shailwx)) +- Add `cost_analysis_enabled` option [\#583](https://github.com/Azure/terraform-azurerm-aks/pull/583) ([artificial-aidan](https://github.com/artificial-aidan)) +- Bump github.com/Azure/terraform-module-test-helper from 0.24.0 to 0.25.0 in /test [\#581](https://github.com/Azure/terraform-azurerm-aks/pull/581) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/gruntwork-io/terratest from 0.46.15 to 0.47.0 in /test [\#579](https://github.com/Azure/terraform-azurerm-aks/pull/579) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/Azure/terraform-module-test-helper from 0.22.0 to 0.24.0 in /test [\#574](https://github.com/Azure/terraform-azurerm-aks/pull/574) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 in /test [\#562](https://github.com/Azure/terraform-azurerm-aks/pull/562) ([dependabot[bot]](https://github.com/apps/dependabot)) + +## [9.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.1.0) (2024-07-04) + +**Merged pull requests:** + +- Downgrade next major 
version back to v9 [\#577](https://github.com/Azure/terraform-azurerm-aks/pull/577) ([lonegunmanb](https://github.com/lonegunmanb)) +- Restore devcontainer [\#576](https://github.com/Azure/terraform-azurerm-aks/pull/576) ([zioproto](https://github.com/zioproto)) +- set drainTimeoutInMinutes default value to null [\#575](https://github.com/Azure/terraform-azurerm-aks/pull/575) ([zioproto](https://github.com/zioproto)) +- fix README.md format [\#570](https://github.com/Azure/terraform-azurerm-aks/pull/570) ([joaoestrela](https://github.com/joaoestrela)) +- Bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /test [\#569](https://github.com/Azure/terraform-azurerm-aks/pull/569) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Start new Changelog file for v10 [\#567](https://github.com/Azure/terraform-azurerm-aks/pull/567) ([zioproto](https://github.com/zioproto)) +- fixed inaccurate variable descriptions for azure cni in overlay mode [\#566](https://github.com/Azure/terraform-azurerm-aks/pull/566) ([Xelef2000](https://github.com/Xelef2000)) +- add drain\_timeout\_in\_minutes and node\_soak\_duration\_in\_minutes [\#564](https://github.com/Azure/terraform-azurerm-aks/pull/564) ([zioproto](https://github.com/zioproto)) + +## [9.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.0.0) (2024-06-07) + +**Merged pull requests:** + +- Compromise on e2e tests involving ingress, since it's not stable [\#558](https://github.com/Azure/terraform-azurerm-aks/pull/558) ([lonegunmanb](https://github.com/lonegunmanb)) +- Add weekly-codeql action [\#555](https://github.com/Azure/terraform-azurerm-aks/pull/555) ([lonegunmanb](https://github.com/lonegunmanb)) +- Change default value for `var.agents_pool_max_surge` to 10% [\#554](https://github.com/Azure/terraform-azurerm-aks/pull/554) ([lonegunmanb](https://github.com/lonegunmanb)) +- Update Microsoft.ContainerService managedClusters API version to 2024-02-01 
[\#552](https://github.com/Azure/terraform-azurerm-aks/pull/552) ([olofmattsson-inriver](https://github.com/olofmattsson-inriver)) +- Bump github.com/Azure/terraform-module-test-helper from 0.19.0 to 0.22.0 in /test [\#549](https://github.com/Azure/terraform-azurerm-aks/pull/549) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Amending log analytics attributes [\#548](https://github.com/Azure/terraform-azurerm-aks/pull/548) ([lonegunmanb](https://github.com/lonegunmanb)) +- bump k8s version for example since 1.26 has been deprecated [\#540](https://github.com/Azure/terraform-azurerm-aks/pull/540) ([lonegunmanb](https://github.com/lonegunmanb)) +- fix\(typo\): typo in output variable [\#537](https://github.com/Azure/terraform-azurerm-aks/pull/537) ([mbaykara](https://github.com/mbaykara)) +- Bump github.com/Azure/terraform-module-test-helper from 0.18.0 to 0.19.0 in /test [\#521](https://github.com/Azure/terraform-azurerm-aks/pull/521) ([dependabot[bot]](https://github.com/apps/dependabot)) + + + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md new file mode 100644 index 000000000..9996f9928 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +## Important Notice + +* fix: add back `private_cluster_enabled` variable by @tobiasehlert [#667](https://github.com/Azure/terraform-azurerm-aks/pull/667) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..af8b0207d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +This code of conduct 
outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. + +Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile new file mode 100644 index 000000000..3db7ccd9d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile @@ -0,0 +1,4 @@ +SHELL := /bin/bash + +$(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) +-include tfvmmakefile \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE new file mode 100644 index 000000000..21071075c --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md new file mode 100644 index 000000000..f611a6a75 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md @@ -0,0 +1,53 @@ +# Notice on Upgrade to v10.x + +## AzAPI provider version constraint has been updated to `>=2.0, < 3.0`. + +## [`var.web_app_routing` type change](https://github.com/Azure/terraform-azurerm-aks/pull/606) + +`var.web_app_routing.dns_zone_id` has been replaced by `var.web_app_routing.dns_zone_ids`. The new variable is a list of DNS zone IDs. This change allows for the specification of multiple DNS zones for routing. + +## [`data.azurerm_resource_group.main` in this module has been removed, `var.location` is a required variable now.](https://github.com/Azure/terraform-azurerm-aks/pull/644) + +## [Create log analytics workspace would also create required monitor data collection rule now](https://github.com/Azure/terraform-azurerm-aks/pull/623) + +The changes in this pull request introduce support for a Data Collection Rule (DCR) for Azure Monitor Container Insights in the Terraform module. + +## [`CHANGELOG.md` file is no longer maintained, please read release note in GitHub repository instead](https://github.com/Azure/terraform-azurerm-aks/pull/651) + +[New release notes](https://github.com/Azure/terraform-azurerm-aks/releases). 
+ +## [The following variables have been removed:](https://github.com/Azure/terraform-azurerm-aks/pull/652) + +* `agents_taints` +* `api_server_subnet_id` +* `private_cluster_enabled` +* `rbac_aad_client_app_id` +* `rbac_aad_managed` +* `rbac_aad_server_app_id` +* `rbac_aad_server_app_secret` + +## `var.pod_subnet_id` has been replaced by `var.pod_subnet.id` + +## `var.vnet_subnet_id` has been replaced by `var.vnet_subnet.id` + +## `var.node_pools.pod_subnet_id` has been replaced by `var.node_pools.pod_subnet.id` + +## `var.node_pools.vnet_subnet_id` has been replaced by `var.node_pools.vnet_subnet.id` + +## `azurerm_role_assignment.network_contributor` will be re-created + +Since `for_each`'s target has been changed from a set of string to a map of object to avoid "Known after apply" values in iterator, we have to re-create the `azurerm_role_assignment.network_contributor` resource. This will cause the role assignment to be removed and re-added, which may result in a brief period of time where the role assignment is not present. + +## When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself. + +## `var.client_secret` now is `sensitive` + +## New interval between cluster creation and kubernetes version upgrade + +New variable `interval_before_cluster_update` was added. Sometimes when we tried to update cluster's kubernetes version after cluster creation, we got the error `Operation is not allowed because there's an in progress update managed cluster operation on the managed cluster started`. A `time_sleep` was added to avoid such potential conflict. You can set this variable to `null` to bypass the sleep. + +## @zioproto is no longer a maintainer of this module + +For personal reasons, @zioproto is no longer a maintainer of this module. 
I want to express my sincere gratitude for his contributions and support over the years. His dedication and hard work are invaluable to this module. + +THANK YOU @zioproto ! diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md new file mode 100644 index 000000000..4f31d8157 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md @@ -0,0 +1,93 @@ +# Notice on Upgrade to v5.x + +V5.0.0 is a major version upgrade and a lot of breaking changes have been introduced. Extreme caution must be taken during the upgrade to avoid resource replacement and downtime by accident. + +Running the `terraform plan` first to inspect the plan is strongly advised. + +## Terraform and terraform-provider-azurerm version restrictions + +Now Terraform core's lowest version is v1.2.0 and terraform-provider-azurerm's lowest version is v3.21.0. + +## variable `user_assigned_identity_id` has been renamed. + +variable `user_assigned_identity_id` has been renamed to `identity_ids` and its type has been changed from `string` to `list(string)`. + +## `addon_profile` in outputs is no longer available. 
+ +It has been broken into the following new outputs: + +* `aci_connector_linux` +* `aci_connector_linux_enabled` +* `azure_policy_enabled` +* `http_application_routing_enabled` +* `ingress_application_gateway` +* `ingress_application_gateway_enabled` +* `key_vault_secrets_provider` +* `key_vault_secrets_provider_enabled` +* `oms_agent` +* `oms_agent_enabled` +* `open_service_mesh_enabled` + +## The following variables have been renamed from `enable_xxx` to `xxx_enabled` + +* `enable_azure_policy` has been renamed to `azure_policy_enabled` +* `enable_http_application_routing` has been renamed to `http_application_routing_enabled` +* `enable_ingress_application_gateway` has been renamed to `ingress_application_gateway_enabled` +* `enable_log_analytics_workspace` has been renamed to `log_analytics_workspace_enabled` +* `enable_open_service_mesh` has been renamed to `open_service_mesh_enabled` +* `enable_role_based_access_control` has been renamed to `role_based_access_control_enabled` + +## `nullable = true` has been added to the following variables so setting them to `null` explicitly will use the default value + +* `log_analytics_workspace_enable` +* `os_disk_type` +* `private_cluster_enabled` +* `rbac_aad_managed` +* `rbac_aad_admin_group_object_ids` +* `network_policy` +* `enable_node_public_ip` + +## `var.admin_username`'s default value has been removed + +In v4.x `var.admin_username` has a default value `azureuser` and has been removed in V5.0.0. Since the `admin_username` argument in `linux_profile` block is a ForceNew argument, any value change to this argument will trigger a Kubernetes cluster replacement **SO THE EXTREME CAUTION MUST BE TAKEN**. The module's callers must set `var.admin_username` to `azureuser` explicitly if they didn't set it before. + +## `module.ssh-key` has been removed + +The file named `private_ssh_key` which contains the tls private key will be deleted since the `local_file` resource has been removed. 
Now the private key is exported via `generated_cluster_private_ssh_key` in output and the corresponding public key is exported via `generated_cluster_public_ssh_key` in output. + +A `moved` block has been added to relocate the existing `tls_private_key` resource to the new address. If the `var.admin_username` is not `null`, no action is needed. + +Resource `tls_private_key`'s creation now is conditional. Users may see the destruction of existing `tls_private_key` in the generated plan if `var.admin_username` is `null`. + +## `system_assigned_identity` in the output has been renamed to `cluster_identity` + +The `system_assigned_identity` was: + +```hcl +output "system_assigned_identity" { + value = azurerm_kubernetes_cluster.main.identity +} +``` + +Now it has been renamed to `cluster_identity`, and the block has been changed to: + +```hcl +output "cluster_identity" { + description = "The `azurerm_kubernetes_cluster`'s `identity` block." + value = try(azurerm_kubernetes_cluster.main.identity[0], null) +} +``` + +The callers who used to read the cluster's identity block need to remove the index in their expression, from `module.aks.system_assigned_identity[0]` to `module.aks.cluster_identity`. + +## The following outputs are now sensitive. 
All outputs that reference them must be declared as sensitive too + +* `client_certificate` +* `client_key` +* `cluster_ca_certificate` +* `generated_cluster_private_ssh_key` +* `host` +* `kube_admin_config_raw` +* `kube_config_raw` +* `password` +* `username` diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md new file mode 100644 index 000000000..e75b87ea3 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md @@ -0,0 +1,5 @@ +# Notice on Upgrade to v6.x + +We've added a CI pipeline for this module to speed up our code review and to enforce a high code quality standard. If you want to contribute by submitting a pull request, please read the [Pre-Commit & Pr-Check & Test](#Pre-Commit--Pr-Check--Test) section, or your pull request might be rejected by the CI pipeline. + +A pull request will be reviewed when it has passed Pre Pull Request Check in the pipeline, and will be merged when it has passed the acceptance tests. If the CI pipeline fails, please read the pipeline's output. Thanks for your cooperation. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md new file mode 100644 index 000000000..e3c1f41a5 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md @@ -0,0 +1,52 @@ +# Notice on Upgrade to v7.x + +## Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard` + +AzureRM's minimum version is `>= 3.51, < 4.0` now. +[`var.sku_tier` cannot be set to `Paid` anymore](https://github.com/hashicorp/terraform-provider-azurerm/issues/20887), now possible values are `Free` and `Standard`. 
+ +## Ignore changes on `kubernetes_version` from outside of Terraform + +Related issue: #335 + +Two new resources would be created when upgrading from v6.x to v7.x: + +* `null_resource.kubernetes_version_keeper` +* `azapi_update_resource.aks_cluster_post_create` + +`azurerm_kubernetes_cluster.main` resource would ignore change on `kubernetes_version` from outside of Terraform in case AKS cluster's patch version has been upgraded automatically. +When you change `var.kubernetes_version`'s value, it would trigger a re-creation of `null_resource.kubernetes_version_keeper` and re-creation of `azapi_update_resource.aks_cluster_post_create`, which would upgrade the AKS cluster's `kubernetes_version`. + +`azapi` provider is required to be configured in your Terraform configuration. + +## Fix #315 by amending missing `linux_os_config` block + +In v6.0, `default_node_pool.linux_os_config` block won't be added to `azurerm_kubernetes_cluster.main` resource when `var.enable_auto_scaling` is `true`. This bug has been fixed in v7.0.0 so you might see a diff on `azurerm_kubernetes_cluster.main` resource. + +## Wrap `log_analytics_solution_id` to an object to fix #263. + +`var.log_analytics_solution_id` is now an object with `id` attribute. This change is to fix #263. + +## Remove unused net_profile_docker_bridge_cidr + +`var.net_profile_docker_bridge_cidr` has been [deprecated](https://github.com/hashicorp/terraform-provider-azurerm/issues/18119) and is not used in the module anymore and has been removed. + +## Add `create_before_destroy=true` to node pools #357 + +Now `azurerm_kubernetes_cluster_node_pool.node_pool` resource has `create_before_destroy=true` to avoid downtime when upgrading node pools. Users must be aware that there would be a "random" suffix added into pool's name, this suffix's length is `4`, so your previous node pool's name `nodepool1` would be `nodepool1xxxx`. 
This suffix is calculated from node pool's config, the same configuration would lead to the same suffix. You might need to shorten your node pool's name because of this new added suffix. + +To enable this feature, we've also added new `null_resource.pool_name_keeper` to track node pool's name in case you've changed the name. + +## Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` #361 + +As the [document](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#public_network_access_enabled) described: + +>When `public_network_access_enabled` is set to true, `0.0.0.0/32` must be added to `authorized_ip_ranges` in the `api_server_access_profile block`. + +We'll add `api_server_access_profile` nested block after AzureRM provider's v4.0, but starting from v7.0 we'll enforce such pre-condition check. + +## Add `depends_on` to `azurerm_kubernetes_cluster_node_pool` resources #418 + +If you have `azurerm_kubernetes_cluster_node_pool` resources not managed with this module (`var.nodepools`) you +must have an explicit `depends_on` on those resources to avoid conflicting nodepools operations. +See issue #418 for more details. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md new file mode 100644 index 000000000..96077ba1a --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md @@ -0,0 +1,53 @@ +# Notice on Upgrade to v8.x + +## New variable `cluster_name_random_suffix` + +1. A new variable `cluster_name_random_suffix` is added. This allows users to decide whether they want to add a random suffix to a cluster's name. This is particularly useful when Terraform needs to recreate a resource that cannot be updated in-place, as it avoids naming conflicts. 
Because of [#357](https://github.com/Azure/terraform-azurerm-aks/pull/357), the `azurerm_kubernetes_cluster` resource now has `create_before_destroy = true`, so we cannot turn this feature off. If you want to recreate this cluster in a single apply without any trouble, please turn this random naming suffix on to avoid the naming conflict. + +2. The `create_before_destroy` attribute is added to the `node_pools` variable as an object field. This attribute determines whether a new node pool should be created before the old one is destroyed during updates. By default, it is set to `true`. + +3. The naming of extra node pools has been updated. Now, a random UUID is used as the seed for the random suffix in the name of the node pool, instead of the JSON-encoded value of the node pool. **This naming suffix only applies to extra node pools that are created before destroy.** + +You're recommended to set `var.cluster_name_random_suffix` to `true` explicitly, and you'll see a random suffix in your cluster's name. If you don't like this suffix, please remember that a new cluster with the same name will now be created before the old one has been deleted. If you do want to recreate the cluster, please run `terraform destroy` first. + +## Remove `var.http_application_routing_enabled` + +According to the [document](https://learn.microsoft.com/en-us/azure/aks/http-application-routing), HTTP application routing add-on for AKS has been retired so we have to remove this feature from this module. + +1. The variable `http_application_routing_enabled` has been removed from the module. This variable was previously used to enable HTTP Application Routing Addon. + +2. The `http_application_routing_enabled` output has been removed from `outputs.tf`. This output was previously used to display whether HTTP Application Routing was enabled. + +3. The `http_application_routing_enabled` attribute has been removed from the `azurerm_kubernetes_cluster` resource in `main.tf`. 
This attribute was previously used to enable HTTP Application Routing for the Kubernetes cluster. + +4. The `http_application_routing_enabled` attribute has been added to the `ignore_changes` lifecycle block of the `azurerm_kubernetes_cluster` resource in `main.tf`. This means changes to this attribute will not trigger the resource to be updated. + +These changes mean that users of this module will no longer be able to enable HTTP Application Routing through this module. + +The new feature for the Ingress in AKS is [Managed NGINX ingress with the application routing add-on](https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default), you can enable this with `var.web_app_routing`. + +Users who were using this feature, please read this [Migrate document](https://learn.microsoft.com/en-us/azure/aks/app-routing-migration). + +## Remove `public_network_access_enabled` entirely + +According to this [announcement](https://github.com/Azure/AKS/issues/3690), now public network access for AKS is no longer supported. + +The primary impact [#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) is the complete removal of the `public_network_access_enabled` variable from the module. + +1. The `public_network_access_enabled` variable has been removed from the `variables.tf` file. This means that the module no longer supports the configuration of public network access at the Kubernetes cluster level. + +2. The `public_network_access_enabled` variable has also been removed from the `main.tf` file and all example files (`application_gateway_ingress/main.tf`, `multiple_node_pools/main.tf`, `named_cluster/main.tf`, `startup/main.tf`, `with_acr/main.tf`, `without_monitor/main.tf`). This indicates that the module no longer uses this variable in the creation of the Azure Kubernetes Service (AKS) resource. + +3. The `public_network_access_enabled` has been added into `azurerm_kubernetes_cluster`'s `ignore_changes` list. 
Any change to this attribute won't trigger update. + +## Add role assignments for ingress application gateway + +The `variables.tf` file is updated with new variables related to the application gateway for ingress, including `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress`. + +The `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress` variables are used to configure the Application Gateway Ingress for the Azure Kubernetes Service (AKS) in the Terraform module. + +1. `brown_field_application_gateway_for_ingress`: This variable is used when you want to use an existing Application Gateway as the ingress for the AKS cluster. It is an object that contains the ID of the Application Gateway (`id`) and the ID of the Subnet (`subnet_id`) which the Application Gateway is connected to. If this variable is set, the module will not create a new Application Gateway and will use the existing one instead. + +2. `green_field_application_gateway_for_ingress`: This variable is used when you want the module to create a new Application Gateway for the AKS cluster. It is an object that contains the name of the Application Gateway to be used or created in the Nodepool Resource Group (`name`), the subnet CIDR to be used to create an Application Gateway (`subnet_cidr`), and the ID of the subnet on which to create an Application Gateway (`subnet_id`). If this variable is set, the module will create a new Application Gateway with the provided configuration. + +3. `create_role_assignments_for_application_gateway`: This is a boolean variable that determines whether to create the corresponding role assignments for the application gateway or not. By default, it is set to `true`. Role assignments are necessary for the Application Gateway to function correctly with the AKS cluster. 
If set to `true`, the module will create the necessary role assignments on the Application Gateway. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md new file mode 100644 index 000000000..9bd796e2d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md @@ -0,0 +1,9 @@ +# Notice on Upgrade to v9.x + +## New default value for variable `agents_pool_max_surge` + +variable `agents_pool_max_surge` now has default value `10%`. This change might cause configuration drift. If you want to keep the old value, please set it explicitly in your configuration. + +## API version for `azapi_update_resource` resource has been upgraded from `Microsoft.ContainerService/managedClusters@2023-01-02-preview` to `Microsoft.ContainerService/managedClusters@2024-02-01`. + +After a test, it won't affect the existing Terraform state and cause configuration drift. The upgrade is caused by the retirement of original API. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md new file mode 100644 index 000000000..e754e5a7f --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md @@ -0,0 +1,490 @@ +# terraform-azurerm-aks + +## Deploys a Kubernetes cluster (AKS) on Azure with monitoring support through Azure Log Analytics + +This Terraform module deploys a Kubernetes cluster on Azure using AKS (Azure Kubernetes Service) and adds support for monitoring with Log Analytics. + +-> **NOTE:** If you have not assigned `client_id` or `client_secret`, A `SystemAssigned` identity will be created. + +-> **NOTE:** If you're using AzureRM `v4`, you can use this module by setting `source` to `Azure/aks/azurerm//v4`. 
+ +## Notice on breaking changes + +Please be aware that major version(e.g., from 6.8.0 to 7.0.0) update contains breaking changes that may impact your infrastructure. It is crucial to review these changes with caution before proceeding with the upgrade. + +In most cases, you will need to adjust your Terraform code to accommodate the changes introduced in the new major version. We strongly recommend reviewing the changelog and migration guide to understand the modifications and ensure a smooth transition. + +To help you in this process, we have provided detailed documentation on the breaking changes, new features, and any deprecated functionalities. Please take the time to read through these resources to avoid any potential issues or disruptions to your infrastructure. + +* [Notice on Upgrade to v10.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov10.0.md) +* [Notice on Upgrade to v9.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov9.0.md) +* [Notice on Upgrade to v8.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov8.0.md) +* [Notice on Upgrade to v7.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov7.0.md) +* [Notice on Upgrade to v6.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov6.0.md) +* [Notice on Upgrade to v5.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov5.0.md) + +Remember, upgrading to a major version with breaking changes should be done carefully and thoroughly tested in your environment. If you have any questions or concerns, please don't hesitate to reach out to our support team for assistance. + +## Usage in Terraform 1.2.0 + +Please view folders in `examples`. + +The module supports some outputs that may be used to configure a kubernetes +provider after deploying an AKS cluster. 
+ +```hcl +provider "kubernetes" { + host = module.aks.host + client_certificate = base64decode(module.aks.client_certificate) + client_key = base64decode(module.aks.client_key) + cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate) +} +``` + +There're some examples in the examples folder. You can execute `terraform apply` command in `examples`'s sub folder to try the module. These examples are tested against every PR with the [E2E Test](#Pre-Commit--Pr-Check--Test). + +## Enable or disable tracing tags + +We're using [BridgeCrew Yor](https://github.com/bridgecrewio/yor) and [yorbox](https://github.com/lonegunmanb/yorbox) to help manage tags consistently across infrastructure as code (IaC) frameworks. In this module you might see tags like: + +```hcl +resource "azurerm_resource_group" "rg" { + location = "eastus" + name = random_pet.name + tags = merge(var.tags, (/**/ (var.tracing_tags_enabled ? { for k, v in /**/ { + avm_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" + avm_git_file = "main.tf" + avm_git_last_modified_at = "2023-05-05 08:57:54" + avm_git_org = "lonegunmanb" + avm_git_repo = "terraform-yor-tag-test-module" + avm_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" + } /**/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /**/)) +} +``` + +To enable tracing tags, set the variable to true: + +```hcl +module "example" { +source = "{module_source}" +... +tracing_tags_enabled = true +} +``` + +The `tracing_tags_enabled` is default to `false`. + +To customize the prefix for your tracing tags, set the `tracing_tags_prefix` variable value in your Terraform configuration: + +```hcl +module "example" { +source = "{module_source}" +... 
+tracing_tags_prefix = "custom_prefix_" +} +``` + +The actual applied tags would be: + +```text +{ +custom_prefix_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" +custom_prefix_git_file = "main.tf" +custom_prefix_git_last_modified_at = "2023-05-05 08:57:54" +custom_prefix_git_org = "lonegunmanb" +custom_prefix_git_repo = "terraform-yor-tag-test-module" +custom_prefix_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" +} +``` + +## Pre-Commit & Pr-Check & Test + +### Configurations + +- [Configure Terraform for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/terraform-install-configure) + +We assumed that you have setup service principal's credentials in your environment variables like below: + +```shell +export ARM_SUBSCRIPTION_ID="" +export ARM_TENANT_ID="" +export ARM_CLIENT_ID="" +export ARM_CLIENT_SECRET="" +``` + +On Windows Powershell: + +```shell +$env:ARM_SUBSCRIPTION_ID="" +$env:ARM_TENANT_ID="" +$env:ARM_CLIENT_ID="" +$env:ARM_CLIENT_SECRET="" +``` + +We provide a docker image to run the pre-commit checks and tests for you: `mcr.microsoft.com/azterraform:latest` + +To run the pre-commit task, we can run the following command: + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +In pre-commit task, we will: + +1. Run `terraform fmt -recursive` command for your Terraform code. +2. Run `terrafmt fmt -f` command for markdown files and go code files to ensure that the Terraform code embedded in these files are well formatted. +3. Run `go mod tidy` and `go mod vendor` for test folder to ensure that all the dependencies have been synced. +4. Run `gofmt` for all go code files. +5. Run `gofumpt` for all go code files. +6. Run `terraform-docs` on `README.md` file, then run `markdown-table-formatter` to format markdown tables in `README.md`. 
+ +Then we can run the pr-check task to check whether our code meets our pipeline's requirement(We strongly recommend you run the following command before you commit): + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +To run the e2e-test, we can run the following command: + +```text +docker run --rm -v $(pwd):/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: + +```text +docker run --rm -v ${pwd}:/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +To follow [**Ensure AKS uses disk encryption set**](https://docs.bridgecrew.io/docs/ensure-that-aks-uses-disk-encryption-set) policy we've used `azurerm_key_vault` in example codes, and to follow [**Key vault does not allow firewall rules settings**](https://docs.bridgecrew.io/docs/ensure-that-key-vault-allows-firewall-rules-settings) we've limited the ip cidr on it's `network_acls`. 
By default we'll use the ip returned by `https://api.ipify.org?format=json` api as your public ip, but in case you need to use another cidr, you can set an environment variable like below: + +```text +docker run --rm -v $(pwd):/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: +```text +docker run --rm -v ${pwd}:/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +#### Prerequisites + +- [Docker](https://www.docker.com/community-edition#/download) + +## Authors + +Originally created by [Damien Caro](http://github.com/dcaro) and [Malte Lantin](http://github.com/n01d) + +## License + +[MIT](LICENSE) + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
+ +## Module Spec + +The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!** + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [azapi](#requirement\_azapi) | >=2.0, < 3.0 | +| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#requirement\_null) | >= 3.0 | +| [time](#requirement\_time) | >= 0.5 | +| [tls](#requirement\_tls) | >= 3.1 | + +## Providers + +| Name | Version | +|------|---------| +| [azapi](#provider\_azapi) | >=2.0, < 3.0 | +| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#provider\_null) | >= 3.0 | +| [time](#provider\_time) | >= 0.5 | +| [tls](#provider\_tls) | >= 3.1 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| 
[azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource | +| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource | +| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| 
[null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | +| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | +| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | +| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | +| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source | +| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no | +| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | 
(Optional) aci\_connector\_linux subnet name | `string` | `null` | no | +| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. | `number` | `2` | no | +| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no | +| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no | +| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no | +| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no | +| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no | +| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) |
list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
}))
| `[]` | no | +| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) |
list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
| `[]` | no | +| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no | +| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no | +| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no | +| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no | +| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no | +| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no | +| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no | +| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. 
Changing this forces some new resources to be created. | `map(string)` | `{}` | no | +| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no | +| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no | +| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no | +| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no | +| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no | +| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no | +| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no | +| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`. 
| `number` | `45` | no | +| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no | +| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. 
| `string` | `"0.5"` | no | +| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no | +| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no | +| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no | +| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. |
object({
id = string
subnet_id = string
})
| `null` | no | +| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no | +| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no | +| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on AKS cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now (described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no | +| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. |
object({
sgx_quote_helper_enabled = bool
})
| `null` | no | +| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no | +| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no | +| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no | +| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no | +| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 |
object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
})
|
{
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
}
| no | +| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no | +| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no | +| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no | +| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no | +| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no | +| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. 
| `bool` | `false` | no | +| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. |
object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
})
| `null` | no | +| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. |
object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
})
| `null` | no | +| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no | +| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no | +| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no | +| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no | +| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no | +| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no | +| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no | +| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no | +| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. 
| `string` | `"Public"` | no | +| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. |
object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
})
| `null` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no | +| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no | +| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no | +| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. 
https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no | +| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no | +| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no | +| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes | +| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. |
object({
id = string
})
| `null` | no | +| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. |
object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
})
| `null` | no | +| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no | +| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no | +| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no | +| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no | +| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. |
object({
identity_ids = optional(set(string))
type = string
})
| `null` | no | +| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no | +| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no | +| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no | +| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no | +| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. |
object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
})
| `null` | no | +| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` |
[
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` |
[
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no | +| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` |
[
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no | +| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) |
object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
})
| `null` | no | +| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no | +| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. |
object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
})
| `null` | no | +| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no | +| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. 
| `list(string)` | `null` | no | +| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no | +| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created. | `string` | `null` | no | +| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no | +| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. 
| `string` | `null` | no | +| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. |
object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
})
| `null` | no | +| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no | +| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`. Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
})) |
map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
}))
| `{}` | no | +| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no | +| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no | +| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no | +| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no | +| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no | +| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no | +| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no | +| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. 
Changing this forces a new resource to be created. | `string` | `null` | no | +| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no | +| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no | +| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no | +| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no | +| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no | +| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no | +| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no | +| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no | +| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. 
If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no | +| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes | +| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no | +| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no | +| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no | +| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no | +| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no | +| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. |
object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
})
| `null` | no | +| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no | +| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no | +| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no | +| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no | +| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no | +| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no | +| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. 
| `string` | `"KubernetesOfficial"` | no | +| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no | +| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no | +| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no | +| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) |
object({
dns_zone_ids = list(string)
})
| `null` | no | +| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. |
object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
})
| `null` | no | +| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. | +| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? | +| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. | +| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. | +| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. | +| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. | +| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. 
| +| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) | +| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace | +| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. | +| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. | +| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
| +| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. | +| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. | +| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). | +| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. | +| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. | +| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. | +| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? | +| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. 
| +| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? | +| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. | +| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. | +| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. | +| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. | +| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block | +| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. | +| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. | +| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. | +| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. | +| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? 
| +| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | +| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. | +| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. | +| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object. | + diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md new file mode 100644 index 000000000..869fdfe2b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 
+ +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf new file mode 100644 index 000000000..7f368600b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf @@ -0,0 +1,317 @@ +moved { + from = azurerm_kubernetes_cluster_node_pool.node_pool + to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + for_each = local.node_pools_create_before_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + gpu_instance = each.value.gpu_instance + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + 
workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + application_security_group_ids = each.value.node_network_profile.application_security_group_ids + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
 [] : ["windows_profile"]
+
+      content {
+        outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
+      }
+  }
+
+  depends_on = [azapi_update_resource.aks_cluster_post_create]
+
+  lifecycle {
+    create_before_destroy = true
+    ignore_changes = [
+      name
+    ]
+    replace_triggered_by = [
+      null_resource.pool_name_keeper[each.key],
+    ]
+
+    precondition {
+      condition     = can(regex("[a-z0-9]{1,8}", each.value.name))
+      error_message = "A Node Pool's name must consist of alphanumeric characters and have a maximum length of 8 characters (4 random chars added)"
+    }
+    precondition {
+      condition     = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
+      error_message = "With Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools."
+    }
+    precondition {
+      condition     = var.agents_type == "VirtualMachineScaleSets"
+      error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
+ } + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + for_each = local.node_pools_create_after_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = each.value.name + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? 
[] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
 [] : ["windows_profile"]
+
+    content {
+      outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
+    }
+  }
+
+  depends_on = [azapi_update_resource.aks_cluster_post_create]
+
+  lifecycle {
+    precondition {
+      condition     = can(regex("[a-z0-9]{1,8}", each.value.name))
+      error_message = "A Node Pool's name must consist of alphanumeric characters and have a maximum length of 8 characters (4 random chars added)"
+    }
+    precondition {
+      condition     = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
+      error_message = "With Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools."
+    }
+    precondition {
+      condition     = var.agents_type == "VirtualMachineScaleSets"
+      error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
+    }
+  }
+}
+
+resource "null_resource" "pool_name_keeper" {
+  for_each = var.node_pools
+
+  triggers = {
+    pool_name = each.value.name
+  }
+
+  lifecycle {
+    precondition {
+      condition     = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids)
+      error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set a different subnet for each node pool, including the default pool; otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself."
+ } + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf new file mode 100644 index 000000000..500f27ece --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf @@ -0,0 +1,17 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf new file mode 100644 index 000000000..2b69dfe13 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf @@ -0,0 +1,74 @@ +locals { + # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. + auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? 
var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete + # automatic upgrades are either: + # - null + # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null + # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null + automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : ( + (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || + (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) + ) + cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") + # Abstract the decision whether to create an Analytics Workspace or not. + create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null + create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null + default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) + # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 + existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) + existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] + existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) + existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) + # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 + existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) + existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) + existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) + existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) + ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress + # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. + # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled + # is set to `true`. + log_analytics_workspace = var.log_analytics_workspace_enabled ? ( + # The Log Analytics Workspace should be enabled: + var.log_analytics_workspace == null ? { + # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. + # Create an `azurerm_log_analytics_workspace` resource and use that. + id = local.azurerm_log_analytics_workspace_id + name = local.azurerm_log_analytics_workspace_name + location = local.azurerm_log_analytics_workspace_location + resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name + } : { + # `log_analytics_workspace` is supplied. Let's use that. 
+ id = var.log_analytics_workspace.id + name = var.log_analytics_workspace.name + location = var.log_analytics_workspace.location + # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 + resource_group_name = split("/", var.log_analytics_workspace.id)[4] + } + ) : null # Finally, the Log Analytics Workspace should be disabled. + node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } + node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } + private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) + query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) + subnet_ids = [for _, s in local.subnets : s.id] + subnets = merge({ for k, v in merge( + [ + for key, pool in var.node_pools : { + "${key}-vnet-subnet" : pool.vnet_subnet, + "${key}-pod-subnet" : pool.pod_subnet, + } + ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { + "vnet-subnet" : { + id = var.vnet_subnet.id + } + }) + # subnet_ids = for id in local.potential_subnet_ids : id if id != null + use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null + use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null + valid_private_dns_zone_regexs = [ + "private\\.[a-z0-9]+\\.azmk8s\\.io", + "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + ] +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf new file mode 100644 index 000000000..fe51625be --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf @@ -0,0 +1,124 @@ +resource "azurerm_log_analytics_workspace" "main" { + count = local.create_analytics_workspace ? 1 : 0 + + location = var.location + name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace") + resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name) + allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions + cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced + daily_quota_gb = var.log_analytics_workspace_daily_quota_gb + data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id + immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled + internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled + internet_query_enabled = var.log_analytics_workspace_internet_query_enabled + local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled + reservation_capacity_in_gb_per_day = 
var.log_analytics_workspace_reservation_capacity_in_gb_per_day + retention_in_days = var.log_retention_in_days + sku = var.log_analytics_workspace_sku + tags = var.tags + + dynamic "identity" { + for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity] + + content { + type = identity.value.type + identity_ids = identity.value.identity_ids + } + } + + lifecycle { + precondition { + condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix)) + error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`." + } + } +} + +locals { + azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null) + azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null) + azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null) + azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null) +} + +data "azurerm_log_analytics_workspace" "main" { + count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0 + + name = var.log_analytics_workspace.name + resource_group_name = local.log_analytics_workspace.resource_group_name +} + +resource "azurerm_log_analytics_solution" "main" { + count = local.create_analytics_solution ? 
1 : 0 + + location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null)) + resource_group_name = local.log_analytics_workspace.resource_group_name + solution_name = "ContainerInsights" + workspace_name = local.log_analytics_workspace.name + workspace_resource_id = local.log_analytics_workspace.id + tags = var.tags + + plan { + product = "OMSGallery/ContainerInsights" + publisher = "Microsoft" + } +} + +locals { + dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null) +} + +resource "azurerm_monitor_data_collection_rule" "dcr" { + count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 + + location = local.dcr_location + name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}" + resource_group_name = var.resource_group_name + description = "DCR for Azure Monitor Container Insights" + tags = var.tags + + data_flow { + destinations = [local.log_analytics_workspace.name] + streams = var.monitor_data_collection_rule_extensions_streams + } + data_flow { + destinations = [local.log_analytics_workspace.name] + streams = ["Microsoft-Syslog"] + } + destinations { + log_analytics { + name = local.log_analytics_workspace.name + workspace_resource_id = local.log_analytics_workspace.id + } + } + data_sources { + extension { + extension_name = "ContainerInsights" + name = "ContainerInsightsExtension" + streams = var.monitor_data_collection_rule_extensions_streams + extension_json = jsonencode({ + "dataCollectionSettings" : { + interval = var.data_collection_settings.data_collection_interval + namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection + namespaces = var.data_collection_settings.namespaces_for_data_collection + enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled + } 
+ }) + } + syslog { + facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities + log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels + name = "sysLogsDataSource" + streams = ["Microsoft-Syslog"] + } + } +} + +resource "azurerm_monitor_data_collection_rule_association" "dcra" { + count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 + + target_resource_id = azurerm_kubernetes_cluster.main.id + data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id + description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster." + name = "ContainerInsightsExtension" +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf new file mode 100644 index 000000000..0a8dc8e59 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf @@ -0,0 +1,741 @@ +moved { + from = module.ssh-key.tls_private_key.ssh + to = tls_private_key.ssh[0] +} + +resource "tls_private_key" "ssh" { + count = var.admin_username == null ? 0 : 1 + + algorithm = "RSA" + rsa_bits = 2048 +} + +resource "azurerm_kubernetes_cluster" "main" { + location = var.location + name = "${local.cluster_name}${var.cluster_name_random_suffix ? 
substr(md5(uuid()), 0, 4) : ""}" + resource_group_name = var.resource_group_name + azure_policy_enabled = var.azure_policy_enabled + cost_analysis_enabled = var.cost_analysis_enabled + disk_encryption_set_id = var.disk_encryption_set_id + dns_prefix = var.prefix + dns_prefix_private_cluster = var.dns_prefix_private_cluster + image_cleaner_enabled = var.image_cleaner_enabled + image_cleaner_interval_hours = var.image_cleaner_interval_hours + kubernetes_version = var.kubernetes_version + local_account_disabled = var.local_account_disabled + node_resource_group = var.node_resource_group + oidc_issuer_enabled = var.oidc_issuer_enabled + open_service_mesh_enabled = var.open_service_mesh_enabled + private_cluster_enabled = var.private_cluster_enabled + private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled + private_dns_zone_id = var.private_dns_zone_id + role_based_access_control_enabled = var.role_based_access_control_enabled + run_command_enabled = var.run_command_enabled + sku_tier = var.sku_tier + support_plan = var.support_plan + tags = var.tags + workload_identity_enabled = var.workload_identity_enabled + + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = var.node_network_profile == null ? [] : [var.node_network_profile] + + content { + application_security_group_ids = node_network_profile.value.application_security_group_ids + node_public_ip_tags = node_network_profile.value.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "aci_connector_linux" { + for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] + + content { + subnet_name = var.aci_connector_linux_subnet_name + } + } + dynamic "api_server_access_profile" { + for_each = var.api_server_authorized_ip_ranges != null ? [ + "api_server_access_profile" + ] : [] + + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile_enabled ? 
["default_auto_scaler_profile"] : [] + + content { + balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups + empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max + expander = var.auto_scaler_profile_expander + max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec + max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time + max_unready_nodes = var.auto_scaler_profile_max_unready_nodes + max_unready_percentage = var.auto_scaler_profile_max_unready_percentage + new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay + scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add + scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete + scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure + scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded + scale_down_unready = var.auto_scaler_profile_scale_down_unready + scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold + scan_interval = var.auto_scaler_profile_scan_interval + skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage + skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : [] + + content { + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled + managed = true + tenant_id = var.rbac_aad_tenant_id + } + } + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? 
[] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + dynamic "http_proxy_config" { + for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"] + + content { + http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy) + https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy) + no_proxy = var.http_proxy_config.no_proxy + trusted_ca = var.http_proxy_config.trusted_ca + } + } + dynamic "identity" { + for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : [] + + content { + type = var.identity_type + identity_ids = var.identity_ids + } + } + dynamic "ingress_application_gateway" { + for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : [] + + content { + gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null) + gateway_name = try(var.green_field_application_gateway_for_ingress.name, null) + subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null) + subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null) + } + } + dynamic "key_management_service" { + for_each = var.kms_enabled ? ["key_management_service"] : [] + + content { + key_vault_key_id = var.kms_key_vault_key_id + key_vault_network_access = var.kms_key_vault_network_access + } + } + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] + + content { + secret_rotation_enabled = var.secret_rotation_enabled + secret_rotation_interval = var.secret_rotation_interval + } + } + dynamic "kubelet_identity" { + for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] + + content { + client_id = kubelet_identity.value.client_id + object_id = kubelet_identity.value.object_id + user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id + } + } + dynamic "linux_profile" { + for_each = var.admin_username == null ? [] : ["linux_profile"] + + content { + admin_username = var.admin_username + + ssh_key { + key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "") + } + } + } + dynamic "maintenance_window" { + for_each = var.maintenance_window != null ? ["maintenance_window"] : [] + + content { + dynamic "allowed" { + for_each = var.maintenance_window.allowed + + content { + day = allowed.value.day + hours = allowed.value.hours + } + } + dynamic "not_allowed" { + for_each = var.maintenance_window.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] + + content { + duration = maintenance_window_auto_upgrade.value.duration + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + start_date = maintenance_window_auto_upgrade.value.start_date + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + week_index = maintenance_window_auto_upgrade.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "microsoft_defender" { + for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + } + } + dynamic "monitor_metrics" { + for_each = var.monitor_metrics != null ? 
["monitor_metrics"] : [] + + content { + annotations_allowed = var.monitor_metrics.annotations_allowed + labels_allowed = var.monitor_metrics.labels_allowed + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + ebpf_data_plane = var.ebpf_data_plane + ip_versions = var.network_ip_versions + load_balancer_sku = var.load_balancer_sku + network_data_plane = var.network_data_plane + network_mode = var.network_mode + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + pod_cidrs = var.net_profile_pod_cidrs + service_cidr = var.net_profile_service_cidr + service_cidrs = var.net_profile_service_cidrs + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + dynamic "nat_gateway_profile" { + for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile] + + content { + idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes + managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count + } + } + } + dynamic "oms_agent" { + for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? 
["oms_agent"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled + } + } + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + + content { + mode = var.service_mesh_profile.mode + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + } + } + dynamic "service_principal" { + for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] + + content { + client_id = var.client_id + client_secret = var.client_secret + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile_blob_driver_enabled + disk_driver_enabled = var.storage_profile_disk_driver_enabled + disk_driver_version = var.storage_profile_disk_driver_version + file_driver_enabled = var.storage_profile_file_driver_enabled + snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled + } + } + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_ids + } + } + dynamic "workload_autoscaler_profile" { + for_each = var.workload_autoscaler_profile == null ? 
[] : [var.workload_autoscaler_profile] + + content { + keda_enabled = workload_autoscaler_profile.value.keda_enabled + vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled + } + } + + depends_on = [ + null_resource.pool_name_keeper, + ] + + lifecycle { + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. + name, + ] + replace_triggered_by = [ + null_resource.kubernetes_cluster_name_keeper.id + ] + + precondition { + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "") + error_message = "Either `client_id` and `client_secret` or `identity_type` must be set." + } + precondition { + # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128 + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0) + error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well." + } + precondition { + condition = var.identity_ids == null || var.client_id == "" + error_message = "Cannot set both `client_id` and `identity_ids`." + } + precondition { + condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium") + error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled." 
+ } + precondition { + condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled) + error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true." + } + precondition { + condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard") + error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`" + } + precondition { + condition = local.automatic_channel_upgrade_check + error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`." + } + precondition { + condition = !(var.kms_enabled && var.identity_type != "UserAssigned") + error_message = "KMS etcd encryption doesn't work with system-assigned managed identity." + } + precondition { + condition = !var.workload_identity_enabled || var.oidc_issuer_enabled + error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity" + } + precondition { + condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure" + error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure." + } + precondition { + condition = var.network_policy != "azure" || var.network_plugin == "azure" + error_message = "network_policy must be `azure` when network_plugin is `azure`" + } + precondition { + condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure" + error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure." 
+ } + precondition { + condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null + error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified." + } + precondition { + condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster)) + error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`." + } + precondition { + condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage" + error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`." + } + precondition { + condition = (var.kubelet_identity == null) || ( + (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0) + error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set." + } + precondition { + condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets" + error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes." + } + precondition { + condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null + error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`." + } + precondition { + condition = var.prefix == null || var.dns_prefix_private_cluster == null + error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." 
+ } + precondition { + condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled + error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`." + } + precondition { + condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != "" + error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone" + } + precondition { + condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)])) + error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following format: `privatelink..azmk8s.io`, `.privatelink..azmk8s.io`, `private..azmk8s.io`, `.private..azmk8s.io`" + } + } +} + +resource "null_resource" "kubernetes_cluster_name_keeper" { + triggers = { + name = local.cluster_name + } +} + +resource "null_resource" "kubernetes_version_keeper" { + triggers = { + version = var.kubernetes_version + } +} + +resource "time_sleep" "interval_before_cluster_update" { + count = var.interval_before_cluster_update == null ? 
0 : 1 + + create_duration = var.interval_before_cluster_update + + depends_on = [ + azurerm_kubernetes_cluster.main, + ] + + lifecycle { + replace_triggered_by = [ + null_resource.kubernetes_version_keeper.id, + ] + } +} + +resource "azapi_update_resource" "aks_cluster_post_create" { + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + kubernetesVersion = var.kubernetes_version + } + } + + depends_on = [ + time_sleep.interval_before_cluster_update, + ] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.kubernetes_version_keeper.id] + } +} + +resource "null_resource" "http_proxy_config_no_proxy_keeper" { + count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0 + + triggers = { + http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "") + } +} + +resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" { + count = can(var.http_proxy_config.no_proxy[0]) ? 
1 : 0 + + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + httpProxyConfig = { + noProxy = var.http_proxy_config.no_proxy + } + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id] + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf new file mode 100644 index 000000000..a1f537658 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf @@ -0,0 +1,6 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster" "main" { + automatic_channel_upgrade = var.automatic_channel_upgrade + node_os_channel_upgrade = var.node_os_channel_upgrade +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf new file mode 100644 index 000000000..e3d37ce76 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf @@ -0,0 +1,231 @@ +output "aci_connector_linux" { + description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource." + value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null) +} + +output "aci_connector_linux_enabled" { + description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?" + value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0]) +} + +output "admin_client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." 
+ sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "") +} + +output "admin_client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "") +} + +output "admin_cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "") +} + +output "admin_host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "") +} + +output "admin_password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "") +} + +output "admin_username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "") +} + +output "aks_id" { + description = "The `azurerm_kubernetes_cluster`'s id." + value = azurerm_kubernetes_cluster.main.id +} + +output "aks_name" { + description = "The `azurerm_kubernetes_cluster`'s name." 
+ value = azurerm_kubernetes_cluster.main.name +} + +output "azure_policy_enabled" { + description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)" + value = azurerm_kubernetes_cluster.main.azure_policy_enabled +} + +output "azurerm_log_analytics_workspace_id" { + description = "The id of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].id, null) +} + +output "azurerm_log_analytics_workspace_name" { + description = "The name of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].name, null) +} + +output "azurerm_log_analytics_workspace_primary_shared_key" { + description = "Specifies the workspace key of the log analytics workspace" + sensitive = true + value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null) +} + +output "client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate +} + +output "client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_key +} + +output "cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate +} + +output "cluster_fqdn" { + description = "The FQDN of the Azure Kubernetes Managed Cluster." + value = azurerm_kubernetes_cluster.main.fqdn +} + +output "cluster_identity" { + description = "The `azurerm_kubernetes_cluster`'s `identity` block." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.identity[0], null) +} + +output "cluster_portal_fqdn" { + description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.portal_fqdn +} + +output "cluster_private_fqdn" { + description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.private_fqdn +} + +output "generated_cluster_private_ssh_key" { + description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null +} + +output "generated_cluster_public_ssh_key" { + description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)." + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? 
(var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null +} + +output "host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].host +} + +output "http_application_routing_zone_name" { + description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing." + value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : "" +} + +output "ingress_application_gateway" { + description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block." + value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null) +} + +output "ingress_application_gateway_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?" + value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0]) +} + +output "key_vault_secrets_provider" { + description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block." + value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null) +} + +output "key_vault_secrets_provider_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?" + value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0]) +} + +output "kube_admin_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_admin_config_raw +} + +output "kube_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config_raw +} + +output "kubelet_identity" { + description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block." + value = azurerm_kubernetes_cluster.main.kubelet_identity +} + +output "location" { + description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created." + value = azurerm_kubernetes_cluster.main.location +} + +output "network_profile" { + description = "The `azurerm_kubernetes_cluster`'s `network_profile` block" + value = azurerm_kubernetes_cluster.main.network_profile +} + +output "node_resource_group" { + description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group +} + +output "node_resource_group_id" { + description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group_id +} + +output "oidc_issuer_url" { + description = "The OIDC issuer URL that is associated with the cluster." + value = azurerm_kubernetes_cluster.main.oidc_issuer_url +} + +output "oms_agent" { + description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument." + value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null) +} + +output "oms_agent_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?" 
+ value = can(azurerm_kubernetes_cluster.main.oms_agent[0]) +} + +output "open_service_mesh_enabled" { + description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." + value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled +} + +output "password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].password +} + +output "username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].username +} + +output "web_app_routing_identity" { + description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object." 
+ value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, []) +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf new file mode 100644 index 000000000..e9601eaf0 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf @@ -0,0 +1,126 @@ +resource "azurerm_role_assignment" "acr" { + for_each = var.attached_acr_id_map + + principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id + scope = each.value + role_definition_name = "AcrPull" + skip_service_principal_aad_check = true +} + +# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity +data "azurerm_user_assigned_identity" "cluster_identity" { + count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 + + name = split("/", var.identity_ids[0])[8] + resource_group_name = split("/", var.identity_ids[0])[4] +} + +# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) +# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets +# used by the system node pool and by any additional node pools. +# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites +# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites +# https://github.com/Azure/terraform-azurerm-aks/issues/178 +resource "azurerm_role_assignment" "network_contributor" { + for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? 
local.subnets : {} + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value.id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = length(var.network_contributor_role_assigned_subnet_ids) == 0 + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +resource "azurerm_role_assignment" "network_contributor_on_subnet" { + for_each = var.network_contributor_role_assigned_subnet_ids + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = !var.create_role_assignment_network_contributor + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +data "azurerm_client_config" "this" {} + +data "azurerm_virtual_network" "application_gateway_vnet" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_subnet_vnet_name + resource_group_name = local.existing_application_gateway_subnet_resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = data.azurerm_virtual_network.application_gateway_vnet[0].id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress + error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`." + } + } +} + +resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2)) + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null) + error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`." + } + } +} + +resource "azurerm_role_assignment" "existing_application_gateway_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = var.brown_field_application_gateway_for_ingress.id + role_definition_name = "Contributor" + + lifecycle { + precondition { + condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress + error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." + } + } +} + +data "azurerm_resource_group" "ingress_gw" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_resource_group_for_ingress +} + +data "azurerm_resource_group" "aks_rg" { + count = var.create_role_assignments_for_application_gateway ? 1 : 0 + + name = var.resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { + count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id + role_definition_name = "Reader" +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile new file mode 100644 index 000000000..7f28c53a5 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile @@ -0,0 +1,85 @@ +REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" + +fmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash + +fumpt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash + +gosec: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash + +tffmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash + +tffmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash + +tfvalidatecheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash + +terrafmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash + +gofmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash + +golint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash + +tflint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash + +lint: golint tflint gosec + +checkovcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash + +checkovplancheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash + +fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck + +pr-check: depscheck fmtcheck lint 
unit-test checkovcheck + +unit-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash + +e2e-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash + +version-upgrade-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash + +terrafmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash + +pre-commit: tffmt terrafmt depsensure fmt fumpt generate + +depsensure: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash + +depscheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash + +generate: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash + +gencheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash + +yor-tag: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash + +autofix: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash + +test: fmtcheck + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash + +build-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash + +.PHONY: fmt fmtcheck pr-check \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf new file mode 100644 index 000000000..c819f9b89 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf @@ -0,0 +1,1601 @@ +variable "location" { + type = string + description = "Location of cluster, if not defined it will be read from the resource-group" +} + +variable 
"resource_group_name" { + type = string + description = "The existing resource group name to use" +} + +variable "aci_connector_linux_enabled" { + type = bool + default = false + description = "Enable Virtual Node pool" +} + +variable "aci_connector_linux_subnet_name" { + type = string + default = null + description = "(Optional) aci_connector_linux subnet name" +} + +variable "admin_username" { + type = string + default = null + description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created." +} + +variable "agents_availability_zones" { + type = list(string) + default = null + description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." +} + +variable "agents_count" { + type = number + default = 2 + description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes." +} + +variable "agents_labels" { + type = map(string) + default = {} + description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." +} + +variable "agents_max_count" { + type = number + default = null + description = "Maximum number of nodes in a pool" +} + +variable "agents_max_pods" { + type = number + default = null + description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." 
+} + +variable "agents_min_count" { + type = number + default = null + description = "Minimum number of nodes in a pool" +} + +variable "agents_pool_drain_timeout_in_minutes" { + type = number + default = null + description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." +} + +variable "agents_pool_kubelet_configs" { + type = list(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool, true) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_line = optional(number) + pod_max_pid = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. 
+ topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_linux_os_configs" { + type = list(object({ + sysctl_configs = optional(list(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + 
net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })), []) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + sysctl_configs = optional(list(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. 
Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_core_somaxconn                 = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
+      net_core_wmem_default              = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_core_wmem_max                  = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+      net_ipv4_ip_local_port_range_min   = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+      net_ipv4_ip_local_port_range_max   = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh1  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh2  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
+      net_ipv4_neigh_default_gc_thresh3  = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_fin_timeout           = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_keepalive_intvl       = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
+      net_ipv4_tcp_keepalive_probes      = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. 
Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })), []) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. 
Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_max_surge" { + type = string + default = "10%" + description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." +} + +variable "agents_pool_name" { + type = string + default = "nodepool" + description = "The default Azure AKS agentpool (nodepool) name." + nullable = false +} + +variable "agents_pool_node_soak_duration_in_minutes" { + type = number + default = 0 + description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." +} + +variable "agents_proximity_placement_group_id" { + type = string + default = null + description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." +} + +variable "agents_size" { + type = string + default = "Standard_D2s_v3" + description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." +} + +variable "agents_tags" { + type = map(string) + default = {} + description = "(Optional) A mapping of tags to assign to the Node Pool." +} + +variable "agents_type" { + type = string + default = "VirtualMachineScaleSets" + description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets." +} + +variable "api_server_authorized_ip_ranges" { + type = set(string) + default = null + description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes." 
+} + +variable "attached_acr_id_map" { + type = map(string) + default = {} + description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created." + nullable = false +} + +variable "auto_scaler_profile_balance_similar_node_groups" { + type = bool + default = false + description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`." +} + +variable "auto_scaler_profile_empty_bulk_delete_max" { + type = number + default = 10 + description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`." +} + +variable "auto_scaler_profile_enabled" { + type = bool + default = false + description = "Enable configuring the auto scaler profile" + nullable = false +} + +variable "auto_scaler_profile_expander" { + type = string + default = "random" + description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`." + + validation { + condition = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander) + error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`." + } +} + +variable "auto_scaler_profile_max_graceful_termination_sec" { + type = string + default = "600" + description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`." +} + +variable "auto_scaler_profile_max_node_provisioning_time" { + type = string + default = "15m" + description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`." +} + +variable "auto_scaler_profile_max_unready_nodes" { + type = number + default = 3 + description = "Maximum Number of allowed unready nodes. Defaults to `3`." 
+} + +variable "auto_scaler_profile_max_unready_percentage" { + type = number + default = 45 + description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`." +} + +variable "auto_scaler_profile_new_pod_scale_up_delay" { + type = string + default = "10s" + description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`." +} + +variable "auto_scaler_profile_scale_down_delay_after_add" { + type = string + default = "10m" + description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_delay_after_delete" { + type = string + default = null + description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`." +} + +variable "auto_scaler_profile_scale_down_delay_after_failure" { + type = string + default = "3m" + description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`." +} + +variable "auto_scaler_profile_scale_down_unneeded" { + type = string + default = "10m" + description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_unready" { + type = string + default = "20m" + description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`." +} + +variable "auto_scaler_profile_scale_down_utilization_threshold" { + type = string + default = "0.5" + description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`." 
+} + +variable "auto_scaler_profile_scan_interval" { + type = string + default = "10s" + description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`." +} + +variable "auto_scaler_profile_skip_nodes_with_local_storage" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`." +} + +variable "auto_scaler_profile_skip_nodes_with_system_pods" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`." +} + +variable "automatic_channel_upgrade" { + type = string + default = null + description = <<-EOT + (Optional) Defines the automatic upgrade channel for the AKS cluster. + Possible values: + * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").** + * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.** + + By default, automatic upgrades are disabled. + More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster + EOT + + validation { + condition = var.automatic_channel_upgrade == null ? true : contains([ + "patch", "stable", "rapid", "node-image" + ], var.automatic_channel_upgrade) + error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`." + } +} + +variable "azure_policy_enabled" { + type = bool + default = false + description = "Enable Azure Policy Addon." 
+} + +variable "brown_field_application_gateway_for_ingress" { + type = object({ + id = string + subnet_id = string + }) + default = null + description = <<-EOT + [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing) + * `id` - (Required) The ID of the Application Gateway that be used as cluster ingress. + * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. + EOT +} + +variable "client_id" { + type = string + default = "" + description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment" + nullable = false +} + +variable "client_secret" { + type = string + default = "" + description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment" + nullable = false + sensitive = true +} + +variable "cluster_log_analytics_workspace_name" { + type = string + default = null + description = "(Optional) The name of the Analytics workspace" +} + +variable "cluster_name" { + type = string + default = null + description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)" +} + +variable "cluster_name_random_suffix" { + type = bool + default = false + description = "Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict." 
+ nullable = false +} + +variable "confidential_computing" { + type = object({ + sgx_quote_helper_enabled = bool + }) + default = null + description = "(Optional) Enable Confidential Computing." +} + +variable "cost_analysis_enabled" { + type = bool + default = false + description = "(Optional) Enable Cost Analysis." +} + +variable "create_monitor_data_collection_rule" { + type = bool + default = true + description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`." + nullable = false +} + +variable "create_role_assignment_network_contributor" { + type = bool + default = false + description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster" + nullable = false +} + +variable "create_role_assignments_for_application_gateway" { + type = bool + default = true + description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`." + nullable = false +} + +variable "data_collection_settings" { + type = object({ + data_collection_interval = string + namespace_filtering_mode_for_data_collection = string + namespaces_for_data_collection = list(string) + container_log_v2_enabled = bool + }) + default = { + data_collection_interval = "1m" + namespace_filtering_mode_for_data_collection = "Off" + namespaces_for_data_collection = ["kube-system", "gatekeeper-system", "azure-arc"] + container_log_v2_enabled = true + } + description = <<-EOT + `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m. + `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection. + `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode. 
+ `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs. + See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 + EOT +} + +variable "default_node_pool_fips_enabled" { + type = bool + default = null + description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." +} + +variable "disk_encryption_set_id" { + type = string + default = null + description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created." +} + +variable "dns_prefix_private_cluster" { + type = string + default = null + description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created." +} + +variable "ebpf_data_plane" { + type = string + default = null + description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created." +} + +variable "enable_auto_scaling" { + type = bool + default = false + description = "Enable node pool autoscaling" +} + +variable "enable_host_encryption" { + type = bool + default = false + description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" +} + +variable "enable_node_public_ip" { + type = bool + default = false + description = "(Optional) Should nodes in this Node Pool have a Public IP Address? 
Defaults to false." +} + +variable "green_field_application_gateway_for_ingress" { + type = object({ + name = optional(string) + subnet_cidr = optional(string) + subnet_id = optional(string) + }) + default = null + description = <<-EOT + [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new) + * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. +EOT + + validation { + condition = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr))) + error_message = "One of `subnet_cidr` and `subnet_id` must be specified." + } +} + +variable "http_proxy_config" { + type = object({ + http_proxy = optional(string) + https_proxy = optional(string) + no_proxy = optional(list(string)) + trusted_ca = optional(string) + }) + default = null + description = <<-EOT + optional(object({ + http_proxy = (Optional) The proxy address to be used when communicating over HTTP. + https_proxy = (Optional) The proxy address to be used when communicating over HTTPS. + no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field. 
+ trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format. + })) + Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. +EOT + + validation { + condition = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)) + error_message = "`http_proxy` and `https_proxy` cannot be both empty." + } +} + +variable "identity_ids" { + type = list(string) + default = null + description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster." +} + +variable "identity_type" { + type = string + default = "SystemAssigned" + description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well." + + validation { + condition = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned" + error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`" + } +} + +variable "image_cleaner_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether Image Cleaner is enabled." +} + +variable "image_cleaner_interval_hours" { + type = number + default = 48 + description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`." +} + +variable "interval_before_cluster_update" { + type = string + default = "30s" + description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update." 
+} + +variable "key_vault_secrets_provider_enabled" { + type = bool + default = false + description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver" + nullable = false +} + +variable "kms_enabled" { + type = bool + default = false + description = "(Optional) Enable Azure KeyVault Key Management Service." + nullable = false +} + +variable "kms_key_vault_key_id" { + type = string + default = null + description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier." +} + +variable "kms_key_vault_network_access" { + type = string + default = "Public" + description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`." + + validation { + condition = contains(["Private", "Public"], var.kms_key_vault_network_access) + error_message = "Possible values are `Private` and `Public`" + } +} + +variable "kubelet_identity" { + type = object({ + client_id = optional(string) + object_id = optional(string) + user_assigned_identity_id = optional(string) + }) + default = null + description = <<-EOT + - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. 
+EOT +} + +variable "kubernetes_version" { + type = string + default = null + description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region" +} + +variable "load_balancer_profile_enabled" { + type = bool + default = false + description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`." + nullable = false +} + +variable "load_balancer_profile_idle_timeout_in_minutes" { + type = number + default = 30 + description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive." +} + +variable "load_balancer_profile_managed_outbound_ip_count" { + type = number + default = null + description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive" +} + +variable "load_balancer_profile_managed_outbound_ipv6_count" { + type = number + default = null + description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature" +} + +variable "load_balancer_profile_outbound_ip_address_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer." 
+} + +variable "load_balancer_profile_outbound_ip_prefix_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer." +} + +variable "load_balancer_profile_outbound_ports_allocated" { + type = number + default = 0 + description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`" +} + +variable "load_balancer_sku" { + type = string + default = "standard" + description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created." + + validation { + condition = contains(["basic", "standard"], var.load_balancer_sku) + error_message = "Possible values are `basic` and `standard`" + } +} + +variable "local_account_disabled" { + type = bool + default = null + description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information." +} + +variable "log_analytics_solution" { + type = object({ + id = string + }) + default = null + description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution." + + validation { + condition = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != "" + error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`." 
+ } +} + +variable "log_analytics_workspace" { + type = object({ + id = string + name = string + location = optional(string) + resource_group_name = optional(string) + }) + default = null + description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace." +} + +variable "log_analytics_workspace_allow_resource_only_permissions" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`." +} + +variable "log_analytics_workspace_cmk_for_query_forced" { + type = bool + default = null + description = "(Optional) Is Customer Managed Storage mandatory for query management?" +} + +variable "log_analytics_workspace_daily_quota_gb" { + type = number + default = null + description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted." +} + +variable "log_analytics_workspace_data_collection_rule_id" { + type = string + default = null + description = "(Optional) The ID of the Data Collection Rule to use for this workspace." +} + +variable "log_analytics_workspace_enabled" { + type = bool + default = true + description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard" + nullable = false +} + +variable "log_analytics_workspace_identity" { + type = object({ + identity_ids = optional(set(string)) + type = string + }) + default = null + description = <<-EOT + - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`. + - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. 
Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. +EOT +} + +variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" { + type = bool + default = null + description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days." +} + +variable "log_analytics_workspace_internet_ingestion_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_internet_query_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_local_authentication_disabled" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`." +} + +variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" { + type = number + default = null + description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`." +} + +variable "log_analytics_workspace_resource_group_name" { + type = string + default = null + description = "(Optional) Resource group name to create azurerm_log_analytics_solution." +} + +variable "log_analytics_workspace_sku" { + type = string + default = "PerGB2018" + description = "The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018"
+}
+
+variable "log_retention_in_days" {
+  type        = number
+  default     = 30
+  description = "The retention period for the logs in days"
+}
+
+variable "maintenance_window" {
+  type = object({
+    allowed = optional(list(object({
+      day   = string
+      hours = set(number)
+    })), [
+    ]),
+    not_allowed = optional(list(object({
+      end   = string
+      start = string
+    })), []),
+  })
+  default     = null
+  description = "(Optional) Maintenance configuration of the managed cluster."
+}
+
+variable "maintenance_window_auto_upgrade" {
+  type = object({
+    day_of_month = optional(number)
+    day_of_week  = optional(string)
+    duration     = number
+    frequency    = string
+    interval     = number
+    start_date   = optional(string)
+    start_time   = optional(string)
+    utc_offset   = optional(string)
+    week_index   = optional(string)
+    not_allowed = optional(set(object({
+      end   = string
+      start = string
+    })))
+  })
+  default     = null
+  description = <<-EOT
+  - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
+  - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
+  - `duration` - (Required) The duration of the window for maintenance to run in hours.
+  - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
+  - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
+  - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
+  - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. 
+ - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. + + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "maintenance_window_node_os" { + type = object({ + day_of_month = optional(number) + day_of_week = optional(string) + duration = number + frequency = string + interval = number + start_date = optional(string) + start_time = optional(string) + utc_offset = optional(string) + week_index = optional(string) + not_allowed = optional(set(object({ + end = string + start = string + }))) + }) + default = null + description = <<-EOT + - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. + - `duration` - (Required) The duration of the window for maintenance to run in hours. + - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. + - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. + - `start_date` - (Optional) The date on which the maintenance window begins to take effect. + - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. + - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. 
+ + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "microsoft_defender_enabled" { + type = bool + default = false + description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`." + nullable = false +} + +variable "monitor_data_collection_rule_data_sources_syslog_facilities" { + type = list(string) + default = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"] + description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog" +} + +variable "monitor_data_collection_rule_data_sources_syslog_levels" { + type = list(string) + default = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"] + description = "List of syslog levels" +} + +variable "monitor_data_collection_rule_extensions_streams" { + type = list(any) + default = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"] + description = "An array of container insights table streams. 
See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr" +} + +variable "monitor_metrics" { + type = object({ + annotations_allowed = optional(string) + labels_allowed = optional(string) + }) + default = null + description = <<-EOT + (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster + object({ + annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric." + labels_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric." + }) +EOT +} + +variable "msi_auth_for_monitoring_enabled" { + type = bool + default = null + description = "(Optional) Is managed identity authentication for monitoring enabled?" +} + +variable "nat_gateway_profile" { + type = object({ + idle_timeout_in_minutes = optional(number) + managed_outbound_ip_count = optional(number) + }) + default = null + description = <<-EOT + `nat_gateway_profile` block supports the following: + - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`. + - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. +EOT +} + +variable "net_profile_dns_service_ip" { + type = string + default = null + description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created." 
+} + +variable "net_profile_outbound_type" { + type = string + default = "loadBalancer" + description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer." +} + +variable "net_profile_pod_cidr" { + type = string + default = null + description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created." +} + +variable "net_profile_pod_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidr" { + type = string + default = null + description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "network_contributor_role_assigned_subnet_ids" { + type = map(string) + default = {} + description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id" + nullable = false +} + +variable "network_data_plane" { + type = string + default = null + description = "(Optional) Specifies the data plane used for building the Kubernetes network. 
Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created." +} + +variable "network_ip_versions" { + type = list(string) + default = null + description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created." +} + +variable "network_mode" { + type = string + default = null + description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created." +} + +variable "network_plugin" { + type = string + default = "kubenet" + description = "Network plugin to use for networking." + nullable = false +} + +variable "network_plugin_mode" { + type = string + default = null + description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created." +} + +variable "network_policy" { + type = string + default = null + description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created." +} + +variable "node_network_profile" { + type = object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + }) + default = null + description = <<-EOT + - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. 
+ - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. +--- + An `allowed_host_ports` block supports the following: + - `port_start`: (Optional) Specifies the start of the port range. + - `port_end`: (Optional) Specifies the end of the port range. + - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. +EOT +} + +variable "node_os_channel_upgrade" { + type = string + default = null + description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." +} + +variable "node_pools" { + type = map(object({ + name = string + node_count = optional(number) + tags = optional(map(string)) + vm_size = string + host_group_id = optional(string) + capacity_reservation_group_id = optional(string) + custom_ca_trust_enabled = optional(bool) + enable_auto_scaling = optional(bool) + enable_host_encryption = optional(bool) + enable_node_public_ip = optional(bool) + eviction_policy = optional(string) + gpu_instance = optional(string) + kubelet_config = optional(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_files = optional(number) + pod_max_pid = optional(number) + })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + 
net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + fips_enabled = optional(bool) + kubelet_disk_type = optional(string) + max_count = optional(number) + max_pods = optional(number) + message_of_the_day = optional(string) + mode = optional(string, "User") + min_count = optional(number) + node_network_profile = optional(object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + })) + node_labels = optional(map(string)) + node_public_ip_prefix_id = optional(string) + node_taints = optional(list(string)) + orchestrator_version = optional(string) + os_disk_size_gb = optional(number) + os_disk_type = optional(string, "Managed") + os_sku = optional(string) + 
os_type = optional(string, "Linux") + pod_subnet = optional(object({ + id = string + }), null) + priority = optional(string, "Regular") + proximity_placement_group_id = optional(string) + spot_max_price = optional(number) + scale_down_mode = optional(string, "Delete") + snapshot_id = optional(string) + ultra_ssd_enabled = optional(bool) + vnet_subnet = optional(object({ + id = string + }), null) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = optional(string) + zones = optional(set(string)) + create_before_destroy = optional(bool, true) + })) + default = {} + description = <<-EOT + A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: + map(object({ + name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. + node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. + tags = (Optional) A mapping of tags to assign to the resource. 
At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API. + vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. + custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information. + enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). + enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. + enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. + eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified. 
+ gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + kubelet_config = optional(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. + topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
+ })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. + net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. + net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + })) + fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). + kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. + max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`. + max_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created. + message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. + min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. + node_network_profile = optional(object({ + node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. + allowed_host_ports = optional(object({ + port_start = (Optional) Specifies the start of the port range. + port_end = (Optional) Specifies the end of the port range. + protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. 
+ })) + })) + node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. + node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. + node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. + orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. + os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. 
+ os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. + pod_subnet = optional(object({ + id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + })) + priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. + proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). + spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. + scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. + snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. 
+ vnet_subnet = optional(object({ + id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet. + })) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) + zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. + create_before_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`. + })) + EOT + nullable = false +} + +variable "node_resource_group" { + type = string + default = null + description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created." +} + +variable "oidc_issuer_enabled" { + type = bool + default = false + description = "Enable or Disable the OIDC issuer URL. Defaults to false." +} + +variable "oms_agent_enabled" { + type = bool + default = true + description = "Enable OMS Agent Addon." + nullable = false +} + +variable "only_critical_addons_enabled" { + type = bool + default = null + description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. 
Changing this forces a new resource to be created." +} + +variable "open_service_mesh_enabled" { + type = bool + default = null + description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." +} + +variable "orchestrator_version" { + type = string + default = null + description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" +} + +variable "os_disk_size_gb" { + type = number + default = 50 + description = "Disk size of nodes in GBs." +} + +variable "os_disk_type" { + type = string + default = "Managed" + description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." + nullable = false +} + +variable "os_sku" { + type = string + default = null + description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." +} + +variable "pod_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "prefix" { + type = string + default = "" + description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. 
Only one of `var.prefix` or `var.dns_prefix_private_cluster` can be specified." +} + +variable "private_cluster_enabled" { + type = bool + default = false + description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet." +} + +variable "private_cluster_public_fqdn_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`." +} + +variable "private_dns_zone_id" { + type = string + default = null + description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created." +} + +variable "public_ssh_key" { + type = string + default = "" + description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created." +} + +variable "rbac_aad" { + type = bool + default = true + description = "(Optional) Is Azure Active Directory integration enabled?" + nullable = false +} + +variable "rbac_aad_admin_group_object_ids" { + type = list(string) + default = null + description = "Object ID of groups with admin access." +} + +variable "rbac_aad_azure_rbac_enabled" { + type = bool + default = null + description = "(Optional) Is Role Based Access Control based on Azure AD enabled?" +} + +variable "rbac_aad_tenant_id" { + type = string + default = null + description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used." +} + +variable "role_based_access_control_enabled" { + type = bool + default = false + description = "Enable Role Based Access Control." 
+ nullable = false +} + +variable "run_command_enabled" { + type = bool + default = true + description = "(Optional) Whether to enable run command for the cluster or not." +} + +variable "scale_down_mode" { + type = string + default = "Delete" + description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." +} + +variable "secret_rotation_enabled" { + type = bool + default = false + description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`" + nullable = false +} + +variable "secret_rotation_interval" { + type = string + default = "2m" + description = "The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m`" + nullable = false +} + +variable "service_mesh_profile" { + type = object({ + mode = string + internal_ingress_gateway_enabled = optional(bool, true) + external_ingress_gateway_enabled = optional(bool, true) + }) + default = null + description = <<-EOT + `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. + `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. + `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. + EOT +} + +variable "sku_tier" { + type = string + default = "Free" + description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`" + + validation { + condition = contains(["Free", "Standard", "Premium"], var.sku_tier) + error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0." 
+ } +} + +variable "snapshot_id" { + type = string + default = null + description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." +} + +variable "storage_profile_blob_driver_enabled" { + type = bool + default = false + description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`" +} + +variable "storage_profile_disk_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_disk_driver_version" { + type = string + default = "v1" + description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`." +} + +variable "storage_profile_enabled" { + type = bool + default = false + description = "Enable storage profile" + nullable = false +} + +variable "storage_profile_file_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the File CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_snapshot_controller_enabled" { + type = bool + default = true + description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`" +} + +variable "support_plan" { + type = string + default = "KubernetesOfficial" + description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." + + validation { + condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan) + error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`." 
+ } +} + +variable "tags" { + type = map(string) + default = {} + description = "Any tags that should be present on the AKS cluster resources" +} + +variable "temporary_name_for_rotation" { + type = string + default = null + description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" +} + +variable "ultra_ssd_enabled" { + type = bool + default = false + description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." +} + +variable "vnet_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "web_app_routing" { + type = object({ + dns_zone_ids = list(string) + }) + default = null + description = <<-EOT + object({ + dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list." + }) +EOT +} + +variable "workload_autoscaler_profile" { + type = object({ + keda_enabled = optional(bool, false) + vertical_pod_autoscaler_enabled = optional(bool, false) + }) + default = null + description = <<-EOT + `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads. + `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. +EOT +} + +variable "workload_identity_enabled" { + type = bool + default = false + description = "Enable or Disable Workload Identity. Defaults to false." 
+} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf new file mode 100644 index 000000000..c9d2fe8f1 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + azapi = { + source = "Azure/azapi" + version = ">=2.0, < 3.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = ">= 3.107.0, < 4.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.5" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.1" + } + } +} From a899d793a112b6345a19f9fda17c2d96cc639e27 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:54:45 +0530 Subject: [PATCH 13/36] updated module source --- modules/kubernetes_cluster/azure_aks/0.2/main.tf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index c02f6e30f..e6c76806c 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -10,8 +10,7 @@ module "name" { # Create the AKS cluster using the official Azure module module "k8scluster" { - source = "Azure/aks/azurerm" - version = "10.2.0" + source = "./k8scluster" # Required variables resource_group_name = var.inputs.network_details.attributes.resource_group_name From 1b9e42c0498517aebc9489e34768fa9bb4eb2514 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 16:55:04 +0530 Subject: [PATCH 14/36] updated output type --- modules/kubernetes_cluster/azure_aks/0.2/facets.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 6237b7654..114c4cc55 100644 --- 
a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -308,7 +308,7 @@ inputs: - azurerm outputs: default: - type: '@outputs/azure_aks' + type: '@facets/azure_aks' title: Kubernetes Cluster Output description: The output for the Kubernetes cluster providers: From 3080b84997011193eea7a29b0dcbe53195685656 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 17:39:39 +0530 Subject: [PATCH 15/36] updates --- modules/kubernetes_cluster/azure_aks/0.2/main.tf | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index e6c76806c..a086129d8 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -8,7 +8,7 @@ module "name" { globally_unique = true } -# Create the AKS cluster using the official Azure module +# Create the AKS cluster using the locally modified Azure module module "k8scluster" { source = "./k8scluster" @@ -125,9 +125,6 @@ module "k8scluster" { var.instance.spec.tags != null ? 
var.instance.spec.tags : {} ) - # Disable http application routing - http_application_routing_enabled = false - # Disable local accounts for better security local_account_disabled = true From 62fa9f30880f76f47603ffdec821cdae988bda6f Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 17:40:45 +0530 Subject: [PATCH 16/36] updates --- modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf index c9d2fe8f1..7859b9fae 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf @@ -8,7 +8,7 @@ terraform { } azurerm = { source = "hashicorp/azurerm" - version = ">= 3.107.0, < 4.0" + version = ">= 3.107.0" } null = { source = "hashicorp/null" From fc31a972bfc04c79ae6b275292873591d99f5c1a Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 19:57:13 +0530 Subject: [PATCH 17/36] network fixes --- modules/network/azure_vpc/0.2/facets.yaml | 3 --- modules/network/azure_vpc/0.2/main.tf | 12 ++++++------ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/modules/network/azure_vpc/0.2/facets.yaml b/modules/network/azure_vpc/0.2/facets.yaml index 7ddffcb0d..d59eeef0f 100644 --- a/modules/network/azure_vpc/0.2/facets.yaml +++ b/modules/network/azure_vpc/0.2/facets.yaml @@ -13,9 +13,6 @@ inputs: optional: false providers: - azurerm - - azurerm3 - - azurerm3-105-0 - - azurerm3-116-0 spec: type: object properties: diff --git a/modules/network/azure_vpc/0.2/main.tf b/modules/network/azure_vpc/0.2/main.tf index bf35679bc..d2ffb49a6 100644 --- a/modules/network/azure_vpc/0.2/main.tf +++ b/modules/network/azure_vpc/0.2/main.tf @@ -312,8 +312,8 @@ resource "azurerm_subnet" "database" { address_prefixes = [each.value.cidr_block] service_endpoints = ["Microsoft.Storage"] 
- # Enable private link endpoint policies - enforce_private_link_endpoint_network_policies = true + # Configure private endpoint network policies + private_endpoint_network_policies = "Disabled" # Delegate to SQL services delegation { @@ -382,8 +382,8 @@ resource "azurerm_subnet" "functions" { address_prefixes = [each.value.cidr_block] service_endpoints = ["Microsoft.Storage"] - # Enable private link endpoint policies - enforce_private_link_endpoint_network_policies = true + # Configure private endpoint network policies + private_endpoint_network_policies = "Disabled" # Delegate to Azure Functions delegation { @@ -412,8 +412,8 @@ resource "azurerm_subnet" "private_link_service" { address_prefixes = [each.value.cidr_block] service_endpoints = ["Microsoft.Storage"] - # Enable private link service policies (this is why we need a dedicated subnet) - enforce_private_link_service_network_policies = true + # Configure private link service network policies (disabled for Private Link Service) + private_link_service_network_policies_enabled = false lifecycle { ignore_changes = [service_endpoints, name] From 2479f7bccc5f6c3ce3e821edaf2d14872657b60f Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 20:10:44 +0530 Subject: [PATCH 18/36] refactoring --- modules/network/azure_vpc/0.2/facets.yaml | 10 + modules/network/azure_vpc/0.2/locals.tf | 206 +++++ modules/network/azure_vpc/0.2/main.tf | 762 +----------------- modules/network/azure_vpc/0.2/nat-gateway.tf | 77 ++ modules/network/azure_vpc/0.2/network.tf | 29 + .../azure_vpc/0.2/private-endpoints.tf | 67 ++ modules/network/azure_vpc/0.2/routing.tf | 66 ++ modules/network/azure_vpc/0.2/subnets.tf | 174 ++++ modules/network/azure_vpc/0.2/variables.tf | 238 +++++- 9 files changed, 885 insertions(+), 744 deletions(-) create mode 100644 modules/network/azure_vpc/0.2/locals.tf create mode 100644 modules/network/azure_vpc/0.2/nat-gateway.tf create mode 100644 modules/network/azure_vpc/0.2/network.tf create mode 100644 
modules/network/azure_vpc/0.2/private-endpoints.tf create mode 100644 modules/network/azure_vpc/0.2/routing.tf create mode 100644 modules/network/azure_vpc/0.2/subnets.tf diff --git a/modules/network/azure_vpc/0.2/facets.yaml b/modules/network/azure_vpc/0.2/facets.yaml index d59eeef0f..1ea9c54c6 100644 --- a/modules/network/azure_vpc/0.2/facets.yaml +++ b/modules/network/azure_vpc/0.2/facets.yaml @@ -281,4 +281,14 @@ sample: Project: main-infrastructure iac: validated_files: + - main.tf - variables.tf + - locals.tf + - network.tf + - subnets.tf + - nat-gateway.tf + - routing.tf + - security-groups.tf + - private-endpoints.tf + - outputs.tf +disable_state_reference_on_selective_release: true \ No newline at end of file diff --git a/modules/network/azure_vpc/0.2/locals.tf b/modules/network/azure_vpc/0.2/locals.tf new file mode 100644 index 000000000..5c77c0aeb --- /dev/null +++ b/modules/network/azure_vpc/0.2/locals.tf @@ -0,0 +1,206 @@ +######################################################################### +# Local Values and Calculations # +######################################################################### + +locals { + # Private endpoint DNS zone mappings + private_dns_zones = { + enable_storage = "privatelink.blob.core.windows.net" + enable_sql = "privatelink.database.windows.net" + enable_keyvault = "privatelink.vaultcore.azure.net" + enable_acr = "privatelink.azurecr.io" + enable_aks = "privatelink.${var.instance.spec.region}.azmk8s.io" + enable_cosmos = "privatelink.documents.azure.com" + enable_servicebus = "privatelink.servicebus.windows.net" + enable_eventhub = "privatelink.servicebus.windows.net" + enable_monitor = "privatelink.monitor.azure.com" + enable_cognitive = "privatelink.cognitiveservices.azure.com" + } + + private_endpoints_enabled = { + for k, v in var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true + } + + # Calculate subnet mask from IP count + subnet_mask_map = { + 
"256" = 24 # /24 = 256 IPs + "512" = 23 # /23 = 512 IPs + "1024" = 22 # /22 = 1024 IPs + "2048" = 21 # /21 = 2048 IPs + "4096" = 20 # /20 = 4096 IPs + "8192" = 19 # /19 = 8192 IPs + } + + # Use fixed CIDR allocation like the original (optional) + use_fixed_cidrs = lookup(var.instance.spec, "use_fixed_cidr_allocation", false) + + # Fixed CIDR allocation (similar to original logic) + fixed_private_subnets = local.use_fixed_cidrs ? [for i in range(4) : cidrsubnet(var.instance.spec.vnet_cidr, 4, i)] : [] + fixed_public_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 12), cidrsubnet(var.instance.spec.vnet_cidr, 4, 14), cidrsubnet(var.instance.spec.vnet_cidr, 4, 15)] : [] + fixed_database_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 4), cidrsubnet(var.instance.spec.vnet_cidr, 4, 5)] : [] + fixed_gateway_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 6)] : [] + fixed_cache_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 112)] : [] + fixed_functions_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 113)] : [] + fixed_private_link_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 114)] : [] + + vnet_prefix_length = tonumber(split("/", var.instance.spec.vnet_cidr)[1]) + + public_subnet_newbits = local.subnet_mask_map[var.instance.spec.public_subnets.subnet_size] - local.vnet_prefix_length + private_subnet_newbits = local.subnet_mask_map[var.instance.spec.private_subnets.subnet_size] - local.vnet_prefix_length + database_subnet_newbits = local.subnet_mask_map[var.instance.spec.database_subnets.subnet_size] - local.vnet_prefix_length + + # Calculate total number of subnets needed (only for dynamic allocation) + public_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0 + private_total_subnets = !local.use_fixed_cidrs ? 
length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az : 0 + database_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az : 0 + + # Specialized subnets (always use fixed allocation for these) + gateway_subnets_enabled = lookup(var.instance.spec, "enable_gateway_subnet", false) + cache_subnets_enabled = lookup(var.instance.spec, "enable_cache_subnet", false) + functions_subnets_enabled = lookup(var.instance.spec, "enable_functions_subnet", false) + private_link_svc_enabled = lookup(var.instance.spec, "enable_private_link_service_subnet", false) + + # Create list of newbits for cidrsubnets function (dynamic allocation only) + subnet_newbits = !local.use_fixed_cidrs ? concat( + var.instance.spec.public_subnets.count_per_az > 0 ? [ + for i in range(local.public_total_subnets) : local.public_subnet_newbits + ] : [], + [for i in range(local.private_total_subnets) : local.private_subnet_newbits], + [for i in range(local.database_total_subnets) : local.database_subnet_newbits] + ) : [] + + # Generate all subnet CIDRs using cidrsubnets function - this prevents overlaps (dynamic allocation) + all_subnet_cidrs = !local.use_fixed_cidrs && length(local.subnet_newbits) > 0 ? cidrsubnets(var.instance.spec.vnet_cidr, local.subnet_newbits...) : [] + + # Extract subnet CIDRs by type (dynamic allocation) + public_subnet_cidrs = !local.use_fixed_cidrs && var.instance.spec.public_subnets.count_per_az > 0 ? slice( + local.all_subnet_cidrs, + 0, + local.public_total_subnets + ) : local.fixed_public_subnets + + private_subnet_cidrs = !local.use_fixed_cidrs ? slice( + local.all_subnet_cidrs, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets : 0, + var.instance.spec.public_subnets.count_per_az > 0 ? 
local.public_total_subnets + local.private_total_subnets : local.private_total_subnets + ) : local.fixed_private_subnets + + database_subnet_cidrs = !local.use_fixed_cidrs ? slice( + local.all_subnet_cidrs, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets, + var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets + local.database_total_subnets : local.private_total_subnets + local.database_total_subnets + ) : local.fixed_database_subnets + + # Create subnet mappings with AZ and CIDR + public_subnets = var.instance.spec.public_subnets.count_per_az > 0 ? ( + local.use_fixed_cidrs ? [ + for i, cidr in local.public_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.public_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.public_subnet_cidrs[az_index * var.instance.spec.public_subnets.count_per_az + subnet_index] + } + ] + ]) + ) : [] + + private_subnets = local.use_fixed_cidrs ? 
[ + for i, cidr in local.private_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.private_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.private_subnet_cidrs[az_index * var.instance.spec.private_subnets.count_per_az + subnet_index] + } + ] + ]) + + database_subnets = local.use_fixed_cidrs ? [ + for i, cidr in local.database_subnet_cidrs : { + az_index = i % length(var.instance.spec.availability_zones) + subnet_index = floor(i / length(var.instance.spec.availability_zones)) + az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] + cidr_block = cidr + } + ] : flatten([ + for az_index, az in var.instance.spec.availability_zones : [ + for subnet_index in range(var.instance.spec.database_subnets.count_per_az) : { + az_index = az_index + subnet_index = subnet_index + az = az + cidr_block = local.database_subnet_cidrs[az_index * var.instance.spec.database_subnets.count_per_az + subnet_index] + } + ] + ]) + + # Specialized subnets (always use fixed allocation) + gateway_subnets = local.gateway_subnets_enabled ? [ + for i, cidr in local.fixed_gateway_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + cache_subnets = local.cache_subnets_enabled ? [ + for i, cidr in local.fixed_cache_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + functions_subnets = local.functions_subnets_enabled ? [ + for i, cidr in local.fixed_functions_subnets : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + private_link_service_subnets = local.private_link_svc_enabled ? 
[ + for i, cidr in local.fixed_private_link_subnet : { + subnet_index = i + cidr_block = cidr + } + ] : [] + + # Private endpoints configuration with defaults + private_endpoints = var.instance.spec.private_endpoints != null ? var.instance.spec.private_endpoints : { + enable_storage = true + enable_sql = true + enable_keyvault = true + enable_acr = true + enable_aks = false + enable_cosmos = false + enable_servicebus = false + enable_eventhub = false + enable_monitor = false + enable_cognitive = false + } + + # Resource naming prefix + name_prefix = "${var.environment.unique_name}-${var.instance_name}" + + # Common tags + common_tags = merge( + var.environment.cloud_tags, + lookup(var.instance.spec, "tags", {}), + { + Name = local.name_prefix + Environment = var.environment.name + } + ) +} diff --git a/modules/network/azure_vpc/0.2/main.tf b/modules/network/azure_vpc/0.2/main.tf index d2ffb49a6..51b6b1398 100644 --- a/modules/network/azure_vpc/0.2/main.tf +++ b/modules/network/azure_vpc/0.2/main.tf @@ -1,747 +1,33 @@ ######################################################################### # Terraform Module Structure # # # -# ── Guidance for Code Generators / AI Tools ───────────────────────── # +# ── Guidance for Code Generators / AI Tools ────────────────────────── # # # -# • Keep this main.tf file **intentionally empty**. # -# It serves only as the module's entry point. # +# • This main.tf file serves as the module's entry point and overview. # # # -# • Create additional *.tf files that are **logically grouped** # -# according to the functionality and resources of the module. 
# +# • All logic has been organized into separate files for maintainability:# +# - locals.tf: Local values and calculations # +# - network.tf: Core network infrastructure (RG, VNet) # +# - subnets.tf: All subnet resources # +# - nat-gateway.tf: NAT Gateway and associations # +# - routing.tf: Route tables and routing configuration # +# - security-groups.tf: Network Security Groups # +# - private-endpoints.tf: Private DNS zones and endpoints # # # -# • Group related resources, data sources, locals, variables, and # -# outputs into separate files to improve clarity and maintainability. # +# • This structure improves readability, maintainability, and # +# makes it easier for teams to work on specific components. # # # -# • Choose file names that clearly reflect the purpose of the contents. # -# # -# • Add new files as needed when new functionality areas are introduced,# -# instead of expanding existing files indefinitely. # -# # -# This ensures modules stay clean, scalable, and easy to navigate. 
# ######################################################################### -# Local values for calculations -locals { - # Private endpoint DNS zone mappings - private_dns_zones = { - enable_storage = "privatelink.blob.core.windows.net" - enable_sql = "privatelink.database.windows.net" - enable_keyvault = "privatelink.vaultcore.azure.net" - enable_acr = "privatelink.azurecr.io" - enable_aks = "privatelink.${var.instance.spec.region}.azmk8s.io" - enable_cosmos = "privatelink.documents.azure.com" - enable_servicebus = "privatelink.servicebus.windows.net" - enable_eventhub = "privatelink.servicebus.windows.net" - enable_monitor = "privatelink.monitor.azure.com" - enable_cognitive = "privatelink.cognitiveservices.azure.com" - } - - private_endpoints_enabled = { - for k, v in var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true - } - # Calculate subnet mask from IP count - subnet_mask_map = { - "256" = 24 # /24 = 256 IPs - "512" = 23 # /23 = 512 IPs - "1024" = 22 # /22 = 1024 IPs - "2048" = 21 # /21 = 2048 IPs - "4096" = 20 # /20 = 4096 IPs - "8192" = 19 # /19 = 8192 IPs - } - - # Use fixed CIDR allocation like the original (optional) - use_fixed_cidrs = lookup(var.instance.spec, "use_fixed_cidr_allocation", false) - - # Fixed CIDR allocation (similar to original logic) - fixed_private_subnets = local.use_fixed_cidrs ? [for i in range(4) : cidrsubnet(var.instance.spec.vnet_cidr, 4, i)] : [] - fixed_public_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 12), cidrsubnet(var.instance.spec.vnet_cidr, 4, 14), cidrsubnet(var.instance.spec.vnet_cidr, 4, 15)] : [] - fixed_database_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 4), cidrsubnet(var.instance.spec.vnet_cidr, 4, 5)] : [] - fixed_gateway_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 6)] : [] - fixed_cache_subnet = local.use_fixed_cidrs ? 
[cidrsubnet(var.instance.spec.vnet_cidr, 8, 112)] : [] - fixed_functions_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 113)] : [] - fixed_private_link_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 114)] : [] - - vnet_prefix_length = tonumber(split("/", var.instance.spec.vnet_cidr)[1]) - - public_subnet_newbits = local.subnet_mask_map[var.instance.spec.public_subnets.subnet_size] - local.vnet_prefix_length - private_subnet_newbits = local.subnet_mask_map[var.instance.spec.private_subnets.subnet_size] - local.vnet_prefix_length - database_subnet_newbits = local.subnet_mask_map[var.instance.spec.database_subnets.subnet_size] - local.vnet_prefix_length - - # Calculate total number of subnets needed (only for dynamic allocation) - public_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0 - private_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az : 0 - database_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az : 0 - - # Specialized subnets (always use fixed allocation for these) - gateway_subnets_enabled = lookup(var.instance.spec, "enable_gateway_subnet", false) - cache_subnets_enabled = lookup(var.instance.spec, "enable_cache_subnet", false) - functions_subnets_enabled = lookup(var.instance.spec, "enable_functions_subnet", false) - private_link_svc_enabled = lookup(var.instance.spec, "enable_private_link_service_subnet", false) - - # Create list of newbits for cidrsubnets function (dynamic allocation only) - subnet_newbits = !local.use_fixed_cidrs ? concat( - var.instance.spec.public_subnets.count_per_az > 0 ? 
[ - for i in range(local.public_total_subnets) : local.public_subnet_newbits - ] : [], - [for i in range(local.private_total_subnets) : local.private_subnet_newbits], - [for i in range(local.database_total_subnets) : local.database_subnet_newbits] - ) : [] - - # Generate all subnet CIDRs using cidrsubnets function - this prevents overlaps (dynamic allocation) - all_subnet_cidrs = !local.use_fixed_cidrs && length(local.subnet_newbits) > 0 ? cidrsubnets(var.instance.spec.vnet_cidr, local.subnet_newbits...) : [] - - # Extract subnet CIDRs by type (dynamic allocation) - public_subnet_cidrs = !local.use_fixed_cidrs && var.instance.spec.public_subnets.count_per_az > 0 ? slice( - local.all_subnet_cidrs, - 0, - local.public_total_subnets - ) : local.fixed_public_subnets - - private_subnet_cidrs = !local.use_fixed_cidrs ? slice( - local.all_subnet_cidrs, - var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets : 0, - var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets - ) : local.fixed_private_subnets - - database_subnet_cidrs = !local.use_fixed_cidrs ? slice( - local.all_subnet_cidrs, - var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets, - var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets + local.database_total_subnets : local.private_total_subnets + local.database_total_subnets - ) : local.fixed_database_subnets - - # Create subnet mappings with AZ and CIDR - public_subnets = var.instance.spec.public_subnets.count_per_az > 0 ? ( - local.use_fixed_cidrs ? 
[ - for i, cidr in local.public_subnet_cidrs : { - az_index = i % length(var.instance.spec.availability_zones) - subnet_index = floor(i / length(var.instance.spec.availability_zones)) - az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] - cidr_block = cidr - } - ] : flatten([ - for az_index, az in var.instance.spec.availability_zones : [ - for subnet_index in range(var.instance.spec.public_subnets.count_per_az) : { - az_index = az_index - subnet_index = subnet_index - az = az - cidr_block = local.public_subnet_cidrs[az_index * var.instance.spec.public_subnets.count_per_az + subnet_index] - } - ] - ]) - ) : [] - - private_subnets = local.use_fixed_cidrs ? [ - for i, cidr in local.private_subnet_cidrs : { - az_index = i % length(var.instance.spec.availability_zones) - subnet_index = floor(i / length(var.instance.spec.availability_zones)) - az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] - cidr_block = cidr - } - ] : flatten([ - for az_index, az in var.instance.spec.availability_zones : [ - for subnet_index in range(var.instance.spec.private_subnets.count_per_az) : { - az_index = az_index - subnet_index = subnet_index - az = az - cidr_block = local.private_subnet_cidrs[az_index * var.instance.spec.private_subnets.count_per_az + subnet_index] - } - ] - ]) - - database_subnets = local.use_fixed_cidrs ? 
[ - for i, cidr in local.database_subnet_cidrs : { - az_index = i % length(var.instance.spec.availability_zones) - subnet_index = floor(i / length(var.instance.spec.availability_zones)) - az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)] - cidr_block = cidr - } - ] : flatten([ - for az_index, az in var.instance.spec.availability_zones : [ - for subnet_index in range(var.instance.spec.database_subnets.count_per_az) : { - az_index = az_index - subnet_index = subnet_index - az = az - cidr_block = local.database_subnet_cidrs[az_index * var.instance.spec.database_subnets.count_per_az + subnet_index] - } - ] - ]) - - # Specialized subnets (always use fixed allocation) - gateway_subnets = local.gateway_subnets_enabled ? [ - for i, cidr in local.fixed_gateway_subnet : { - subnet_index = i - cidr_block = cidr - } - ] : [] - - cache_subnets = local.cache_subnets_enabled ? [ - for i, cidr in local.fixed_cache_subnet : { - subnet_index = i - cidr_block = cidr - } - ] : [] - - functions_subnets = local.functions_subnets_enabled ? [ - for i, cidr in local.fixed_functions_subnets : { - subnet_index = i - cidr_block = cidr - } - ] : [] - - private_link_service_subnets = local.private_link_svc_enabled ? [ - for i, cidr in local.fixed_private_link_subnet : { - subnet_index = i - cidr_block = cidr - } - ] : [] - - # Private endpoints configuration with defaults - private_endpoints = var.instance.spec.private_endpoints != null ? 
var.instance.spec.private_endpoints : { - enable_storage = true - enable_sql = true - enable_keyvault = true - enable_acr = true - enable_aks = false - enable_cosmos = false - enable_servicebus = false - enable_eventhub = false - enable_monitor = false - enable_cognitive = false - } - - # Resource naming prefix - name_prefix = "${var.environment.unique_name}-${var.instance_name}" - - # Common tags - common_tags = merge( - var.environment.cloud_tags, - lookup(var.instance.spec, "tags", {}), - { - Name = local.name_prefix - Environment = var.environment.name - } - ) -} - -# Resource Group -resource "azurerm_resource_group" "main" { - name = "${local.name_prefix}-rg" - location = var.instance.spec.region - - tags = local.common_tags - - lifecycle { - prevent_destroy = true - } -} - -# Virtual Network -resource "azurerm_virtual_network" "main" { - name = "${local.name_prefix}-vnet" - address_space = [var.instance.spec.vnet_cidr] - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - tags = local.common_tags - - lifecycle { - prevent_destroy = true - } -} - -# Public Subnets -resource "azurerm_subnet" "public" { - for_each = var.instance.spec.public_subnets.count_per_az > 0 ? 
{ - for subnet in local.public_subnets : - "${subnet.az}-${subnet.subnet_index}" => subnet - } : {} - - name = "${local.name_prefix}-public-${each.value.az}-${each.value.subnet_index + 1}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - lifecycle { - ignore_changes = [delegation, service_endpoints, name] - } -} - -# Private Subnets -resource "azurerm_subnet" "private" { - for_each = { - for subnet in local.private_subnets : - "${subnet.az}-${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-private-${each.value.az}-${each.value.subnet_index + 1}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - # Delegate subnet to specific services if needed - dynamic "delegation" { - for_each = var.instance.spec.enable_aks ? 
[1] : [] - content { - name = "aks-delegation" - service_delegation { - name = "Microsoft.ContainerService/managedClusters" - actions = [ - "Microsoft.Network/virtualNetworks/subnets/join/action", - ] - } - } - } - - lifecycle { - ignore_changes = [delegation, service_endpoints, name] - } -} - -# Database Subnets -resource "azurerm_subnet" "database" { - for_each = { - for subnet in local.database_subnets : - "${subnet.az}-${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-database-${each.value.az}-${each.value.subnet_index + 1}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - # Configure private endpoint network policies - private_endpoint_network_policies = "Disabled" - - # Delegate to SQL services - delegation { - name = "sql-delegation" - service_delegation { - name = "Microsoft.DBforMySQL/flexibleServers" - actions = [ - "Microsoft.Network/virtualNetworks/subnets/join/action", - "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action", - "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action" - ] - } - } - - lifecycle { - ignore_changes = [service_endpoints, delegation, name] - } -} - -# Gateway Subnets (for VPN/ExpressRoute gateways) -resource "azurerm_subnet" "gateway" { - for_each = { - for subnet in local.gateway_subnets : - "${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-gateway-subnet-${each.value.subnet_index}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - lifecycle { - ignore_changes = [delegation, service_endpoints, name] - } -} - -# Cache Subnets (for Redis and other caching services) -resource "azurerm_subnet" "cache" { - for_each = { - for subnet in 
local.cache_subnets : - "${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-cache-subnet-${each.value.subnet_index}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - lifecycle { - ignore_changes = [delegation, service_endpoints, name] - } -} - -# Functions Subnets (dedicated for Azure Functions) -resource "azurerm_subnet" "functions" { - for_each = { - for subnet in local.functions_subnets : - "${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-functions-subnet-${each.value.subnet_index}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - # Configure private endpoint network policies - private_endpoint_network_policies = "Disabled" - - # Delegate to Azure Functions - delegation { - name = "functions-delegation" - service_delegation { - name = "Microsoft.Web/serverFarms" - actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"] - } - } - - lifecycle { - ignore_changes = [service_endpoints, delegation, name] - } -} - -# Private Link Service Subnets -resource "azurerm_subnet" "private_link_service" { - for_each = { - for subnet in local.private_link_service_subnets : - "${subnet.subnet_index}" => subnet - } - - name = "${local.name_prefix}-pls-subnet-${each.value.subnet_index}" - resource_group_name = azurerm_resource_group.main.name - virtual_network_name = azurerm_virtual_network.main.name - address_prefixes = [each.value.cidr_block] - service_endpoints = ["Microsoft.Storage"] - - # Configure private link service network policies (disabled for Private Link Service) - private_link_service_network_policies_enabled = false - - lifecycle { - ignore_changes = [service_endpoints, name] - } -} - -# Public IP 
for NAT Gateway -resource "azurerm_public_ip" "nat_gateway" { - for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { - for az in var.instance.spec.availability_zones : az => az - } : var.instance.spec.public_subnets.count_per_az > 0 ? { - single = var.instance.spec.availability_zones[0] - } : {} - - name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-pip-${each.key}" : "${local.name_prefix}-natgw-pip" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - allocation_method = "Static" - sku = "Standard" - zones = [each.value] - - tags = local.common_tags - - lifecycle { - ignore_changes = [name] - } -} - -# NAT Gateway -resource "azurerm_nat_gateway" "main" { - for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { - for az in var.instance.spec.availability_zones : az => az - } : var.instance.spec.public_subnets.count_per_az > 0 ? { - single = var.instance.spec.availability_zones[0] - } : {} - - name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-${each.key}" : "${local.name_prefix}-natgw" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - sku_name = "Standard" - idle_timeout_in_minutes = 10 - zones = [each.value] - - tags = local.common_tags - - lifecycle { - ignore_changes = [name] - } -} - -# Associate Public IP with NAT Gateway -resource "azurerm_nat_gateway_public_ip_association" "main" { - for_each = azurerm_nat_gateway.main - - nat_gateway_id = each.value.id - public_ip_address_id = azurerm_public_ip.nat_gateway[each.key].id -} - -# Route Table for Public Subnets -resource "azurerm_route_table" "public" { - count = var.instance.spec.public_subnets.count_per_az > 0 ? 
1 : 0 - - name = "${local.name_prefix}-public-rt" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - tags = local.common_tags -} - -# Associate Route Table with Public Subnets -resource "azurerm_subnet_route_table_association" "public" { - for_each = azurerm_subnet.public - - subnet_id = each.value.id - route_table_id = azurerm_route_table.public[0].id -} - -# Route Table for Private Subnets -resource "azurerm_route_table" "private" { - for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { - for az in var.instance.spec.availability_zones : az => az - } : var.instance.spec.public_subnets.count_per_az > 0 ? { - single = "1" - } : {} - - name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-private-rt-${each.key}" : "${local.name_prefix}-private-rt" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - tags = local.common_tags -} - -# Associate Route Table with Private Subnets -resource "azurerm_subnet_route_table_association" "private" { - for_each = azurerm_subnet.private - - subnet_id = each.value.id - route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id -} - -# Route Table for Database Subnets (isolated) -resource "azurerm_route_table" "database" { - for_each = { - for az in var.instance.spec.availability_zones : az => az - } - - name = "${local.name_prefix}-database-rt-${each.key}" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - tags = local.common_tags -} - -# Associate Route Table with Database Subnets -resource "azurerm_subnet_route_table_association" "database" { - for_each = azurerm_subnet.database - - subnet_id = each.value.id - route_table_id = azurerm_route_table.database[split("-", each.key)[0]].id -} - -# Associate NAT Gateway with Private Route Tables -resource "azurerm_subnet_nat_gateway_association" "private" { - for_each = { - for k, v in azurerm_subnet.private : k => v - if var.instance.spec.public_subnets.count_per_az > 0 - } - - subnet_id = each.value.id - nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id -} - -# Associate NAT Gateway with Functions Subnets -resource "azurerm_subnet_nat_gateway_association" "functions" { - for_each = { - for k, v in azurerm_subnet.functions : k => v - if var.instance.spec.public_subnets.count_per_az > 0 - } - - subnet_id = each.value.id - nat_gateway_id = azurerm_nat_gateway.main["1"].id # Functions typically use single NAT Gateway -} - -# Network Security Group - Allow all within VNet (similar to original logic) -resource "azurerm_network_security_group" "allow_all_default" { - name = "${local.name_prefix}-allow-all-default-nsg" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - security_rule { - name = "AllowVnetInbound" - priority = 100 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = var.instance.spec.vnet_cidr - destination_address_prefix = "*" - description = "Allowing connection from within vnet" - } - - tags = merge(local.common_tags, { - Terraform = "true" - }) - - lifecycle { - ignore_changes = [name] - } -} - -# Security Group for VPC Endpoints (keep existing for private endpoints) -resource "azurerm_network_security_group" "vpc_endpoints" { - count = anytrue([ - try(local.private_endpoints.enable_storage, false), - try(local.private_endpoints.enable_sql, false), - try(local.private_endpoints.enable_keyvault, false), - try(local.private_endpoints.enable_acr, false), - try(local.private_endpoints.enable_aks, false), - try(local.private_endpoints.enable_cosmos, false), - try(local.private_endpoints.enable_servicebus, false), - try(local.private_endpoints.enable_eventhub, false), - try(local.private_endpoints.enable_monitor, false), - try(local.private_endpoints.enable_cognitive, false) - ]) ? 
1 : 0 - - name = "${local.name_prefix}-private-endpoints-nsg" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - - security_rule { - name = "AllowHTTPS" - priority = 1001 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "443" - source_address_prefix = var.instance.spec.vnet_cidr - destination_address_prefix = "*" - } - - security_rule { - name = "AllowOutbound" - priority = 1001 - direction = "Outbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } - - tags = local.common_tags -} - -# Network Security Groups for Subnets - Apply the allow-all NSG to all subnets -resource "azurerm_subnet_network_security_group_association" "public" { - for_each = azurerm_subnet.public - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" "private" { - for_each = azurerm_subnet.private - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" "database" { - for_each = azurerm_subnet.database - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" "gateway" { - for_each = azurerm_subnet.gateway - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" "cache" { - for_each = azurerm_subnet.cache - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" 
"functions" { - for_each = azurerm_subnet.functions - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -resource "azurerm_subnet_network_security_group_association" "private_link_service" { - for_each = azurerm_subnet.private_link_service - - subnet_id = each.value.id - network_security_group_id = azurerm_network_security_group.allow_all_default.id -} - -# Private DNS Zone for Private Endpoints -resource "azurerm_private_dns_zone" "private_endpoints" { - for_each = { - for k, v in var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true - } - - name = each.value - resource_group_name = azurerm_resource_group.main.name - - tags = var.instance.spec.tags -} - -# Link Private DNS Zone to VNet -resource "azurerm_private_dns_zone_virtual_network_link" "private_endpoints" { - for_each = azurerm_private_dns_zone.private_endpoints - - name = "${local.name_prefix}-${each.key}-dns-link" - resource_group_name = azurerm_resource_group.main.name - private_dns_zone_name = each.value.name - virtual_network_id = azurerm_virtual_network.main.id - registration_enabled = false - - tags = local.common_tags -} - -# Example Storage Account (for demonstration of private endpoint) -resource "azurerm_storage_account" "example" { - count = try(local.private_endpoints.enable_storage, false) ? 1 : 0 - - name = substr(replace(replace(lower(local.name_prefix), "-", ""), "_", ""), 0, 20) - resource_group_name = azurerm_resource_group.main.name - location = azurerm_resource_group.main.location - account_tier = "Standard" - account_replication_type = "LRS" - - # Disable public access - - - tags = local.common_tags -} - -# Private Endpoint for Storage Account -resource "azurerm_private_endpoint" "storage" { - count = try(local.private_endpoints.enable_storage, false) ? 
1 : 0 - - name = "${local.name_prefix}-storage-pe" - location = azurerm_resource_group.main.location - resource_group_name = azurerm_resource_group.main.name - subnet_id = values(azurerm_subnet.private)[0].id - - private_service_connection { - name = "${local.name_prefix}-storage-psc" - private_connection_resource_id = azurerm_storage_account.example[0].id - subresource_names = ["blob"] - is_manual_connection = false - } - - private_dns_zone_group { - name = "storage-dns-zone-group" - private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["enable_storage"].id] - } - - tags = local.common_tags -} \ No newline at end of file +# This Azure VPC module creates a comprehensive virtual network infrastructure +# with support for multiple subnet types, NAT gateways, private endpoints, +# and flexible CIDR allocation strategies. +# +# Key Features: +# - Dynamic or fixed CIDR allocation +# - Multiple subnet types (public, private, database, specialized) +# - NAT Gateway with per-AZ or shared strategies +# - Private endpoints with DNS integration +# - Comprehensive routing and security group configuration +# +# All resources are defined in their respective files for better organization. diff --git a/modules/network/azure_vpc/0.2/nat-gateway.tf b/modules/network/azure_vpc/0.2/nat-gateway.tf new file mode 100644 index 000000000..8a67cf284 --- /dev/null +++ b/modules/network/azure_vpc/0.2/nat-gateway.tf @@ -0,0 +1,77 @@ +######################################################################### +# NAT Gateway Resources # +######################################################################### + +# Public IP for NAT Gateway +resource "azurerm_public_ip" "nat_gateway" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? 
{ + single = var.instance.spec.availability_zones[0] + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-pip-${each.key}" : "${local.name_prefix}-natgw-pip" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + allocation_method = "Static" + sku = "Standard" + zones = [each.value] + + tags = local.common_tags + + lifecycle { + ignore_changes = [name] + } +} + +# NAT Gateway +resource "azurerm_nat_gateway" "main" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? { + single = var.instance.spec.availability_zones[0] + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-${each.key}" : "${local.name_prefix}-natgw" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + sku_name = "Standard" + idle_timeout_in_minutes = 10 + zones = [each.value] + + tags = local.common_tags + + lifecycle { + ignore_changes = [name] + } +} + +# Associate Public IP with NAT Gateway +resource "azurerm_nat_gateway_public_ip_association" "main" { + for_each = azurerm_nat_gateway.main + + nat_gateway_id = each.value.id + public_ip_address_id = azurerm_public_ip.nat_gateway[each.key].id +} + +# Associate NAT Gateway with Private Subnets +resource "azurerm_subnet_nat_gateway_association" "private" { + for_each = { + for k, v in azurerm_subnet.private : k => v + if var.instance.spec.public_subnets.count_per_az > 0 + } + + subnet_id = each.value.id + nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? 
azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id +} + +# Associate NAT Gateway with Functions Subnets +resource "azurerm_subnet_nat_gateway_association" "functions" { + for_each = { + for k, v in azurerm_subnet.functions : k => v + if var.instance.spec.public_subnets.count_per_az > 0 + } + + subnet_id = each.value.id + nat_gateway_id = azurerm_nat_gateway.main["1"].id # Functions typically use single NAT Gateway +} diff --git a/modules/network/azure_vpc/0.2/network.tf b/modules/network/azure_vpc/0.2/network.tf new file mode 100644 index 000000000..81767c918 --- /dev/null +++ b/modules/network/azure_vpc/0.2/network.tf @@ -0,0 +1,29 @@ +######################################################################### +# Core Network Infrastructure # +######################################################################### + +# Resource Group +resource "azurerm_resource_group" "main" { + name = "${local.name_prefix}-rg" + location = var.instance.spec.region + + tags = local.common_tags + + lifecycle { + prevent_destroy = true + } +} + +# Virtual Network +resource "azurerm_virtual_network" "main" { + name = "${local.name_prefix}-vnet" + address_space = [var.instance.spec.vnet_cidr] + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags + + lifecycle { + prevent_destroy = true + } +} diff --git a/modules/network/azure_vpc/0.2/private-endpoints.tf b/modules/network/azure_vpc/0.2/private-endpoints.tf new file mode 100644 index 000000000..b919fd269 --- /dev/null +++ b/modules/network/azure_vpc/0.2/private-endpoints.tf @@ -0,0 +1,67 @@ +######################################################################### +# Private DNS and Private Endpoints # +######################################################################### + +# Private DNS Zone for Private Endpoints +resource "azurerm_private_dns_zone" "private_endpoints" { + for_each = { + for k, v in 
var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true + } + + name = each.value + resource_group_name = azurerm_resource_group.main.name + + tags = var.instance.spec.tags +} + +# Link Private DNS Zone to VNet +resource "azurerm_private_dns_zone_virtual_network_link" "private_endpoints" { + for_each = azurerm_private_dns_zone.private_endpoints + + name = "${local.name_prefix}-${each.key}-dns-link" + resource_group_name = azurerm_resource_group.main.name + private_dns_zone_name = each.value.name + virtual_network_id = azurerm_virtual_network.main.id + registration_enabled = false + + tags = local.common_tags +} + +# Example Storage Account (for demonstration of private endpoint) +resource "azurerm_storage_account" "example" { + count = try(local.private_endpoints.enable_storage, false) ? 1 : 0 + + name = substr(replace(replace(lower(local.name_prefix), "-", ""), "_", ""), 0, 20) + resource_group_name = azurerm_resource_group.main.name + location = azurerm_resource_group.main.location + account_tier = "Standard" + account_replication_type = "LRS" + + # Disable public access + + tags = local.common_tags +} + +# Private Endpoint for Storage Account +resource "azurerm_private_endpoint" "storage" { + count = try(local.private_endpoints.enable_storage, false) ? 
1 : 0 + + name = "${local.name_prefix}-storage-pe" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + subnet_id = values(azurerm_subnet.private)[0].id + + private_service_connection { + name = "${local.name_prefix}-storage-psc" + private_connection_resource_id = azurerm_storage_account.example[0].id + subresource_names = ["blob"] + is_manual_connection = false + } + + private_dns_zone_group { + name = "storage-dns-zone-group" + private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["enable_storage"].id] + } + + tags = local.common_tags +} diff --git a/modules/network/azure_vpc/0.2/routing.tf b/modules/network/azure_vpc/0.2/routing.tf new file mode 100644 index 000000000..148ac7394 --- /dev/null +++ b/modules/network/azure_vpc/0.2/routing.tf @@ -0,0 +1,66 @@ +######################################################################### +# Route Tables and Routing # +######################################################################### + +# Route Table for Public Subnets +resource "azurerm_route_table" "public" { + count = var.instance.spec.public_subnets.count_per_az > 0 ? 1 : 0 + + name = "${local.name_prefix}-public-rt" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Public Subnets +resource "azurerm_subnet_route_table_association" "public" { + for_each = azurerm_subnet.public + + subnet_id = each.value.id + route_table_id = azurerm_route_table.public[0].id +} + +# Route Table for Private Subnets +resource "azurerm_route_table" "private" { + for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? { + for az in var.instance.spec.availability_zones : az => az + } : var.instance.spec.public_subnets.count_per_az > 0 ? { + single = "1" + } : {} + + name = var.instance.spec.nat_gateway.strategy == "per_az" ? 
"${local.name_prefix}-private-rt-${each.key}" : "${local.name_prefix}-private-rt" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Private Subnets +resource "azurerm_subnet_route_table_association" "private" { + for_each = azurerm_subnet.private + + subnet_id = each.value.id + route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id +} + +# Route Table for Database Subnets (isolated) +resource "azurerm_route_table" "database" { + for_each = { + for az in var.instance.spec.availability_zones : az => az + } + + name = "${local.name_prefix}-database-rt-${each.key}" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + tags = local.common_tags +} + +# Associate Route Table with Database Subnets +resource "azurerm_subnet_route_table_association" "database" { + for_each = azurerm_subnet.database + + subnet_id = each.value.id + route_table_id = azurerm_route_table.database[split("-", each.key)[0]].id +} diff --git a/modules/network/azure_vpc/0.2/subnets.tf b/modules/network/azure_vpc/0.2/subnets.tf new file mode 100644 index 000000000..392f74274 --- /dev/null +++ b/modules/network/azure_vpc/0.2/subnets.tf @@ -0,0 +1,174 @@ +######################################################################### +# Subnet Resources # +######################################################################### + +# Public Subnets +resource "azurerm_subnet" "public" { + for_each = var.instance.spec.public_subnets.count_per_az > 0 ? 
{ + for subnet in local.public_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } : {} + + name = "${local.name_prefix}-public-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Private Subnets +resource "azurerm_subnet" "private" { + for_each = { + for subnet in local.private_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-private-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Delegate subnet to specific services if needed + dynamic "delegation" { + for_each = var.instance.spec.enable_aks ? 
[1] : [] + content { + name = "aks-delegation" + service_delegation { + name = "Microsoft.ContainerService/managedClusters" + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + ] + } + } + } + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Database Subnets +resource "azurerm_subnet" "database" { + for_each = { + for subnet in local.database_subnets : + "${subnet.az}-${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-database-${each.value.az}-${each.value.subnet_index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Configure private endpoint network policies + private_endpoint_network_policies = "Disabled" + + # Delegate to SQL services + delegation { + name = "sql-delegation" + service_delegation { + name = "Microsoft.DBforMySQL/flexibleServers" + actions = [ + "Microsoft.Network/virtualNetworks/subnets/join/action", + "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action", + "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action" + ] + } + } + + lifecycle { + ignore_changes = [service_endpoints, delegation, name] + } +} + +# Gateway Subnets (for VPN/ExpressRoute gateways) +resource "azurerm_subnet" "gateway" { + for_each = { + for subnet in local.gateway_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-gateway-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Cache Subnets (for Redis and other caching services) +resource "azurerm_subnet" "cache" { + for_each = { + for subnet in 
local.cache_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-cache-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + lifecycle { + ignore_changes = [delegation, service_endpoints, name] + } +} + +# Functions Subnets (dedicated for Azure Functions) +resource "azurerm_subnet" "functions" { + for_each = { + for subnet in local.functions_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-functions-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Configure private endpoint network policies + private_endpoint_network_policies = "Disabled" + + # Delegate to Azure Functions + delegation { + name = "functions-delegation" + service_delegation { + name = "Microsoft.Web/serverFarms" + actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"] + } + } + + lifecycle { + ignore_changes = [service_endpoints, delegation, name] + } +} + +# Private Link Service Subnets +resource "azurerm_subnet" "private_link_service" { + for_each = { + for subnet in local.private_link_service_subnets : + "${subnet.subnet_index}" => subnet + } + + name = "${local.name_prefix}-pls-subnet-${each.value.subnet_index}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = [each.value.cidr_block] + service_endpoints = ["Microsoft.Storage"] + + # Configure private link service network policies (disabled for Private Link Service) + private_link_service_network_policies_enabled = false + + lifecycle { + ignore_changes = [service_endpoints, name] + } +} diff --git 
a/modules/network/azure_vpc/0.2/variables.tf b/modules/network/azure_vpc/0.2/variables.tf index 8a299b587..fa75696b6 100644 --- a/modules/network/azure_vpc/0.2/variables.tf +++ b/modules/network/azure_vpc/0.2/variables.tf @@ -1,16 +1,242 @@ -variable "instance" { - description = "The resource instance" - type = any -} +######################################################################### +# Facets Module Variables # +# # +# Auto-injected variables that every Facets module receives # +######################################################################### + variable "instance_name" { description = "The architectural name for the resource as added in the Facets blueprint designer." type = string + + validation { + condition = can(regex("^[a-zA-Z0-9_-]+$", var.instance_name)) + error_message = "Instance name must contain only alphanumeric characters, hyphens, and underscores." + } } + variable "environment" { description = "An object containing details about the environment." - type = any + type = object({ + name = string + unique_name = string + cloud_tags = map(string) + }) + + validation { + condition = can(var.environment.name) && can(var.environment.unique_name) && can(var.environment.cloud_tags) + error_message = "Environment must contain name, unique_name, and cloud_tags." + } } + variable "inputs" { description = "A map of inputs requested by the module developer." 
type = any -} \ No newline at end of file + default = {} +} + +######################################################################### +# Instance Configuration Schema # +# # +# Comprehensive validation for all module specifications # +######################################################################### + +variable "instance" { + description = "The resource instance configuration" + type = object({ + spec = object({ + # Core VNet Configuration + vnet_cidr = string + region = string + availability_zones = list(string) + + # Optional CIDR Strategy + use_fixed_cidr_allocation = optional(bool, false) + + # Public Subnets Configuration + public_subnets = object({ + count_per_az = number + subnet_size = string + }) + + # Private Subnets Configuration + private_subnets = object({ + count_per_az = number + subnet_size = string + }) + + # Database Subnets Configuration + database_subnets = object({ + count_per_az = number + subnet_size = string + }) + + # Specialized Subnet Toggles + enable_gateway_subnet = optional(bool, false) + enable_cache_subnet = optional(bool, false) + enable_functions_subnet = optional(bool, false) + enable_private_link_service_subnet = optional(bool, false) + enable_aks = optional(bool, false) + + # NAT Gateway Configuration + nat_gateway = object({ + strategy = string + }) + + # Private Endpoints Configuration + private_endpoints = optional(object({ + enable_storage = optional(bool, true) + enable_sql = optional(bool, true) + enable_keyvault = optional(bool, true) + enable_acr = optional(bool, true) + enable_aks = optional(bool, false) + enable_cosmos = optional(bool, false) + enable_servicebus = optional(bool, false) + enable_eventhub = optional(bool, false) + enable_monitor = optional(bool, false) + enable_cognitive = optional(bool, false) + })) + + # Additional Tags + tags = optional(map(string), {}) + }) + }) + + ######################################################################### + # VNet CIDR Validation # + 
######################################################################### + validation { + condition = can(cidrhost(var.instance.spec.vnet_cidr, 0)) + error_message = "VNet CIDR must be a valid CIDR block (e.g., 10.0.0.0/16)." + } + + validation { + condition = can(regex("^([0-9]{1,3}\\.){3}[0-9]{1,3}/[0-9]{1,2}$", var.instance.spec.vnet_cidr)) + error_message = "VNet CIDR must follow the format x.x.x.x/xx (e.g., 10.0.0.0/16)." + } + + ######################################################################### + # Region Validation # + ######################################################################### + validation { + condition = length(var.instance.spec.region) > 0 + error_message = "Azure region cannot be empty." + } + + validation { + condition = contains([ + "eastus", "eastus2", "southcentralus", "westus2", "westus3", "australiaeast", + "southeastasia", "northeurope", "swedencentral", "uksouth", "westeurope", + "centralus", "southafricanorth", "centralindia", "eastasia", "japaneast", + "koreacentral", "canadacentral", "francecentral", "germanywestcentral", + "norwayeast", "switzerlandnorth", "uaenorth", "brazilsouth", "eastus2euap", + "qatarcentral", "centralusstage", "eastusstage", "eastus2stage", "northcentralusstage", + "southcentralusstage", "westusstage", "westus2stage", "asia", "asiapacific", + "australia", "brazil", "canada", "europe", "france", "germany", "global", + "india", "japan", "korea", "norway", "singapore", "southafrica", "switzerland", + "uae", "uk", "unitedstates" + ], var.instance.spec.region) + error_message = "Region must be a valid Azure region name." 
+ } + + ######################################################################### + # Availability Zones Validation # + ######################################################################### + validation { + condition = length(var.instance.spec.availability_zones) >= 1 && length(var.instance.spec.availability_zones) <= 3 + error_message = "Availability zones must contain between 1 and 3 zones." + } + + validation { + condition = alltrue([ + for zone in var.instance.spec.availability_zones : + contains(["1", "2", "3"], zone) + ]) + error_message = "Availability zones must be \"1\", \"2\", or \"3\"." + } + + ######################################################################### + # Public Subnets Validation # + ######################################################################### + validation { + condition = var.instance.spec.public_subnets.count_per_az >= 0 && var.instance.spec.public_subnets.count_per_az <= 3 + error_message = "Public subnets count per AZ must be between 0 and 3." + } + + validation { + condition = contains([ + "256", "512", "1024", "2048", "4096", "8192" + ], var.instance.spec.public_subnets.subnet_size) + error_message = "Public subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192." + } + + ######################################################################### + # Private Subnets Validation # + ######################################################################### + validation { + condition = var.instance.spec.private_subnets.count_per_az >= 1 && var.instance.spec.private_subnets.count_per_az <= 3 + error_message = "Private subnets count per AZ must be between 1 and 3." + } + + validation { + condition = contains([ + "256", "512", "1024", "2048", "4096", "8192" + ], var.instance.spec.private_subnets.subnet_size) + error_message = "Private subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192." 
+ } + + ######################################################################### + # Database Subnets Validation # + ######################################################################### + validation { + condition = var.instance.spec.database_subnets.count_per_az >= 0 && var.instance.spec.database_subnets.count_per_az <= 3 + error_message = "Database subnets count per AZ must be between 0 and 3." + } + + validation { + condition = contains([ + "256", "512", "1024", "2048", "4096", "8192" + ], var.instance.spec.database_subnets.subnet_size) + error_message = "Database subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192." + } + + ######################################################################### + # NAT Gateway Strategy Validation # + ######################################################################### + validation { + condition = contains([ + "single", "per_az" + ], var.instance.spec.nat_gateway.strategy) + error_message = "NAT Gateway strategy must be either 'single' or 'per_az'." + } + + ######################################################################### + # Logical Validations # + ######################################################################### + validation { + condition = var.instance.spec.public_subnets.count_per_az > 0 || var.instance.spec.nat_gateway.strategy == "single" + error_message = "NAT Gateway requires at least one public subnet when using 'per_az' strategy." + } + + validation { + condition = ( + (var.instance.spec.public_subnets.count_per_az > 0 ? + length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0) + + length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az + + length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az + ) <= 20 + error_message = "Total number of subnets across all types and AZs cannot exceed 20." 
+ } + + validation { + condition = !var.instance.spec.enable_aks || var.instance.spec.private_subnets.count_per_az > 0 + error_message = "AKS integration requires at least one private subnet per AZ." + } + + ######################################################################### + # CIDR Size Validation # + ######################################################################### + validation { + condition = tonumber(split("/", var.instance.spec.vnet_cidr)[1]) <= 24 + error_message = "VNet CIDR prefix must be /24 or larger (smaller number) to accommodate all configured subnets." + } +} From 6dc7dae301ac8d50746df7412299dfd63744ceee Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 20:10:54 +0530 Subject: [PATCH 19/36] refactoring --- .../network/azure_vpc/0.2/security-groups.tf | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 modules/network/azure_vpc/0.2/security-groups.tf diff --git a/modules/network/azure_vpc/0.2/security-groups.tf b/modules/network/azure_vpc/0.2/security-groups.tf new file mode 100644 index 000000000..0b9e90dc7 --- /dev/null +++ b/modules/network/azure_vpc/0.2/security-groups.tf @@ -0,0 +1,127 @@ +######################################################################### +# Network Security Groups # +######################################################################### + +# Network Security Group - Allow all within VNet (similar to original logic) +resource "azurerm_network_security_group" "allow_all_default" { + name = "${local.name_prefix}-allow-all-default-nsg" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + security_rule { + name = "AllowVnetInbound" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = var.instance.spec.vnet_cidr + destination_address_prefix = "*" + description = "Allowing connection from within vnet" + } + 
+ tags = merge(local.common_tags, { + Terraform = "true" + }) + + lifecycle { + ignore_changes = [name] + } +} + +# Security Group for VPC Endpoints (keep existing for private endpoints) +resource "azurerm_network_security_group" "vpc_endpoints" { + count = anytrue([ + try(local.private_endpoints.enable_storage, false), + try(local.private_endpoints.enable_sql, false), + try(local.private_endpoints.enable_keyvault, false), + try(local.private_endpoints.enable_acr, false), + try(local.private_endpoints.enable_aks, false), + try(local.private_endpoints.enable_cosmos, false), + try(local.private_endpoints.enable_servicebus, false), + try(local.private_endpoints.enable_eventhub, false), + try(local.private_endpoints.enable_monitor, false), + try(local.private_endpoints.enable_cognitive, false) + ]) ? 1 : 0 + + name = "${local.name_prefix}-private-endpoints-nsg" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + + security_rule { + name = "AllowHTTPS" + priority = 1001 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = var.instance.spec.vnet_cidr + destination_address_prefix = "*" + } + + security_rule { + name = "AllowOutbound" + priority = 1001 + direction = "Outbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + tags = local.common_tags +} + +# Network Security Groups for Subnets - Apply the allow-all NSG to all subnets +resource "azurerm_subnet_network_security_group_association" "public" { + for_each = azurerm_subnet.public + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "private" { + for_each = azurerm_subnet.private + + subnet_id = each.value.id + 
network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "database" { + for_each = azurerm_subnet.database + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "gateway" { + for_each = azurerm_subnet.gateway + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "cache" { + for_each = azurerm_subnet.cache + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "functions" { + for_each = azurerm_subnet.functions + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} + +resource "azurerm_subnet_network_security_group_association" "private_link_service" { + for_each = azurerm_subnet.private_link_service + + subnet_id = each.value.id + network_security_group_id = azurerm_network_security_group.allow_all_default.id +} From 16398c086469ae43059a102eb31af405dbd2622a Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Mon, 4 Aug 2025 20:45:13 +0530 Subject: [PATCH 20/36] fixes in k8s --- .../azure_aks/0.2/facets.yaml | 21 +++++++++++-------- .../kubernetes_cluster/azure_aks/0.2/main.tf | 6 ++---- .../azure_aks/0.2/outputs.tf | 14 +++---------- .../azure_aks/0.2/variables.tf | 6 +++--- 4 files changed, 20 insertions(+), 27 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 114c4cc55..7b39f8908 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -316,24 +316,27 @@ outputs: source: hashicorp/kubernetes 
version: 2.17.0 attributes: - host: attributes.cluster.auth.host - token: attributes.cluster.auth.token - cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate + host: attributes.auth_host + client_certificate: attributes.client_certificate + client_key: attributes.client_key + cluster_ca_certificate: attributes.auth_cluster_ca_certificate helm: source: hashicorp/helm version: 2.8.0 attributes: kubernetes: - host: attributes.cluster.auth.host - token: attributes.cluster.auth.token - cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate + host: attributes.auth_host + client_certificate: attributes.client_certificate + client_key: attributes.client_key + cluster_ca_certificate: attributes.auth_cluster_ca_certificate kubernetes-alpha: source: hashicorp/kubernetes-alpha version: 0.6.0 attributes: - host: attributes.cluster.auth.host - cluster_ca_certificate: attributes.cluster.auth.cluster_ca_certificate - token: attributes.cluster.auth.token + host: attributes.auth_host + client_certificate: attributes.client_certificate + client_key: attributes.client_key + cluster_ca_certificate: attributes.auth_cluster_ca_certificate sample: kind: kubernetes_cluster flavor: azure_aks_cluster diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index a086129d8..03788d71f 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -10,7 +10,8 @@ module "name" { # Create the AKS cluster using the locally modified Azure module module "k8scluster" { - source = "./k8scluster" + source = "Azure/aks/azurerm//v4" + version = "10.2.0" # Required variables resource_group_name = var.inputs.network_details.attributes.resource_group_name @@ -132,6 +133,3 @@ module "k8scluster" { rbac_aad = true rbac_aad_azure_rbac_enabled = true } - -# Data source to get current client configuration for authentication -data "azurerm_client_config" "current" {} 
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf index a7005ba9a..443d29afe 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf @@ -12,22 +12,14 @@ locals { node_resource_group = module.k8scluster.node_resource_group resource_group_name = var.inputs.network_details.attributes.resource_group_name auth_host = module.k8scluster.host - auth_token = data.azurerm_client_config.current.access_token auth_cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate) - vpc_id = var.inputs.network_details.attributes.vpc_id - subnet_ids = var.inputs.network_details.attributes.private_subnet_ids - network_profile = module.k8scluster.network_profile - identity_type = module.k8scluster.cluster_identity.type - identity_tenant_id = module.k8scluster.cluster_identity.tenant_id - identity_principal_id = module.k8scluster.cluster_identity.principal_id - log_analytics_workspace_id = module.k8scluster.azurerm_log_analytics_workspace_id + client_certificate = base64decode(module.k8scluster.client_certificate) + client_key = base64decode(module.k8scluster.client_key) } output_interfaces = { - kubernetes_config = { + kubernetes = { host = module.k8scluster.host client_key = base64decode(module.k8scluster.client_key) - config_path = "null" - config_context = module.k8scluster.aks_name client_certificate = base64decode(module.k8scluster.client_certificate) cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate) } diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index 00e58a5a7..a8e828a3b 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -154,7 +154,7 @@ variable "inputs" { type = object({ network_details = object({ attributes = object({ - vpc_id = string + 
vnet_id = string region = string resource_group_name = string availability_zones = list(string) @@ -172,8 +172,8 @@ variable "inputs" { }) validation { - condition = length(var.inputs.network_details.attributes.vpc_id) > 0 - error_message = "VPC ID cannot be empty." + condition = length(var.inputs.network_details.attributes.vnet_id) > 0 + error_message = "VNet ID cannot be empty." } validation { From f3f6a830e6bfde793b0d625c07d61448934fb712 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 10:43:12 +0530 Subject: [PATCH 21/36] removed the local k8s module --- .../0.2/k8scluster/.checkov_config.yaml | 30 - .../azure_aks/0.2/k8scluster/CHANGELOG-v4.md | 20 - .../azure_aks/0.2/k8scluster/CHANGELOG-v5.md | 31 - .../azure_aks/0.2/k8scluster/CHANGELOG-v6.md | 122 -- .../azure_aks/0.2/k8scluster/CHANGELOG-v7.md | 93 - .../azure_aks/0.2/k8scluster/CHANGELOG-v8.md | 27 - .../azure_aks/0.2/k8scluster/CHANGELOG-v9.md | 76 - .../azure_aks/0.2/k8scluster/CHANGELOG.md | 5 - .../0.2/k8scluster/CODE_OF_CONDUCT.md | 5 - .../azure_aks/0.2/k8scluster/GNUmakefile | 4 - .../azure_aks/0.2/k8scluster/LICENSE | 21 - .../0.2/k8scluster/NoticeOnUpgradeTov10.0.md | 53 - .../0.2/k8scluster/NoticeOnUpgradeTov5.0.md | 93 - .../0.2/k8scluster/NoticeOnUpgradeTov6.0.md | 5 - .../0.2/k8scluster/NoticeOnUpgradeTov7.0.md | 52 - .../0.2/k8scluster/NoticeOnUpgradeTov8.0.md | 53 - .../0.2/k8scluster/NoticeOnUpgradeTov9.0.md | 9 - .../azure_aks/0.2/k8scluster/README.md | 490 ----- .../azure_aks/0.2/k8scluster/SECURITY.md | 41 - .../0.2/k8scluster/extra_node_pool.tf | 317 ---- .../k8scluster/extra_node_pool_override.tf | 17 - .../azure_aks/0.2/k8scluster/locals.tf | 74 - .../azure_aks/0.2/k8scluster/log_analytics.tf | 124 -- .../azure_aks/0.2/k8scluster/main.tf | 741 -------- .../azure_aks/0.2/k8scluster/main_override.tf | 6 - .../azure_aks/0.2/k8scluster/outputs.tf | 231 --- .../0.2/k8scluster/role_assignments.tf | 126 -- .../azure_aks/0.2/k8scluster/tfvmmakefile | 85 - 
.../azure_aks/0.2/k8scluster/variables.tf | 1601 ----------------- .../azure_aks/0.2/k8scluster/versions.tf | 26 - 30 files changed, 4578 deletions(-) delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf delete mode 100644 
modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml deleted file mode 100644 index b39c33402..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -block-list-secret-scan: [] -branch: master -directory: - - ./ -download-external-modules: false -evaluate-variables: true -external-modules-download-path: .external_modules -framework: - - all -quiet: true -secrets-scan-file-type: [] -skip-check: - - CKV_GHA_3 - - CKV_AZURE_5 - - CKV_AZURE_6 - - CKV_AZURE_112 - - CKV_AZURE_115 - - CKV_AZURE_116 - - CKV_AZURE_168 - - CKV_AZURE_170 - - CKV_AZURE_139 - - CKV_AZURE_165 - - CKV_AZURE_166 - - CKV_AZURE_164 -skip-framework: - - dockerfile - - kubernetes -skip-path: - - test/vendor -summary-position: top diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md deleted file mode 100644 index 42433d0ea..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v4.md +++ 
/dev/null @@ -1,20 +0,0 @@ -## 4.15.0 (May 06, 2022) - -ENHANCEMENTS: - -* Added output for `kube_admin_config_raw` ([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146)) -* Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136)) - -BUG FIXES: - -## 4.16.0 (June 02, 2022) - -ENHANCEMENTS: - -* Added output for `addon_profile` ([#151](https://github.com/Azure/terraform-azurerm-aks/pull/151)) -* Adding Microsoft SECURITY.MD ([#167](https://github.com/Azure/terraform-azurerm-aks/pull/167)) -* Added variable `os_disk_type` for default node pools ([#169](https://github.com/Azure/terraform-azurerm-aks/pull/169)) - -BUG FIXES: - -* Trivial fix to the example in the README ([#166](https://github.com/Azure/terraform-azurerm-aks/pull/166)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md deleted file mode 100644 index bda5b8027..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v5.md +++ /dev/null @@ -1,31 +0,0 @@ -## 5.0.0 (July 14, 2022) - -ENHANCEMENTS: - -* Variable `enable_kube_dashboard` has been removed as [#181](https://github.com/Azure/terraform-azurerm-aks/issues/181) described. ([#187](https://github.com/Azure/terraform-azurerm-aks/pull/187)) -* Add new variable `location` so we can define location for the resources explicitly. ([#172](https://github.com/Azure/terraform-azurerm-aks/pull/172)) -* Bump AzureRM Provider version to 3.3.0. ([#157](https://github.com/Azure/terraform-azurerm-aks/pull/157)) -* Add new variable `private_dns_zone_id` to make argument `private_dns_zone_id` configurable. ([#174](https://github.com/Azure/terraform-azurerm-aks/pull/174)) -* Add new variable `open_service_mesh_enabled` to make argument `open_service_mesh_enabled` configurable. 
([#132](https://github.com/Azure/terraform-azurerm-aks/pull/132)) -* Remove `addon_profile` in the outputs since the block has been removed from provider 3.x. Extract embedded blocks inside `addon_profile` block into standalone outputs. ([#188](https://github.com/Azure/terraform-azurerm-aks/pull/188)) -* Add `nullable = true` to some variables to simplify the conditional expressions. ([#193](https://github.com/Azure/terraform-azurerm-aks/pull/193)) -* Add new variable `oidc_issuer_enabled` to make argument `oidc_issuer_enabled` configurable. ([#205](https://github.com/Azure/terraform-azurerm-aks/pull/205) -* Add new output `oidc_issuer_url` to expose the created issuer URL from the module. [#206](https://github.com/Azure/terraform-azurerm-aks/pull/206)) -* Turn monitoring on in the test code. ([#201](https://github.com/Azure/terraform-azurerm-aks/pull/201)) -* Add new variables `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` to make arguments `private_dns_zone_id` and `private_cluster_public_fqdn_enabled` configurable. ([#149](https://github.com/Azure/terraform-azurerm-aks/pull/149)) -* Remove `module.ssh-key` and moves resource `tls_private_key` inside the module to root directory, then outputs tls keys. ([#189](https://github.com/Azure/terraform-azurerm-aks/pull/189)) -* Add new variables `rbac_aad_azure_rbac_enabled` and `rbac_aad_tenant_id` to make arguments in `azure_active_directory_role_based_access_control` configurable. ([#199](https://github.com/Azure/terraform-azurerm-aks/pull/199)) -* Add `count` meta-argument to resource `tls_private_key` to avoid the unnecessary creation. ([#209](https://github.com/Azure/terraform-azurerm-aks/pull/209)) -* Add new variable `only_critical_addons_enabled` to make argument `only_critical_addons_enabled` in block `default_node_pool` configurable. ([#129](https://github.com/Azure/terraform-azurerm-aks/pull/129)) -* Add support for the argument `key_vault_secrets_provider`. 
([#214](https://github.com/Azure/terraform-azurerm-aks/pull/214)) -* Provides a way to attach existing Log Analytics Workspace to AKS through Container Insights. ([#213](https://github.com/Azure/terraform-azurerm-aks/pull/213)) -* Add new variable `local_account_disabled` to make argument `local_account_disabled` configurable. ([#218](https://github.com/Azure/terraform-azurerm-aks/pull/218)) -* Set argument `private_cluster_enabled` to `true` in the test code. ([#219](https://github.com/Azure/terraform-azurerm-aks/pull/219)) -* Add new variable `disk_encryption_set_id` to make argument `disk_encryption_set_id` configurable. Create resource `azurerm_disk_encryption_set` in the test code to turn disk encryption on for the cluster. ([#195](https://github.com/Azure/terraform-azurerm-aks/pull/195)) -* Add new variable `api_server_authorized_ip_ranges` to make argument `api_server_authorized_ip_ranges` configurable. ([#220](https://github.com/Azure/terraform-azurerm-aks/pull/220)) -* Rename output `system_assigned_identity` to `cluster_identity` since it could be user assigned identity. Remove the index inside output's value expression. ([#197](https://github.com/Azure/terraform-azurerm-aks/pull/197)) -* Rename `var.enable_azure_policy` to `var.azure_policy_enabled` to meet the naming convention. Set `azure_policy_enabled` to `true` in test fixture code. ([#203](https://github.com/Azure/terraform-azurerm-aks/pull/203)) - -BUG FIXES: - -* Change the incorrect description of variable `tags`. 
([#175](https://github.com/Azure/terraform-azurerm-aks/pull/175)) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md deleted file mode 100644 index ed1f9f094..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v6.md +++ /dev/null @@ -1,122 +0,0 @@ -# Changelog - -## [Unreleased](https://github.com/Azure/terraform-azurerm-aks/tree/HEAD) - -**Merged pull requests:** - -- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) - -## [6.8.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.8.0) (2023-04-04) - -**Merged pull requests:** - -- Add support for `monitor_metrics` [\#341](https://github.com/Azure/terraform-azurerm-aks/pull/341) ([zioproto](https://github.com/zioproto)) -- Support setting os\_sku for default\_node\_pool [\#339](https://github.com/Azure/terraform-azurerm-aks/pull/339) ([mjeco](https://github.com/mjeco)) -- Upgrade required Terraform version [\#338](https://github.com/Azure/terraform-azurerm-aks/pull/338) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support `temporary_name_for_rotation` [\#334](https://github.com/Azure/terraform-azurerm-aks/pull/334) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.9.1 to 0.12.0 in /test [\#330](https://github.com/Azure/terraform-azurerm-aks/pull/330) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Fix example multiple\_node\_pools [\#328](https://github.com/Azure/terraform-azurerm-aks/pull/328) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add Network Contributor role assignments scoped to AKS nodepools subnets [\#327](https://github.com/Azure/terraform-azurerm-aks/pull/327) ([zioproto](https://github.com/zioproto)) -- Add support for extra node pools 
[\#323](https://github.com/Azure/terraform-azurerm-aks/pull/323) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `default_node_pool.kubelet_config` [\#322](https://github.com/Azure/terraform-azurerm-aks/pull/322) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for `public_network_access_enabled` [\#314](https://github.com/Azure/terraform-azurerm-aks/pull/314) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.7.1](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.1) (2023-03-06) - -**Merged pull requests:** - -- Fix \#316 `current client lacks permissions to read Key Rotation Policy` issue [\#317](https://github.com/Azure/terraform-azurerm-aks/pull/317) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.7.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.7.0) (2023-02-27) - -**Merged pull requests:** - -- Add support for `linux_os_config` [\#309](https://github.com/Azure/terraform-azurerm-aks/pull/309) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/gruntwork-io/terratest from 0.41.10 to 0.41.11 in /test [\#307](https://github.com/Azure/terraform-azurerm-aks/pull/307) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/Azure/terraform-module-test-helper from 0.8.1 to 0.9.1 in /test [\#306](https://github.com/Azure/terraform-azurerm-aks/pull/306) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump golang.org/x/net from 0.1.0 to 0.7.0 in /test [\#305](https://github.com/Azure/terraform-azurerm-aks/pull/305) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-getter from 1.6.1 to 1.7.0 in /test [\#304](https://github.com/Azure/terraform-azurerm-aks/pull/304) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-getter/v2 from 2.1.1 to 2.2.0 in /test [\#303](https://github.com/Azure/terraform-azurerm-aks/pull/303) ([dependabot[bot]](https://github.com/apps/dependabot)) -- fix: allow 
orchestrator\_version if auto-upgrade is 'patch' to allow default\_node\_pool upgrade [\#302](https://github.com/Azure/terraform-azurerm-aks/pull/302) ([aescrob](https://github.com/aescrob)) -- Add support for default node pool's `node_taints` [\#300](https://github.com/Azure/terraform-azurerm-aks/pull/300) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for acr attachment [\#298](https://github.com/Azure/terraform-azurerm-aks/pull/298) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `web_app_routing` [\#297](https://github.com/Azure/terraform-azurerm-aks/pull/297) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.7.1 to 0.8.1 in /test [\#295](https://github.com/Azure/terraform-azurerm-aks/pull/295) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [6.6.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.6.0) (2023-01-29) - -**Merged pull requests:** - -- Bump github.com/Azure/terraform-module-test-helper from 0.6.0 to 0.7.1 in /test [\#293](https://github.com/Azure/terraform-azurerm-aks/pull/293) ([dependabot[bot]](https://github.com/apps/dependabot)) -- identity type is either SystemAssigned or UserAssigned [\#292](https://github.com/Azure/terraform-azurerm-aks/pull/292) ([zioproto](https://github.com/zioproto)) -- Bump github.com/gruntwork-io/terratest from 0.41.7 to 0.41.9 in /test [\#290](https://github.com/Azure/terraform-azurerm-aks/pull/290) ([dependabot[bot]](https://github.com/apps/dependabot)) -- feat: Implement support for KMS arguments [\#288](https://github.com/Azure/terraform-azurerm-aks/pull/288) ([mkilchhofer](https://github.com/mkilchhofer)) -- feat: allow for configuring auto\_scaler\_profile [\#278](https://github.com/Azure/terraform-azurerm-aks/pull/278) ([DavidSpek](https://github.com/DavidSpek)) -- Azure AD RBAC enable/disable with variable rbac\_aad [\#269](https://github.com/Azure/terraform-azurerm-aks/pull/269) 
([zioproto](https://github.com/zioproto)) - -## [6.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.5.0) (2023-01-03) - -**Merged pull requests:** - -- Bump github.com/Azure/terraform-module-test-helper from 0.4.0 to 0.6.0 in /test [\#287](https://github.com/Azure/terraform-azurerm-aks/pull/287) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.41.6 to 0.41.7 in /test [\#286](https://github.com/Azure/terraform-azurerm-aks/pull/286) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add support for `scale_down_mode` [\#285](https://github.com/Azure/terraform-azurerm-aks/pull/285) ([lonegunmanb](https://github.com/lonegunmanb)) -- auto-upgrade: variable orchestrator\_version to null [\#283](https://github.com/Azure/terraform-azurerm-aks/pull/283) ([zioproto](https://github.com/zioproto)) - -## [6.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.4.0) (2022-12-26) - -**Merged pull requests:** - -- feat\(storage\_profile\): add support for CSI arguments [\#282](https://github.com/Azure/terraform-azurerm-aks/pull/282) ([aescrob](https://github.com/aescrob)) - -## [6.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.3.0) (2022-12-20) - -**Merged pull requests:** - -- feat: add var automatic\_channel\_upgrade [\#281](https://github.com/Azure/terraform-azurerm-aks/pull/281) ([the-technat](https://github.com/the-technat)) -- Upgrade `terraform-module-test-helper` lib so we can get rid of override file to execute version upgrade test [\#279](https://github.com/Azure/terraform-azurerm-aks/pull/279) ([lonegunmanb](https://github.com/lonegunmanb)) -- Added support for load\_balancer\_profile [\#277](https://github.com/Azure/terraform-azurerm-aks/pull/277) ([mazilu88](https://github.com/mazilu88)) -- Add auto changelog update to this repo. 
[\#275](https://github.com/Azure/terraform-azurerm-aks/pull/275) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump test helper version [\#273](https://github.com/Azure/terraform-azurerm-aks/pull/273) ([lonegunmanb](https://github.com/lonegunmanb)) -- Ignore `scripts` soft link [\#272](https://github.com/Azure/terraform-azurerm-aks/pull/272) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for pod subnet [\#271](https://github.com/Azure/terraform-azurerm-aks/pull/271) ([mr-onion-2](https://github.com/mr-onion-2)) - -## [6.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.2.0) (2022-10-18) - -**Merged pull requests:** - -- Add breaking change detect CI step. [\#268](https://github.com/Azure/terraform-azurerm-aks/pull/268) ([lonegunmanb](https://github.com/lonegunmanb)) -- Workload Identity support [\#266](https://github.com/Azure/terraform-azurerm-aks/pull/266) ([nlamirault](https://github.com/nlamirault)) -- Add unit test for complex local logic [\#264](https://github.com/Azure/terraform-azurerm-aks/pull/264) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [6.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.1.0) (2022-09-30) - -**Merged pull requests:** - -- Improve placeholders for visibility in the UX [\#262](https://github.com/Azure/terraform-azurerm-aks/pull/262) ([zioproto](https://github.com/zioproto)) -- align acc test in CI pipeline with local machine by running e2e test … [\#260](https://github.com/Azure/terraform-azurerm-aks/pull/260) ([lonegunmanb](https://github.com/lonegunmanb)) -- align pr-check with local machine by using docker command instead [\#259](https://github.com/Azure/terraform-azurerm-aks/pull/259) ([lonegunmanb](https://github.com/lonegunmanb)) -- bugfix: Make the Azure Defender clause robust against a non-existent … [\#258](https://github.com/Azure/terraform-azurerm-aks/pull/258) ([gzur](https://github.com/gzur)) -- Add support for `maintenance_window` 
[\#256](https://github.com/Azure/terraform-azurerm-aks/pull/256) ([lonegunmanb](https://github.com/lonegunmanb)) -- Updates terraform code to meet updated code style requirement [\#253](https://github.com/Azure/terraform-azurerm-aks/pull/253) ([lonegunmanb](https://github.com/lonegunmanb)) -- Output cluster's fqdn [\#251](https://github.com/Azure/terraform-azurerm-aks/pull/251) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix example path in readme file. [\#249](https://github.com/Azure/terraform-azurerm-aks/pull/249) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update azurerm provider's restriction. [\#248](https://github.com/Azure/terraform-azurerm-aks/pull/248) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for optional Ultra disks [\#245](https://github.com/Azure/terraform-azurerm-aks/pull/245) ([digiserg](https://github.com/digiserg)) -- add aci\_connector addon [\#230](https://github.com/Azure/terraform-azurerm-aks/pull/230) ([zioproto](https://github.com/zioproto)) - -## [6.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/6.0.0) (2022-09-13) - -**Merged pull requests:** - -- Add outputs for created Log Analytics workspace [\#243](https://github.com/Azure/terraform-azurerm-aks/pull/243) ([zioproto](https://github.com/zioproto)) -- Prepare v6.0 and new CI pipeline. 
[\#241](https://github.com/Azure/terraform-azurerm-aks/pull/241) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update hashicorp/terraform-provider-azurerm to version 3.21.0 \(fixes for AKS 1.24\) [\#238](https://github.com/Azure/terraform-azurerm-aks/pull/238) ([zioproto](https://github.com/zioproto)) -- Output Kubernetes Cluster Name [\#234](https://github.com/Azure/terraform-azurerm-aks/pull/234) ([vermacodes](https://github.com/vermacodes)) -- feat\(aks\): add microsoft defender support [\#232](https://github.com/Azure/terraform-azurerm-aks/pull/232) ([eyenx](https://github.com/eyenx)) -- fix: mark outputs as sensitive [\#231](https://github.com/Azure/terraform-azurerm-aks/pull/231) ([jvelasquez](https://github.com/jvelasquez)) -- Loose the restriction on tls provider's version to include major version greater than 3.0 [\#229](https://github.com/Azure/terraform-azurerm-aks/pull/229) ([lonegunmanb](https://github.com/lonegunmanb)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md deleted file mode 100644 index 67b2e2375..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v7.md +++ /dev/null @@ -1,93 +0,0 @@ -# Changelog - -## [7.5.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.5.0) (2023-11-14) - -**Merged pull requests:** - -- Add support for `node_os_channel_upgrade` [\#474](https://github.com/Azure/terraform-azurerm-aks/pull/474) ([lonegunmanb](https://github.com/lonegunmanb)) -- use lowercase everywhere for network plugin mode overlay [\#472](https://github.com/Azure/terraform-azurerm-aks/pull/472) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.15.1-0.20230728050712-96e8615f5515 to 0.17.0 in /test 
[\#469](https://github.com/Azure/terraform-azurerm-aks/pull/469) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add support for `service_mesh_profile` block [\#468](https://github.com/Azure/terraform-azurerm-aks/pull/468) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for Image Cleaner [\#466](https://github.com/Azure/terraform-azurerm-aks/pull/466) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add `fips_enabled` support for `default_node_pool` block [\#464](https://github.com/Azure/terraform-azurerm-aks/pull/464) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add default empty list for `allowed` and `not_allowed` in `var.maintenance_window` [\#463](https://github.com/Azure/terraform-azurerm-aks/pull/463) ([lonegunmanb](https://github.com/lonegunmanb)) -- fix: correct wording of the doc [\#461](https://github.com/Azure/terraform-azurerm-aks/pull/461) ([meysam81](https://github.com/meysam81)) -- add run\_command\_enabled [\#452](https://github.com/Azure/terraform-azurerm-aks/pull/452) ([zioproto](https://github.com/zioproto)) -- add msi\_auth\_for\_monitoring\_enabled [\#446](https://github.com/Azure/terraform-azurerm-aks/pull/446) ([admincasper](https://github.com/admincasper)) -- Restore readme file by stop formatting markdown table [\#445](https://github.com/Azure/terraform-azurerm-aks/pull/445) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.4.0) (2023-09-18) - -**Merged pull requests:** - -- Support for creating nodepools from snapshots [\#442](https://github.com/Azure/terraform-azurerm-aks/pull/442) ([zioproto](https://github.com/zioproto)) -- Add multiple terraform-docs configs to generate a seperated markdown document for input variables [\#441](https://github.com/Azure/terraform-azurerm-aks/pull/441) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `maintenance_window_node_os` block 
[\#440](https://github.com/Azure/terraform-azurerm-aks/pull/440) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.3.2](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.2) (2023-09-07) - -**Merged pull requests:** - -- Hide input variables in readme to boost the rendering [\#437](https://github.com/Azure/terraform-azurerm-aks/pull/437) ([lonegunmanb](https://github.com/lonegunmanb)) -- Improve information to upgrade to 7.0 [\#432](https://github.com/Azure/terraform-azurerm-aks/pull/432) ([zioproto](https://github.com/zioproto)) -- Add confidential computing in aks module [\#423](https://github.com/Azure/terraform-azurerm-aks/pull/423) ([jiaweitao001](https://github.com/jiaweitao001)) - -## [7.3.1](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.1) (2023-08-10) - -**Merged pull requests:** - -- Bump k8s version in exmaples to pass e2e tests [\#422](https://github.com/Azure/terraform-azurerm-aks/pull/422) ([jiaweitao001](https://github.com/jiaweitao001)) - -## [7.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.3.0) (2023-08-03) - -**Merged pull requests:** - -- Add `location` and `resource_group_name` for `var.log_analytics_workspace` [\#412](https://github.com/Azure/terraform-azurerm-aks/pull/412) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix \#405 incorrect role assignment resource [\#410](https://github.com/Azure/terraform-azurerm-aks/pull/410) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.2.0) (2023-07-10) - -**Merged pull requests:** - -- Bump google.golang.org/grpc from 1.51.0 to 1.53.0 in /test [\#406](https://github.com/Azure/terraform-azurerm-aks/pull/406) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Support for Azure CNI Cilium [\#398](https://github.com/Azure/terraform-azurerm-aks/pull/398) ([JitseHijlkema](https://github.com/JitseHijlkema)) -- Use `lonegunmanb/public-ip/lonegunmanb` module to retrieve public ip 
[\#396](https://github.com/Azure/terraform-azurerm-aks/pull/396) ([lonegunmanb](https://github.com/lonegunmanb)) -- Fix incorrect e2e test code so it could pass on our local machine [\#395](https://github.com/Azure/terraform-azurerm-aks/pull/395) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for Proximity placement group for default node pool [\#392](https://github.com/Azure/terraform-azurerm-aks/pull/392) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add upgrade\_settings block for default nodepool [\#391](https://github.com/Azure/terraform-azurerm-aks/pull/391) ([CiucurDaniel](https://github.com/CiucurDaniel)) -- Bump github.com/Azure/terraform-module-test-helper from 0.13.0 to 0.14.0 in /test [\#386](https://github.com/Azure/terraform-azurerm-aks/pull/386) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [7.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.1.0) (2023-06-07) - -**Merged pull requests:** - -- Deprecate `api_server_authorized_ip_ranges` by using `api_server_access_profile` block [\#381](https://github.com/Azure/terraform-azurerm-aks/pull/381) ([lonegunmanb](https://github.com/lonegunmanb)) -- `oidc_issuer_enabled` must be set to `true` to enable Azure AD Worklo… [\#377](https://github.com/Azure/terraform-azurerm-aks/pull/377) ([zioproto](https://github.com/zioproto)) -- assign network contributor role to control plane identity [\#369](https://github.com/Azure/terraform-azurerm-aks/pull/369) ([zioproto](https://github.com/zioproto)) -- Add tracing tag toggle variables [\#362](https://github.com/Azure/terraform-azurerm-aks/pull/362) ([lonegunmanb](https://github.com/lonegunmanb)) -- Support for Azure CNI Overlay [\#354](https://github.com/Azure/terraform-azurerm-aks/pull/354) ([zioproto](https://github.com/zioproto)) -- Make `var.prefix` optional [\#382](https://github.com/Azure/terraform-azurerm-aks/pull/382) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove constraint on `authorized_ip_ranges` when 
`public_network_access_enabled` is `true` [\#375](https://github.com/Azure/terraform-azurerm-aks/pull/375) ([lonegunmanb](https://github.com/lonegunmanb)) -- Filter null value out from `local.subnet_ids` [\#374](https://github.com/Azure/terraform-azurerm-aks/pull/374) ([lonegunmanb](https://github.com/lonegunmanb)) -- User `location` returned from data source for log analytics solution. [\#349](https://github.com/Azure/terraform-azurerm-aks/pull/349) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [7.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/7.0.0) (2023-05-18) - -**Merged pull requests:** - -- Upgrade notice for v7.0 [\#367](https://github.com/Azure/terraform-azurerm-aks/pull/367) ([lonegunmanb](https://github.com/lonegunmanb)) -- Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` [\#361](https://github.com/Azure/terraform-azurerm-aks/pull/361) ([lonegunmanb](https://github.com/lonegunmanb)) -- feat!: add create\_before\_destroy=true to node pools [\#357](https://github.com/Azure/terraform-azurerm-aks/pull/357) ([the-technat](https://github.com/the-technat)) -- Move breaking change details into separate docs. add notice on v7.0.0 [\#355](https://github.com/Azure/terraform-azurerm-aks/pull/355) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.12.0 to 0.13.0 in /test [\#352](https://github.com/Azure/terraform-azurerm-aks/pull/352) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Trivial: fix typo ingration -\> integration [\#351](https://github.com/Azure/terraform-azurerm-aks/pull/351) ([zioproto](https://github.com/zioproto)) -- Output Kubernetes Cluster Network Profile [\#333](https://github.com/Azure/terraform-azurerm-aks/pull/333) ([joshua-giumelli-deltatre](https://github.com/joshua-giumelli-deltatre)) -- \[Breaking\] Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard`. 
[\#346](https://github.com/Azure/terraform-azurerm-aks/pull/346) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] - Ignore changes on `kubernetes_version` from outside of Terraform [\#336](https://github.com/Azure/terraform-azurerm-aks/pull/336) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] - Fix \#315 by amending missing `linux_os_config` block [\#320](https://github.com/Azure/terraform-azurerm-aks/pull/320) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] Wrap `log_analytics_solution_id` to an object to fix \#263. [\#265](https://github.com/Azure/terraform-azurerm-aks/pull/265) ([lonegunmanb](https://github.com/lonegunmanb)) -- \[Breaking\] Remove unused net\_profile\_docker\_bridge\_cidr [\#222](https://github.com/Azure/terraform-azurerm-aks/pull/222) ([zioproto](https://github.com/zioproto)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md deleted file mode 100644 index 2c035d842..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v8.md +++ /dev/null @@ -1,27 +0,0 @@ -# Changelog - -**Merged pull requests:** - -- Add support for nodepool's `gpu_instance` [\#519](https://github.com/Azure/terraform-azurerm-aks/pull/519) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump github.com/Azure/terraform-module-test-helper from 0.17.0 to 0.18.0 in /test [\#516](https://github.com/Azure/terraform-azurerm-aks/pull/516) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add upgrade notice document [\#513](https://github.com/Azure/terraform-azurerm-aks/pull/513) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add retry when the ingress is not ready [\#510](https://github.com/Azure/terraform-azurerm-aks/pull/510) 
([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `support_plan` and `Premium` sku tier. [\#508](https://github.com/Azure/terraform-azurerm-aks/pull/508) ([ecklm](https://github.com/ecklm)) -- Refactor code, split monolith tf config into multiple files [\#494](https://github.com/Azure/terraform-azurerm-aks/pull/494) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove `var.http_application_routing_enabled` [\#493](https://github.com/Azure/terraform-azurerm-aks/pull/493) ([lonegunmanb](https://github.com/lonegunmanb)) -- feat\(`http_proxy_config`\): Add `http_proxy_config` [\#492](https://github.com/Azure/terraform-azurerm-aks/pull/492) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove `public_network_access_enabled` entirely [\#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) ([lonegunmanb](https://github.com/lonegunmanb)) -- Ignore deprecated attribute `public_network_access_enabled` [\#485](https://github.com/Azure/terraform-azurerm-aks/pull/485) ([ishuar](https://github.com/ishuar)) -- feat: enable precondition on `default_node_pool` for autoscaling with node pool type [\#484](https://github.com/Azure/terraform-azurerm-aks/pull/484) ([ishuar](https://github.com/ishuar)) -- Add web\_app\_routing\_identity block to outputs [\#481](https://github.com/Azure/terraform-azurerm-aks/pull/481) ([bonddim](https://github.com/bonddim)) -- Add support for `kubelet_identity` nested block [\#479](https://github.com/Azure/terraform-azurerm-aks/pull/479) ([lonegunmanb](https://github.com/lonegunmanb)) -- Prepare for v8.0 [\#462](https://github.com/Azure/terraform-azurerm-aks/pull/462) ([lonegunmanb](https://github.com/lonegunmanb)) -- Remove precondition on extra node pool which prevent using windows pool with overlay [\#512](https://github.com/Azure/terraform-azurerm-aks/pull/512) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for `maintenance_window_auto_upgrade` 
[\#505](https://github.com/Azure/terraform-azurerm-aks/pull/505) ([skolobov](https://github.com/skolobov)) -- Let the users decide whether adding a random suffix in cluster and pool's name or not. [\#496](https://github.com/Azure/terraform-azurerm-aks/pull/496) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add role assignments for ingress application gateway and corresponding example [\#426](https://github.com/Azure/terraform-azurerm-aks/pull/426) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add support for workload\_autoscaler\_profile settings [\#404](https://github.com/Azure/terraform-azurerm-aks/pull/404) ([bonddim](https://github.com/bonddim)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md deleted file mode 100644 index 05e2d7539..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG-v9.md +++ /dev/null @@ -1,76 +0,0 @@ -# Changelog - -## [9.4.1](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.1) (2025-02-05) - -**Merged pull requests:** - -- Revert changes of `9.4.0` [\#635](https://github.com/Azure/terraform-azurerm-aks/pull/635) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [9.4.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.4.0) (2025-02-05) - -**Merged pull requests:** - -- Bump azapi provider to \>=2.0, \< 3.0 [\#632](https://github.com/Azure/terraform-azurerm-aks/pull/632) ([zioproto](https://github.com/zioproto)) -- Dependabot 624 626 [\#627](https://github.com/Azure/terraform-azurerm-aks/pull/627) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.28.0 to 0.30.0 in /test [\#626](https://github.com/Azure/terraform-azurerm-aks/pull/626) 
([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.48.0 to 0.48.1 in /test [\#624](https://github.com/Azure/terraform-azurerm-aks/pull/624) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Dependabot changes from PR 609 619 620 [\#621](https://github.com/Azure/terraform-azurerm-aks/pull/621) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.27.0 to 0.28.0 in /test [\#620](https://github.com/Azure/terraform-azurerm-aks/pull/620) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.47.2 to 0.48.0 in /test [\#619](https://github.com/Azure/terraform-azurerm-aks/pull/619) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#616](https://github.com/Azure/terraform-azurerm-aks/pull/616) ([lonegunmanb](https://github.com/lonegunmanb)) -- Bump golang.org/x/crypto from 0.21.0 to 0.31.0 in /test [\#615](https://github.com/Azure/terraform-azurerm-aks/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 in /test [\#609](https://github.com/Azure/terraform-azurerm-aks/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [9.3.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.3.0) (2024-12-11) - -**Merged pull requests:** - -- Support of oms\_agent\_enabled add-on [\#613](https://github.com/Azure/terraform-azurerm-aks/pull/613) ([lonegunmanb](https://github.com/lonegunmanb)) -- Implement node\_network\_profile for default node pool [\#598](https://github.com/Azure/terraform-azurerm-aks/pull/598) ([zioproto](https://github.com/zioproto)) -- Bump examples to AKS 1.30 [\#595](https://github.com/Azure/terraform-azurerm-aks/pull/595) ([zioproto](https://github.com/zioproto)) -- Add `v4` sub-folder so this module could run with AzureRM provider both `v3` and `v4`. 
[\#594](https://github.com/Azure/terraform-azurerm-aks/pull/594) ([lonegunmanb](https://github.com/lonegunmanb)) - -## [9.2.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.2.0) (2024-11-07) - -**Merged pull requests:** - -- Make the Azure Key Vault public because private Key Vault requires preview API [\#599](https://github.com/Azure/terraform-azurerm-aks/pull/599) ([zioproto](https://github.com/zioproto)) -- Bump github.com/Azure/terraform-module-test-helper from 0.25.0 to 0.26.0 in /test [\#593](https://github.com/Azure/terraform-azurerm-aks/pull/593) ([lonegunmanb](https://github.com/lonegunmanb)) -- Use oidc as authentication method [\#592](https://github.com/Azure/terraform-azurerm-aks/pull/592) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update README.md [\#589](https://github.com/Azure/terraform-azurerm-aks/pull/589) ([shailwx](https://github.com/shailwx)) -- Add `cost_analysis_enabled` option [\#583](https://github.com/Azure/terraform-azurerm-aks/pull/583) ([artificial-aidan](https://github.com/artificial-aidan)) -- Bump github.com/Azure/terraform-module-test-helper from 0.24.0 to 0.25.0 in /test [\#581](https://github.com/Azure/terraform-azurerm-aks/pull/581) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/gruntwork-io/terratest from 0.46.15 to 0.47.0 in /test [\#579](https://github.com/Azure/terraform-azurerm-aks/pull/579) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/Azure/terraform-module-test-helper from 0.22.0 to 0.24.0 in /test [\#574](https://github.com/Azure/terraform-azurerm-aks/pull/574) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 in /test [\#562](https://github.com/Azure/terraform-azurerm-aks/pull/562) ([dependabot[bot]](https://github.com/apps/dependabot)) - -## [9.1.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.1.0) (2024-07-04) - -**Merged pull requests:** - -- Downgrade next major 
version back to v9 [\#577](https://github.com/Azure/terraform-azurerm-aks/pull/577) ([lonegunmanb](https://github.com/lonegunmanb)) -- Restore devcontainer [\#576](https://github.com/Azure/terraform-azurerm-aks/pull/576) ([zioproto](https://github.com/zioproto)) -- set drainTimeoutInMinutes default value to null [\#575](https://github.com/Azure/terraform-azurerm-aks/pull/575) ([zioproto](https://github.com/zioproto)) -- fix README.md format [\#570](https://github.com/Azure/terraform-azurerm-aks/pull/570) ([joaoestrela](https://github.com/joaoestrela)) -- Bump github.com/hashicorp/go-getter from 1.7.4 to 1.7.5 in /test [\#569](https://github.com/Azure/terraform-azurerm-aks/pull/569) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Start new Changelog file for v10 [\#567](https://github.com/Azure/terraform-azurerm-aks/pull/567) ([zioproto](https://github.com/zioproto)) -- fixed inaccurate variable descriptions for azure cni in overlay mode [\#566](https://github.com/Azure/terraform-azurerm-aks/pull/566) ([Xelef2000](https://github.com/Xelef2000)) -- add drain\_timeout\_in\_minutes and node\_soak\_duration\_in\_minutes [\#564](https://github.com/Azure/terraform-azurerm-aks/pull/564) ([zioproto](https://github.com/zioproto)) - -## [9.0.0](https://github.com/Azure/terraform-azurerm-aks/tree/9.0.0) (2024-06-07) - -**Merged pull requests:** - -- Compromise on e2e tests involving ingress, since it's not stable [\#558](https://github.com/Azure/terraform-azurerm-aks/pull/558) ([lonegunmanb](https://github.com/lonegunmanb)) -- Add weekly-codeql action [\#555](https://github.com/Azure/terraform-azurerm-aks/pull/555) ([lonegunmanb](https://github.com/lonegunmanb)) -- Change default value for `var.agents_pool_max_surge` to 10% [\#554](https://github.com/Azure/terraform-azurerm-aks/pull/554) ([lonegunmanb](https://github.com/lonegunmanb)) -- Update Microsoft.ContainerService managedClusters API version to 2024-02-01 
[\#552](https://github.com/Azure/terraform-azurerm-aks/pull/552) ([olofmattsson-inriver](https://github.com/olofmattsson-inriver)) -- Bump github.com/Azure/terraform-module-test-helper from 0.19.0 to 0.22.0 in /test [\#549](https://github.com/Azure/terraform-azurerm-aks/pull/549) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Amending log analytics attributes [\#548](https://github.com/Azure/terraform-azurerm-aks/pull/548) ([lonegunmanb](https://github.com/lonegunmanb)) -- bump k8s version for example since 1.26 has been deprecated [\#540](https://github.com/Azure/terraform-azurerm-aks/pull/540) ([lonegunmanb](https://github.com/lonegunmanb)) -- fix\(typo\): typo in output variable [\#537](https://github.com/Azure/terraform-azurerm-aks/pull/537) ([mbaykara](https://github.com/mbaykara)) -- Bump github.com/Azure/terraform-module-test-helper from 0.18.0 to 0.19.0 in /test [\#521](https://github.com/Azure/terraform-azurerm-aks/pull/521) ([dependabot[bot]](https://github.com/apps/dependabot)) - - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md deleted file mode 100644 index 9996f9928..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# Changelog - -## Important Notice - -* fix: add back `private_cluster_enabled` variable by @tobiasehlert [#667](https://github.com/Azure/terraform-azurerm-aks/pull/667) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md deleted file mode 100644 index af8b0207d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -This code of 
conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. - -Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile deleted file mode 100644 index 3db7ccd9d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile +++ /dev/null @@ -1,4 +0,0 @@ -SHELL := /bin/bash - -$(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) --include tfvmmakefile \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE deleted file mode 100644 index 21071075c..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md deleted file mode 100644 index f611a6a75..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov10.0.md +++ /dev/null @@ -1,53 +0,0 @@ -# Notice on Upgrade to v10.x - -## AzAPI provider version constraint has been updated to `>=2.0, < 3.0`. - -## [`var.web_app_routing` type change](https://github.com/Azure/terraform-azurerm-aks/pull/606) - -`var.web_app_routing.dns_zone_id` has been replaced by `var.web_app_routing.dns_zone_ids`. The new variable is a list of DNS zone IDs. This change allows for the specification of multiple DNS zones for routing. - -## [`data.azurerm_resource_group.main` in this module has been removed, `var.location` is a required variable now.](https://github.com/Azure/terraform-azurerm-aks/pull/644) - -## [Create log analytics workspace would also create required monitor data collection rule now](https://github.com/Azure/terraform-azurerm-aks/pull/623) - -The changes in this pull request introduce support for a Data Collection Rule (DCR) for Azure Monitor Container Insights in the Terraform module. - -## [`CHANGELOG.md` file is no longer maintained, please read release note in GitHub repository instead](https://github.com/Azure/terraform-azurerm-aks/pull/651) - -[New release notes](https://github.com/Azure/terraform-azurerm-aks/releases). 
- -## [The following variables have been removed:](https://github.com/Azure/terraform-azurerm-aks/pull/652) - -* `agents_taints` -* `api_server_subnet_id` -* `private_cluster_enabled` -* `rbac_aad_client_app_id` -* `rbac_aad_managed` -* `rbac_aad_server_app_id` -* `rbac_aad_server_app_secret` - -## `var.pod_subnet_id` has been replaced by `var.pod_subnet.id` - -## `var.vnet_subnet_id` has been replaced by `var.vnet_subnet.id` - -## `var.node_pools.pod_subnet_id` has been replaced by `var.node_pools.pod_subnet.id` - -## `var.node_pools.vnet_subnet_id` has been replaced by `var.node_pools.vnet_subnet.id` - -## `azurerm_role_assignment.network_contributor` will be re-created - -Since `for_each`'s target has been changed from a set of string to a map of object to avoid "Known after apply" values in iterator, we have to re-create the `azurerm_role_assignment.network_contributor` resource. This will cause the role assignment to be removed and re-added, which may result in a brief period of time where the role assignment is not present. - -## When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself. - -## `var.client_secret` now is `sensitive` - -## New interval between cluster creation and kubernetes version upgrade - -New variable `interval_before_cluster_update` was added. Sometimes when we tried to update cluster's kubernetes version after cluster creation, we got the error `Operation is not allowed because there's an in progress update managed cluster operation on the managed cluster started`. A `time_sleep` was added to avoid such potential conflict. You can set this variable to `null` to bypass the sleep. - -## @zioproto is no longer a maintainer of this module - -For personal reasons, @zioproto is no longer a maintainer of this module. 
I want to express my sincere gratitude for his contributions and support over the years. His dedication and hard work are invaluable to this module. - -THANK YOU @zioproto ! diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md deleted file mode 100644 index 4f31d8157..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov5.0.md +++ /dev/null @@ -1,93 +0,0 @@ -# Notice on Upgrade to v5.x - -V5.0.0 is a major version upgrade and a lot of breaking changes have been introduced. Extreme caution must be taken during the upgrade to avoid resource replacement and downtime by accident. - -Running the `terraform plan` first to inspect the plan is strongly advised. - -## Terraform and terraform-provider-azurerm version restrictions - -Now Terraform core's lowest version is v1.2.0 and terraform-provider-azurerm's lowest version is v3.21.0. - -## variable `user_assigned_identity_id` has been renamed. - -variable `user_assigned_identity_id` has been renamed to `identity_ids` and it's type has been changed from `string` to `list(string)`. - -## `addon_profile` in outputs is no longer available. 
- -It has been broken into the following new outputs: - -* `aci_connector_linux` -* `aci_connector_linux_enabled` -* `azure_policy_enabled` -* `http_application_routing_enabled` -* `ingress_application_gateway` -* `ingress_application_gateway_enabled` -* `key_vault_secrets_provider` -* `key_vault_secrets_provider_enabled` -* `oms_agent` -* `oms_agent_enabled` -* `open_service_mesh_enabled` - -## The following variables have been renamed from `enable_xxx` to `xxx_enabled` - -* `enable_azure_policy` has been renamed to `azure_policy_enabled` -* `enable_http_application_routing` has been renamed to `http_application_routing_enabled` -* `enable_ingress_application_gateway` has been renamed to `ingress_application_gateway_enabled` -* `enable_log_analytics_workspace` has been renamed to `log_analytics_workspace_enabled` -* `enable_open_service_mesh` has been renamed to `open_service_mesh_enabled` -* `enable_role_based_access_control` has been renamed to `role_based_access_control_enabled` - -## `nullable = true` has been added to the following variables so setting them to `null` explicitly will use the default value - -* `log_analytics_workspace_enable` -* `os_disk_type` -* `private_cluster_enabled` -* `rbac_aad_managed` -* `rbac_aad_admin_group_object_ids` -* `network_policy` -* `enable_node_public_ip` - -## `var.admin_username`'s default value has been removed - -In v4.x `var.admin_username` has a default value `azureuser` and has been removed in V5.0.0. Since the `admin_username` argument in `linux_profile` block is a ForceNew argument, any value change to this argument will trigger a Kubernetes cluster replacement **SO THE EXTREME CAUTION MUST BE TAKEN**. The module's callers must set `var.admin_username` to `azureuser` explicitly if they didn't set it before. - -## `module.ssh-key` has been removed - -The file named `private_ssh_key` which contains the tls private key will be deleted since the `local_file` resource has been removed. 
Now the private key is exported via `generated_cluster_private_ssh_key` in output and the corresponding public key is exported via `generated_cluster_public_ssh_key` in output. - -A `moved` block has been added to relocate the existing `tls_private_key` resource to the new address. If the `var.admin_username` is not `null`, no action is needed. - -Resource `tls_private_key`'s creation now is conditional. Users may see the destruction of existing `tls_private_key` in the generated plan if `var.admin_username` is `null`. - -## `system_assigned_identity` in the output has been renamed to `cluster_identity` - -The `system_assigned_identity` was: - -```hcl -output "system_assigned_identity" { - value = azurerm_kubernetes_cluster.main.identity -} -``` - -Now it has been renamed to `cluster_identity`, and the block has been changed to: - -```hcl -output "cluster_identity" { - description = "The `azurerm_kubernetes_cluster`'s `identity` block." - value = try(azurerm_kubernetes_cluster.main.identity[0], null) -} -``` - -The callers who used to read the cluster's identity block need to remove the index in their expression, from `module.aks.system_assigned_identity[0]` to `module.aks.cluster_identity`. - -## The following outputs are now sensitive. 
All outputs referenced them must be declared as sensitive too - -* `client_certificate` -* `client_key` -* `cluster_ca_certificate` -* `generated_cluster_private_ssh_key` -* `host` -* `kube_admin_config_raw` -* `kube_config_raw` -* `password` -* `username` diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md deleted file mode 100644 index e75b87ea3..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov6.0.md +++ /dev/null @@ -1,5 +0,0 @@ -# Notice on Upgrade to v6.x - -We've added a CI pipeline for this module to speed up our code review and to enforce a high code quality standard, if you want to contribute by submitting a pull request, please read [Pre-Commit & Pr-Check & Test](#Pre-Commit--Pr-Check--Test) section, or your pull request might be rejected by CI pipeline. - -A pull request will be reviewed when it has passed Pre Pull Request Check in the pipeline, and will be merged when it has passed the acceptance tests. Once the ci Pipeline failed, please read the pipeline's output, thanks for your cooperation. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md deleted file mode 100644 index e3c1f41a5..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov7.0.md +++ /dev/null @@ -1,52 +0,0 @@ -# Notice on Upgrade to v7.x - -## Add validation block to enforce users to change `sku_tier` from `Paid` to `Standard` - -AzureRM's minimum version is `>= 3.51, < 4.0` now. -[`var.sku_tier` cannot be set to `Paid` anymore](https://github.com/hashicorp/terraform-provider-azurerm/issues/20887), now possible values are `Free` and `Standard`. 
- -## Ignore changes on `kubernetes_version` from outside of Terraform - -Related issue: #335 - -Two new resources would be created when upgrading from v6.x to v7.x: - -* `null_resource.kubernetes_version_keeper` -* `azapi_update_resource.aks_cluster_post_create` - -`azurerm_kubernetes_cluster.main` resource would ignore change on `kubernetes_version` from outside of Terraform in case AKS cluster's patch version has been upgraded automatically. -When you change `var.kubernetes_version`'s value, it would trigger a re-creation of `null_resource.kubernetes_version_keeper` and re-creation of `azapi_update_resource.aks_cluster_post_create`, which would upgrade the AKS cluster's `kubernetes_version`. - -`azapi` provider is required to be configured in your Terraform configuration. - -## Fix #315 by amending missing `linux_os_config` block - -In v6.0, `default_node_pool.linux_os_config` block won't be added to `azurerm_kubernetes_cluster.main` resource when `var.enable_auto_scaling` is `true`. This bug has been fixed in v7.0.0 so you might see a diff on `azurerm_kubernetes_cluster.main` resource. - -## Wrap `log_analytics_solution_id` to an object to fix #263. - -`var.log_analytics_solution_id` is now an object with `id` attribute. This change is to fix #263. - -## Remove unused net_profile_docker_bridge_cidr - -`var.net_profile_docker_bridge_cidr` has been [deprecated](https://github.com/hashicorp/terraform-provider-azurerm/issues/18119) and is not used in the module anymore and has been removed. - -## Add `create_before_destroy=true` to node pools #357 - -Now `azurerm_kubernetes_cluster_node_pool.node_pool` resource has `create_before_destroy=true` to avoid downtime when upgrading node pools. Users must be aware that there would be a "random" suffix added into pool's name, this suffix's length is `4`, so your previous node pool's name `nodepool1` would be `nodepool1xxxx`. 
This suffix is calculated from node pool's config, the same configuration would lead to the same suffix. You might need to shorten your node pool's name because of this new added suffix. - -To enable this feature, we've also added new `null_resource.pool_name_keeper` to track node pool's name in case you've changed the name. - -## Check `api_server_authorized_ip_ranges` when `public_network_access_enabled` is `true` #361 - -As the [document](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#public_network_access_enabled) described: - ->When `public_network_access_enabled` is set to true, `0.0.0.0/32` must be added to `authorized_ip_ranges` in the `api_server_access_profile block`. - -We'll add `api_server_access_profile` nested block after AzureRM provider's v4.0, but starting from v7.0 we'll enforce such pre-condition check. - -## Add `depends_on` to `azurerm_kubernetes_cluster_node_pool` resources #418 - -If you have `azurerm_kubernetes_cluster_node_pool` resources not managed with this module (`var.nodepools`) you -must have an explicit `depends_on` on those resources to avoid conflicting nodepools operations. -See issue #418 for more details. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md deleted file mode 100644 index 96077ba1a..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov8.0.md +++ /dev/null @@ -1,53 +0,0 @@ -# Notice on Upgrade to v8.x - -## New variable `cluster_name_random_suffix` - -1. A new variable `cluster_name_random_suffix` is added. This allows users to decide whether they want to add a random suffix to a cluster's name. This is particularly useful when Terraform needs to recreate a resource that cannot be updated in-place, as it avoids naming conflicts. 
Because of [#357](https://github.com/Azure/terraform-azurerm-aks/pull/357), now the `azurerm_kubernetes_cluster` resource is `create_before_destroy = true` now, we cannot turn this feature off. If you want to recreate this cluster by one apply without any trouble, please turn this random naming suffix on to avoid the naming conflict. - -2. The `create_before_destroy` attribute is added to the `node_pools` variable as an object field. This attribute determines whether a new node pool should be created before the old one is destroyed during updates. By default, it is set to `true`. - -3. The naming of extra node pools has been updated. Now, a random UUID is used as the seed for the random suffix in the name of the node pool, instead of the JSON-encoded value of the node pool. **This naming suffix only apply for extra node pools that create before destroy.** - -You're recommended to set `var.cluster_name_random_suffix` to `true` explicitly, and you'll see a random suffix in your cluster's name. If you don't like this suffix, please remember now a new cluster with the same name would be created before the old one has been deleted. If you do want to recreate the cluster, please run `terraform destroy` first. - -## Remove `var.http_application_routing_enabled` - -According to the [document](https://learn.microsoft.com/en-us/azure/aks/http-application-routing), HTTP application routing add-on for AKS has been retired so we have to remove this feature from this module. - -1. The variable `http_application_routing_enabled` has been removed from the module. This variable was previously used to enable HTTP Application Routing Addon. - -2. The `http_application_routing_enabled` output has been removed from `outputs.tf`. This output was previously used to display whether HTTP Application Routing was enabled. - -3. The `http_application_routing_enabled` attribute has been removed from the `azurerm_kubernetes_cluster` resource in `main.tf`. 
This attribute was previously used to enable HTTP Application Routing for the Kubernetes cluster. - -4. The `http_application_routing_enabled` attribute has been added to the `ignore_changes` lifecycle block of the `azurerm_kubernetes_cluster` resource in `main.tf`. This means changes to this attribute will not trigger the resource to be updated. - -These changes mean that users of this module will no longer be able to enable HTTP Application Routing through this module. - -The new feature for the Ingress in AKS is [Managed NGINX ingress with the application routing add-on](https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default), you can enable this with `var.web_app_routing`. - -Users who were using this feature, please read this [Migrate document](https://learn.microsoft.com/en-us/azure/aks/app-routing-migration). - -## Remove `public_network_access_enabled` entirely - -According to this [announcement](https://github.com/Azure/AKS/issues/3690), now public network access for AKS is no longer supported. - -The primary impact [#488](https://github.com/Azure/terraform-azurerm-aks/pull/488) is the complete removal of the `public_network_access_enabled` variable from the module. - -1. The `public_network_access_enabled` variable has been removed from the `variables.tf` file. This means that the module no longer supports the configuration of public network access at the Kubernetes cluster level. - -2. The `public_network_access_enabled` variable has also been removed from the `main.tf` file and all example files (`application_gateway_ingress/main.tf`, `multiple_node_pools/main.tf`, `named_cluster/main.tf`, `startup/main.tf`, `with_acr/main.tf`, `without_monitor/main.tf`). This indicates that the module no longer uses this variable in the creation of the Azure Kubernetes Service (AKS) resource. - -3. The `public_network_access_enabled` has been added into `azurerm_kubernetes_cluster`'s `ignore_changes` list. 
Any change to this attribute won't trigger update. - -## Add role assignments for ingress application gateway - -The `variables.tf` file is updated with new variables related to the application gateway for ingress, including `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress`. - -The `brown_field_application_gateway_for_ingress`, `create_role_assignments_for_application_gateway`, and `green_field_application_gateway_for_ingress` variables are used to configure the Application Gateway Ingress for the Azure Kubernetes Service (AKS) in the Terraform module. - -1. `brown_field_application_gateway_for_ingress`: This variable is used when you want to use an existing Application Gateway as the ingress for the AKS cluster. It is an object that contains the ID of the Application Gateway (`id`) and the ID of the Subnet (`subnet_id`) which the Application Gateway is connected to. If this variable is set, the module will not create a new Application Gateway and will use the existing one instead. - -2. `green_field_application_gateway_for_ingress`: This variable is used when you want the module to create a new Application Gateway for the AKS cluster. It is an object that contains the name of the Application Gateway to be used or created in the Nodepool Resource Group (`name`), the subnet CIDR to be used to create an Application Gateway (`subnet_cidr`), and the ID of the subnet on which to create an Application Gateway (`subnet_id`). If this variable is set, the module will create a new Application Gateway with the provided configuration. - -3. `create_role_assignments_for_application_gateway`: This is a boolean variable that determines whether to create the corresponding role assignments for the application gateway or not. By default, it is set to `true`. Role assignments are necessary for the Application Gateway to function correctly with the AKS cluster. 
If set to `true`, the module will create the necessary role assignments on the Application Gateway. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md deleted file mode 100644 index 9bd796e2d..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/NoticeOnUpgradeTov9.0.md +++ /dev/null @@ -1,9 +0,0 @@ -# Notice on Upgrade to v9.x - -## New default value for variable `agents_pool_max_surge` - -variable `agents_pool_max_surge` now has default value `10%`. This change might cause configuration drift. If you want to keep the old value, please set it explicitly in your configuration. - -## API version for `azapi_update_resource` resource has been upgraded from `Microsoft.ContainerService/managedClusters@2023-01-02-preview` to `Microsoft.ContainerService/managedClusters@2024-02-01`. - -After a test, it won't affect the existing Terraform state and cause configuration drift. The upgrade is caused by the retirement of original API. diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md deleted file mode 100644 index e754e5a7f..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md +++ /dev/null @@ -1,490 +0,0 @@ -# terraform-azurerm-aks - -## Deploys a Kubernetes cluster (AKS) on Azure with monitoring support through Azure Log Analytics - -This Terraform module deploys a Kubernetes cluster on Azure using AKS (Azure Kubernetes Service) and adds support for monitoring with Log Analytics. - --> **NOTE:** If you have not assigned `client_id` or `client_secret`, A `SystemAssigned` identity will be created. - --> **NOTE:** If you're using AzureRM `v4`, you can use this module by setting `source` to `Azure/aks/azurerm//v4`. 
- -## Notice on breaking changes - -Please be aware that major version(e.g., from 6.8.0 to 7.0.0) update contains breaking changes that may impact your infrastructure. It is crucial to review these changes with caution before proceeding with the upgrade. - -In most cases, you will need to adjust your Terraform code to accommodate the changes introduced in the new major version. We strongly recommend reviewing the changelog and migration guide to understand the modifications and ensure a smooth transition. - -To help you in this process, we have provided detailed documentation on the breaking changes, new features, and any deprecated functionalities. Please take the time to read through these resources to avoid any potential issues or disruptions to your infrastructure. - -* [Notice on Upgrade to v10.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov10.0.md) -* [Notice on Upgrade to v9.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov9.0.md) -* [Notice on Upgrade to v8.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov8.0.md) -* [Notice on Upgrade to v7.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov7.0.md) -* [Notice on Upgrade to v6.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov6.0.md) -* [Notice on Upgrade to v5.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov5.0.md) - -Remember, upgrading to a major version with breaking changes should be done carefully and thoroughly tested in your environment. If you have any questions or concerns, please don't hesitate to reach out to our support team for assistance. - -## Usage in Terraform 1.2.0 - -Please view folders in `examples`. - -The module supports some outputs that may be used to configure a kubernetes -provider after deploying an AKS cluster. 
- -```hcl -provider "kubernetes" { - host = module.aks.host - client_certificate = base64decode(module.aks.client_certificate) - client_key = base64decode(module.aks.client_key) - cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate) -} -``` - -There're some examples in the examples folder. You can execute `terraform apply` command in `examples`'s sub folder to try the module. These examples are tested against every PR with the [E2E Test](#Pre-Commit--Pr-Check--Test). - -## Enable or disable tracing tags - -We're using [BridgeCrew Yor](https://github.com/bridgecrewio/yor) and [yorbox](https://github.com/lonegunmanb/yorbox) to help manage tags consistently across infrastructure as code (IaC) frameworks. In this module you might see tags like: - -```hcl -resource "azurerm_resource_group" "rg" { - location = "eastus" - name = random_pet.name - tags = merge(var.tags, (/**/ (var.tracing_tags_enabled ? { for k, v in /**/ { - avm_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" - avm_git_file = "main.tf" - avm_git_last_modified_at = "2023-05-05 08:57:54" - avm_git_org = "lonegunmanb" - avm_git_repo = "terraform-yor-tag-test-module" - avm_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" - } /**/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /**/)) -} -``` - -To enable tracing tags, set the variable to true: - -```hcl -module "example" { -source = "{module_source}" -... -tracing_tags_enabled = true -} -``` - -The `tracing_tags_enabled` is default to `false`. - -To customize the prefix for your tracing tags, set the `tracing_tags_prefix` variable value in your Terraform configuration: - -```hcl -module "example" { -source = "{module_source}" -... 
-tracing_tags_prefix = "custom_prefix_" -} -``` - -The actual applied tags would be: - -```text -{ -custom_prefix_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" -custom_prefix_git_file = "main.tf" -custom_prefix_git_last_modified_at = "2023-05-05 08:57:54" -custom_prefix_git_org = "lonegunmanb" -custom_prefix_git_repo = "terraform-yor-tag-test-module" -custom_prefix_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" -} -``` - -## Pre-Commit & Pr-Check & Test - -### Configurations - -- [Configure Terraform for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/terraform-install-configure) - -We assumed that you have setup service principal's credentials in your environment variables like below: - -```shell -export ARM_SUBSCRIPTION_ID="" -export ARM_TENANT_ID="" -export ARM_CLIENT_ID="" -export ARM_CLIENT_SECRET="" -``` - -On Windows Powershell: - -```shell -$env:ARM_SUBSCRIPTION_ID="" -$env:ARM_TENANT_ID="" -$env:ARM_CLIENT_ID="" -$env:ARM_CLIENT_SECRET="" -``` - -We provide a docker image to run the pre-commit checks and tests for you: `mcr.microsoft.com/azterraform:latest` - -To run the pre-commit task, we can run the following command: - -```shell -$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit -``` - -On Windows Powershell: - -```shell -$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit -``` - -In pre-commit task, we will: - -1. Run `terraform fmt -recursive` command for your Terraform code. -2. Run `terrafmt fmt -f` command for markdown files and go code files to ensure that the Terraform code embedded in these files are well formatted. -3. Run `go mod tidy` and `go mod vendor` for test folder to ensure that all the dependencies have been synced. -4. Run `gofmt` for all go code files. -5. Run `gofumpt` for all go code files. -6. Run `terraform-docs` on `README.md` file, then run `markdown-table-formatter` to format markdown tables in `README.md`. 
- -Then we can run the pr-check task to check whether our code meets our pipeline's requirement(We strongly recommend you run the following command before you commit): - -```shell -$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pr-check -``` - -On Windows Powershell: - -```shell -$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pr-check -``` - -To run the e2e-test, we can run the following command: - -```text -docker run --rm -v $(pwd):/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -On Windows Powershell: - -```text -docker run --rm -v ${pwd}:/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -To follow [**Ensure AKS uses disk encryption set**](https://docs.bridgecrew.io/docs/ensure-that-aks-uses-disk-encryption-set) policy we've used `azurerm_key_vault` in example codes, and to follow [**Key vault does not allow firewall rules settings**](https://docs.bridgecrew.io/docs/ensure-that-key-vault-allows-firewall-rules-settings) we've limited the ip cidr on it's `network_acls`. 
By default we'll use the ip returned by `https://api.ipify.org?format=json` api as your public ip, but in case you need to use another cidr, you can set an environment variable like below: - -```text -docker run --rm -v $(pwd):/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -On Windows Powershell: -```text -docker run --rm -v ${pwd}:/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test -``` - -#### Prerequisites - -- [Docker](https://www.docker.com/community-edition#/download) - -## Authors - -Originally created by [Damien Caro](http://github.com/dcaro) and [Malte Lantin](http://github.com/n01d) - -## License - -[MIT](LICENSE) - -# Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
- -## Module Spec - -The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!** - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.3 | -| [azapi](#requirement\_azapi) | >=2.0, < 3.0 | -| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 | -| [null](#requirement\_null) | >= 3.0 | -| [time](#requirement\_time) | >= 0.5 | -| [tls](#requirement\_tls) | >= 3.1 | - -## Providers - -| Name | Version | -|------|---------| -| [azapi](#provider\_azapi) | >=2.0, < 3.0 | -| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 | -| [null](#provider\_null) | >= 3.0 | -| [time](#provider\_time) | >= 0.5 | -| [tls](#provider\_tls) | >= 3.1 | - -## Modules - -No modules. - -## Resources - -| Name | Type | -|------|------| -| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | -| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | -| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | -| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | -| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | -| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | -| 
[azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | -| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource | -| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource | -| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | -| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| 
[null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | -| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | -| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | -| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | -| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | -| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | -| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source | -| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source | - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no | -| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | 
(Optional) aci\_connector\_linux subnet name | `string` | `null` | no | -| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. | `number` | `2` | no | -| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no | -| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no | -| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no | -| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no | -| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no | -| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) |
list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
}))
| `[]` | no | -| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) |
list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
| `[]` | no | -| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no | -| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no | -| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no | -| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no | -| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no | -| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no | -| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no | -| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no | -| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. 
Changing this forces some new resources to be created. | `map(string)` | `{}` | no | -| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no | -| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no | -| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no | -| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no | -| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no | -| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no | -| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no | -| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`. 
| `number` | `45` | no | -| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no | -| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no | -| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no | -| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no | -| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. 
| `string` | `"0.5"` | no | -| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no | -| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no | -| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no | -| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no | -| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no | -| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. |
object({
id = string
subnet_id = string
})
| `null` | no | -| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no | -| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no | -| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no | -| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no | -| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no | -| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. |
object({
sgx_quote_helper_enabled = bool
})
| `null` | no | -| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no | -| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no | -| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no | -| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no | -| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 |
object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
})
|
{
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
}
| no | -| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no | -| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no | -| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no | -| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no | -| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no | -| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. 
| `bool` | `false` | no | -| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. |
object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
})
| `null` | no | -| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. |
object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
})
| `null` | no | -| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no | -| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no | -| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no | -| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no | -| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no | -| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no | -| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no | -| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no | -| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. 
| `string` | `"Public"` | no | -| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. |
object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
})
| `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | -| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no | -| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no | -| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no | -| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. 
https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no | -| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no | -| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no | -| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no | -| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no | -| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no | -| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes | -| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. |
object({
id = string
})
| `null` | no | -| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. |
object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
})
| `null` | no | -| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no | -| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no | -| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no | -| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no | -| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. |
object({
identity_ids = optional(set(string))
type = string
})
| `null` | no | -| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no | -| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no | -| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no | -| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no | -| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no | -| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no | -| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. |
object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
})
| `null` | no | -| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | -| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` -
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | -| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no | -| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` |
[
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no | -| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` |
[
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no | -| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` |
[
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no | -| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) |
object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
})
| `null` | no | -| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no | -| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. |
object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
})
| `null` | no | -| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no | -| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no | -| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. 
| `list(string)` | `null` | no | -| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no | -| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created. | `string` | `null` | no | -| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no | -| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no | -| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no | -| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. 
| `string` | `null` | no | -| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. |
object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
})
| `null` | no | -| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no | -| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflicts. Defaults to `true`.
})) |
map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
}))
| `{}` | no | -| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no | -| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no | -| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no | -| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no | -| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no | -| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | -| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no | -| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no | -| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. 
Changing this forces a new resource to be created. | `string` | `null` | no | -| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | -| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no | -| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no | -| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no | -| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no | -| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no | -| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no | -| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no | -| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no | -| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. 
If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no | -| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes | -| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no | -| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no | -| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no | -| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no | -| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no | -| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. |
object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
})
| `null` | no | -| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no | -| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no | -| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no | -| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no | -| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no | -| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no | -| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no | -| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no | -| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. 
| `string` | `"KubernetesOfficial"` | no | -| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no | -| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no | -| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no | -| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | -| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) |
object({
dns_zone_ids = list(string)
})
| `null` | no | -| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. |
object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
})
| `null` | no | -| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. | -| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? | -| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | -| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | -| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | -| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. | -| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. | -| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. | -| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. | -| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. 
| -| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) | -| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace | -| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace | -| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace | -| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | -| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | -| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | -| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. | -| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. | -| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
| -| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. | -| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. | -| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). | -| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. | -| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. | -| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. | -| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? | -| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. 
| -| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? | -| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. | -| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. | -| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. | -| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. | -| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block | -| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. | -| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. | -| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. | -| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. | -| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? 
| -| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | -| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. | -| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. | -| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object. | - diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md deleted file mode 100644 index 869fdfe2b..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 
- -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
- - diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf deleted file mode 100644 index 7f368600b..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf +++ /dev/null @@ -1,317 +0,0 @@ -moved { - from = azurerm_kubernetes_cluster_node_pool.node_pool - to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { - for_each = local.node_pools_create_before_destroy - - kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" - capacity_reservation_group_id = each.value.capacity_reservation_group_id - eviction_policy = each.value.eviction_policy - fips_enabled = each.value.fips_enabled - gpu_instance = each.value.gpu_instance - host_group_id = each.value.host_group_id - kubelet_disk_type = each.value.kubelet_disk_type - max_count = each.value.max_count - max_pods = each.value.max_pods - min_count = each.value.min_count - mode = each.value.mode - node_count = each.value.node_count - node_labels = each.value.node_labels - node_public_ip_prefix_id = each.value.node_public_ip_prefix_id - node_taints = each.value.node_taints - orchestrator_version = each.value.orchestrator_version - os_disk_size_gb = each.value.os_disk_size_gb - os_disk_type = each.value.os_disk_type - os_sku = each.value.os_sku - os_type = each.value.os_type - pod_subnet_id = try(each.value.pod_subnet.id, null) - priority = each.value.priority - proximity_placement_group_id = each.value.proximity_placement_group_id - scale_down_mode = each.value.scale_down_mode - snapshot_id = each.value.snapshot_id - spot_max_price = each.value.spot_max_price - tags = each.value.tags - ultra_ssd_enabled = each.value.ultra_ssd_enabled - vm_size = each.value.vm_size - vnet_subnet_id = try(each.value.vnet_subnet.id, null) - 
workload_runtime = each.value.workload_runtime - zones = each.value.zones - - dynamic "kubelet_config" { - for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"] - - content { - allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls - container_log_max_line = each.value.kubelet_config.container_log_max_files - container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb - cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled - cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period - cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy - image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold - image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold - pod_max_pid = each.value.kubelet_config.pod_max_pid - topology_manager_policy = each.value.kubelet_config.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] - - content { - swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb - transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag - transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] - - content { - fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr - fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max - fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches - fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open - kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max - net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog - net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max - net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default - net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max - net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn - net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default - net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max - net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max - vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count - vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness - vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] - - content { - application_security_group_ids = each.value.node_network_profile.application_security_group_ids - node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags - - dynamic "allowed_host_ports" { - for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports - - content { - port_end = allowed_host_ports.value.port_end - port_start = allowed_host_ports.value.port_start - protocol = allowed_host_ports.value.protocol - } - } - } - } - dynamic "upgrade_settings" { - for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] - - content { - max_surge = each.value.upgrade_settings.max_surge - drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes - node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes - } - } - dynamic "windows_profile" { - for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] - - content { - outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - create_before_destroy = true - ignore_changes = [ - name - ] - replace_triggered_by = [ - null_resource.pool_name_keeper[each.key], - ] - - precondition { - condition = can(regex("[a-z0-9]{1,8}", each.value.name)) - error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" - } - precondition { - condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) - error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " - } - precondition { - condition = var.agents_type == "VirtualMachineScaleSets" - error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." 
- } - } -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { - for_each = local.node_pools_create_after_destroy - - kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - name = each.value.name - capacity_reservation_group_id = each.value.capacity_reservation_group_id - eviction_policy = each.value.eviction_policy - fips_enabled = each.value.fips_enabled - host_group_id = each.value.host_group_id - kubelet_disk_type = each.value.kubelet_disk_type - max_count = each.value.max_count - max_pods = each.value.max_pods - min_count = each.value.min_count - mode = each.value.mode - node_count = each.value.node_count - node_labels = each.value.node_labels - node_public_ip_prefix_id = each.value.node_public_ip_prefix_id - node_taints = each.value.node_taints - orchestrator_version = each.value.orchestrator_version - os_disk_size_gb = each.value.os_disk_size_gb - os_disk_type = each.value.os_disk_type - os_sku = each.value.os_sku - os_type = each.value.os_type - pod_subnet_id = try(each.value.pod_subnet.id, null) - priority = each.value.priority - proximity_placement_group_id = each.value.proximity_placement_group_id - scale_down_mode = each.value.scale_down_mode - snapshot_id = each.value.snapshot_id - spot_max_price = each.value.spot_max_price - tags = each.value.tags - ultra_ssd_enabled = each.value.ultra_ssd_enabled - vm_size = each.value.vm_size - vnet_subnet_id = try(each.value.vnet_subnet.id, null) - workload_runtime = each.value.workload_runtime - zones = each.value.zones - - dynamic "kubelet_config" { - for_each = each.value.kubelet_config == null ? 
[] : ["kubelet_config"] - - content { - allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls - container_log_max_line = each.value.kubelet_config.container_log_max_files - container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb - cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled - cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period - cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy - image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold - image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold - pod_max_pid = each.value.kubelet_config.pod_max_pid - topology_manager_policy = each.value.kubelet_config.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] - - content { - swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb - transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag - transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] - - content { - fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr - fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max - fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches - fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open - kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max - net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog - net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max - net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default - net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max - net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn - net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default - net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max - net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max - vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count - vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness - vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] - - content { - node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags - } - } - dynamic "upgrade_settings" { - for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] - - content { - max_surge = each.value.upgrade_settings.max_surge - drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes - node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes - } - } - dynamic "windows_profile" { - for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] - - content { - outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - precondition { - condition = can(regex("[a-z0-9]{1,8}", each.value.name)) - error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" - } - precondition { - condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) - error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " - } - precondition { - condition = var.agents_type == "VirtualMachineScaleSets" - error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." - } - } -} - -resource "null_resource" "pool_name_keeper" { - for_each = var.node_pools - - triggers = { - pool_name = each.value.name - } - - lifecycle { - precondition { - condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids) - error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself." 
- } - } -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf deleted file mode 100644 index 500f27ece..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf +++ /dev/null @@ -1,17 +0,0 @@ -# tflint-ignore-file: azurerm_resource_tag - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip - message_of_the_day = each.value.message_of_the_day -} - -resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { - custom_ca_trust_enabled = each.value.custom_ca_trust_enabled - enable_auto_scaling = each.value.enable_auto_scaling - enable_host_encryption = each.value.enable_host_encryption - enable_node_public_ip = each.value.enable_node_public_ip - message_of_the_day = each.value.message_of_the_day -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf deleted file mode 100644 index 2b69dfe13..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf +++ /dev/null @@ -1,74 +0,0 @@ -locals { - # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. - auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? 
var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete - # automatic upgrades are either: - # - null - # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null - # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null - automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : ( - (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || - (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) - ) - cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") - # Abstract the decision whether to create an Analytics Workspace or not. - create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null - create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null - default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) - # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 - existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) - existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] - existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) - existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) - # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 - existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) - existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) - existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) - existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) - ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress - # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. - # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled - # is set to `true`. - log_analytics_workspace = var.log_analytics_workspace_enabled ? ( - # The Log Analytics Workspace should be enabled: - var.log_analytics_workspace == null ? { - # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. - # Create an `azurerm_log_analytics_workspace` resource and use that. - id = local.azurerm_log_analytics_workspace_id - name = local.azurerm_log_analytics_workspace_name - location = local.azurerm_log_analytics_workspace_location - resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name - } : { - # `log_analytics_workspace` is supplied. Let's use that. 
- id = var.log_analytics_workspace.id - name = var.log_analytics_workspace.name - location = var.log_analytics_workspace.location - # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 - resource_group_name = split("/", var.log_analytics_workspace.id)[4] - } - ) : null # Finally, the Log Analytics Workspace should be disabled. - node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } - node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } - private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) - query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) - subnet_ids = [for _, s in local.subnets : s.id] - subnets = merge({ for k, v in merge( - [ - for key, pool in var.node_pools : { - "${key}-vnet-subnet" : pool.vnet_subnet, - "${key}-pod-subnet" : pool.pod_subnet, - } - ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { - "vnet-subnet" : { - id = var.vnet_subnet.id - } - }) - # subnet_ids = for id in local.potential_subnet_ids : id if id != null - use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null - use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null - valid_private_dns_zone_regexs = [ - "private\\.[a-z0-9]+\\.azmk8s\\.io", - "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", - "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io", - "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io", - ] -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf deleted file mode 100644 index fe51625be..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf +++ /dev/null @@ -1,124 +0,0 @@ -resource "azurerm_log_analytics_workspace" "main" { - count = local.create_analytics_workspace ? 1 : 0 - - location = var.location - name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace") - resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name) - allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions - cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced - daily_quota_gb = var.log_analytics_workspace_daily_quota_gb - data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id - immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled - internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled - internet_query_enabled = var.log_analytics_workspace_internet_query_enabled - local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled - reservation_capacity_in_gb_per_day = 
var.log_analytics_workspace_reservation_capacity_in_gb_per_day - retention_in_days = var.log_retention_in_days - sku = var.log_analytics_workspace_sku - tags = var.tags - - dynamic "identity" { - for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity] - - content { - type = identity.value.type - identity_ids = identity.value.identity_ids - } - } - - lifecycle { - precondition { - condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix)) - error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`." - } - } -} - -locals { - azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null) - azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null) - azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null) - azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null) -} - -data "azurerm_log_analytics_workspace" "main" { - count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0 - - name = var.log_analytics_workspace.name - resource_group_name = local.log_analytics_workspace.resource_group_name -} - -resource "azurerm_log_analytics_solution" "main" { - count = local.create_analytics_solution ? 
1 : 0 - - location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null)) - resource_group_name = local.log_analytics_workspace.resource_group_name - solution_name = "ContainerInsights" - workspace_name = local.log_analytics_workspace.name - workspace_resource_id = local.log_analytics_workspace.id - tags = var.tags - - plan { - product = "OMSGallery/ContainerInsights" - publisher = "Microsoft" - } -} - -locals { - dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null) -} - -resource "azurerm_monitor_data_collection_rule" "dcr" { - count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 - - location = local.dcr_location - name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}" - resource_group_name = var.resource_group_name - description = "DCR for Azure Monitor Container Insights" - tags = var.tags - - data_flow { - destinations = [local.log_analytics_workspace.name] - streams = var.monitor_data_collection_rule_extensions_streams - } - data_flow { - destinations = [local.log_analytics_workspace.name] - streams = ["Microsoft-Syslog"] - } - destinations { - log_analytics { - name = local.log_analytics_workspace.name - workspace_resource_id = local.log_analytics_workspace.id - } - } - data_sources { - extension { - extension_name = "ContainerInsights" - name = "ContainerInsightsExtension" - streams = var.monitor_data_collection_rule_extensions_streams - extension_json = jsonencode({ - "dataCollectionSettings" : { - interval = var.data_collection_settings.data_collection_interval - namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection - namespaces = var.data_collection_settings.namespaces_for_data_collection - enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled - } 
- }) - } - syslog { - facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities - log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels - name = "sysLogsDataSource" - streams = ["Microsoft-Syslog"] - } - } -} - -resource "azurerm_monitor_data_collection_rule_association" "dcra" { - count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 - - target_resource_id = azurerm_kubernetes_cluster.main.id - data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id - description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster." - name = "ContainerInsightsExtension" -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf deleted file mode 100644 index 0a8dc8e59..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf +++ /dev/null @@ -1,741 +0,0 @@ -moved { - from = module.ssh-key.tls_private_key.ssh - to = tls_private_key.ssh[0] -} - -resource "tls_private_key" "ssh" { - count = var.admin_username == null ? 0 : 1 - - algorithm = "RSA" - rsa_bits = 2048 -} - -resource "azurerm_kubernetes_cluster" "main" { - location = var.location - name = "${local.cluster_name}${var.cluster_name_random_suffix ? 
substr(md5(uuid()), 0, 4) : ""}" - resource_group_name = var.resource_group_name - azure_policy_enabled = var.azure_policy_enabled - cost_analysis_enabled = var.cost_analysis_enabled - disk_encryption_set_id = var.disk_encryption_set_id - dns_prefix = var.prefix - dns_prefix_private_cluster = var.dns_prefix_private_cluster - image_cleaner_enabled = var.image_cleaner_enabled - image_cleaner_interval_hours = var.image_cleaner_interval_hours - kubernetes_version = var.kubernetes_version - local_account_disabled = var.local_account_disabled - node_resource_group = var.node_resource_group - oidc_issuer_enabled = var.oidc_issuer_enabled - open_service_mesh_enabled = var.open_service_mesh_enabled - private_cluster_enabled = var.private_cluster_enabled - private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled - private_dns_zone_id = var.private_dns_zone_id - role_based_access_control_enabled = var.role_based_access_control_enabled - run_command_enabled = var.run_command_enabled - sku_tier = var.sku_tier - support_plan = var.support_plan - tags = var.tags - workload_identity_enabled = var.workload_identity_enabled - - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] - - content { - name = var.agents_pool_name - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = null - max_pods = var.agents_max_pods - min_count = null - node_count = var.agents_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = try(var.pod_subnet.id, null) - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vm_size = var.agents_size - vnet_subnet_id = try(var.vnet_subnet.id, null) - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "node_network_profile" { - for_each = var.node_network_profile == null ? [] : [var.node_network_profile] - - content { - application_security_group_ids = node_network_profile.value.application_security_group_ids - node_public_ip_tags = node_network_profile.value.node_public_ip_tags - - dynamic "allowed_host_ports" { - for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports - - content { - port_end = allowed_host_ports.value.port_end - port_start = allowed_host_ports.value.port_start - protocol = allowed_host_ports.value.protocol - } - } - } - } - dynamic "upgrade_settings" { - for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = var.agents_pool_max_surge - drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes - node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes - } - } - } - } - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] - - content { - name = var.agents_pool_name - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = var.agents_max_count - max_pods = var.agents_max_pods - min_count = var.agents_min_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = try(var.pod_subnet.id, null) - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vm_size = var.agents_size - vnet_subnet_id = try(var.vnet_subnet.id, null) - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "upgrade_settings" { - for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = var.agents_pool_max_surge - drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes - node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes - } - } - } - } - dynamic "aci_connector_linux" { - for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] - - content { - subnet_name = var.aci_connector_linux_subnet_name - } - } - dynamic "api_server_access_profile" { - for_each = var.api_server_authorized_ip_ranges != null ? [ - "api_server_access_profile" - ] : [] - - content { - authorized_ip_ranges = var.api_server_authorized_ip_ranges - } - } - dynamic "auto_scaler_profile" { - for_each = var.auto_scaler_profile_enabled ? 
["default_auto_scaler_profile"] : [] - - content { - balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups - empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max - expander = var.auto_scaler_profile_expander - max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec - max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time - max_unready_nodes = var.auto_scaler_profile_max_unready_nodes - max_unready_percentage = var.auto_scaler_profile_max_unready_percentage - new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay - scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add - scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete - scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure - scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded - scale_down_unready = var.auto_scaler_profile_scale_down_unready - scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold - scan_interval = var.auto_scaler_profile_scan_interval - skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage - skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods - } - } - dynamic "azure_active_directory_role_based_access_control" { - for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : [] - - content { - admin_group_object_ids = var.rbac_aad_admin_group_object_ids - azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled - managed = true - tenant_id = var.rbac_aad_tenant_id - } - } - dynamic "confidential_computing" { - for_each = var.confidential_computing == null ? 
[] : [var.confidential_computing] - - content { - sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled - } - } - dynamic "http_proxy_config" { - for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"] - - content { - http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy) - https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy) - no_proxy = var.http_proxy_config.no_proxy - trusted_ca = var.http_proxy_config.trusted_ca - } - } - dynamic "identity" { - for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : [] - - content { - type = var.identity_type - identity_ids = var.identity_ids - } - } - dynamic "ingress_application_gateway" { - for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : [] - - content { - gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null) - gateway_name = try(var.green_field_application_gateway_for_ingress.name, null) - subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null) - subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null) - } - } - dynamic "key_management_service" { - for_each = var.kms_enabled ? ["key_management_service"] : [] - - content { - key_vault_key_id = var.kms_key_vault_key_id - key_vault_network_access = var.kms_key_vault_network_access - } - } - dynamic "key_vault_secrets_provider" { - for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] - - content { - secret_rotation_enabled = var.secret_rotation_enabled - secret_rotation_interval = var.secret_rotation_interval - } - } - dynamic "kubelet_identity" { - for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] - - content { - client_id = kubelet_identity.value.client_id - object_id = kubelet_identity.value.object_id - user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id - } - } - dynamic "linux_profile" { - for_each = var.admin_username == null ? [] : ["linux_profile"] - - content { - admin_username = var.admin_username - - ssh_key { - key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "") - } - } - } - dynamic "maintenance_window" { - for_each = var.maintenance_window != null ? ["maintenance_window"] : [] - - content { - dynamic "allowed" { - for_each = var.maintenance_window.allowed - - content { - day = allowed.value.day - hours = allowed.value.hours - } - } - dynamic "not_allowed" { - for_each = var.maintenance_window.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "maintenance_window_auto_upgrade" { - for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] - - content { - duration = maintenance_window_auto_upgrade.value.duration - frequency = maintenance_window_auto_upgrade.value.frequency - interval = maintenance_window_auto_upgrade.value.interval - day_of_month = maintenance_window_auto_upgrade.value.day_of_month - day_of_week = maintenance_window_auto_upgrade.value.day_of_week - start_date = maintenance_window_auto_upgrade.value.start_date - start_time = maintenance_window_auto_upgrade.value.start_time - utc_offset = maintenance_window_auto_upgrade.value.utc_offset - week_index = maintenance_window_auto_upgrade.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "maintenance_window_node_os" { - for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] - - content { - duration = maintenance_window_node_os.value.duration - frequency = maintenance_window_node_os.value.frequency - interval = maintenance_window_node_os.value.interval - day_of_month = maintenance_window_node_os.value.day_of_month - day_of_week = maintenance_window_node_os.value.day_of_week - start_date = maintenance_window_node_os.value.start_date - start_time = maintenance_window_node_os.value.start_time - utc_offset = maintenance_window_node_os.value.utc_offset - week_index = maintenance_window_node_os.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed - - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - dynamic "microsoft_defender" { - for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] - - content { - log_analytics_workspace_id = local.log_analytics_workspace.id - } - } - dynamic "monitor_metrics" { - for_each = var.monitor_metrics != null ? 
["monitor_metrics"] : [] - - content { - annotations_allowed = var.monitor_metrics.annotations_allowed - labels_allowed = var.monitor_metrics.labels_allowed - } - } - network_profile { - network_plugin = var.network_plugin - dns_service_ip = var.net_profile_dns_service_ip - ebpf_data_plane = var.ebpf_data_plane - ip_versions = var.network_ip_versions - load_balancer_sku = var.load_balancer_sku - network_data_plane = var.network_data_plane - network_mode = var.network_mode - network_plugin_mode = var.network_plugin_mode - network_policy = var.network_policy - outbound_type = var.net_profile_outbound_type - pod_cidr = var.net_profile_pod_cidr - pod_cidrs = var.net_profile_pod_cidrs - service_cidr = var.net_profile_service_cidr - service_cidrs = var.net_profile_service_cidrs - - dynamic "load_balancer_profile" { - for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ - "load_balancer_profile" - ] : [] - - content { - idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes - managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count - managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count - outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids - outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids - outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated - } - } - dynamic "nat_gateway_profile" { - for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile] - - content { - idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes - managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count - } - } - } - dynamic "oms_agent" { - for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? 
["oms_agent"] : [] - - content { - log_analytics_workspace_id = local.log_analytics_workspace.id - msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled - } - } - dynamic "service_mesh_profile" { - for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] - - content { - mode = var.service_mesh_profile.mode - external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled - internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled - } - } - dynamic "service_principal" { - for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] - - content { - client_id = var.client_id - client_secret = var.client_secret - } - } - dynamic "storage_profile" { - for_each = var.storage_profile_enabled ? ["storage_profile"] : [] - - content { - blob_driver_enabled = var.storage_profile_blob_driver_enabled - disk_driver_enabled = var.storage_profile_disk_driver_enabled - disk_driver_version = var.storage_profile_disk_driver_version - file_driver_enabled = var.storage_profile_file_driver_enabled - snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled - } - } - dynamic "web_app_routing" { - for_each = var.web_app_routing == null ? [] : ["web_app_routing"] - - content { - dns_zone_ids = var.web_app_routing.dns_zone_ids - } - } - dynamic "workload_autoscaler_profile" { - for_each = var.workload_autoscaler_profile == null ? 
[] : [var.workload_autoscaler_profile] - - content { - keda_enabled = workload_autoscaler_profile.value.keda_enabled - vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled - } - } - - depends_on = [ - null_resource.pool_name_keeper, - ] - - lifecycle { - ignore_changes = [ - http_application_routing_enabled, - http_proxy_config[0].no_proxy, - kubernetes_version, - # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. - name, - ] - replace_triggered_by = [ - null_resource.kubernetes_cluster_name_keeper.id - ] - - precondition { - condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "") - error_message = "Either `client_id` and `client_secret` or `identity_type` must be set." - } - precondition { - # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128 - condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0) - error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well." - } - precondition { - condition = var.identity_ids == null || var.client_id == "" - error_message = "Cannot set both `client_id` and `identity_ids`." - } - precondition { - condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium") - error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled." 
- } - precondition { - condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled) - error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true." - } - precondition { - condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard") - error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`" - } - precondition { - condition = local.automatic_channel_upgrade_check - error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`." - } - precondition { - condition = !(var.kms_enabled && var.identity_type != "UserAssigned") - error_message = "KMS etcd encryption doesn't work with system-assigned managed identity." - } - precondition { - condition = !var.workload_identity_enabled || var.oidc_issuer_enabled - error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity" - } - precondition { - condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure" - error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure." - } - precondition { - condition = var.network_policy != "azure" || var.network_plugin == "azure" - error_message = "network_policy must be `azure` when network_plugin is `azure`" - } - precondition { - condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure" - error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure." 
- } - precondition { - condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null - error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified." - } - precondition { - condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster)) - error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`." - } - precondition { - condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage" - error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`." - } - precondition { - condition = (var.kubelet_identity == null) || ( - (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0) - error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set." - } - precondition { - condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets" - error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes." - } - precondition { - condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null - error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`." - } - precondition { - condition = var.prefix == null || var.dns_prefix_private_cluster == null - error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." 
- } - precondition { - condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled - error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`." - } - precondition { - condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != "" - error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone" - } - precondition { - condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)])) - error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following format: `privatelink..azmk8s.io`, `.privatelink..azmk8s.io`, `private..azmk8s.io`, `.private..azmk8s.io`" - } - } -} - -resource "null_resource" "kubernetes_cluster_name_keeper" { - triggers = { - name = local.cluster_name - } -} - -resource "null_resource" "kubernetes_version_keeper" { - triggers = { - version = var.kubernetes_version - } -} - -resource "time_sleep" "interval_before_cluster_update" { - count = var.interval_before_cluster_update == null ? 
0 : 1 - - create_duration = var.interval_before_cluster_update - - depends_on = [ - azurerm_kubernetes_cluster.main, - ] - - lifecycle { - replace_triggered_by = [ - null_resource.kubernetes_version_keeper.id, - ] - } -} - -resource "azapi_update_resource" "aks_cluster_post_create" { - resource_id = azurerm_kubernetes_cluster.main.id - type = "Microsoft.ContainerService/managedClusters@2024-02-01" - body = { - properties = { - kubernetesVersion = var.kubernetes_version - } - } - - depends_on = [ - time_sleep.interval_before_cluster_update, - ] - - lifecycle { - ignore_changes = all - replace_triggered_by = [null_resource.kubernetes_version_keeper.id] - } -} - -resource "null_resource" "http_proxy_config_no_proxy_keeper" { - count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0 - - triggers = { - http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "") - } -} - -resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" { - count = can(var.http_proxy_config.no_proxy[0]) ? 
1 : 0 - - resource_id = azurerm_kubernetes_cluster.main.id - type = "Microsoft.ContainerService/managedClusters@2024-02-01" - body = { - properties = { - httpProxyConfig = { - noProxy = var.http_proxy_config.no_proxy - } - } - } - - depends_on = [azapi_update_resource.aks_cluster_post_create] - - lifecycle { - ignore_changes = all - replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id] - } -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf deleted file mode 100644 index a1f537658..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf +++ /dev/null @@ -1,6 +0,0 @@ -# tflint-ignore-file: azurerm_resource_tag - -resource "azurerm_kubernetes_cluster" "main" { - automatic_channel_upgrade = var.automatic_channel_upgrade - node_os_channel_upgrade = var.node_os_channel_upgrade -} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf deleted file mode 100644 index e3d37ce76..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf +++ /dev/null @@ -1,231 +0,0 @@ -output "aci_connector_linux" { - description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource." - value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null) -} - -output "aci_connector_linux_enabled" { - description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?" - value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0]) -} - -output "admin_client_certificate" { - description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." 
- sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "") -} - -output "admin_client_key" { - description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "") -} - -output "admin_cluster_ca_certificate" { - description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "") -} - -output "admin_host" { - description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "") -} - -output "admin_password" { - description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "") -} - -output "admin_username" { - description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "") -} - -output "aks_id" { - description = "The `azurerm_kubernetes_cluster`'s id." - value = azurerm_kubernetes_cluster.main.id -} - -output "aks_name" { - description = "The `azurerm_kubernetes_cluster`'s name." 
- value = azurerm_kubernetes_cluster.main.name -} - -output "azure_policy_enabled" { - description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)" - value = azurerm_kubernetes_cluster.main.azure_policy_enabled -} - -output "azurerm_log_analytics_workspace_id" { - description = "The id of the created Log Analytics workspace" - value = try(azurerm_log_analytics_workspace.main[0].id, null) -} - -output "azurerm_log_analytics_workspace_name" { - description = "The name of the created Log Analytics workspace" - value = try(azurerm_log_analytics_workspace.main[0].name, null) -} - -output "azurerm_log_analytics_workspace_primary_shared_key" { - description = "Specifies the workspace key of the log analytics workspace" - sensitive = true - value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null) -} - -output "client_certificate" { - description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate -} - -output "client_key" { - description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].client_key -} - -output "cluster_ca_certificate" { - description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." 
- sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate -} - -output "cluster_fqdn" { - description = "The FQDN of the Azure Kubernetes Managed Cluster." - value = azurerm_kubernetes_cluster.main.fqdn -} - -output "cluster_identity" { - description = "The `azurerm_kubernetes_cluster`'s `identity` block." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.identity[0], null) -} - -output "cluster_portal_fqdn" { - description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.portal_fqdn -} - -output "cluster_private_fqdn" { - description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.private_fqdn -} - -output "generated_cluster_private_ssh_key" { - description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format." - sensitive = true - value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null -} - -output "generated_cluster_public_ssh_key" { - description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)." - value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? 
(var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null -} - -output "host" { - description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].host -} - -output "http_application_routing_zone_name" { - description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing." - value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : "" -} - -output "ingress_application_gateway" { - description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block." - value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null) -} - -output "ingress_application_gateway_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?" - value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0]) -} - -output "key_vault_secrets_provider" { - description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block." - value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null) -} - -output "key_vault_secrets_provider_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?" - value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0]) -} - -output "kube_admin_config_raw" { - description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled." 
- sensitive = true - value = azurerm_kubernetes_cluster.main.kube_admin_config_raw -} - -output "kube_config_raw" { - description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config_raw -} - -output "kubelet_identity" { - description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block." - value = azurerm_kubernetes_cluster.main.kubelet_identity -} - -output "location" { - description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created." - value = azurerm_kubernetes_cluster.main.location -} - -output "network_profile" { - description = "The `azurerm_kubernetes_cluster`'s `network_profile` block" - value = azurerm_kubernetes_cluster.main.network_profile -} - -output "node_resource_group" { - description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.node_resource_group -} - -output "node_resource_group_id" { - description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster." - value = azurerm_kubernetes_cluster.main.node_resource_group_id -} - -output "oidc_issuer_url" { - description = "The OIDC issuer URL that is associated with the cluster." - value = azurerm_kubernetes_cluster.main.oidc_issuer_url -} - -output "oms_agent" { - description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument." - value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null) -} - -output "oms_agent_enabled" { - description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?" 
- value = can(azurerm_kubernetes_cluster.main.oms_agent[0]) -} - -output "open_service_mesh_enabled" { - description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." - value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled -} - -output "password" { - description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].password -} - -output "username" { - description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster." - sensitive = true - value = azurerm_kubernetes_cluster.main.kube_config[0].username -} - -output "web_app_routing_identity" { - description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object." 
- value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, []) -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf deleted file mode 100644 index e9601eaf0..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf +++ /dev/null @@ -1,126 +0,0 @@ -resource "azurerm_role_assignment" "acr" { - for_each = var.attached_acr_id_map - - principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id - scope = each.value - role_definition_name = "AcrPull" - skip_service_principal_aad_check = true -} - -# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity -data "azurerm_user_assigned_identity" "cluster_identity" { - count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 - - name = split("/", var.identity_ids[0])[8] - resource_group_name = split("/", var.identity_ids[0])[4] -} - -# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) -# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets -# used by the system node pool and by any additional node pools. -# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites -# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites -# https://github.com/Azure/terraform-azurerm-aks/issues/178 -resource "azurerm_role_assignment" "network_contributor" { - for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? 
local.subnets : {} - - principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) - scope = each.value.id - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = length(var.network_contributor_role_assigned_subnet_ids) == 0 - error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." - } - } -} - -resource "azurerm_role_assignment" "network_contributor_on_subnet" { - for_each = var.network_contributor_role_assigned_subnet_ids - - principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) - scope = each.value - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = !var.create_role_assignment_network_contributor - error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." - } - } -} - -data "azurerm_client_config" "this" {} - -data "azurerm_virtual_network" "application_gateway_vnet" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 - - name = local.existing_application_gateway_subnet_vnet_name - resource_group_name = local.existing_application_gateway_subnet_resource_group_name -} - -resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = data.azurerm_virtual_network.application_gateway_vnet[0].id - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress - error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`." - } - } -} - -resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2)) - role_definition_name = "Network Contributor" - - lifecycle { - precondition { - condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null) - error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`." - } - } -} - -resource "azurerm_role_assignment" "existing_application_gateway_contributor" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = var.brown_field_application_gateway_for_ingress.id - role_definition_name = "Contributor" - - lifecycle { - precondition { - condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress - error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." - } - } -} - -data "azurerm_resource_group" "ingress_gw" { - count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 - - name = local.existing_application_gateway_resource_group_for_ingress -} - -data "azurerm_resource_group" "aks_rg" { - count = var.create_role_assignments_for_application_gateway ? 1 : 0 - - name = var.resource_group_name -} - -resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { - count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 - - principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id - scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id - role_definition_name = "Reader" -} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile deleted file mode 100644 index 7f28c53a5..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile +++ /dev/null @@ -1,85 +0,0 @@ -REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" - -fmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash - -fumpt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash - -gosec: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash - -tffmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash - -tffmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash - -tfvalidatecheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash - -terrafmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash - -gofmtcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash - -golint: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash - -tflint: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash - -lint: golint tflint gosec - -checkovcheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash - -checkovplancheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash - -fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck - -pr-check: depscheck fmtcheck lint 
unit-test checkovcheck - -unit-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash - -e2e-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash - -version-upgrade-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash - -terrafmt: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash - -pre-commit: tffmt terrafmt depsensure fmt fumpt generate - -depsensure: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash - -depscheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash - -generate: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash - -gencheck: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash - -yor-tag: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash - -autofix: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash - -test: fmtcheck - @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash - @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash - -build-test: - curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash - -.PHONY: fmt fmtcheck pr-check \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf deleted file mode 100644 index c819f9b89..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf +++ /dev/null @@ -1,1601 +0,0 @@ -variable "location" { - type = string - description = "Location of cluster, if not defined it will be read from the resource-group" -} - -variable 
"resource_group_name" { - type = string - description = "The existing resource group name to use" -} - -variable "aci_connector_linux_enabled" { - type = bool - default = false - description = "Enable Virtual Node pool" -} - -variable "aci_connector_linux_subnet_name" { - type = string - default = null - description = "(Optional) aci_connector_linux subnet name" -} - -variable "admin_username" { - type = string - default = null - description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created." -} - -variable "agents_availability_zones" { - type = list(string) - default = null - description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." -} - -variable "agents_count" { - type = number - default = 2 - description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes." -} - -variable "agents_labels" { - type = map(string) - default = {} - description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." -} - -variable "agents_max_count" { - type = number - default = null - description = "Maximum number of nodes in a pool" -} - -variable "agents_max_pods" { - type = number - default = null - description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." 
-} - -variable "agents_min_count" { - type = number - default = null - description = "Minimum number of nodes in a pool" -} - -variable "agents_pool_drain_timeout_in_minutes" { - type = number - default = null - description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." -} - -variable "agents_pool_kubelet_configs" { - type = list(object({ - cpu_manager_policy = optional(string) - cpu_cfs_quota_enabled = optional(bool, true) - cpu_cfs_quota_period = optional(string) - image_gc_high_threshold = optional(number) - image_gc_low_threshold = optional(number) - topology_manager_policy = optional(string) - allowed_unsafe_sysctls = optional(set(string)) - container_log_max_size_mb = optional(number) - container_log_max_line = optional(number) - pod_max_pid = optional(number) - })) - default = [] - description = <<-EOT - list(object({ - cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. - cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. - cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. - image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. - image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. 
- topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. - allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. - container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. - container_log_max_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. - pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. - })) -EOT - nullable = false -} - -variable "agents_pool_linux_os_configs" { - type = list(object({ - sysctl_configs = optional(list(object({ - fs_aio_max_nr = optional(number) - fs_file_max = optional(number) - fs_inotify_max_user_watches = optional(number) - fs_nr_open = optional(number) - kernel_threads_max = optional(number) - net_core_netdev_max_backlog = optional(number) - net_core_optmem_max = optional(number) - net_core_rmem_default = optional(number) - net_core_rmem_max = optional(number) - net_core_somaxconn = optional(number) - net_core_wmem_default = optional(number) - net_core_wmem_max = optional(number) - net_ipv4_ip_local_port_range_min = optional(number) - net_ipv4_ip_local_port_range_max = optional(number) - net_ipv4_neigh_default_gc_thresh1 = optional(number) - net_ipv4_neigh_default_gc_thresh2 = optional(number) - net_ipv4_neigh_default_gc_thresh3 = optional(number) - net_ipv4_tcp_fin_timeout = optional(number) - net_ipv4_tcp_keepalive_intvl = optional(number) - net_ipv4_tcp_keepalive_probes = optional(number) - net_ipv4_tcp_keepalive_time = optional(number) - 
net_ipv4_tcp_max_syn_backlog = optional(number) - net_ipv4_tcp_max_tw_buckets = optional(number) - net_ipv4_tcp_tw_reuse = optional(bool) - net_netfilter_nf_conntrack_buckets = optional(number) - net_netfilter_nf_conntrack_max = optional(number) - vm_max_map_count = optional(number) - vm_swappiness = optional(number) - vm_vfs_cache_pressure = optional(number) - })), []) - transparent_huge_page_enabled = optional(string) - transparent_huge_page_defrag = optional(string) - swap_file_size_mb = optional(number) - })) - default = [] - description = <<-EOT - list(object({ - sysctl_configs = optional(list(object({ - fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. - fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. - fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. - fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. - kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. - net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. - net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. - net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. 
Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. - net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. - net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. 
Must be between `1` and `15`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. - net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. - vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. - vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. - vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. - })), []) - transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. - transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. 
Changing this forces a new resource to be created.
    swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
  }))
  EOT
  nullable    = false
}

variable "agents_pool_max_surge" {
  type        = string
  default     = "10%"
  description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade."
}

variable "agents_pool_name" {
  type        = string
  default     = "nodepool"
  description = "The default Azure AKS agentpool (nodepool) name."
  nullable    = false
}

variable "agents_pool_node_soak_duration_in_minutes" {
  type        = number
  default     = 0
  description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0."
}

variable "agents_proximity_placement_group_id" {
  type        = string
  default     = null
  description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created."
}

variable "agents_size" {
  type        = string
  default     = "Standard_D2s_v3"
  description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created."
}

variable "agents_tags" {
  type        = map(string)
  default     = {}
  description = "(Optional) A mapping of tags to assign to the Node Pool."
}

variable "agents_type" {
  type        = string
  default     = "VirtualMachineScaleSets"
  description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets."
}

variable "api_server_authorized_ip_ranges" {
  type        = set(string)
  default     = null
  description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes."
}

variable "attached_acr_id_map" {
  type        = map(string)
  default     = {}
  description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created."
  nullable    = false
}

variable "auto_scaler_profile_balance_similar_node_groups" {
  type        = bool
  default     = false
  description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`."
}

variable "auto_scaler_profile_empty_bulk_delete_max" {
  type        = number
  default     = 10
  description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`."
}

variable "auto_scaler_profile_enabled" {
  type        = bool
  default     = false
  description = "Enable configuring the auto scaler profile"
  nullable    = false
}

variable "auto_scaler_profile_expander" {
  type        = string
  default     = "random"
  description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`."

  validation {
    condition     = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander)
    error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`."
  }
}

variable "auto_scaler_profile_max_graceful_termination_sec" {
  type        = string
  default     = "600"
  description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`."
}

variable "auto_scaler_profile_max_node_provisioning_time" {
  type        = string
  default     = "15m"
  description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`."
}

variable "auto_scaler_profile_max_unready_nodes" {
  type        = number
  default     = 3
  description = "Maximum Number of allowed unready nodes. Defaults to `3`."
}

variable "auto_scaler_profile_max_unready_percentage" {
  type        = number
  default     = 45
  description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`."
}

variable "auto_scaler_profile_new_pod_scale_up_delay" {
  type        = string
  default     = "10s"
  description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`."
}

variable "auto_scaler_profile_scale_down_delay_after_add" {
  type        = string
  default     = "10m"
  description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`."
}

variable "auto_scaler_profile_scale_down_delay_after_delete" {
  type        = string
  default     = null
  description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`."
}

variable "auto_scaler_profile_scale_down_delay_after_failure" {
  type        = string
  default     = "3m"
  description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`."
}

variable "auto_scaler_profile_scale_down_unneeded" {
  type        = string
  default     = "10m"
  description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`."
}

variable "auto_scaler_profile_scale_down_unready" {
  type        = string
  default     = "20m"
  description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`."
}

variable "auto_scaler_profile_scale_down_utilization_threshold" {
  type        = string
  default     = "0.5"
  description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`."
}

variable "auto_scaler_profile_scan_interval" {
  type        = string
  default     = "10s"
  description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`."
}

variable "auto_scaler_profile_skip_nodes_with_local_storage" {
  type        = bool
  default     = true
  description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`."
}

variable "auto_scaler_profile_skip_nodes_with_system_pods" {
  type        = bool
  default     = true
  description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`."
}

variable "automatic_channel_upgrade" {
  type        = string
  default     = null
  description = <<-EOT
  (Optional) Defines the automatic upgrade channel for the AKS cluster.
  Possible values:
  * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
  * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

  By default, automatic upgrades are disabled.
  More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster
  EOT

  validation {
    condition = var.automatic_channel_upgrade == null ? true : contains([
      "patch", "stable", "rapid", "node-image"
    ], var.automatic_channel_upgrade)
    error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`."
  }
}

variable "azure_policy_enabled" {
  type        = bool
  default     = false
  description = "Enable Azure Policy Addon."
}

variable "brown_field_application_gateway_for_ingress" {
  type = object({
    id        = string
    subnet_id = string
  })
  default     = null
  description = <<-EOT
  [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
  * `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
  * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`.
  EOT
}

variable "client_id" {
  type        = string
  default     = ""
  description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment"
  nullable    = false
}

variable "client_secret" {
  type        = string
  default     = ""
  description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment"
  nullable    = false
  sensitive   = true
}

variable "cluster_log_analytics_workspace_name" {
  type        = string
  default     = null
  description = "(Optional) The name of the Analytics workspace"
}

variable "cluster_name" {
  type        = string
  default     = null
  description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)"
}

variable "cluster_name_random_suffix" {
  type        = bool
  default     = false
  description = "Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict."
  nullable    = false
}

variable "confidential_computing" {
  type = object({
    sgx_quote_helper_enabled = bool
  })
  default     = null
  description = "(Optional) Enable Confidential Computing."
}

variable "cost_analysis_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Enable Cost Analysis."
}

variable "create_monitor_data_collection_rule" {
  type        = bool
  default     = true
  description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`."
  nullable    = false
}

variable "create_role_assignment_network_contributor" {
  type        = bool
  default     = false
  description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster"
  nullable    = false
}

variable "create_role_assignments_for_application_gateway" {
  type        = bool
  default     = true
  description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`."
  nullable    = false
}

variable "data_collection_settings" {
  type = object({
    data_collection_interval                     = string
    namespace_filtering_mode_for_data_collection = string
    namespaces_for_data_collection               = list(string)
    container_log_v2_enabled                     = bool
  })
  default = {
    data_collection_interval                     = "1m"
    namespace_filtering_mode_for_data_collection = "Off"
    namespaces_for_data_collection               = ["kube-system", "gatekeeper-system", "azure-arc"]
    container_log_v2_enabled                     = true
  }
  description = <<-EOT
  `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
  `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
  `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
  `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
  See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1
  EOT
}

variable "default_node_pool_fips_enabled" {
  type        = bool
  default     = null
  description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created."
}

variable "disk_encryption_set_id" {
  type        = string
  default     = null
  description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created."
}

variable "dns_prefix_private_cluster" {
  type        = string
  default     = null
  description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created."
}

variable "ebpf_data_plane" {
  type        = string
  default     = null
  description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created."
}

variable "enable_auto_scaling" {
  type        = bool
  default     = false
  description = "Enable node pool autoscaling"
}

variable "enable_host_encryption" {
  type        = bool
  default     = false
  description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli"
}

variable "enable_node_public_ip" {
  type        = bool
  default     = false
  description = "(Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false."
}

variable "green_field_application_gateway_for_ingress" {
  type = object({
    name        = optional(string)
    subnet_cidr = optional(string)
    subnet_id   = optional(string)
  })
  default     = null
  description = <<-EOT
  [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
  * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
  * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
  * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
  EOT

  validation {
    condition     = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr)))
    error_message = "One of `subnet_cidr` and `subnet_id` must be specified."
  }
}

variable "http_proxy_config" {
  type = object({
    http_proxy  = optional(string)
    https_proxy = optional(string)
    no_proxy    = optional(list(string))
    trusted_ca  = optional(string)
  })
  default     = null
  description = <<-EOT
  optional(object({
    http_proxy = (Optional) The proxy address to be used when communicating over HTTP.
    https_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
    no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
    trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
  }))
  Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift.
  EOT

  validation {
    condition     = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy))
    error_message = "`http_proxy` and `https_proxy` cannot be both empty."
  }
}

variable "identity_ids" {
  type        = list(string)
  default     = null
  description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster."
}

variable "identity_type" {
  type        = string
  default     = "SystemAssigned"
  description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well."

  validation {
    condition     = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned"
    error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`"
  }
}

variable "image_cleaner_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Specifies whether Image Cleaner is enabled."
}

variable "image_cleaner_interval_hours" {
  type        = number
  default     = 48
  description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`."
}

variable "interval_before_cluster_update" {
  type        = string
  default     = "30s"
  description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update."
}

variable "key_vault_secrets_provider_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver"
  nullable    = false
}

variable "kms_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Enable Azure KeyVault Key Management Service."
  nullable    = false
}

variable "kms_key_vault_key_id" {
  type        = string
  default     = null
  description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier."
}

variable "kms_key_vault_network_access" {
  type        = string
  default     = "Public"
  description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`."

  validation {
    condition     = contains(["Private", "Public"], var.kms_key_vault_network_access)
    error_message = "Possible values are `Private` and `Public`"
  }
}

variable "kubelet_identity" {
  type = object({
    client_id                 = optional(string)
    object_id                 = optional(string)
    user_assigned_identity_id = optional(string)
  })
  default     = null
  description = <<-EOT
  - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
  - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
  - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
  EOT
}

variable "kubernetes_version" {
  type        = string
  default     = null
  description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region"
}

variable "load_balancer_profile_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`."
  nullable    = false
}

variable "load_balancer_profile_idle_timeout_in_minutes" {
  type        = number
  default     = 30
  description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive."
}

variable "load_balancer_profile_managed_outbound_ip_count" {
  type        = number
  default     = null
  description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive"
}

variable "load_balancer_profile_managed_outbound_ipv6_count" {
  type        = number
  default     = null
  description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature"
}

variable "load_balancer_profile_outbound_ip_address_ids" {
  type        = set(string)
  default     = null
  description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer."
}

variable "load_balancer_profile_outbound_ip_prefix_ids" {
  type        = set(string)
  default     = null
  description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer."
}

variable "load_balancer_profile_outbound_ports_allocated" {
  type        = number
  default     = 0
  description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`"
}

variable "load_balancer_sku" {
  type        = string
  default     = "standard"
  description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created."

  validation {
    condition     = contains(["basic", "standard"], var.load_balancer_sku)
    error_message = "Possible values are `basic` and `standard`"
  }
}

variable "local_account_disabled" {
  type        = bool
  default     = null
  description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information."
}

variable "log_analytics_solution" {
  type = object({
    id = string
  })
  default     = null
  description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution."

  validation {
    condition     = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != ""
    error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`."
  }
}

variable "log_analytics_workspace" {
  type = object({
    id                  = string
    name                = string
    location            = optional(string)
    resource_group_name = optional(string)
  })
  default     = null
  description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace."
}

variable "log_analytics_workspace_allow_resource_only_permissions" {
  type        = bool
  default     = null
  description = "(Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`."
}

variable "log_analytics_workspace_cmk_for_query_forced" {
  type        = bool
  default     = null
  description = "(Optional) Is Customer Managed Storage mandatory for query management?"
}

variable "log_analytics_workspace_daily_quota_gb" {
  type        = number
  default     = null
  description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted."
}

variable "log_analytics_workspace_data_collection_rule_id" {
  type        = string
  default     = null
  description = "(Optional) The ID of the Data Collection Rule to use for this workspace."
}

variable "log_analytics_workspace_enabled" {
  type        = bool
  default     = true
  description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard"
  nullable    = false
}

variable "log_analytics_workspace_identity" {
  type = object({
    identity_ids = optional(set(string))
    type         = string
  })
  default     = null
  description = <<-EOT
  - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
  - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field.
  EOT
}

variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" {
  type        = bool
  default     = null
  description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days."
}

variable "log_analytics_workspace_internet_ingestion_enabled" {
  type        = bool
  default     = null
  description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`."
}

variable "log_analytics_workspace_internet_query_enabled" {
  type        = bool
  default     = null
  description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`."
}

variable "log_analytics_workspace_local_authentication_disabled" {
  type        = bool
  default     = null
  description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`."
}

variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" {
  type        = number
  default     = null
  description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`."
}

variable "log_analytics_workspace_resource_group_name" {
  type        = string
  default     = null
  description = "(Optional) Resource group name to create azurerm_log_analytics_solution."
}

variable "log_analytics_workspace_sku" {
  type        = string
  default     = "PerGB2018"
  description = "The SKU (pricing level) of the Log Analytics workspace. For new subscriptions the SKU should be set to PerGB2018"
}

variable "log_retention_in_days" {
  type        = number
  default     = 30
  description = "The retention period for the logs in days"
}

variable "maintenance_window" {
  type = object({
    allowed = optional(list(object({
      day   = string
      hours = set(number)
    })), [
    ]),
    not_allowed = optional(list(object({
      end   = string
      start = string
    })), []),
  })
  default     = null
  description = "(Optional) Maintenance configuration of the managed cluster."
}

variable "maintenance_window_auto_upgrade" {
  type = object({
    day_of_month = optional(number)
    day_of_week  = optional(string)
    duration     = number
    frequency    = string
    interval     = number
    start_date   = optional(string)
    start_time   = optional(string)
    utc_offset   = optional(string)
    week_index   = optional(string)
    not_allowed = optional(set(object({
      end   = string
      start = string
    })))
  })
  default     = null
  description = <<-EOT
  - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
  - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
  - `duration` - (Required) The duration of the window for maintenance to run in hours.
  - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
  - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
  - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
  - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
  - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
  - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

  ---
  `not_allowed` block supports the following:
  - `end` - (Required) The end of a time span, formatted as an RFC3339 string.
  - `start` - (Required) The start of a time span, formatted as an RFC3339 string.
  EOT
}

variable "maintenance_window_node_os" {
  type = object({
    day_of_month = optional(number)
    day_of_week  = optional(string)
    duration     = number
    frequency    = string
    interval     = number
    start_date   = optional(string)
    start_time   = optional(string)
    utc_offset   = optional(string)
    week_index   = optional(string)
    not_allowed = optional(set(object({
      end   = string
      start = string
    })))
  })
  default     = null
  description = <<-EOT
  - `day_of_month` -

  - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
  - `duration` - (Required) The duration of the window for maintenance to run in hours.
  - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
  - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
  - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
  - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
  - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
  - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

  ---
  `not_allowed` block supports the following:
  - `end` - (Required) The end of a time span, formatted as an RFC3339 string.
  - `start` - (Required) The start of a time span, formatted as an RFC3339 string.
  EOT
}

variable "microsoft_defender_enabled" {
  type        = bool
  default     = false
  description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`."
  nullable    = false
}

variable "monitor_data_collection_rule_data_sources_syslog_facilities" {
  type        = list(string)
  default     = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"]
  description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog"
}

variable "monitor_data_collection_rule_data_sources_syslog_levels" {
  type        = list(string)
  default     = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"]
  description = "List of syslog levels"
}

variable "monitor_data_collection_rule_extensions_streams" {
  type        = list(any)
  default     = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"]
  description = "An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr"
}

variable "monitor_metrics" {
  type = object({
    annotations_allowed = optional(string)
    labels_allowed      = optional(string)
  })
  default     = null
  description = <<-EOT
  (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
  object({
    annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
    labels_allowed      = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
  })
  EOT
}

variable "msi_auth_for_monitoring_enabled" {
  type        = bool
  default     = null
  description = "(Optional) Is managed identity authentication for monitoring enabled?"
}

variable "nat_gateway_profile" {
  type = object({
    idle_timeout_in_minutes   = optional(number)
    managed_outbound_ip_count = optional(number)
  })
  default     = null
  description = <<-EOT
  `nat_gateway_profile` block supports the following:
  - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
  - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive.
  EOT
}

variable "net_profile_dns_service_ip" {
  type        = string
  default     = null
  description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created."
}

variable "net_profile_outbound_type" {
  type        = string
  default     = "loadBalancer"
  description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer."
}

variable "net_profile_pod_cidr" {
  type        = string
  default     = null
  description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created."
}

variable "net_profile_pod_cidrs" {
  type        = list(string)
  default     = null
  description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created."
}

variable "net_profile_service_cidr" {
  type        = string
  default     = null
  description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created."
}

variable "net_profile_service_cidrs" {
  type        = list(string)
  default     = null
  description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created."
}

variable "network_contributor_role_assigned_subnet_ids" {
  type        = map(string)
  default     = {}
  description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id"
  nullable    = false
}

variable "network_data_plane" {
  type        = string
  default     = null
  description = "(Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created."
}

variable "network_ip_versions" {
  type        = list(string)
  default     = null
  description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created."
}

variable "network_mode" {
  type        = string
  default     = null
  description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created."
}

variable "network_plugin" {
  type        = string
  default     = "kubenet"
  description = "Network plugin to use for networking."
  nullable    = false
}

variable "network_plugin_mode" {
  type        = string
  default     = null
  description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created."
}

variable "network_policy" {
  type        = string
  default     = null
  description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created."
}

variable "node_network_profile" {
  type = object({
    node_public_ip_tags            = optional(map(string))
    application_security_group_ids = optional(list(string))
    allowed_host_ports = optional(list(object({
      port_start = optional(number)
      port_end   = optional(number)
      protocol   = optional(string)
    })))
  })
  default     = null
  description = <<-EOT
  - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. ---- - An `allowed_host_ports` block supports the following: - - `port_start`: (Optional) Specifies the start of the port range. - - `port_end`: (Optional) Specifies the end of the port range. - - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. -EOT -} - -variable "node_os_channel_upgrade" { - type = string - default = null - description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." -} - -variable "node_pools" { - type = map(object({ - name = string - node_count = optional(number) - tags = optional(map(string)) - vm_size = string - host_group_id = optional(string) - capacity_reservation_group_id = optional(string) - custom_ca_trust_enabled = optional(bool) - enable_auto_scaling = optional(bool) - enable_host_encryption = optional(bool) - enable_node_public_ip = optional(bool) - eviction_policy = optional(string) - gpu_instance = optional(string) - kubelet_config = optional(object({ - cpu_manager_policy = optional(string) - cpu_cfs_quota_enabled = optional(bool) - cpu_cfs_quota_period = optional(string) - image_gc_high_threshold = optional(number) - image_gc_low_threshold = optional(number) - topology_manager_policy = optional(string) - allowed_unsafe_sysctls = optional(set(string)) - container_log_max_size_mb = optional(number) - container_log_max_files = optional(number) - pod_max_pid = optional(number) - })) - linux_os_config = optional(object({ - sysctl_config = optional(object({ - fs_aio_max_nr = optional(number) - fs_file_max = optional(number) - fs_inotify_max_user_watches = optional(number) - fs_nr_open = optional(number) - kernel_threads_max = optional(number) - net_core_netdev_max_backlog = optional(number) - net_core_optmem_max = optional(number) - 
net_core_rmem_default = optional(number) - net_core_rmem_max = optional(number) - net_core_somaxconn = optional(number) - net_core_wmem_default = optional(number) - net_core_wmem_max = optional(number) - net_ipv4_ip_local_port_range_min = optional(number) - net_ipv4_ip_local_port_range_max = optional(number) - net_ipv4_neigh_default_gc_thresh1 = optional(number) - net_ipv4_neigh_default_gc_thresh2 = optional(number) - net_ipv4_neigh_default_gc_thresh3 = optional(number) - net_ipv4_tcp_fin_timeout = optional(number) - net_ipv4_tcp_keepalive_intvl = optional(number) - net_ipv4_tcp_keepalive_probes = optional(number) - net_ipv4_tcp_keepalive_time = optional(number) - net_ipv4_tcp_max_syn_backlog = optional(number) - net_ipv4_tcp_max_tw_buckets = optional(number) - net_ipv4_tcp_tw_reuse = optional(bool) - net_netfilter_nf_conntrack_buckets = optional(number) - net_netfilter_nf_conntrack_max = optional(number) - vm_max_map_count = optional(number) - vm_swappiness = optional(number) - vm_vfs_cache_pressure = optional(number) - })) - transparent_huge_page_enabled = optional(string) - transparent_huge_page_defrag = optional(string) - swap_file_size_mb = optional(number) - })) - fips_enabled = optional(bool) - kubelet_disk_type = optional(string) - max_count = optional(number) - max_pods = optional(number) - message_of_the_day = optional(string) - mode = optional(string, "User") - min_count = optional(number) - node_network_profile = optional(object({ - node_public_ip_tags = optional(map(string)) - application_security_group_ids = optional(list(string)) - allowed_host_ports = optional(list(object({ - port_start = optional(number) - port_end = optional(number) - protocol = optional(string) - }))) - })) - node_labels = optional(map(string)) - node_public_ip_prefix_id = optional(string) - node_taints = optional(list(string)) - orchestrator_version = optional(string) - os_disk_size_gb = optional(number) - os_disk_type = optional(string, "Managed") - os_sku = optional(string) - 
os_type = optional(string, "Linux") - pod_subnet = optional(object({ - id = string - }), null) - priority = optional(string, "Regular") - proximity_placement_group_id = optional(string) - spot_max_price = optional(number) - scale_down_mode = optional(string, "Delete") - snapshot_id = optional(string) - ultra_ssd_enabled = optional(bool) - vnet_subnet = optional(object({ - id = string - }), null) - upgrade_settings = optional(object({ - drain_timeout_in_minutes = number - node_soak_duration_in_minutes = number - max_surge = string - })) - windows_profile = optional(object({ - outbound_nat_enabled = optional(bool, true) - })) - workload_runtime = optional(string) - zones = optional(set(string)) - create_before_destroy = optional(bool, true) - })) - default = {} - description = <<-EOT - A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: - map(object({ - name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. - node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. - tags = (Optional) A mapping of tags to assign to the resource. 
At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changess) until this is fixed in the AKS API. - vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. - host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. - capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. - custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information. - enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). - enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. - enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. - eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified. 
- gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. - kubelet_config = optional(object({ - cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. - cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. - cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. - image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. - image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. - topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. - allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. - container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. - container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. - pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
- })) - linux_os_config = optional(object({ - sysctl_config = optional(object({ - fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. - fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. - fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. - fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. - kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. - net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. - net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. - net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. - net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. - net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. - net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. - net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. - net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. - net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. - net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. - net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. - vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. - vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. - vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. - })) - transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. - transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. - swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. - })) - fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). - kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. - max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`. - max_pods = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. - message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. - mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. - min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. - node_network_profile = optional(object({ - node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. - application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. - allowed_host_ports = optional(object({ - port_start = (Optional) Specifies the start of the port range. - port_end = (Optional) Specifies the end of the port range. - protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. 
- })) - })) - node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. - node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. - node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. - orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. - os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. - os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. - os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. 
- os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. - pod_subnet = optional(object({ - id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. - })) - priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. - proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). - spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. - scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. - snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. - ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. 
- vnet_subnet = optional(object({ - id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet. - })) - upgrade_settings = optional(object({ - drain_timeout_in_minutes = number - node_soak_duration_in_minutes = number - max_surge = string - })) - windows_profile = optional(object({ - outbound_nat_enabled = optional(bool, true) - })) - workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) - zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. - create_before_destroy = (Optional) Create a new node pool before destroy the old one when Terraform must update an argument that cannot be updated in-place. Set this argument to `true` will add add a random suffix to pool's name to avoid conflict. Default to `true`. - })) - EOT - nullable = false -} - -variable "node_resource_group" { - type = string - default = null - description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created." -} - -variable "oidc_issuer_enabled" { - type = bool - default = false - description = "Enable or Disable the OIDC issuer URL. Defaults to false." -} - -variable "oms_agent_enabled" { - type = bool - default = true - description = "Enable OMS Agent Addon." - nullable = false -} - -variable "only_critical_addons_enabled" { - type = bool - default = null - description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. 
Changing this forces a new resource to be created." -} - -variable "open_service_mesh_enabled" { - type = bool - default = null - description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." -} - -variable "orchestrator_version" { - type = string - default = null - description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" -} - -variable "os_disk_size_gb" { - type = number - default = 50 - description = "Disk size of nodes in GBs." -} - -variable "os_disk_type" { - type = string - default = "Managed" - description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." - nullable = false -} - -variable "os_sku" { - type = string - default = null - description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." -} - -variable "pod_subnet" { - type = object({ - id = string - }) - default = null - description = <<-EOT - object({ - id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created. - }) -EOT -} - -variable "prefix" { - type = string - default = "" - description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. 
Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." -} - -variable "private_cluster_enabled" { - type = bool - default = false - description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet." -} - -variable "private_cluster_public_fqdn_enabled" { - type = bool - default = false - description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`." -} - -variable "private_dns_zone_id" { - type = string - default = null - description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created." -} - -variable "public_ssh_key" { - type = string - default = "" - description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created." -} - -variable "rbac_aad" { - type = bool - default = true - description = "(Optional) Is Azure Active Directory integration enabled?" - nullable = false -} - -variable "rbac_aad_admin_group_object_ids" { - type = list(string) - default = null - description = "Object ID of groups with admin access." -} - -variable "rbac_aad_azure_rbac_enabled" { - type = bool - default = null - description = "(Optional) Is Role Based Access Control based on Azure AD enabled?" -} - -variable "rbac_aad_tenant_id" { - type = string - default = null - description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used." -} - -variable "role_based_access_control_enabled" { - type = bool - default = false - description = "Enable Role Based Access Control." 
- nullable = false -} - -variable "run_command_enabled" { - type = bool - default = true - description = "(Optional) Whether to enable run command for the cluster or not." -} - -variable "scale_down_mode" { - type = string - default = "Delete" - description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." -} - -variable "secret_rotation_enabled" { - type = bool - default = false - description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`" - nullable = false -} - -variable "secret_rotation_interval" { - type = string - default = "2m" - description = "The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m`" - nullable = false -} - -variable "service_mesh_profile" { - type = object({ - mode = string - internal_ingress_gateway_enabled = optional(bool, true) - external_ingress_gateway_enabled = optional(bool, true) - }) - default = null - description = <<-EOT - `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. - `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. - `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. - EOT -} - -variable "sku_tier" { - type = string - default = "Free" - description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`" - - validation { - condition = contains(["Free", "Standard", "Premium"], var.sku_tier) - error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0." 
- } -} - -variable "snapshot_id" { - type = string - default = null - description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." -} - -variable "storage_profile_blob_driver_enabled" { - type = bool - default = false - description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`" -} - -variable "storage_profile_disk_driver_enabled" { - type = bool - default = true - description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`" -} - -variable "storage_profile_disk_driver_version" { - type = string - default = "v1" - description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`." -} - -variable "storage_profile_enabled" { - type = bool - default = false - description = "Enable storage profile" - nullable = false -} - -variable "storage_profile_file_driver_enabled" { - type = bool - default = true - description = "(Optional) Is the File CSI driver enabled? Defaults to `true`" -} - -variable "storage_profile_snapshot_controller_enabled" { - type = bool - default = true - description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`" -} - -variable "support_plan" { - type = string - default = "KubernetesOfficial" - description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." - - validation { - condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan) - error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`." 
- } -} - -variable "tags" { - type = map(string) - default = {} - description = "Any tags that should be present on the AKS cluster resources" -} - -variable "temporary_name_for_rotation" { - type = string - default = null - description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" -} - -variable "ultra_ssd_enabled" { - type = bool - default = false - description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." -} - -variable "vnet_subnet" { - type = object({ - id = string - }) - default = null - description = <<-EOT - object({ - id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. - }) -EOT -} - -variable "web_app_routing" { - type = object({ - dns_zone_ids = list(string) - }) - default = null - description = <<-EOT - object({ - dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list." - }) -EOT -} - -variable "workload_autoscaler_profile" { - type = object({ - keda_enabled = optional(bool, false) - vertical_pod_autoscaler_enabled = optional(bool, false) - }) - default = null - description = <<-EOT - `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads. - `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. -EOT -} - -variable "workload_identity_enabled" { - type = bool - default = false - description = "Enable or Disable Workload Identity. Defaults to false." 
-} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf deleted file mode 100644 index 7859b9fae..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf +++ /dev/null @@ -1,26 +0,0 @@ -terraform { - required_version = ">= 1.3" - - required_providers { - azapi = { - source = "Azure/azapi" - version = ">=2.0, < 3.0" - } - azurerm = { - source = "hashicorp/azurerm" - version = ">= 3.107.0" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" - } - time = { - source = "hashicorp/time" - version = ">= 0.5" - } - tls = { - source = "hashicorp/tls" - version = ">= 3.1" - } - } -} From 862fa01bffad08fb9c5af958e407612cd55bfbb4 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 12:18:17 +0530 Subject: [PATCH 22/36] fixes --- .../azure_aks/0.2/facets.yaml | 10 +++++++-- .../kubernetes_cluster/azure_aks/0.2/main.tf | 22 +++++++++++-------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 7b39f8908..8c6baf647 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -27,8 +27,13 @@ spec: kubernetes_version: type: string title: Kubernetes Version - description: Version of Kubernetes to use for the AKS cluster. + description: Version of Kubernetes to use for the AKS cluster. Only used + when auto-upgrade is disabled or using patch channel. default: '1.31' + x-ui-visible-if: + field: spec.auto_upgrade_settings.enable_auto_upgrade + values: + - false cluster_endpoint_public_access: type: boolean title: Cluster Endpoint Public Access @@ -109,7 +114,8 @@ spec: automatic_channel_upgrade: type: string title: Automatic Channel Upgrade - description: Auto-upgrade channel for the cluster. + description: Auto-upgrade channel for the cluster. 
Note - when using stable/rapid/node-image, + the Kubernetes version will be managed automatically by Azure. default: stable enum: - rapid diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index 03788d71f..bec25e96c 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -4,7 +4,7 @@ module "name" { environment = var.environment limit = 63 resource_name = var.instance_name - resource_type = "kubernetes_cluster" + resource_type = "k8s" globally_unique = true } @@ -18,11 +18,15 @@ module "k8scluster" { location = var.inputs.network_details.attributes.region # Basic cluster configuration - cluster_name = local.name - prefix = "" + cluster_name = local.name + prefix = local.name + node_resource_group = "MC_${local.name}" - # Kubernetes version - kubernetes_version = var.instance.spec.cluster.kubernetes_version + # Kubernetes version - only set when auto-upgrade is disabled or using patch channel + kubernetes_version = ( + var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && + contains(["stable", "rapid", "node-image"], var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade) + ) ? null : var.instance.spec.cluster.kubernetes_version # SKU tier sku_tier = var.instance.spec.cluster.sku_tier @@ -126,10 +130,10 @@ module "k8scluster" { var.instance.spec.tags != null ? 
var.instance.spec.tags : {} ) - # Disable local accounts for better security - local_account_disabled = true - - # Enable RBAC with Azure AD + # Azure AD and RBAC configuration rbac_aad = true rbac_aad_azure_rbac_enabled = true + + # Keep local accounts enabled for compatibility with client certificate auth + local_account_disabled = false } From 73f603bd008b60e1eb0ff3669eadefbc1b83b68f Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 13:10:56 +0530 Subject: [PATCH 23/36] added azurerm provider input --- modules/kubernetes_cluster/azure_aks/0.2/facets.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 8c6baf647..a2941ae45 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -312,6 +312,7 @@ inputs: optional: false providers: - azurerm + - azapi outputs: default: type: '@facets/azure_aks' From 475e03d1e6785372b649389f524dbc0866cb6e8c Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 13:25:49 +0530 Subject: [PATCH 24/36] fixes --- modules/kubernetes_cluster/azure_aks/0.2/versions.tf | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/versions.tf new file mode 100644 index 000000000..ee853a09f --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/versions.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + azapi = { + source = "Azure/azapi" + } + } +} \ No newline at end of file From b9451878a6326b641fec732f248ef6f673dd7127 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 13:47:48 +0530 Subject: [PATCH 25/36] removed validation for kubernetes version --- modules/kubernetes_cluster/azure_aks/0.2/variables.tf | 5 ----- 1 file changed, 5 deletions(-) 
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index a8e828a3b..a85d7b1c9 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -46,11 +46,6 @@ variable "instance" { }) }) - validation { - condition = contains(["1.29", "1.30", "1.31", "1.32"], var.instance.spec.cluster.kubernetes_version) - error_message = "Kubernetes version must be a supported version (1.29, 1.30, 1.31, or 1.32)." - } - validation { condition = contains(["Free", "Standard", "Premium"], var.instance.spec.cluster.sku_tier) error_message = "SKU tier must be one of: Free, Standard, Premium." From 428502d0c723e986b54567dec9b2e2267574f9c3 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 14:35:32 +0530 Subject: [PATCH 26/36] updated kubernetes outputs --- .../azure_aks/0.2/facets.yaml | 4 +-- .../azure_aks/0.2/outputs.tf | 30 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index a2941ae45..6f900cbae 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -323,10 +323,10 @@ outputs: source: hashicorp/kubernetes version: 2.17.0 attributes: - host: attributes.auth_host + host: attributes.cluster_endpoint client_certificate: attributes.client_certificate client_key: attributes.client_key - cluster_ca_certificate: attributes.auth_cluster_ca_certificate + cluster_ca_certificate: attributes.cluster_ca_certificate helm: source: hashicorp/helm version: 2.8.0 diff --git a/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf index 443d29afe..06c25ddb5 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf @@ -1,20 
+1,19 @@ locals { output_attributes = { - oidc_issuer_url = module.k8scluster.oidc_issuer_url - cluster_id = module.k8scluster.aks_id - cluster_name = module.k8scluster.aks_name - cluster_fqdn = module.k8scluster.cluster_fqdn - cluster_private_fqdn = module.k8scluster.cluster_private_fqdn - cluster_endpoint = module.k8scluster.host - cluster_location = module.k8scluster.location - cluster_sku_tier = var.instance.spec.cluster.sku_tier - cluster_kubernetes_version = var.instance.spec.cluster.kubernetes_version - node_resource_group = module.k8scluster.node_resource_group - resource_group_name = var.inputs.network_details.attributes.resource_group_name - auth_host = module.k8scluster.host - auth_cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate) - client_certificate = base64decode(module.k8scluster.client_certificate) - client_key = base64decode(module.k8scluster.client_key) + oidc_issuer_url = module.k8scluster.oidc_issuer_url + cluster_id = module.k8scluster.aks_id + cluster_name = module.k8scluster.aks_name + cluster_fqdn = module.k8scluster.cluster_fqdn + cluster_private_fqdn = module.k8scluster.cluster_private_fqdn + cluster_endpoint = module.k8scluster.host + cluster_location = module.k8scluster.location + kubernetes_version = var.instance.spec.cluster.kubernetes_version + node_resource_group = module.k8scluster.node_resource_group + resource_group_name = var.inputs.network_details.attributes.resource_group_name + cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate) + client_certificate = base64decode(module.k8scluster.client_certificate) + client_key = base64decode(module.k8scluster.client_key) + secrets = ["client_key", "client_certificate", "cluster_ca_certificate"] } output_interfaces = { kubernetes = { @@ -22,6 +21,7 @@ locals { client_key = base64decode(module.k8scluster.client_key) client_certificate = base64decode(module.k8scluster.client_certificate) cluster_ca_certificate = 
base64decode(module.k8scluster.cluster_ca_certificate) + secrets = ["client_key", "client_certificate", "cluster_ca_certificate"] } } } \ No newline at end of file From 0274b87162f01323bc03a695f981ba9488537471 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 17:02:26 +0530 Subject: [PATCH 27/36] updated kubernetes outputs --- modules/kubernetes_cluster/azure_aks/0.2/facets.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 6f900cbae..2dc93a5a8 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -332,18 +332,18 @@ outputs: version: 2.8.0 attributes: kubernetes: - host: attributes.auth_host + host: attributes.cluster_endpoint client_certificate: attributes.client_certificate client_key: attributes.client_key - cluster_ca_certificate: attributes.auth_cluster_ca_certificate + cluster_ca_certificate: attributes.cluster_ca_certificate kubernetes-alpha: source: hashicorp/kubernetes-alpha version: 0.6.0 attributes: - host: attributes.auth_host + host: attributes.cluster_endpoint client_certificate: attributes.client_certificate client_key: attributes.client_key - cluster_ca_certificate: attributes.auth_cluster_ca_certificate + cluster_ca_certificate: attributes.cluster_ca_certificate sample: kind: kubernetes_cluster flavor: azure_aks_cluster From 3a3609e8f50968e9dc2bd162fb73f0eac8df627a Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 17:26:23 +0530 Subject: [PATCH 28/36] removing unsupported fields --- .../azure_aks/0.2/facets.yaml | 49 +------------------ .../kubernetes_cluster/azure_aks/0.2/main.tf | 13 ++++- .../azure_aks/0.2/variables.tf | 30 ++++++------ 3 files changed, 27 insertions(+), 65 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml 
b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 2dc93a5a8..5dc46db50 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -15,7 +15,6 @@ spec: - cluster - auto_upgrade_settings - node_pools - - features - tags properties: cluster: @@ -39,13 +38,11 @@ spec: title: Cluster Endpoint Public Access description: Whether the AKS public API server endpoint is enabled. default: true - x-ui-overrides-only: true cluster_endpoint_private_access: type: boolean title: Cluster Endpoint Private Access description: Whether the AKS private API server endpoint is enabled. default: false - x-ui-overrides-only: true cluster_endpoint_public_access_cidrs: type: array title: Cluster Endpoint Public Access CIDRs @@ -53,14 +50,14 @@ spec: endpoint. default: - 0.0.0.0/0 - x-ui-overrides-only: true + x-ui-override-disable: true cluster_endpoint_private_access_cidrs: type: array title: Cluster Endpoint Private Access CIDRs description: List of CIDR blocks which can access the AKS private API server endpoint. default: [] - x-ui-overrides-only: true + x-ui-override-disable: true cluster_enabled_log_types: type: array title: Cluster Enabled Log Types @@ -75,15 +72,6 @@ spec: - authenticator - controllerManager - scheduler - default_reclaim_policy: - type: string - title: Default Reclaim Policy - description: Default reclaim policy for the AKS cluster. - default: Delete - x-ui-overrides-only: true - enum: - - Delete - - Retain sku_tier: type: string title: SKU Tier @@ -92,12 +80,6 @@ spec: enum: - Free - Standard - storage_account_last_access_time_enabled: - type: boolean - title: Storage Account Last Access Time Enabled - description: Enable last access time tracking for storage account. 
- default: true - x-ui-overrides-only: true required: - kubernetes_version auto_upgrade_settings: @@ -269,33 +251,6 @@ spec: field: spec.node_pools.system_np.enabled values: - true - features: - type: object - title: Features - description: Additional cluster features. - x-ui-toggle: false - properties: - enable_agic: - type: boolean - title: Enable AGIC - description: Enable Application Gateway Ingress Controller. - default: false - enable_overprovisioner: - type: boolean - title: Enable Overprovisioner - description: Enable cluster overprovisioner for better resource management. - default: true - overprovisioner_replicas: - type: integer - title: Overprovisioner Replicas - description: Number of overprovisioner replicas. - default: 1 - minimum: 0 - maximum: 10 - x-ui-visible-if: - field: spec.features.enable_overprovisioner - values: - - true required: - cluster inputs: diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index bec25e96c..b3d1816d4 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -41,8 +41,9 @@ module "k8scluster" { net_profile_dns_service_ip = "10.254.0.254" # Private cluster configuration - private_cluster_enabled = !var.instance.spec.cluster.cluster_endpoint_public_access - api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access ? var.instance.spec.cluster.cluster_endpoint_public_access_cidrs : null + private_cluster_enabled = var.instance.spec.cluster.cluster_endpoint_private_access + private_cluster_public_fqdn_enabled = var.instance.spec.cluster.cluster_endpoint_public_access + api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access ? 
var.instance.spec.cluster.cluster_endpoint_public_access_cidrs : null # Node pool configuration agents_count = var.instance.spec.node_pools.system_np.node_count @@ -98,6 +99,14 @@ module "k8scluster" { name = split("/", var.inputs.network_details.attributes.log_analytics_workspace_id)[8] } : null + # Enable AKS cluster logging + log_analytics_solution = var.inputs.network_details.attributes.log_analytics_workspace_id != null && length(var.instance.spec.cluster.cluster_enabled_log_types) > 0 ? { + enabled = true + id = var.inputs.network_details.attributes.log_analytics_workspace_id + log_analytics_workspace_id = var.inputs.network_details.attributes.log_analytics_workspace_id + log_retention_in_days = 30 + } : null + # Auto-scaler profile configuration auto_scaler_profile_enabled = var.instance.spec.node_pools.system_np.enable_auto_scaling auto_scaler_profile_balance_similar_node_groups = false diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index a85d7b1c9..17e13857a 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -6,15 +6,13 @@ variable "instance" { version = string spec = object({ cluster = object({ - kubernetes_version = string - cluster_endpoint_public_access = optional(bool, true) - cluster_endpoint_private_access = optional(bool, false) - cluster_endpoint_public_access_cidrs = optional(list(string), ["0.0.0.0/0"]) - cluster_endpoint_private_access_cidrs = optional(list(string), []) - cluster_enabled_log_types = optional(list(string), []) - default_reclaim_policy = optional(string, "Delete") - sku_tier = optional(string, "Free") - storage_account_last_access_time_enabled = optional(bool, true) + kubernetes_version = string + cluster_endpoint_public_access = optional(bool, true) + cluster_endpoint_private_access = optional(bool, false) + cluster_endpoint_public_access_cidrs = optional(list(string), 
["0.0.0.0/0"]) + cluster_endpoint_private_access_cidrs = optional(list(string), []) + cluster_enabled_log_types = optional(list(string), []) + sku_tier = optional(string, "Free") }) auto_upgrade_settings = object({ enable_auto_upgrade = optional(bool, true) @@ -37,18 +35,18 @@ variable "instance" { enable_auto_scaling = optional(bool, false) }) }) - features = optional(object({ - enable_agic = optional(bool, false) - enable_overprovisioner = optional(bool, true) - overprovisioner_replicas = optional(number, 1) - }), {}) tags = optional(map(string), {}) }) }) validation { - condition = contains(["Free", "Standard", "Premium"], var.instance.spec.cluster.sku_tier) - error_message = "SKU tier must be one of: Free, Standard, Premium." + condition = contains(["1.29", "1.30", "1.31", "1.32"], var.instance.spec.cluster.kubernetes_version) + error_message = "Kubernetes version must be a supported version (1.29, 1.30, 1.31, or 1.32)." + } + + validation { + condition = contains(["Free", "Standard"], var.instance.spec.cluster.sku_tier) + error_message = "SKU tier must be one of: Free, Standard." 
} validation { From a4ad34c7eded22165b2df078c9bd4ae3be996959 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 17:42:07 +0530 Subject: [PATCH 29/36] commited module locally --- .../azure_aks/0.2/.terraform.lock.hcl | 102 ++ .../azure_aks/0.2/facets.yaml | 8 + .../0.2/k8scluster/.checkov_config.yaml | 30 + .../0.2/k8scluster/CODE_OF_CONDUCT.md | 5 + .../azure_aks/0.2/k8scluster/GNUmakefile | 4 + .../azure_aks/0.2/k8scluster/LICENSE | 21 + .../azure_aks/0.2/k8scluster/README.md | 490 +++++ .../azure_aks/0.2/k8scluster/SECURITY.md | 41 + .../0.2/k8scluster/extra_node_pool.tf | 317 ++++ .../k8scluster/extra_node_pool_override.tf | 17 + .../azure_aks/0.2/k8scluster/locals.tf | 74 + .../azure_aks/0.2/k8scluster/log_analytics.tf | 124 ++ .../azure_aks/0.2/k8scluster/main.tf | 741 ++++++++ .../azure_aks/0.2/k8scluster/main_override.tf | 6 + .../azure_aks/0.2/k8scluster/outputs.tf | 231 +++ .../0.2/k8scluster/role_assignments.tf | 126 ++ .../azure_aks/0.2/k8scluster/tfvmmakefile | 85 + .../0.2/k8scluster/v4/extra_node_pool.tf | 1 + .../k8scluster/v4/extra_node_pool_override.tf | 15 + .../azure_aks/0.2/k8scluster/v4/locals.tf | 1 + .../0.2/k8scluster/v4/log_analytics.tf | 1 + .../azure_aks/0.2/k8scluster/v4/main.tf | 1 + .../0.2/k8scluster/v4/main_override.tf | 307 ++++ .../azure_aks/0.2/k8scluster/v4/outputs.tf | 1 + .../0.2/k8scluster/v4/role_assignments.tf | 1 + .../0.2/k8scluster/v4/v4_variables.tf | 11 + .../azure_aks/0.2/k8scluster/v4/variables.tf | 1 + .../0.2/k8scluster/v4/variables_override.tf | 231 +++ .../azure_aks/0.2/k8scluster/v4/versions.tf | 1 + .../0.2/k8scluster/v4/versions_override.tf | 20 + .../azure_aks/0.2/k8scluster/variables.tf | 1601 +++++++++++++++++ .../azure_aks/0.2/k8scluster/versions.tf | 26 + .../kubernetes_cluster/azure_aks/0.2/main.tf | 3 +- .../azure_aks/0.2/variables.tf | 5 - 34 files changed, 4642 insertions(+), 7 deletions(-) create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl create mode 100644 
modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf create 
mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf create mode 120000 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf create mode 100644 modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf diff --git a/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl b/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl new file mode 100644 index 000000000..8f6a2d044 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl @@ -0,0 +1,102 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/azure/azapi" { + version = "2.5.0" + constraints = ">= 2.0.0, < 3.0.0" + hashes = [ + "h1:/jBZRd/dvuFm1CxS+WKHuXm4H++fkUHAvoZdlm4oScw=", + "zh:24a2ebfec4a79b7ef122e07afe6ddda51ce91b3dbed7007757b9c53511976396", + "zh:2a26868f8243666b6d0cd29ea5d1b1ecfedb1bbccc3801d383ab7a3f7930aa69", + "zh:4c3ced3ce1f937dc5bcea61f638eee9134570af6cbe7e4db7c60d9bb517da689", + "zh:52acef0e6d57ed6e98a4ae587449071c91069d59387912f69ec008dfb48fd3bd", + "zh:658bc2e92374ca941a79df0e343599a7e8819a0ff5a5f47957acbf4ee524c751", + "zh:68e66d8b5ceb581a1919782492b7caf57334ea07a982ee4c440d92bb5af2b0b8", + "zh:94779341444106af22b5ca823e371c97480b17d1765aee236c81b4b4e96bbaa4", + "zh:9bbddd9312a8e0819c2262d640a6852be64414347f068ffe4513c09b36b7cfe7", + "zh:9d0319cf08c0aebfc20f88fd63aec39361d7b7044bf47296d97543daa7727344", + "zh:b201c491da70270299e1cfc40e26b01290dbe9ee8e36e12fa8a6b63393092803", + "zh:d0593258943e546c8c241b12232ab1e39e1741aebd7a02e4abfe910424a1d36b", + "zh:d489a31c3d1ee18111a6238484d1636a483024fa43c19468394ec8ec214ef503", + ] +} + +provider "registry.terraform.io/hashicorp/azurerm" { + version = "4.38.1" + constraints = ">= 4.16.0, < 5.0.0" + hashes = [ + "h1:MV45L+OKw7+UrxI9SczAllqrAZjQomKKJbhSwIlkmYA=", + "zh:2d4085678cad85782b0097d2f1d03d96862deb3684b14a1125bd46b36091fd30", + "zh:3fe8037a4e94bc694caca4a68c0e15436dedc91b70aa95a06e2770e3e8dde6df", + "zh:4178b3783fca42ebac4435db3531bd23069723caf1509b0e915c524a4dee25d3", + "zh:61c6d21e854696c1c6d3fadce9aa9ab433e9a8791340760f9e8fdd1327d1a391", + "zh:8ef26b97aed168b7b91b868c1e4493a79cdbdc4ecb987f0e2a4e402ab6cb2474", + "zh:b4b1edfb49a36a109c69d661bb26b961fcdf50058690deed9d906c09254e5c1a", + "zh:b5e07b1c160cf0cefc2e4bc8d2b4c0a382dd76513797dc70b0c2fd3bee7b8495", + "zh:b87029f89e7d445c85ee7a8940f4a2740a745124802c461d1e51cd8b11d7c106", + "zh:c21d488f12aa6750f4525fc120b1405dd1a37f0b59586960e78beeb0e4fffcca", + "zh:cd1402d0e004e23c2ee36744fa26d4daafa291a05d5410b7beca6dc8c30857ba", + 
"zh:e8a7eb3d937d27e779ae426ac9f4529bdc7053634f219df8c76b2b8180fbed71", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/null" { + version = "3.2.4" + constraints = ">= 3.0.0" + hashes = [ + "h1:L5V05xwp/Gto1leRryuesxjMfgZwjb7oool4WS1UEFQ=", + "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", + "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", + "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", + "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", + "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", + "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", + "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", + "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", + "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", + "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.13.1" + constraints = ">= 0.5.0" + hashes = [ + "h1:ZT5ppCNIModqk3iOkVt5my8b8yBHmDpl663JtXAIRqM=", + "zh:02cb9aab1002f0f2a94a4f85acec8893297dc75915f7404c165983f720a54b74", + "zh:04429b2b31a492d19e5ecf999b116d396dac0b24bba0d0fb19ecaefe193fdb8f", + "zh:26f8e51bb7c275c404ba6028c1b530312066009194db721a8427a7bc5cdbc83a", + "zh:772ff8dbdbef968651ab3ae76d04afd355c32f8a868d03244db3f8496e462690", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:898db5d2b6bd6ca5457dccb52eedbc7c5b1a71e4a4658381bcbb38cedbbda328", + "zh:8de913bf09a3fa7bedc29fec18c47c571d0c7a3d0644322c46f3aa648cf30cd8", + "zh:9402102c86a87bdfe7e501ffbb9c685c32bbcefcfcf897fd7d53df414c36877b", + 
"zh:b18b9bb1726bb8cfbefc0a29cf3657c82578001f514bcf4c079839b6776c47f0", + "zh:b9d31fdc4faecb909d7c5ce41d2479dd0536862a963df434be4b16e8e4edc94d", + "zh:c951e9f39cca3446c060bd63933ebb89cedde9523904813973fbc3d11863ba75", + "zh:e5b773c0d07e962291be0e9b413c7a22c044b8c7b58c76e8aa91d1659990dfb5", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "4.1.0" + constraints = ">= 3.1.0" + hashes = [ + "h1:zEv9tY1KR5vaLSyp2lkrucNJ+Vq3c+sTFK9GyQGLtFs=", + "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", + "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", + "zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", + "zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc", + "zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac", + "zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882", + "zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d", + "zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298", + "zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297", + "zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54", + ] +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 5dc46db50..44f1fa968 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -51,6 +51,10 @@ spec: default: - 0.0.0.0/0 x-ui-override-disable: true + x-ui-visible-if: + field: spec.cluster.cluster_endpoint_public_access + values: + - true cluster_endpoint_private_access_cidrs: type: array title: Cluster Endpoint Private Access CIDRs @@ -58,6 +62,10 @@ spec: endpoint. 
default: [] x-ui-override-disable: true + x-ui-visible-if: + field: spec.cluster.cluster_endpoint_private_access + values: + - true cluster_enabled_log_types: type: array title: Cluster Enabled Log Types diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml new file mode 100644 index 000000000..b39c33402 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/.checkov_config.yaml @@ -0,0 +1,30 @@ +block-list-secret-scan: [] +branch: master +directory: + - ./ +download-external-modules: false +evaluate-variables: true +external-modules-download-path: .external_modules +framework: + - all +quiet: true +secrets-scan-file-type: [] +skip-check: + - CKV_GHA_3 + - CKV_AZURE_5 + - CKV_AZURE_6 + - CKV_AZURE_112 + - CKV_AZURE_115 + - CKV_AZURE_116 + - CKV_AZURE_168 + - CKV_AZURE_170 + - CKV_AZURE_139 + - CKV_AZURE_165 + - CKV_AZURE_166 + - CKV_AZURE_164 +skip-framework: + - dockerfile + - kubernetes +skip-path: + - test/vendor +summary-position: top diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..af8b0207d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +This code of conduct outlines expectations for participation in Microsoft-managed open source communities, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. 
+ +Please read the full text at [https://opensource.microsoft.com/codeofconduct/](https://opensource.microsoft.com/codeofconduct/) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile new file mode 100644 index 000000000..3db7ccd9d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/GNUmakefile @@ -0,0 +1,4 @@ +SHELL := /bin/bash + +$(shell curl -H 'Cache-Control: no-cache, no-store' -sSL "https://raw.githubusercontent.com/Azure/tfmod-scaffold/refs/heads/main/GNUmakefile" -o tfvmmakefile) +-include tfvmmakefile \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE new file mode 100644 index 000000000..21071075c --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md new file mode 100644 index 000000000..e754e5a7f --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/README.md @@ -0,0 +1,490 @@ +# terraform-azurerm-aks + +## Deploys a Kubernetes cluster (AKS) on Azure with monitoring support through Azure Log Analytics + +This Terraform module deploys a Kubernetes cluster on Azure using AKS (Azure Kubernetes Service) and adds support for monitoring with Log Analytics. + +-> **NOTE:** If you have not assigned `client_id` or `client_secret`, A `SystemAssigned` identity will be created. + +-> **NOTE:** If you're using AzureRM `v4`, you can use this module by setting `source` to `Azure/aks/azurerm//v4`. + +## Notice on breaking changes + +Please be aware that major version(e.g., from 6.8.0 to 7.0.0) update contains breaking changes that may impact your infrastructure. It is crucial to review these changes with caution before proceeding with the upgrade. + +In most cases, you will need to adjust your Terraform code to accommodate the changes introduced in the new major version. We strongly recommend reviewing the changelog and migration guide to understand the modifications and ensure a smooth transition. + +To help you in this process, we have provided detailed documentation on the breaking changes, new features, and any deprecated functionalities. Please take the time to read through these resources to avoid any potential issues or disruptions to your infrastructure. 
+ +* [Notice on Upgrade to v10.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov10.0.md) +* [Notice on Upgrade to v9.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov9.0.md) +* [Notice on Upgrade to v8.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov8.0.md) +* [Notice on Upgrade to v7.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov7.0.md) +* [Notice on Upgrade to v6.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov6.0.md) +* [Notice on Upgrade to v5.x](https://github.com/Azure/terraform-azurerm-aks/blob/main/NoticeOnUpgradeTov5.0.md) + +Remember, upgrading to a major version with breaking changes should be done carefully and thoroughly tested in your environment. If you have any questions or concerns, please don't hesitate to reach out to our support team for assistance. + +## Usage in Terraform 1.2.0 + +Please view folders in `examples`. + +The module supports some outputs that may be used to configure a kubernetes +provider after deploying an AKS cluster. + +```hcl +provider "kubernetes" { + host = module.aks.host + client_certificate = base64decode(module.aks.client_certificate) + client_key = base64decode(module.aks.client_key) + cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate) +} +``` + +There're some examples in the examples folder. You can execute `terraform apply` command in `examples`'s sub folder to try the module. These examples are tested against every PR with the [E2E Test](#Pre-Commit--Pr-Check--Test). + +## Enable or disable tracing tags + +We're using [BridgeCrew Yor](https://github.com/bridgecrewio/yor) and [yorbox](https://github.com/lonegunmanb/yorbox) to help manage tags consistently across infrastructure as code (IaC) frameworks. 
In this module you might see tags like: + +```hcl +resource "azurerm_resource_group" "rg" { + location = "eastus" + name = random_pet.name + tags = merge(var.tags, (/**/ (var.tracing_tags_enabled ? { for k, v in /**/ { + avm_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" + avm_git_file = "main.tf" + avm_git_last_modified_at = "2023-05-05 08:57:54" + avm_git_org = "lonegunmanb" + avm_git_repo = "terraform-yor-tag-test-module" + avm_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" + } /**/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /**/)) +} +``` + +To enable tracing tags, set the variable to true: + +```hcl +module "example" { +source = "{module_source}" +... +tracing_tags_enabled = true +} +``` + +The `tracing_tags_enabled` is default to `false`. + +To customize the prefix for your tracing tags, set the `tracing_tags_prefix` variable value in your Terraform configuration: + +```hcl +module "example" { +source = "{module_source}" +... +tracing_tags_prefix = "custom_prefix_" +} +``` + +The actual applied tags would be: + +```text +{ +custom_prefix_git_commit = "3077cc6d0b70e29b6e106b3ab98cee6740c916f6" +custom_prefix_git_file = "main.tf" +custom_prefix_git_last_modified_at = "2023-05-05 08:57:54" +custom_prefix_git_org = "lonegunmanb" +custom_prefix_git_repo = "terraform-yor-tag-test-module" +custom_prefix_yor_trace = "a0425718-c57d-401c-a7d5-f3d88b2551a4" +} +``` + +## Pre-Commit & Pr-Check & Test + +### Configurations + +- [Configure Terraform for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/terraform-install-configure) + +We assumed that you have setup service principal's credentials in your environment variables like below: + +```shell +export ARM_SUBSCRIPTION_ID="" +export ARM_TENANT_ID="" +export ARM_CLIENT_ID="" +export ARM_CLIENT_SECRET="" +``` + +On Windows Powershell: + +```shell +$env:ARM_SUBSCRIPTION_ID="" +$env:ARM_TENANT_ID="" +$env:ARM_CLIENT_ID="" +$env:ARM_CLIENT_SECRET="" +``` + +We provide a docker 
image to run the pre-commit checks and tests for you: `mcr.microsoft.com/azterraform:latest` + +To run the pre-commit task, we can run the following command: + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pre-commit +``` + +In pre-commit task, we will: + +1. Run `terraform fmt -recursive` command for your Terraform code. +2. Run `terrafmt fmt -f` command for markdown files and go code files to ensure that the Terraform code embedded in these files are well formatted. +3. Run `go mod tidy` and `go mod vendor` for test folder to ensure that all the dependencies have been synced. +4. Run `gofmt` for all go code files. +5. Run `gofumpt` for all go code files. +6. Run `terraform-docs` on `README.md` file, then run `markdown-table-formatter` to format markdown tables in `README.md`. + +Then we can run the pr-check task to check whether our code meets our pipeline's requirement(We strongly recommend you run the following command before you commit): + +```shell +$ docker run --rm -v $(pwd):/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +On Windows Powershell: + +```shell +$ docker run --rm -v ${pwd}:/src -w /src mcr.microsoft.com/azterraform:latest make pr-check +``` + +To run the e2e-test, we can run the following command: + +```text +docker run --rm -v $(pwd):/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: + +```text +docker run --rm -v ${pwd}:/src -w /src -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +To follow [**Ensure AKS uses disk encryption set**](https://docs.bridgecrew.io/docs/ensure-that-aks-uses-disk-encryption-set) policy we've used 
`azurerm_key_vault` in example codes, and to follow [**Key vault does not allow firewall rules settings**](https://docs.bridgecrew.io/docs/ensure-that-key-vault-allows-firewall-rules-settings) we've limited the ip cidr on it's `network_acls`. By default we'll use the ip returned by `https://api.ipify.org?format=json` api as your public ip, but in case you need to use another cidr, you can set an environment variable like below: + +```text +docker run --rm -v $(pwd):/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +On Windows Powershell: +```text +docker run --rm -v ${pwd}:/src -w /src -e TF_VAR_key_vault_firewall_bypass_ip_cidr="" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test +``` + +#### Prerequisites + +- [Docker](https://www.docker.com/community-edition#/download) + +## Authors + +Originally created by [Damien Caro](http://github.com/dcaro) and [Malte Lantin](http://github.com/n01d) + +## License + +[MIT](LICENSE) + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Module Spec + +The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!** + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3 | +| [azapi](#requirement\_azapi) | >=2.0, < 3.0 | +| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#requirement\_null) | >= 3.0 | +| [time](#requirement\_time) | >= 0.5 | +| [tls](#requirement\_tls) | >= 3.1 | + +## Providers + +| Name | Version | +|------|---------| +| [azapi](#provider\_azapi) | >=2.0, < 3.0 | +| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 | +| [null](#provider\_null) | >= 3.0 | +| [time](#provider\_time) | >= 0.5 | +| [tls](#provider\_tls) | >= 3.1 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource | +| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource | +| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource | +| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource | +| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource | +| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| 
[azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource | +| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | +| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | +| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source | +| 
[azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source | +| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source | +| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source | +| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no | +| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | (Optional) aci\_connector\_linux subnet name | `string` | `null` | no | +| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. 
| `number` | `2` | no | +| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no | +| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no | +| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no | +| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no | +| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no | +| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) |
list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
}))
| `[]` | no | +| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.<br>
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.<br>
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) |
list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
| `[]` | no | +| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no | +| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no | +| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no | +| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no | +| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no | +| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no | +| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no | +| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no | +| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. 
Changing this forces some new resources to be created. | `map(string)` | `{}` | no | +| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no | +| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no | +| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no | +| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no | +| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no | +| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no | +| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no | +| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`. 
| `number` | `45` | no | +| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no | +| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no | +| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no | +| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. 
| `string` | `"0.5"` | no | +| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no | +| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no | +| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**

By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no | +| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no | +| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that will be used as cluster ingress.<br>
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. |
object({
id = string
subnet_id = string
})
| `null` | no | +| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no | +| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no | +| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no | +| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on Aks cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicity now(described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no | +| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. |
object({
sgx_quote_helper_enabled = bool
})
| `null` | no | +| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no | +| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no | +| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no | +| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no | +| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 |
object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
})
|
{
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
}
| no | +| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no | +| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no | +| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no | +| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no | +| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no | +| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. 
| `bool` | `false` | no | +| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. |
object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
})
| `null` | no | +| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. |
object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
})
| `null` | no | +| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no | +| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no | +| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no | +| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no | +| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no | +| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no | +| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no | +| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no | +| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. 
| `string` | `"Public"` | no | +| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.<br>
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. |
object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
})
| `null` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no | +| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no | +| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no | +| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. 
https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no | +| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no | +| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no | +| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no | +| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes | +| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. |
object({
id = string
})
| `null` | no | +| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. |
object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
})
| `null` | no | +| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no | +| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no | +| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no | +| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no | +| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. |
object({
identity_ids = optional(set(string))
type = string
})
| `null` | no | +| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no | +| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no | +| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no | +| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no | +| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no | +| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. |
object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
})
| `null` | no | +| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.<br>
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with `RelativeMonthly` frequency. Value between 0 and 31 (inclusive).<br>
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.<br>
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.

---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. |
object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
})
| `null` | no | +| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` |
[
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no | +| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` |
[
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no | +| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` |
[
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no | +| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) |
object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
})
| `null` | no | +| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no | +| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. |
object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
})
| `null` | no | +| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no | +| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no | +| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. 
| `list(string)` | `null` | no | +| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no | +| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created. | `string` | `null` | no | +| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no | +| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no | +| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no | +| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. 
| `string` | `null` | no | +| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. |
object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
})
| `null` | no | +| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no | +| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node within this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
})) |
map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
}))
| `{}` | no | +| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no | +| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no | +| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no | +| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no | +| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no | +| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no | +| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no | +| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no | +| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. 
Changing this forces a new resource to be created. | `string` | `null` | no | +| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no | +| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no | +| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no | +| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no | +| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no | +| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no | +| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no | +| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no | +| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. 
If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no | +| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes | +| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no | +| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no | +| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no | +| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no | +| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no | +| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. |
object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
})
| `null` | no | +| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no | +| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no | +| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no | +| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no | +| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no | +| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no | +| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no | +| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. 
| `string` | `"KubernetesOfficial"` | no | +| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no | +| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no | +| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no | +| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) |
object({
id = string
})
| `null` | no | +| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) |
object({
dns_zone_ids = list(string)
})
| `null` | no | +| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. |
object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
})
| `null` | no | +| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. | +| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? | +| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. | +| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. | +| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. | +| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. | +| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. 
| +| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) | +| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace | +| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace | +| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. | +| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. | +| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. | +| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. | +| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. | +| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. 
| +| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. | +| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. | +| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). | +| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. | +| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. | +| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. | +| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? | +| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. 
| +| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? | +| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. | +| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. | +| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. | +| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. | +| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block | +| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. | +| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. | +| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. | +| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. | +| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? 
| +| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | +| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. | +| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. | +| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object. | + diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md new file mode 100644 index 000000000..869fdfe2b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 
+ +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf new file mode 100644 index 000000000..7f368600b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf @@ -0,0 +1,317 @@ +moved { + from = azurerm_kubernetes_cluster_node_pool.node_pool + to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + for_each = local.node_pools_create_before_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = "${each.value.name}${substr(md5(uuid()), 0, 4)}" + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + gpu_instance = each.value.gpu_instance + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + 
workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + application_security_group_ids = each.value.node_network_profile.application_security_group_ids + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] + + content { + outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + create_before_destroy = true + ignore_changes = [ + name + ] + replace_triggered_by = [ + null_resource.pool_name_keeper[each.key], + ] + + precondition { + condition = can(regex("[a-z0-9]{1,8}", each.value.name)) + error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" + } + precondition { + condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) + error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " + } + precondition { + condition = var.agents_type == "VirtualMachineScaleSets" + error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." 
+ } + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + for_each = local.node_pools_create_after_destroy + + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + name = each.value.name + capacity_reservation_group_id = each.value.capacity_reservation_group_id + eviction_policy = each.value.eviction_policy + fips_enabled = each.value.fips_enabled + host_group_id = each.value.host_group_id + kubelet_disk_type = each.value.kubelet_disk_type + max_count = each.value.max_count + max_pods = each.value.max_pods + min_count = each.value.min_count + mode = each.value.mode + node_count = each.value.node_count + node_labels = each.value.node_labels + node_public_ip_prefix_id = each.value.node_public_ip_prefix_id + node_taints = each.value.node_taints + orchestrator_version = each.value.orchestrator_version + os_disk_size_gb = each.value.os_disk_size_gb + os_disk_type = each.value.os_disk_type + os_sku = each.value.os_sku + os_type = each.value.os_type + pod_subnet_id = try(each.value.pod_subnet.id, null) + priority = each.value.priority + proximity_placement_group_id = each.value.proximity_placement_group_id + scale_down_mode = each.value.scale_down_mode + snapshot_id = each.value.snapshot_id + spot_max_price = each.value.spot_max_price + tags = each.value.tags + ultra_ssd_enabled = each.value.ultra_ssd_enabled + vm_size = each.value.vm_size + vnet_subnet_id = try(each.value.vnet_subnet.id, null) + workload_runtime = each.value.workload_runtime + zones = each.value.zones + + dynamic "kubelet_config" { + for_each = each.value.kubelet_config == null ? 
[] : ["kubelet_config"] + + content { + allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls + container_log_max_line = each.value.kubelet_config.container_log_max_files + container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb + cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled + cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period + cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy + image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold + image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold + pod_max_pid = each.value.kubelet_config.pod_max_pid + topology_manager_policy = each.value.kubelet_config.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"] + + content { + swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb + transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag + transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = each.value.linux_os_config.sysctl_config == null ? 
[] : ["sysctl_config"] + + content { + fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr + fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max + fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches + fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open + kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max + net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog + net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max + net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default + net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max + net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn + net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default + net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max + net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = 
each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max + vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count + vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness + vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"] + + content { + node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags + } + } + dynamic "upgrade_settings" { + for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"] + + content { + max_surge = each.value.upgrade_settings.max_surge + drain_timeout_in_minutes = each.value.upgrade_settings.drain_timeout_in_minutes + node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes + } + } + dynamic "windows_profile" { + for_each = each.value.windows_profile == null ? 
[] : ["windows_profile"] + + content { + outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + precondition { + condition = can(regex("[a-z0-9]{1,8}", each.value.name)) + error_message = "A Node Pools name must consist of alphanumeric characters and have a maximum lenght of 8 characters (4 random chars added)" + } + precondition { + condition = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size)) + error_message = "With with Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools. " + } + precondition { + condition = var.agents_type == "VirtualMachineScaleSets" + error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets." + } + } +} + +resource "null_resource" "pool_name_keeper" { + for_each = var.node_pools + + triggers = { + pool_name = each.value.name + } + + lifecycle { + precondition { + condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids) + error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself." 
+ } + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf new file mode 100644 index 000000000..500f27ece --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf @@ -0,0 +1,17 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + custom_ca_trust_enabled = each.value.custom_ca_trust_enabled + enable_auto_scaling = each.value.enable_auto_scaling + enable_host_encryption = each.value.enable_host_encryption + enable_node_public_ip = each.value.enable_node_public_ip + message_of_the_day = each.value.message_of_the_day +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf new file mode 100644 index 000000000..2b69dfe13 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf @@ -0,0 +1,74 @@ +locals { + # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval. + auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? 
var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete + # automatic upgrades are either: + # - null + # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null + # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null + automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : ( + (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) || + (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null) + ) + cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks") + # Abstract the decision whether to create an Analytics Workspace or not. + create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null + create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null + default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), []) + # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1 + existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null) + existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4] + existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? 
null : split("/", local.existing_application_gateway_for_ingress_id) + existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null) + # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1 + existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), []) + existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null) + existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null) + existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null) + ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress + # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null. + # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled + # is set to `true`. + log_analytics_workspace = var.log_analytics_workspace_enabled ? ( + # The Log Analytics Workspace should be enabled: + var.log_analytics_workspace == null ? { + # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied. + # Create an `azurerm_log_analytics_workspace` resource and use that. + id = local.azurerm_log_analytics_workspace_id + name = local.azurerm_log_analytics_workspace_name + location = local.azurerm_log_analytics_workspace_location + resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name + } : { + # `log_analytics_workspace` is supplied. Let's use that. 
+ id = var.log_analytics_workspace.id + name = var.log_analytics_workspace.name + location = var.log_analytics_workspace.location + # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1 + resource_group_name = split("/", var.log_analytics_workspace.id)[4] + } + ) : null # Finally, the Log Analytics Workspace should be disabled. + node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true } + node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true } + private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null) + query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false) + subnet_ids = [for _, s in local.subnets : s.id] + subnets = merge({ for k, v in merge( + [ + for key, pool in var.node_pools : { + "${key}-vnet-subnet" : pool.vnet_subnet, + "${key}-pod-subnet" : pool.pod_subnet, + } + ]...) : k => v if v != null }, var.vnet_subnet == null ? 
{} : { + "vnet-subnet" : { + id = var.vnet_subnet.id + } + }) + # subnet_ids = for id in local.potential_subnet_ids : id if id != null + use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null + use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null + valid_private_dns_zone_regexs = [ + "private\\.[a-z0-9]+\\.azmk8s\\.io", + "privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io", + "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io", + ] +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf new file mode 100644 index 000000000..fe51625be --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf @@ -0,0 +1,124 @@ +resource "azurerm_log_analytics_workspace" "main" { + count = local.create_analytics_workspace ? 1 : 0 + + location = var.location + name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace") + resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name) + allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions + cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced + daily_quota_gb = var.log_analytics_workspace_daily_quota_gb + data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id + immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled + internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled + internet_query_enabled = var.log_analytics_workspace_internet_query_enabled + local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled + reservation_capacity_in_gb_per_day = 
var.log_analytics_workspace_reservation_capacity_in_gb_per_day + retention_in_days = var.log_retention_in_days + sku = var.log_analytics_workspace_sku + tags = var.tags + + dynamic "identity" { + for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity] + + content { + type = identity.value.type + identity_ids = identity.value.identity_ids + } + } + + lifecycle { + precondition { + condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix)) + error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`." + } + } +} + +locals { + azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null) + azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null) + azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null) + azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null) +} + +data "azurerm_log_analytics_workspace" "main" { + count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0 + + name = var.log_analytics_workspace.name + resource_group_name = local.log_analytics_workspace.resource_group_name +} + +resource "azurerm_log_analytics_solution" "main" { + count = local.create_analytics_solution ? 
1 : 0 + + location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null)) + resource_group_name = local.log_analytics_workspace.resource_group_name + solution_name = "ContainerInsights" + workspace_name = local.log_analytics_workspace.name + workspace_resource_id = local.log_analytics_workspace.id + tags = var.tags + + plan { + product = "OMSGallery/ContainerInsights" + publisher = "Microsoft" + } +} + +locals { + dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null) +} + +resource "azurerm_monitor_data_collection_rule" "dcr" { + count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 + + location = local.dcr_location + name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}" + resource_group_name = var.resource_group_name + description = "DCR for Azure Monitor Container Insights" + tags = var.tags + + data_flow { + destinations = [local.log_analytics_workspace.name] + streams = var.monitor_data_collection_rule_extensions_streams + } + data_flow { + destinations = [local.log_analytics_workspace.name] + streams = ["Microsoft-Syslog"] + } + destinations { + log_analytics { + name = local.log_analytics_workspace.name + workspace_resource_id = local.log_analytics_workspace.id + } + } + data_sources { + extension { + extension_name = "ContainerInsights" + name = "ContainerInsightsExtension" + streams = var.monitor_data_collection_rule_extensions_streams + extension_json = jsonencode({ + "dataCollectionSettings" : { + interval = var.data_collection_settings.data_collection_interval + namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection + namespaces = var.data_collection_settings.namespaces_for_data_collection + enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled + } 
+ }) + } + syslog { + facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities + log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels + name = "sysLogsDataSource" + streams = ["Microsoft-Syslog"] + } + } +} + +resource "azurerm_monitor_data_collection_rule_association" "dcra" { + count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0 + + target_resource_id = azurerm_kubernetes_cluster.main.id + data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id + description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster." + name = "ContainerInsightsExtension" +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf new file mode 100644 index 000000000..0a8dc8e59 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf @@ -0,0 +1,741 @@ +moved { + from = module.ssh-key.tls_private_key.ssh + to = tls_private_key.ssh[0] +} + +resource "tls_private_key" "ssh" { + count = var.admin_username == null ? 0 : 1 + + algorithm = "RSA" + rsa_bits = 2048 +} + +resource "azurerm_kubernetes_cluster" "main" { + location = var.location + name = "${local.cluster_name}${var.cluster_name_random_suffix ? 
substr(md5(uuid()), 0, 4) : ""}" + resource_group_name = var.resource_group_name + azure_policy_enabled = var.azure_policy_enabled + cost_analysis_enabled = var.cost_analysis_enabled + disk_encryption_set_id = var.disk_encryption_set_id + dns_prefix = var.prefix + dns_prefix_private_cluster = var.dns_prefix_private_cluster + image_cleaner_enabled = var.image_cleaner_enabled + image_cleaner_interval_hours = var.image_cleaner_interval_hours + kubernetes_version = var.kubernetes_version + local_account_disabled = var.local_account_disabled + node_resource_group = var.node_resource_group + oidc_issuer_enabled = var.oidc_issuer_enabled + open_service_mesh_enabled = var.open_service_mesh_enabled + private_cluster_enabled = var.private_cluster_enabled + private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled + private_dns_zone_id = var.private_dns_zone_id + role_based_access_control_enabled = var.role_based_access_control_enabled + run_command_enabled = var.run_command_enabled + sku_tier = var.sku_tier + support_plan = var.support_plan + tags = var.tags + workload_identity_enabled = var.workload_identity_enabled + + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
[] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "node_network_profile" { + for_each = var.node_network_profile == null ? [] : [var.node_network_profile] + + content { + application_security_group_ids = node_network_profile.value.application_security_group_ids + node_public_ip_tags = node_network_profile.value.node_public_ip_tags + + dynamic "allowed_host_ports" { + for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports + + content { + port_end = allowed_host_ports.value.port_end + port_start = allowed_host_ports.value.port_start + protocol = allowed_host_ports.value.protocol + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + enable_auto_scaling = var.enable_auto_scaling + enable_host_encryption = var.enable_host_encryption + enable_node_public_ip = var.enable_node_public_ip + fips_enabled = var.default_node_pool_fips_enabled + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vm_size = var.agents_size + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + 
swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + 
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "aci_connector_linux" { + for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] + + content { + subnet_name = var.aci_connector_linux_subnet_name + } + } + dynamic "api_server_access_profile" { + for_each = var.api_server_authorized_ip_ranges != null ? [ + "api_server_access_profile" + ] : [] + + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile_enabled ? 
["default_auto_scaler_profile"] : [] + + content { + balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups + empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max + expander = var.auto_scaler_profile_expander + max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec + max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time + max_unready_nodes = var.auto_scaler_profile_max_unready_nodes + max_unready_percentage = var.auto_scaler_profile_max_unready_percentage + new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay + scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add + scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete + scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure + scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded + scale_down_unready = var.auto_scaler_profile_scale_down_unready + scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold + scan_interval = var.auto_scaler_profile_scan_interval + skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage + skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : [] + + content { + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled + managed = true + tenant_id = var.rbac_aad_tenant_id + } + } + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? 
[] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + dynamic "http_proxy_config" { + for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"] + + content { + http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy) + https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy) + no_proxy = var.http_proxy_config.no_proxy + trusted_ca = var.http_proxy_config.trusted_ca + } + } + dynamic "identity" { + for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : [] + + content { + type = var.identity_type + identity_ids = var.identity_ids + } + } + dynamic "ingress_application_gateway" { + for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : [] + + content { + gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null) + gateway_name = try(var.green_field_application_gateway_for_ingress.name, null) + subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null) + subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null) + } + } + dynamic "key_management_service" { + for_each = var.kms_enabled ? ["key_management_service"] : [] + + content { + key_vault_key_id = var.kms_key_vault_key_id + key_vault_network_access = var.kms_key_vault_network_access + } + } + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] + + content { + secret_rotation_enabled = var.secret_rotation_enabled + secret_rotation_interval = var.secret_rotation_interval + } + } + dynamic "kubelet_identity" { + for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] + + content { + client_id = kubelet_identity.value.client_id + object_id = kubelet_identity.value.object_id + user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id + } + } + dynamic "linux_profile" { + for_each = var.admin_username == null ? [] : ["linux_profile"] + + content { + admin_username = var.admin_username + + ssh_key { + key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "") + } + } + } + dynamic "maintenance_window" { + for_each = var.maintenance_window != null ? ["maintenance_window"] : [] + + content { + dynamic "allowed" { + for_each = var.maintenance_window.allowed + + content { + day = allowed.value.day + hours = allowed.value.hours + } + } + dynamic "not_allowed" { + for_each = var.maintenance_window.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] + + content { + duration = maintenance_window_auto_upgrade.value.duration + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + start_date = maintenance_window_auto_upgrade.value.start_date + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + week_index = maintenance_window_auto_upgrade.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed + + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + dynamic "microsoft_defender" { + for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + } + } + dynamic "monitor_metrics" { + for_each = var.monitor_metrics != null ? 
["monitor_metrics"] : [] + + content { + annotations_allowed = var.monitor_metrics.annotations_allowed + labels_allowed = var.monitor_metrics.labels_allowed + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + ebpf_data_plane = var.ebpf_data_plane + ip_versions = var.network_ip_versions + load_balancer_sku = var.load_balancer_sku + network_data_plane = var.network_data_plane + network_mode = var.network_mode + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + pod_cidrs = var.net_profile_pod_cidrs + service_cidr = var.net_profile_service_cidr + service_cidrs = var.net_profile_service_cidrs + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + dynamic "nat_gateway_profile" { + for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile] + + content { + idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes + managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count + } + } + } + dynamic "oms_agent" { + for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? 
["oms_agent"] : [] + + content { + log_analytics_workspace_id = local.log_analytics_workspace.id + msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled + } + } + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + + content { + mode = var.service_mesh_profile.mode + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + } + } + dynamic "service_principal" { + for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] + + content { + client_id = var.client_id + client_secret = var.client_secret + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile_blob_driver_enabled + disk_driver_enabled = var.storage_profile_disk_driver_enabled + disk_driver_version = var.storage_profile_disk_driver_version + file_driver_enabled = var.storage_profile_file_driver_enabled + snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled + } + } + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_ids + } + } + dynamic "workload_autoscaler_profile" { + for_each = var.workload_autoscaler_profile == null ? 
[] : [var.workload_autoscaler_profile] + + content { + keda_enabled = workload_autoscaler_profile.value.keda_enabled + vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled + } + } + + depends_on = [ + null_resource.pool_name_keeper, + ] + + lifecycle { + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. + name, + ] + replace_triggered_by = [ + null_resource.kubernetes_cluster_name_keeper.id + ] + + precondition { + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "") + error_message = "Either `client_id` and `client_secret` or `identity_type` must be set." + } + precondition { + # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128 + condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0) + error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well." + } + precondition { + condition = var.identity_ids == null || var.client_id == "" + error_message = "Cannot set both `client_id` and `identity_ids`." + } + precondition { + condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium") + error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled." 
+ } + precondition { + condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled) + error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true." + } + precondition { + condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard") + error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`" + } + precondition { + condition = local.automatic_channel_upgrade_check + error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`." + } + precondition { + condition = !(var.kms_enabled && var.identity_type != "UserAssigned") + error_message = "KMS etcd encryption doesn't work with system-assigned managed identity." + } + precondition { + condition = !var.workload_identity_enabled || var.oidc_issuer_enabled + error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity" + } + precondition { + condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure" + error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure." + } + precondition { + condition = var.network_policy != "azure" || var.network_plugin == "azure" + error_message = "network_policy must be `azure` when network_plugin is `azure`" + } + precondition { + condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure" + error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure." 
+ } + precondition { + condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null + error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified." + } + precondition { + condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster)) + error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`." + } + precondition { + condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage" + error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`." + } + precondition { + condition = (var.kubelet_identity == null) || ( + (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0) + error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set." + } + precondition { + condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets" + error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes." + } + precondition { + condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null + error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`." + } + precondition { + condition = var.prefix == null || var.dns_prefix_private_cluster == null + error_message = "Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." 
+ } + precondition { + condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled + error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`." + } + precondition { + condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != "" + error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone" + } + precondition { + condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)])) + error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following format: `privatelink..azmk8s.io`, `.privatelink..azmk8s.io`, `private..azmk8s.io`, `.private..azmk8s.io`" + } + } +} + +resource "null_resource" "kubernetes_cluster_name_keeper" { + triggers = { + name = local.cluster_name + } +} + +resource "null_resource" "kubernetes_version_keeper" { + triggers = { + version = var.kubernetes_version + } +} + +resource "time_sleep" "interval_before_cluster_update" { + count = var.interval_before_cluster_update == null ? 
0 : 1 + + create_duration = var.interval_before_cluster_update + + depends_on = [ + azurerm_kubernetes_cluster.main, + ] + + lifecycle { + replace_triggered_by = [ + null_resource.kubernetes_version_keeper.id, + ] + } +} + +resource "azapi_update_resource" "aks_cluster_post_create" { + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + kubernetesVersion = var.kubernetes_version + } + } + + depends_on = [ + time_sleep.interval_before_cluster_update, + ] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.kubernetes_version_keeper.id] + } +} + +resource "null_resource" "http_proxy_config_no_proxy_keeper" { + count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0 + + triggers = { + http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "") + } +} + +resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" { + count = can(var.http_proxy_config.no_proxy[0]) ? 
1 : 0 + + resource_id = azurerm_kubernetes_cluster.main.id + type = "Microsoft.ContainerService/managedClusters@2024-02-01" + body = { + properties = { + httpProxyConfig = { + noProxy = var.http_proxy_config.no_proxy + } + } + } + + depends_on = [azapi_update_resource.aks_cluster_post_create] + + lifecycle { + ignore_changes = all + replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id] + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf new file mode 100644 index 000000000..a1f537658 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf @@ -0,0 +1,6 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster" "main" { + automatic_channel_upgrade = var.automatic_channel_upgrade + node_os_channel_upgrade = var.node_os_channel_upgrade +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf new file mode 100644 index 000000000..e3d37ce76 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf @@ -0,0 +1,231 @@ +output "aci_connector_linux" { + description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource." + value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null) +} + +output "aci_connector_linux_enabled" { + description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?" + value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0]) +} + +output "admin_client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." 
+ sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "") +} + +output "admin_client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "") +} + +output "admin_cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "") +} + +output "admin_host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "") +} + +output "admin_password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "") +} + +output "admin_username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "") +} + +output "aks_id" { + description = "The `azurerm_kubernetes_cluster`'s id." + value = azurerm_kubernetes_cluster.main.id +} + +output "aks_name" { + description = "The `azurerm_kubernetes_cluster`'s name." 
+ value = azurerm_kubernetes_cluster.main.name +} + +output "azure_policy_enabled" { + description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)" + value = azurerm_kubernetes_cluster.main.azure_policy_enabled +} + +output "azurerm_log_analytics_workspace_id" { + description = "The id of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].id, null) +} + +output "azurerm_log_analytics_workspace_name" { + description = "The name of the created Log Analytics workspace" + value = try(azurerm_log_analytics_workspace.main[0].name, null) +} + +output "azurerm_log_analytics_workspace_primary_shared_key" { + description = "Specifies the workspace key of the log analytics workspace" + sensitive = true + value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null) +} + +output "client_certificate" { + description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate +} + +output "client_key" { + description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].client_key +} + +output "cluster_ca_certificate" { + description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate +} + +output "cluster_fqdn" { + description = "The FQDN of the Azure Kubernetes Managed Cluster." + value = azurerm_kubernetes_cluster.main.fqdn +} + +output "cluster_identity" { + description = "The `azurerm_kubernetes_cluster`'s `identity` block." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.identity[0], null) +} + +output "cluster_portal_fqdn" { + description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.portal_fqdn +} + +output "cluster_private_fqdn" { + description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.private_fqdn +} + +output "generated_cluster_private_ssh_key" { + description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format." + sensitive = true + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null +} + +output "generated_cluster_public_ssh_key" { + description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)." + value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? 
(var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null +} + +output "host" { + description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].host +} + +output "http_application_routing_zone_name" { + description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing." + value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : "" +} + +output "ingress_application_gateway" { + description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block." + value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null) +} + +output "ingress_application_gateway_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?" + value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0]) +} + +output "key_vault_secrets_provider" { + description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block." + value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null) +} + +output "key_vault_secrets_provider_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?" + value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0]) +} + +output "kube_admin_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled." 
+ sensitive = true + value = azurerm_kubernetes_cluster.main.kube_admin_config_raw +} + +output "kube_config_raw" { + description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config_raw +} + +output "kubelet_identity" { + description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block." + value = azurerm_kubernetes_cluster.main.kubelet_identity +} + +output "location" { + description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created." + value = azurerm_kubernetes_cluster.main.location +} + +output "network_profile" { + description = "The `azurerm_kubernetes_cluster`'s `network_profile` block" + value = azurerm_kubernetes_cluster.main.network_profile +} + +output "node_resource_group" { + description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group +} + +output "node_resource_group_id" { + description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster." + value = azurerm_kubernetes_cluster.main.node_resource_group_id +} + +output "oidc_issuer_url" { + description = "The OIDC issuer URL that is associated with the cluster." + value = azurerm_kubernetes_cluster.main.oidc_issuer_url +} + +output "oms_agent" { + description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument." + value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null) +} + +output "oms_agent_enabled" { + description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?" 
+ value = can(azurerm_kubernetes_cluster.main.oms_agent[0]) +} + +output "open_service_mesh_enabled" { + description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." + value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled +} + +output "password" { + description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].password +} + +output "username" { + description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster." + sensitive = true + value = azurerm_kubernetes_cluster.main.kube_config[0].username +} + +output "web_app_routing_identity" { + description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object." 
+ value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, []) +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf new file mode 100644 index 000000000..e9601eaf0 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf @@ -0,0 +1,126 @@ +resource "azurerm_role_assignment" "acr" { + for_each = var.attached_acr_id_map + + principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id + scope = each.value + role_definition_name = "AcrPull" + skip_service_principal_aad_check = true +} + +# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity +data "azurerm_user_assigned_identity" "cluster_identity" { + count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0 + + name = split("/", var.identity_ids[0])[8] + resource_group_name = split("/", var.identity_ids[0])[4] +} + +# The AKS cluster identity has the Contributor role on the AKS second resource group (MC_myResourceGroup_myAKSCluster_eastus) +# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets +# used by the system node pool and by any additional node pools. +# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites +# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites +# https://github.com/Azure/terraform-azurerm-aks/issues/178 +resource "azurerm_role_assignment" "network_contributor" { + for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? 
local.subnets : {} + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value.id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = length(var.network_contributor_role_assigned_subnet_ids) == 0 + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +resource "azurerm_role_assignment" "network_contributor_on_subnet" { + for_each = var.network_contributor_role_assigned_subnet_ids + + principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id) + scope = each.value + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = !var.create_role_assignment_network_contributor + error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`." + } + } +} + +data "azurerm_client_config" "this" {} + +data "azurerm_virtual_network" "application_gateway_vnet" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_subnet_vnet_name + resource_group_name = local.existing_application_gateway_subnet_resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = data.azurerm_virtual_network.application_gateway_vnet[0].id + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress + error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`." + } + } +} + +resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2)) + role_definition_name = "Network Contributor" + + lifecycle { + precondition { + condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null) + error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`." + } + } +} + +resource "azurerm_role_assignment" "existing_application_gateway_contributor" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 
1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = var.brown_field_application_gateway_for_ingress.id + role_definition_name = "Contributor" + + lifecycle { + precondition { + condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress + error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`." + } + } +} + +data "azurerm_resource_group" "ingress_gw" { + count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0 + + name = local.existing_application_gateway_resource_group_for_ingress +} + +data "azurerm_resource_group" "aks_rg" { + count = var.create_role_assignments_for_application_gateway ? 1 : 0 + + name = var.resource_group_name +} + +resource "azurerm_role_assignment" "application_gateway_resource_group_reader" { + count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0 + + principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id + scope = local.use_brown_field_gw_for_ingress ? 
data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id + role_definition_name = "Reader" +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile new file mode 100644 index 000000000..7f28c53a5 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile @@ -0,0 +1,85 @@ +REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts" + +fmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash + +fumpt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash + +gosec: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash + +tffmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash + +tffmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash + +tfvalidatecheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash + +terrafmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash + +gofmtcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash + +golint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash + +tflint: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash + +lint: golint tflint gosec + +checkovcheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash + +checkovplancheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash + +fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck + +pr-check: depscheck fmtcheck lint 
unit-test checkovcheck + +unit-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash + +e2e-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash + +version-upgrade-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash + +terrafmt: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash + +pre-commit: tffmt terrafmt depsensure fmt fumpt generate + +depsensure: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash + +depscheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash + +generate: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash + +gencheck: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash + +yor-tag: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash + +autofix: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash + +test: fmtcheck + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash + @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash + +build-test: + curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash + +.PHONY: fmt fmtcheck pr-check \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf new file mode 120000 index 000000000..9cbc29686 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf @@ -0,0 +1 @@ +../extra_node_pool.tf \ No newline at end of file diff --git 
a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf new file mode 100644 index 000000000..4ba39e77c --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf @@ -0,0 +1,15 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" { + auto_scaling_enabled = each.value.enable_auto_scaling + host_encryption_enabled = each.value.enable_host_encryption + node_public_ip_enabled = each.value.enable_node_public_ip + temporary_name_for_rotation = each.value.temporary_name_for_rotation +} + +resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" { + auto_scaling_enabled = each.value.enable_auto_scaling + host_encryption_enabled = each.value.enable_host_encryption + node_public_ip_enabled = each.value.enable_node_public_ip + temporary_name_for_rotation = each.value.temporary_name_for_rotation +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf new file mode 120000 index 000000000..1b032e65b --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf @@ -0,0 +1 @@ +../locals.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf new file mode 120000 index 000000000..639a396cd --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf @@ -0,0 +1 @@ +../log_analytics.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf new file mode 120000 index 000000000..6c481fa32 --- /dev/null +++ 
b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf @@ -0,0 +1 @@ +../main.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf new file mode 100644 index 000000000..49dc0a773 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf @@ -0,0 +1,307 @@ +# tflint-ignore-file: azurerm_resource_tag + +resource "azurerm_kubernetes_cluster" "main" { + automatic_upgrade_channel = var.automatic_channel_upgrade + node_os_upgrade_channel = var.node_os_channel_upgrade + + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + auto_scaling_enabled = var.enable_auto_scaling + fips_enabled = var.default_node_pool_fips_enabled + host_encryption_enabled = var.enable_host_encryption + max_count = null + max_pods = var.agents_max_pods + min_count = null + node_count = var.agents_count + node_labels = var.agents_labels + node_public_ip_enabled = var.enable_node_public_ip + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = 
kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + 
vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "default_node_pool" { + for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : [] + + content { + name = var.agents_pool_name + vm_size = var.agents_size + auto_scaling_enabled = var.enable_auto_scaling + fips_enabled = var.default_node_pool_fips_enabled + host_encryption_enabled = var.enable_host_encryption + max_count = var.agents_max_count + max_pods = var.agents_max_pods + min_count = var.agents_min_count + node_labels = var.agents_labels + node_public_ip_enabled = var.enable_node_public_ip + only_critical_addons_enabled = var.only_critical_addons_enabled + orchestrator_version = var.orchestrator_version + os_disk_size_gb = var.os_disk_size_gb + os_disk_type = var.os_disk_type + os_sku = var.os_sku + pod_subnet_id = try(var.pod_subnet.id, null) + proximity_placement_group_id = var.agents_proximity_placement_group_id + scale_down_mode = var.scale_down_mode + snapshot_id = var.snapshot_id + tags = merge(var.tags, var.agents_tags) + temporary_name_for_rotation = var.temporary_name_for_rotation + type = var.agents_type + ultra_ssd_enabled = var.ultra_ssd_enabled + vnet_subnet_id = try(var.vnet_subnet.id, null) + zones = var.agents_availability_zones + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = 
kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = 
sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + } + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + + content { + mode = var.service_mesh_profile.mode + revisions = var.service_mesh_profile.revisions + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + } + } + dynamic "api_server_access_profile" { + for_each = var.api_server_authorized_ip_ranges != null ? 
[ + "api_server_access_profile" + ] : [] + + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control_enabled ? ["rbac"] : [] + + content { + admin_group_object_ids = var.rbac_aad_admin_group_object_ids + azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled + tenant_id = var.rbac_aad_tenant_id + } + } + network_profile { + network_plugin = var.network_plugin + dns_service_ip = var.net_profile_dns_service_ip + load_balancer_sku = var.load_balancer_sku + network_data_plane = var.ebpf_data_plane + network_plugin_mode = var.network_plugin_mode + network_policy = var.network_policy + outbound_type = var.net_profile_outbound_type + pod_cidr = var.net_profile_pod_cidr + service_cidr = var.net_profile_service_cidr + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [ + "load_balancer_profile" + ] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile_blob_driver_enabled + disk_driver_enabled = var.storage_profile_disk_driver_enabled + file_driver_enabled = var.storage_profile_file_driver_enabled + snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled + } + } + + dynamic "upgrade_override" { + for_each = var.upgrade_override != null ? 
["use_upgrade_override"] : [] + content { + effective_until = var.upgrade_override.effective_until + force_upgrade_enabled = var.upgrade_override.force_upgrade_enabled + } + + } + + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_ids + } + } + + lifecycle { + prevent_destroy = true + + ignore_changes = [ + http_application_routing_enabled, + http_proxy_config[0].no_proxy, + kubernetes_version, + # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource. + name, + network_profile[0].load_balancer_profile[0].outbound_ip_address_ids, + network_profile[0].load_balancer_profile[0].outbound_ip_prefix_ids, + ] + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf new file mode 120000 index 000000000..1a861df4d --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf @@ -0,0 +1 @@ +../outputs.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf new file mode 120000 index 000000000..705ff1c97 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf @@ -0,0 +1 @@ +../role_assignments.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf new file mode 100644 index 000000000..dee4388f6 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf @@ -0,0 +1,11 @@ +variable "upgrade_override" { + type = object({ + force_upgrade_enabled = 
bool + effective_until = optional(string) + }) + default = null + description = <<-EOT + `force_upgrade_enabled` - (Required) Whether to force upgrade the cluster. Possible values are `true` or `false`. + `effective_until` - (Optional) Specifies the duration, in RFC 3339 format (e.g., `2025-10-01T13:00:00Z`), the upgrade_override values are effective. This field must be set for the `upgrade_override` values to take effect. The date-time must be within the next 30 days. + EOT +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf new file mode 120000 index 000000000..3a65dccd2 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf @@ -0,0 +1 @@ +../variables.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf new file mode 100644 index 000000000..30c4e22bc --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf @@ -0,0 +1,231 @@ +variable "node_pools" { + type = map(object({ + name = string + node_count = optional(number) + tags = optional(map(string)) + vm_size = string + host_group_id = optional(string) + capacity_reservation_group_id = optional(string) + custom_ca_trust_enabled = optional(bool) + enable_auto_scaling = optional(bool) + enable_host_encryption = optional(bool) + enable_node_public_ip = optional(bool) + eviction_policy = optional(string) + gpu_instance = optional(string) + kubelet_config = optional(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb 
= optional(number) + container_log_max_files = optional(number) + pod_max_pid = optional(number) + })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + fips_enabled = optional(bool) + kubelet_disk_type = optional(string) + max_count = optional(number) + max_pods = optional(number) + message_of_the_day = optional(string) + mode = optional(string, "User") + min_count = optional(number) + node_network_profile = optional(object({ + node_public_ip_tags = optional(map(string)) + })) + node_labels = 
optional(map(string)) + node_public_ip_prefix_id = optional(string) + node_taints = optional(list(string)) + orchestrator_version = optional(string) + os_disk_size_gb = optional(number) + os_disk_type = optional(string, "Managed") + os_sku = optional(string) + os_type = optional(string, "Linux") + pod_subnet = optional(object({ + id = string + }), null) + priority = optional(string, "Regular") + proximity_placement_group_id = optional(string) + spot_max_price = optional(number) + scale_down_mode = optional(string, "Delete") + snapshot_id = optional(string) + ultra_ssd_enabled = optional(bool) + vnet_subnet = optional(object({ + id = string + }), null) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = optional(string) + zones = optional(set(string)) + create_before_destroy = optional(bool, true) + temporary_name_for_rotation = optional(string) + })) + default = {} + description = <<-EOT + A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: + map(object({ + name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. + node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. 
+ tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API. + vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. + custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information. + enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). + enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. + enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. + eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. 
An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified. + gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + kubelet_config = optional(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. + topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. 
+ pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. + net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. + net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. + net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. 
Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + })) + fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). + kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. + max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`. + max_pods = (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. + message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. + min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. + node_network_profile = optional(object({ + node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + })) + node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. + node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. 
+ node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. + orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. + os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. + os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. + pod_subnet = optional(object({ + id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. 
+ })) + priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. + proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). + spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. + scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. + snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. + vnet_subnet = optional(object({ + id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet. 
+ })) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) + zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. + create_before_destroy = (Optional) Create a new node pool before destroy the old one when Terraform must update an argument that cannot be updated in-place. Set this argument to `true` will add add a random suffix to pool's name to avoid conflict. Default to `true`. + temporary_name_for_rotation = (Optional) Specifies the name of the temporary node pool used to cycle the node pool when one of the relevant properties are updated. + })) + EOT + nullable = false +} + +variable "service_mesh_profile" { + type = object({ + mode = string + revisions = list(string) + internal_ingress_gateway_enabled = optional(bool, true) + external_ingress_gateway_enabled = optional(bool, true) + }) + default = null + description = <<-EOT + `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. + `revisions` - (Required) Specify 1 or 2 Istio control plane revisions for managing minor upgrades using the canary upgrade process. For example, create the resource with `revisions` set to `["asm-1-20"]`, or leave it empty (the `revisions` will only be known after apply). To start the canary upgrade, change `revisions` to `["asm-1-20", "asm-1-21"]`. To roll back the canary upgrade, revert to `["asm-1-20"]`. 
To confirm the upgrade, change to `["asm-1-21"]`. + `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. + `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. + EOT +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf new file mode 120000 index 000000000..8bd0ff140 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf new file mode 100644 index 000000000..45d44abe3 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + azapi = { + source = "Azure/azapi" + version = ">=2.0, < 3.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = ">= 4.16.0, < 5.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.1" + } + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf new file mode 100644 index 000000000..c819f9b89 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf @@ -0,0 +1,1601 @@ +variable "location" { + type = string + description = "Location of cluster, if not defined it will be read from the resource-group" +} + +variable "resource_group_name" { + type = string + description = "The existing resource group name to use" +} + +variable "aci_connector_linux_enabled" { + type = bool + default = false + description = "Enable Virtual Node pool" +} + +variable "aci_connector_linux_subnet_name" { + type = 
string + default = null + description = "(Optional) aci_connector_linux subnet name" +} + +variable "admin_username" { + type = string + default = null + description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created." +} + +variable "agents_availability_zones" { + type = list(string) + default = null + description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created." +} + +variable "agents_count" { + type = number + default = 2 + description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes." +} + +variable "agents_labels" { + type = map(string) + default = {} + description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." +} + +variable "agents_max_count" { + type = number + default = null + description = "Maximum number of nodes in a pool" +} + +variable "agents_max_pods" { + type = number + default = null + description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." +} + +variable "agents_min_count" { + type = number + default = null + description = "Minimum number of nodes in a pool" +} + +variable "agents_pool_drain_timeout_in_minutes" { + type = number + default = null + description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." 
+} + +variable "agents_pool_kubelet_configs" { + type = list(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool, true) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_line = optional(number) + pod_max_pid = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. + topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. 
+ container_log_max_line = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_linux_os_configs" { + type = list(object({ + sysctl_configs = optional(list(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })), []) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + default = [] + description = <<-EOT + list(object({ + 
sysctl_configs = optional(list(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. + net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. + net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })), []) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created. + })) +EOT + nullable = false +} + +variable "agents_pool_max_surge" { + type = string + default = "10%" + description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." 
+} + +variable "agents_pool_name" { + type = string + default = "nodepool" + description = "The default Azure AKS agentpool (nodepool) name." + nullable = false +} + +variable "agents_pool_node_soak_duration_in_minutes" { + type = number + default = 0 + description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." +} + +variable "agents_proximity_placement_group_id" { + type = string + default = null + description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." +} + +variable "agents_size" { + type = string + default = "Standard_D2s_v3" + description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." +} + +variable "agents_tags" { + type = map(string) + default = {} + description = "(Optional) A mapping of tags to assign to the Node Pool." +} + +variable "agents_type" { + type = string + default = "VirtualMachineScaleSets" + description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets." +} + +variable "api_server_authorized_ip_ranges" { + type = set(string) + default = null + description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes." +} + +variable "attached_acr_id_map" { + type = map(string) + default = {} + description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created." 
+ nullable = false +} + +variable "auto_scaler_profile_balance_similar_node_groups" { + type = bool + default = false + description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`." +} + +variable "auto_scaler_profile_empty_bulk_delete_max" { + type = number + default = 10 + description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`." +} + +variable "auto_scaler_profile_enabled" { + type = bool + default = false + description = "Enable configuring the auto scaler profile" + nullable = false +} + +variable "auto_scaler_profile_expander" { + type = string + default = "random" + description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`." + + validation { + condition = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander) + error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`." + } +} + +variable "auto_scaler_profile_max_graceful_termination_sec" { + type = string + default = "600" + description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`." +} + +variable "auto_scaler_profile_max_node_provisioning_time" { + type = string + default = "15m" + description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`." +} + +variable "auto_scaler_profile_max_unready_nodes" { + type = number + default = 3 + description = "Maximum Number of allowed unready nodes. Defaults to `3`." +} + +variable "auto_scaler_profile_max_unready_percentage" { + type = number + default = 45 + description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`." 
+} + +variable "auto_scaler_profile_new_pod_scale_up_delay" { + type = string + default = "10s" + description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`." +} + +variable "auto_scaler_profile_scale_down_delay_after_add" { + type = string + default = "10m" + description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_delay_after_delete" { + type = string + default = null + description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`." +} + +variable "auto_scaler_profile_scale_down_delay_after_failure" { + type = string + default = "3m" + description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`." +} + +variable "auto_scaler_profile_scale_down_unneeded" { + type = string + default = "10m" + description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`." +} + +variable "auto_scaler_profile_scale_down_unready" { + type = string + default = "20m" + description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`." +} + +variable "auto_scaler_profile_scale_down_utilization_threshold" { + type = string + default = "0.5" + description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`." +} + +variable "auto_scaler_profile_scan_interval" { + type = string + default = "10s" + description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`." 
+} + +variable "auto_scaler_profile_skip_nodes_with_local_storage" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`." +} + +variable "auto_scaler_profile_skip_nodes_with_system_pods" { + type = bool + default = true + description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`." +} + +variable "automatic_channel_upgrade" { + type = string + default = null + description = <<-EOT + (Optional) Defines the automatic upgrade channel for the AKS cluster. + Possible values: + * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").** + * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.** + + By default, automatic upgrades are disabled. + More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster + EOT + + validation { + condition = var.automatic_channel_upgrade == null ? true : contains([ + "patch", "stable", "rapid", "node-image" + ], var.automatic_channel_upgrade) + error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`." + } +} + +variable "azure_policy_enabled" { + type = bool + default = false + description = "Enable Azure Policy Addon." 
+} + +variable "brown_field_application_gateway_for_ingress" { + type = object({ + id = string + subnet_id = string + }) + default = null + description = <<-EOT + [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing) + * `id` - (Required) The ID of the Application Gateway that will be used as cluster ingress. + * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. + EOT +} + +variable "client_id" { + type = string + default = "" + description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment" + nullable = false +} + +variable "client_secret" { + type = string + default = "" + description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment" + nullable = false + sensitive = true +} + +variable "cluster_log_analytics_workspace_name" { + type = string + default = null + description = "(Optional) The name of the Analytics workspace" +} + +variable "cluster_name" { + type = string + default = null + description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)" +} + +variable "cluster_name_random_suffix" { + type = bool + default = false + description = "Whether to add a random suffix on AKS cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now (described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict." 
+ nullable = false +} + +variable "confidential_computing" { + type = object({ + sgx_quote_helper_enabled = bool + }) + default = null + description = "(Optional) Enable Confidential Computing." +} + +variable "cost_analysis_enabled" { + type = bool + default = false + description = "(Optional) Enable Cost Analysis." +} + +variable "create_monitor_data_collection_rule" { + type = bool + default = true + description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`." + nullable = false +} + +variable "create_role_assignment_network_contributor" { + type = bool + default = false + description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster" + nullable = false +} + +variable "create_role_assignments_for_application_gateway" { + type = bool + default = true + description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`." + nullable = false +} + +variable "data_collection_settings" { + type = object({ + data_collection_interval = string + namespace_filtering_mode_for_data_collection = string + namespaces_for_data_collection = list(string) + container_log_v2_enabled = bool + }) + default = { + data_collection_interval = "1m" + namespace_filtering_mode_for_data_collection = "Off" + namespaces_for_data_collection = ["kube-system", "gatekeeper-system", "azure-arc"] + container_log_v2_enabled = true + } + description = <<-EOT + `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m. + `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection. + `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode. 
+ `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs. + See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 + EOT +} + +variable "default_node_pool_fips_enabled" { + type = bool + default = null + description = "(Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." +} + +variable "disk_encryption_set_id" { + type = string + default = null + description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created." +} + +variable "dns_prefix_private_cluster" { + type = string + default = null + description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created." +} + +variable "ebpf_data_plane" { + type = string + default = null + description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created." +} + +variable "enable_auto_scaling" { + type = bool + default = false + description = "Enable node pool autoscaling" +} + +variable "enable_host_encryption" { + type = bool + default = false + description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" +} + +variable "enable_node_public_ip" { + type = bool + default = false + description = "(Optional) Should nodes in this Node Pool have a Public IP Address? 
Defaults to false." +} + +variable "green_field_application_gateway_for_ingress" { + type = object({ + name = optional(string) + subnet_cidr = optional(string) + subnet_id = optional(string) + }) + default = null + description = <<-EOT + [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new) + * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. + * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. +EOT + + validation { + condition = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr))) + error_message = "One of `subnet_cidr` and `subnet_id` must be specified." + } +} + +variable "http_proxy_config" { + type = object({ + http_proxy = optional(string) + https_proxy = optional(string) + no_proxy = optional(list(string)) + trusted_ca = optional(string) + }) + default = null + description = <<-EOT + optional(object({ + http_proxy = (Optional) The proxy address to be used when communicating over HTTP. + https_proxy = (Optional) The proxy address to be used when communicating over HTTPS. + no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field. 
+ trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format. + })) + Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. +EOT + + validation { + condition = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)) + error_message = "`http_proxy` and `https_proxy` cannot be both empty." + } +} + +variable "identity_ids" { + type = list(string) + default = null + description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster." +} + +variable "identity_type" { + type = string + default = "SystemAssigned" + description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well." + + validation { + condition = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned" + error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`" + } +} + +variable "image_cleaner_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether Image Cleaner is enabled." +} + +variable "image_cleaner_interval_hours" { + type = number + default = 48 + description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`." +} + +variable "interval_before_cluster_update" { + type = string + default = "30s" + description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update." 
+} + +variable "key_vault_secrets_provider_enabled" { + type = bool + default = false + description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver" + nullable = false +} + +variable "kms_enabled" { + type = bool + default = false + description = "(Optional) Enable Azure KeyVault Key Management Service." + nullable = false +} + +variable "kms_key_vault_key_id" { + type = string + default = null + description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier." +} + +variable "kms_key_vault_network_access" { + type = string + default = "Public" + description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`." + + validation { + condition = contains(["Private", "Public"], var.kms_key_vault_network_access) + error_message = "Possible values are `Private` and `Public`" + } +} + +variable "kubelet_identity" { + type = object({ + client_id = optional(string) + object_id = optional(string) + user_assigned_identity_id = optional(string) + }) + default = null + description = <<-EOT + - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets.If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. + - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. 
+EOT +} + +variable "kubernetes_version" { + type = string + default = null + description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region" +} + +variable "load_balancer_profile_enabled" { + type = bool + default = false + description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`." + nullable = false +} + +variable "load_balancer_profile_idle_timeout_in_minutes" { + type = number + default = 30 + description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive." +} + +variable "load_balancer_profile_managed_outbound_ip_count" { + type = number + default = null + description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive" +} + +variable "load_balancer_profile_managed_outbound_ipv6_count" { + type = number + default = null + description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature" +} + +variable "load_balancer_profile_outbound_ip_address_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer." 
+} + +variable "load_balancer_profile_outbound_ip_prefix_ids" { + type = set(string) + default = null + description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer." +} + +variable "load_balancer_profile_outbound_ports_allocated" { + type = number + default = 0 + description = "(Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`" +} + +variable "load_balancer_sku" { + type = string + default = "standard" + description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created." + + validation { + condition = contains(["basic", "standard"], var.load_balancer_sku) + error_message = "Possible values are `basic` and `standard`" + } +} + +variable "local_account_disabled" { + type = bool + default = null + description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information." +} + +variable "log_analytics_solution" { + type = object({ + id = string + }) + default = null + description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution." + + validation { + condition = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != "" + error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`." 
+ } +} + +variable "log_analytics_workspace" { + type = object({ + id = string + name = string + location = optional(string) + resource_group_name = optional(string) + }) + default = null + description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace." +} + +variable "log_analytics_workspace_allow_resource_only_permissions" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics Workspace allow users accessing to data associated with resources they have permission to view, without permission to workspace. Defaults to `true`." +} + +variable "log_analytics_workspace_cmk_for_query_forced" { + type = bool + default = null + description = "(Optional) Is Customer Managed Storage mandatory for query management?" +} + +variable "log_analytics_workspace_daily_quota_gb" { + type = number + default = null + description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted." +} + +variable "log_analytics_workspace_data_collection_rule_id" { + type = string + default = null + description = "(Optional) The ID of the Data Collection Rule to use for this workspace." +} + +variable "log_analytics_workspace_enabled" { + type = bool + default = true + description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard" + nullable = false +} + +variable "log_analytics_workspace_identity" { + type = object({ + identity_ids = optional(set(string)) + type = string + }) + default = null + description = <<-EOT + - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`. + - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. 
Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. +EOT +} + +variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" { + type = bool + default = null + description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days." +} + +variable "log_analytics_workspace_internet_ingestion_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_internet_query_enabled" { + type = bool + default = null + description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`." +} + +variable "log_analytics_workspace_local_authentication_disabled" { + type = bool + default = null + description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`." +} + +variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" { + type = number + default = null + description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`." +} + +variable "log_analytics_workspace_resource_group_name" { + type = string + default = null + description = "(Optional) Resource group name to create azurerm_log_analytics_solution." +} + +variable "log_analytics_workspace_sku" { + type = string + default = "PerGB2018" + description = "The SKU (pricing level) of the Log Analytics workspace. 
For new subscriptions the SKU should be set to PerGB2018" +} + +variable "log_retention_in_days" { + type = number + default = 30 + description = "The retention period for the logs in days" +} + +variable "maintenance_window" { + type = object({ + allowed = optional(list(object({ + day = string + hours = set(number) + })), [ + ]), + not_allowed = optional(list(object({ + end = string + start = string + })), []), + }) + default = null + description = "(Optional) Maintenance configuration of the managed cluster." +} + +variable "maintenance_window_auto_upgrade" { + type = object({ + day_of_month = optional(number) + day_of_week = optional(string) + duration = number + frequency = string + interval = number + start_date = optional(string) + start_time = optional(string) + utc_offset = optional(string) + week_index = optional(string) + not_allowed = optional(set(object({ + end = string + start = string + }))) + }) + default = null + description = <<-EOT + - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. + - `duration` - (Required) The duration of the window for maintenance to run in hours. + - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. + - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. + - `start_date` - (Optional) The date on which the maintenance window begins to take effect. + - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. 
+ - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. + + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "maintenance_window_node_os" { + type = object({ + day_of_month = optional(number) + day_of_week = optional(string) + duration = number + frequency = string + interval = number + start_date = optional(string) + start_time = optional(string) + utc_offset = optional(string) + week_index = optional(string) + not_allowed = optional(set(object({ + end = string + start = string + }))) + }) + default = null + description = <<-EOT + - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. + - `duration` - (Required) The duration of the window for maintenance to run in hours. + - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. + - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. + - `start_date` - (Optional) The date on which the maintenance window begins to take effect. + - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. + - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. 
+ + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "microsoft_defender_enabled" { + type = bool + default = false + description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`." + nullable = false +} + +variable "monitor_data_collection_rule_data_sources_syslog_facilities" { + type = list(string) + default = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"] + description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog" +} + +variable "monitor_data_collection_rule_data_sources_syslog_levels" { + type = list(string) + default = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"] + description = "List of syslog levels" +} + +variable "monitor_data_collection_rule_extensions_streams" { + type = list(any) + default = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"] + description = "An array of container insights table streams. 
See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr" +} + +variable "monitor_metrics" { + type = object({ + annotations_allowed = optional(string) + labels_allowed = optional(string) + }) + default = null + description = <<-EOT + (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster + object({ + annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric." + labels_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric." + }) +EOT +} + +variable "msi_auth_for_monitoring_enabled" { + type = bool + default = null + description = "(Optional) Is managed identity authentication for monitoring enabled?" +} + +variable "nat_gateway_profile" { + type = object({ + idle_timeout_in_minutes = optional(number) + managed_outbound_ip_count = optional(number) + }) + default = null + description = <<-EOT + `nat_gateway_profile` block supports the following: + - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`. + - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. +EOT +} + +variable "net_profile_dns_service_ip" { + type = string + default = null + description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created." 
+} + +variable "net_profile_outbound_type" { + type = string + default = "loadBalancer" + description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer." +} + +variable "net_profile_pod_cidr" { + type = string + default = null + description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created." +} + +variable "net_profile_pod_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidr" { + type = string + default = null + description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created." +} + +variable "net_profile_service_cidrs" { + type = list(string) + default = null + description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created." +} + +variable "network_contributor_role_assigned_subnet_ids" { + type = map(string) + default = {} + description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id" + nullable = false +} + +variable "network_data_plane" { + type = string + default = null + description = "(Optional) Specifies the data plane used for building the Kubernetes network. 
Possible values are `azure` and `cilium`. Defaults to `azure`. Disabling this forces a new resource to be created." +} + +variable "network_ip_versions" { + type = list(string) + default = null + description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created." +} + +variable "network_mode" { + type = string + default = null + description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created." +} + +variable "network_plugin" { + type = string + default = "kubenet" + description = "Network plugin to use for networking." + nullable = false +} + +variable "network_plugin_mode" { + type = string + default = null + description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created." +} + +variable "network_policy" { + type = string + default = null + description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created." +} + +variable "node_network_profile" { + type = object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + }) + default = null + description = <<-EOT + - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. 
+ - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. +--- + An `allowed_host_ports` block supports the following: + - `port_start`: (Optional) Specifies the start of the port range. + - `port_end`: (Optional) Specifies the end of the port range. + - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. +EOT +} + +variable "node_os_channel_upgrade" { + type = string + default = null + description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`." +} + +variable "node_pools" { + type = map(object({ + name = string + node_count = optional(number) + tags = optional(map(string)) + vm_size = string + host_group_id = optional(string) + capacity_reservation_group_id = optional(string) + custom_ca_trust_enabled = optional(bool) + enable_auto_scaling = optional(bool) + enable_host_encryption = optional(bool) + enable_node_public_ip = optional(bool) + eviction_policy = optional(string) + gpu_instance = optional(string) + kubelet_config = optional(object({ + cpu_manager_policy = optional(string) + cpu_cfs_quota_enabled = optional(bool) + cpu_cfs_quota_period = optional(string) + image_gc_high_threshold = optional(number) + image_gc_low_threshold = optional(number) + topology_manager_policy = optional(string) + allowed_unsafe_sysctls = optional(set(string)) + container_log_max_size_mb = optional(number) + container_log_max_files = optional(number) + pod_max_pid = optional(number) + })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = optional(number) + fs_file_max = optional(number) + fs_inotify_max_user_watches = optional(number) + fs_nr_open = optional(number) + kernel_threads_max = optional(number) + net_core_netdev_max_backlog = optional(number) + net_core_optmem_max = optional(number) + 
net_core_rmem_default = optional(number) + net_core_rmem_max = optional(number) + net_core_somaxconn = optional(number) + net_core_wmem_default = optional(number) + net_core_wmem_max = optional(number) + net_ipv4_ip_local_port_range_min = optional(number) + net_ipv4_ip_local_port_range_max = optional(number) + net_ipv4_neigh_default_gc_thresh1 = optional(number) + net_ipv4_neigh_default_gc_thresh2 = optional(number) + net_ipv4_neigh_default_gc_thresh3 = optional(number) + net_ipv4_tcp_fin_timeout = optional(number) + net_ipv4_tcp_keepalive_intvl = optional(number) + net_ipv4_tcp_keepalive_probes = optional(number) + net_ipv4_tcp_keepalive_time = optional(number) + net_ipv4_tcp_max_syn_backlog = optional(number) + net_ipv4_tcp_max_tw_buckets = optional(number) + net_ipv4_tcp_tw_reuse = optional(bool) + net_netfilter_nf_conntrack_buckets = optional(number) + net_netfilter_nf_conntrack_max = optional(number) + vm_max_map_count = optional(number) + vm_swappiness = optional(number) + vm_vfs_cache_pressure = optional(number) + })) + transparent_huge_page_enabled = optional(string) + transparent_huge_page_defrag = optional(string) + swap_file_size_mb = optional(number) + })) + fips_enabled = optional(bool) + kubelet_disk_type = optional(string) + max_count = optional(number) + max_pods = optional(number) + message_of_the_day = optional(string) + mode = optional(string, "User") + min_count = optional(number) + node_network_profile = optional(object({ + node_public_ip_tags = optional(map(string)) + application_security_group_ids = optional(list(string)) + allowed_host_ports = optional(list(object({ + port_start = optional(number) + port_end = optional(number) + protocol = optional(string) + }))) + })) + node_labels = optional(map(string)) + node_public_ip_prefix_id = optional(string) + node_taints = optional(list(string)) + orchestrator_version = optional(string) + os_disk_size_gb = optional(number) + os_disk_type = optional(string, "Managed") + os_sku = optional(string) + 
os_type = optional(string, "Linux") + pod_subnet = optional(object({ + id = string + }), null) + priority = optional(string, "Regular") + proximity_placement_group_id = optional(string) + spot_max_price = optional(number) + scale_down_mode = optional(string, "Delete") + snapshot_id = optional(string) + ultra_ssd_enabled = optional(bool) + vnet_subnet = optional(object({ + id = string + }), null) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = optional(string) + zones = optional(set(string)) + create_before_destroy = optional(bool, true) + })) + default = {} + description = <<-EOT + A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below: + map(object({ + name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates. + node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`. + tags = (Optional) A mapping of tags to assign to the resource. 
At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changess) until this is fixed in the AKS API. + vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created. + host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created. + capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created. + custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information. + enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). + enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created. + enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created. + eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified. 
+ gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + kubelet_config = optional(object({ + cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created. + cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created. + cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created. + image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created. + image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created. + topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created. + allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created. + container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created. + container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created. + pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created. 
+ })) + linux_os_config = optional(object({ + sysctl_config = optional(object({ + fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created. + fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created. + fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created. + fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created. + kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created. + net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created. + net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created. + net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created. + net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created. + net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. 
Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created. + net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created. + net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created. + net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created. + net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. 
Must be between `8000` and `1440000`. Changing this forces a new resource to be created. + net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created. + net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created. + vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created. + vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created. + vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created. + })) + transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created. + transparent_huge_page_defrag = (Optional) specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created. + swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created. + })) + fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. 
FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). + kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. + max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`. + max_pods = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. + message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created. + mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. + min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`. + node_network_profile = optional(object({ + node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created. + application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool. + allowed_host_ports = optional(object({ + port_start = (Optional) Specifies the start of the port range. + port_end = (Optional) Specifies the end of the port range. + protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. 
+ })) + })) + node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. + node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created. + node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. + orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. + os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. + os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. 
+ os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. + pod_subnet = optional(object({ + id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created. + })) + priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. + proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool). + spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`. + scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. + snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created. + ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created. 
+ vnet_subnet = optional(object({ + id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet. + })) + upgrade_settings = optional(object({ + drain_timeout_in_minutes = number + node_soak_duration_in_minutes = number + max_surge = string + })) + windows_profile = optional(object({ + outbound_nat_enabled = optional(bool, true) + })) + workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools) + zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created. + create_before_destroy = (Optional) Create a new node pool before destroy the old one when Terraform must update an argument that cannot be updated in-place. Set this argument to `true` will add add a random suffix to pool's name to avoid conflict. Default to `true`. + })) + EOT + nullable = false +} + +variable "node_resource_group" { + type = string + default = null + description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created." +} + +variable "oidc_issuer_enabled" { + type = bool + default = false + description = "Enable or Disable the OIDC issuer URL. Defaults to false." +} + +variable "oms_agent_enabled" { + type = bool + default = true + description = "Enable OMS Agent Addon." + nullable = false +} + +variable "only_critical_addons_enabled" { + type = bool + default = null + description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. 
Changing this forces a new resource to be created." +} + +variable "open_service_mesh_enabled" { + type = bool + default = null + description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)." +} + +variable "orchestrator_version" { + type = string + default = null + description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" +} + +variable "os_disk_size_gb" { + type = number + default = 50 + description = "Disk size of nodes in GBs." +} + +variable "os_disk_type" { + type = string + default = "Managed" + description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." + nullable = false +} + +variable "os_sku" { + type = string + default = null + description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." +} + +variable "pod_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "prefix" { + type = string + default = "" + description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. 
Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified." +} + +variable "private_cluster_enabled" { + type = bool + default = false + description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet." +} + +variable "private_cluster_public_fqdn_enabled" { + type = bool + default = false + description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`." +} + +variable "private_dns_zone_id" { + type = string + default = null + description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created." +} + +variable "public_ssh_key" { + type = string + default = "" + description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created." +} + +variable "rbac_aad" { + type = bool + default = true + description = "(Optional) Is Azure Active Directory integration enabled?" + nullable = false +} + +variable "rbac_aad_admin_group_object_ids" { + type = list(string) + default = null + description = "Object ID of groups with admin access." +} + +variable "rbac_aad_azure_rbac_enabled" { + type = bool + default = null + description = "(Optional) Is Role Based Access Control based on Azure AD enabled?" +} + +variable "rbac_aad_tenant_id" { + type = string + default = null + description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used." +} + +variable "role_based_access_control_enabled" { + type = bool + default = false + description = "Enable Role Based Access Control." 
+ nullable = false +} + +variable "run_command_enabled" { + type = bool + default = true + description = "(Optional) Whether to enable run command for the cluster or not." +} + +variable "scale_down_mode" { + type = string + default = "Delete" + description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." +} + +variable "secret_rotation_enabled" { + type = bool + default = false + description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`" + nullable = false +} + +variable "secret_rotation_interval" { + type = string + default = "2m" + description = "The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m`" + nullable = false +} + +variable "service_mesh_profile" { + type = object({ + mode = string + internal_ingress_gateway_enabled = optional(bool, true) + external_ingress_gateway_enabled = optional(bool, true) + }) + default = null + description = <<-EOT + `mode` - (Required) The mode of the service mesh. Possible value is `Istio`. + `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`. + `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. + EOT +} + +variable "sku_tier" { + type = string + default = "Free" + description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`" + + validation { + condition = contains(["Free", "Standard", "Premium"], var.sku_tier) + error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0." 
+ } +} + +variable "snapshot_id" { + type = string + default = null + description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." +} + +variable "storage_profile_blob_driver_enabled" { + type = bool + default = false + description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`" +} + +variable "storage_profile_disk_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_disk_driver_version" { + type = string + default = "v1" + description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`." +} + +variable "storage_profile_enabled" { + type = bool + default = false + description = "Enable storage profile" + nullable = false +} + +variable "storage_profile_file_driver_enabled" { + type = bool + default = true + description = "(Optional) Is the File CSI driver enabled? Defaults to `true`" +} + +variable "storage_profile_snapshot_controller_enabled" { + type = bool + default = true + description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`" +} + +variable "support_plan" { + type = string + default = "KubernetesOfficial" + description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`." + + validation { + condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan) + error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`." 
+ } +} + +variable "tags" { + type = map(string) + default = {} + description = "Any tags that should be present on the AKS cluster resources" +} + +variable "temporary_name_for_rotation" { + type = string + default = null + description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" +} + +variable "ultra_ssd_enabled" { + type = bool + default = false + description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." +} + +variable "vnet_subnet" { + type = object({ + id = string + }) + default = null + description = <<-EOT + object({ + id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created. + }) +EOT +} + +variable "web_app_routing" { + type = object({ + dns_zone_ids = list(string) + }) + default = null + description = <<-EOT + object({ + dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list." + }) +EOT +} + +variable "workload_autoscaler_profile" { + type = object({ + keda_enabled = optional(bool, false) + vertical_pod_autoscaler_enabled = optional(bool, false) + }) + default = null + description = <<-EOT + `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads. + `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. +EOT +} + +variable "workload_identity_enabled" { + type = bool + default = false + description = "Enable or Disable Workload Identity. Defaults to false." 
+} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf new file mode 100644 index 000000000..c9d2fe8f1 --- /dev/null +++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + azapi = { + source = "Azure/azapi" + version = ">=2.0, < 3.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = ">= 3.107.0, < 4.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.5" + } + tls = { + source = "hashicorp/tls" + version = ">= 3.1" + } + } +} diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index b3d1816d4..5e6632d2c 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -10,8 +10,7 @@ module "name" { # Create the AKS cluster using the locally modified Azure module module "k8scluster" { - source = "Azure/aks/azurerm//v4" - version = "10.2.0" + source = "./k8scluster/v4" # Required variables resource_group_name = var.inputs.network_details.attributes.resource_group_name diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index 17e13857a..336d18f89 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -39,11 +39,6 @@ variable "instance" { }) }) - validation { - condition = contains(["1.29", "1.30", "1.31", "1.32"], var.instance.spec.cluster.kubernetes_version) - error_message = "Kubernetes version must be a supported version (1.29, 1.30, 1.31, or 1.32)." - } - validation { condition = contains(["Free", "Standard"], var.instance.spec.cluster.sku_tier) error_message = "SKU tier must be one of: Free, Standard." 
From c91102f666eb192a5ec7101c20b20addadf03bae Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 18:12:42 +0530 Subject: [PATCH 30/36] removed private cluster support --- .../innersourcing/control_planes.json | 8 +++++ .github/workflows/innersourcing/secrets.json | 3 ++ control_planes.json | 8 +++++ .../azure_aks/0.2/facets.yaml | 28 ---------------- .../kubernetes_cluster/azure_aks/0.2/main.tf | 7 ++-- .../azure_aks/0.2/variables.tf | 11 +++---- secrets.json | 3 ++ test.sh | 32 +++++++++++++++++++ 8 files changed, 61 insertions(+), 39 deletions(-) create mode 100644 .github/workflows/innersourcing/control_planes.json create mode 100644 .github/workflows/innersourcing/secrets.json create mode 100644 control_planes.json create mode 100644 secrets.json create mode 100755 test.sh diff --git a/.github/workflows/innersourcing/control_planes.json b/.github/workflows/innersourcing/control_planes.json new file mode 100644 index 000000000..d432e518b --- /dev/null +++ b/.github/workflows/innersourcing/control_planes.json @@ -0,0 +1,8 @@ +{ + "facetsdemo": { + "Name": "facetsdemo", + "URL": "https://facetsdemo.console.facets.cloud", + "Username": "ishaan.kalra@facets.cloud", + "TokenRef": "FACETSDEMO_TOKEN" + } +} \ No newline at end of file diff --git a/.github/workflows/innersourcing/secrets.json b/.github/workflows/innersourcing/secrets.json new file mode 100644 index 000000000..d1893d752 --- /dev/null +++ b/.github/workflows/innersourcing/secrets.json @@ -0,0 +1,3 @@ +{ + "FACETSDEMO_TOKEN": "f84ac169-4ce5-45d9-86c0-5c2e1fa473b5" +} \ No newline at end of file diff --git a/control_planes.json b/control_planes.json new file mode 100644 index 000000000..7edcc50ed --- /dev/null +++ b/control_planes.json @@ -0,0 +1,8 @@ +{ + "provided": { + "Name": "provided", + "URL": "https://facetsdemo.console.facets.cloud", + "Username": "***", + "TokenRef": "PROVIDED_TOKEN" + } +} \ No newline at end of file diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml 
b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 44f1fa968..f373a07f3 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -33,16 +33,6 @@ spec: field: spec.auto_upgrade_settings.enable_auto_upgrade values: - false - cluster_endpoint_public_access: - type: boolean - title: Cluster Endpoint Public Access - description: Whether the AKS public API server endpoint is enabled. - default: true - cluster_endpoint_private_access: - type: boolean - title: Cluster Endpoint Private Access - description: Whether the AKS private API server endpoint is enabled. - default: false cluster_endpoint_public_access_cidrs: type: array title: Cluster Endpoint Public Access CIDRs @@ -51,21 +41,6 @@ spec: default: - 0.0.0.0/0 x-ui-override-disable: true - x-ui-visible-if: - field: spec.cluster.cluster_endpoint_public_access - values: - - true - cluster_endpoint_private_access_cidrs: - type: array - title: Cluster Endpoint Private Access CIDRs - description: List of CIDR blocks which can access the AKS private API server - endpoint. 
- default: [] - x-ui-override-disable: true - x-ui-visible-if: - field: spec.cluster.cluster_endpoint_private_access - values: - - true cluster_enabled_log_types: type: array title: Cluster Enabled Log Types @@ -318,11 +293,8 @@ sample: spec: cluster: kubernetes_version: '1.31' - cluster_endpoint_public_access: true - cluster_endpoint_private_access: false cluster_endpoint_public_access_cidrs: - 0.0.0.0/0 - cluster_endpoint_private_access_cidrs: [] sku_tier: Free auto_upgrade_settings: enable_auto_upgrade: true diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index 5e6632d2c..b5d6b8cd9 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -39,10 +39,9 @@ module "k8scluster" { net_profile_service_cidr = "10.254.0.0/16" net_profile_dns_service_ip = "10.254.0.254" - # Private cluster configuration - private_cluster_enabled = var.instance.spec.cluster.cluster_endpoint_private_access - private_cluster_public_fqdn_enabled = var.instance.spec.cluster.cluster_endpoint_public_access - api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access ? 
var.instance.spec.cluster.cluster_endpoint_public_access_cidrs : null + # Public cluster configuration - always enabled + private_cluster_enabled = false + api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access_cidrs # Node pool configuration agents_count = var.instance.spec.node_pools.system_np.node_count diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index 336d18f89..2ff6595b3 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -6,13 +6,10 @@ variable "instance" { version = string spec = object({ cluster = object({ - kubernetes_version = string - cluster_endpoint_public_access = optional(bool, true) - cluster_endpoint_private_access = optional(bool, false) - cluster_endpoint_public_access_cidrs = optional(list(string), ["0.0.0.0/0"]) - cluster_endpoint_private_access_cidrs = optional(list(string), []) - cluster_enabled_log_types = optional(list(string), []) - sku_tier = optional(string, "Free") + kubernetes_version = string + cluster_endpoint_public_access_cidrs = optional(list(string), ["0.0.0.0/0"]) + cluster_enabled_log_types = optional(list(string), []) + sku_tier = optional(string, "Free") }) auto_upgrade_settings = object({ enable_auto_upgrade = optional(bool, true) diff --git a/secrets.json b/secrets.json new file mode 100644 index 000000000..bbcdb616d --- /dev/null +++ b/secrets.json @@ -0,0 +1,3 @@ +{ + "PROVIDED_TOKEN": "b55f9bb3-a295-42bf-959c-a75464632d50" +} \ No newline at end of file diff --git a/test.sh b/test.sh new file mode 100755 index 000000000..3c93d5bd9 --- /dev/null +++ b/test.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Array of directories to check +dirs=("./modules" "./modules/kubernetes_cluster/aws_eks/0.3" "modules/kubernetes_cluster/aws_eks/0.3/aws-terraform-eks") + +# Array to store directories containing facets.yaml +facets_dirs=() + +for dir in 
"${dirs[@]}"; do + current_dir="$dir" + while [[ "$current_dir" != "/" && "$current_dir" != "." ]]; do + if [[ -f "$current_dir/facets.yaml" ]] && ls $current_dir/*.tf &> /dev/null; then + # Check if the directory is already in the facets_dirs array + if ! [[ " ${facets_dirs[@]} " =~ " ${current_dir} " ]]; then + # Add the directory to the facets_dirs array + facets_dirs+=("$current_dir") + fi + break + else + # Move up to the parent directory + current_dir=$(dirname "$current_dir") + fi + done + if [[ "$current_dir" == "/" || "$current_dir" == "." ]]; then + echo "No facets.yaml along with terraform files found in $dir or any of its parent directories" + fi +done + +# Perform the curl command for each directory in facets_dirs +for dir in "${facets_dirs[@]}"; do + curl -s https://facets-cloud.github.io/facets-schemas/scripts/module_register.sh | bash -s -- -c "$URL" -u "$USERNAME" -t "$TOKEN" -p "$dir" -r "${GITHUB_REF}" +done \ No newline at end of file From 82b5a6623f1f4940fe6f387eece7fea5e5aac3c0 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 18:13:16 +0530 Subject: [PATCH 31/36] removed terraform lock file --- .../azure_aks/0.2/.terraform.lock.hcl | 102 ------------------ 1 file changed, 102 deletions(-) delete mode 100644 modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl diff --git a/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl b/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl deleted file mode 100644 index 8f6a2d044..000000000 --- a/modules/kubernetes_cluster/azure_aks/0.2/.terraform.lock.hcl +++ /dev/null @@ -1,102 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "registry.terraform.io/azure/azapi" { - version = "2.5.0" - constraints = ">= 2.0.0, < 3.0.0" - hashes = [ - "h1:/jBZRd/dvuFm1CxS+WKHuXm4H++fkUHAvoZdlm4oScw=", - "zh:24a2ebfec4a79b7ef122e07afe6ddda51ce91b3dbed7007757b9c53511976396", - "zh:2a26868f8243666b6d0cd29ea5d1b1ecfedb1bbccc3801d383ab7a3f7930aa69", - "zh:4c3ced3ce1f937dc5bcea61f638eee9134570af6cbe7e4db7c60d9bb517da689", - "zh:52acef0e6d57ed6e98a4ae587449071c91069d59387912f69ec008dfb48fd3bd", - "zh:658bc2e92374ca941a79df0e343599a7e8819a0ff5a5f47957acbf4ee524c751", - "zh:68e66d8b5ceb581a1919782492b7caf57334ea07a982ee4c440d92bb5af2b0b8", - "zh:94779341444106af22b5ca823e371c97480b17d1765aee236c81b4b4e96bbaa4", - "zh:9bbddd9312a8e0819c2262d640a6852be64414347f068ffe4513c09b36b7cfe7", - "zh:9d0319cf08c0aebfc20f88fd63aec39361d7b7044bf47296d97543daa7727344", - "zh:b201c491da70270299e1cfc40e26b01290dbe9ee8e36e12fa8a6b63393092803", - "zh:d0593258943e546c8c241b12232ab1e39e1741aebd7a02e4abfe910424a1d36b", - "zh:d489a31c3d1ee18111a6238484d1636a483024fa43c19468394ec8ec214ef503", - ] -} - -provider "registry.terraform.io/hashicorp/azurerm" { - version = "4.38.1" - constraints = ">= 4.16.0, < 5.0.0" - hashes = [ - "h1:MV45L+OKw7+UrxI9SczAllqrAZjQomKKJbhSwIlkmYA=", - "zh:2d4085678cad85782b0097d2f1d03d96862deb3684b14a1125bd46b36091fd30", - "zh:3fe8037a4e94bc694caca4a68c0e15436dedc91b70aa95a06e2770e3e8dde6df", - "zh:4178b3783fca42ebac4435db3531bd23069723caf1509b0e915c524a4dee25d3", - "zh:61c6d21e854696c1c6d3fadce9aa9ab433e9a8791340760f9e8fdd1327d1a391", - "zh:8ef26b97aed168b7b91b868c1e4493a79cdbdc4ecb987f0e2a4e402ab6cb2474", - "zh:b4b1edfb49a36a109c69d661bb26b961fcdf50058690deed9d906c09254e5c1a", - "zh:b5e07b1c160cf0cefc2e4bc8d2b4c0a382dd76513797dc70b0c2fd3bee7b8495", - "zh:b87029f89e7d445c85ee7a8940f4a2740a745124802c461d1e51cd8b11d7c106", - "zh:c21d488f12aa6750f4525fc120b1405dd1a37f0b59586960e78beeb0e4fffcca", - "zh:cd1402d0e004e23c2ee36744fa26d4daafa291a05d5410b7beca6dc8c30857ba", - 
"zh:e8a7eb3d937d27e779ae426ac9f4529bdc7053634f219df8c76b2b8180fbed71", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/null" { - version = "3.2.4" - constraints = ">= 3.0.0" - hashes = [ - "h1:L5V05xwp/Gto1leRryuesxjMfgZwjb7oool4WS1UEFQ=", - "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", - "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", - "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", - "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", - "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", - "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", - "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", - "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", - "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", - "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", - ] -} - -provider "registry.terraform.io/hashicorp/time" { - version = "0.13.1" - constraints = ">= 0.5.0" - hashes = [ - "h1:ZT5ppCNIModqk3iOkVt5my8b8yBHmDpl663JtXAIRqM=", - "zh:02cb9aab1002f0f2a94a4f85acec8893297dc75915f7404c165983f720a54b74", - "zh:04429b2b31a492d19e5ecf999b116d396dac0b24bba0d0fb19ecaefe193fdb8f", - "zh:26f8e51bb7c275c404ba6028c1b530312066009194db721a8427a7bc5cdbc83a", - "zh:772ff8dbdbef968651ab3ae76d04afd355c32f8a868d03244db3f8496e462690", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:898db5d2b6bd6ca5457dccb52eedbc7c5b1a71e4a4658381bcbb38cedbbda328", - "zh:8de913bf09a3fa7bedc29fec18c47c571d0c7a3d0644322c46f3aa648cf30cd8", - "zh:9402102c86a87bdfe7e501ffbb9c685c32bbcefcfcf897fd7d53df414c36877b", - 
"zh:b18b9bb1726bb8cfbefc0a29cf3657c82578001f514bcf4c079839b6776c47f0", - "zh:b9d31fdc4faecb909d7c5ce41d2479dd0536862a963df434be4b16e8e4edc94d", - "zh:c951e9f39cca3446c060bd63933ebb89cedde9523904813973fbc3d11863ba75", - "zh:e5b773c0d07e962291be0e9b413c7a22c044b8c7b58c76e8aa91d1659990dfb5", - ] -} - -provider "registry.terraform.io/hashicorp/tls" { - version = "4.1.0" - constraints = ">= 3.1.0" - hashes = [ - "h1:zEv9tY1KR5vaLSyp2lkrucNJ+Vq3c+sTFK9GyQGLtFs=", - "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", - "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", - "zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", - "zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc", - "zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac", - "zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882", - "zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d", - "zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298", - "zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297", - "zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54", - ] -} From cd17dfd940f3545824e4f651e8659cea04483902 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 18:35:31 +0530 Subject: [PATCH 32/36] maintenance days_of week fixes --- .../azure_aks/0.2/facets.yaml | 18 ++++++++--------- .../kubernetes_cluster/azure_aks/0.2/main.tf | 20 ++++++------------- .../azure_aks/0.2/variables.tf | 6 +++--- 3 files changed, 18 insertions(+), 26 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index f373a07f3..15702ae12 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ 
b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -124,15 +124,15 @@ spec: type: string title: Day of Week description: Day of week for maintenance. - default: SUN + default: Sunday enum: - - SUN - - MON - - TUE - - WED - - THU - - FRI - - SAT + - Sunday + - Monday + - Tuesday + - Wednesday + - Thursday + - Friday + - Saturday x-ui-visible-if: field: spec.auto_upgrade_settings.maintenance_window.is_disabled values: @@ -302,7 +302,7 @@ sample: max_surge: '1' maintenance_window: is_disabled: true - day_of_week: SUN + day_of_week: Sunday start_time: 2 end_time: 6 node_pools: diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index b5d6b8cd9..adc7136ab 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -64,20 +64,12 @@ module "k8scluster" { # Maintenance window configuration maintenance_window_auto_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && !var.instance.spec.auto_upgrade_settings.maintenance_window.is_disabled ? 
{ - frequency = "Weekly" - interval = 1 - duration = var.instance.spec.auto_upgrade_settings.maintenance_window.end_time - var.instance.spec.auto_upgrade_settings.maintenance_window.start_time - day_of_week = lookup({ - "SUN" = "Sunday" - "MON" = "Monday" - "TUE" = "Tuesday" - "WED" = "Wednesday" - "THU" = "Thursday" - "FRI" = "Friday" - "SAT" = "Saturday" - }, var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week, "Sunday") - start_time = format("%02d:00", var.instance.spec.auto_upgrade_settings.maintenance_window.start_time) - utc_offset = "+00:00" + frequency = "Weekly" + interval = 1 + duration = var.instance.spec.auto_upgrade_settings.maintenance_window.end_time - var.instance.spec.auto_upgrade_settings.maintenance_window.start_time + day_of_week = var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week + start_time = format("%02d:00", var.instance.spec.auto_upgrade_settings.maintenance_window.start_time) + utc_offset = "+00:00" } : null # Node surge configuration for upgrades diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index 2ff6595b3..b6e460aed 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -17,7 +17,7 @@ variable "instance" { max_surge = optional(string, "1") maintenance_window = object({ is_disabled = optional(bool, true) - day_of_week = optional(string, "SUN") + day_of_week = optional(string, "Sunday") start_time = optional(number, 2) end_time = optional(number, 6) }) @@ -70,9 +70,9 @@ variable "instance" { validation { condition = contains([ - "SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT" + "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ], var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week) - error_message = "Maintenance window day_of_week must be one of: SUN, MON, TUE, WED, THU, FRI, SAT." 
+ error_message = "Maintenance window day_of_week must be one of: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday." } validation { From cf69ef209946632bd03965e7f7680a71547a733f Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 18:41:36 +0530 Subject: [PATCH 33/36] made maintenance_window flag is_enabled --- .../azure_aks/0.2/facets.yaml | 20 +++++++++---------- .../kubernetes_cluster/azure_aks/0.2/main.tf | 2 +- .../azure_aks/0.2/variables.tf | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml index 15702ae12..b3b8d911a 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml +++ b/modules/kubernetes_cluster/azure_aks/0.2/facets.yaml @@ -115,10 +115,10 @@ spec: values: - true properties: - is_disabled: + is_enabled: type: boolean - title: Disable Maintenance Window - description: Disable maintenance window (allow upgrades anytime). + title: Enable Maintenance Window + description: Enable maintenance window for scheduled upgrades. 
default: true day_of_week: type: string @@ -134,9 +134,9 @@ spec: - Friday - Saturday x-ui-visible-if: - field: spec.auto_upgrade_settings.maintenance_window.is_disabled + field: spec.auto_upgrade_settings.maintenance_window.is_enabled values: - - false + - true start_time: type: integer title: Start Time @@ -145,9 +145,9 @@ spec: minimum: 0 maximum: 23 x-ui-visible-if: - field: spec.auto_upgrade_settings.maintenance_window.is_disabled + field: spec.auto_upgrade_settings.maintenance_window.is_enabled values: - - false + - true end_time: type: integer title: End Time @@ -156,9 +156,9 @@ spec: minimum: 0 maximum: 23 x-ui-visible-if: - field: spec.auto_upgrade_settings.maintenance_window.is_disabled + field: spec.auto_upgrade_settings.maintenance_window.is_enabled values: - - false + - true tags: type: object title: Tags @@ -301,7 +301,7 @@ sample: automatic_channel_upgrade: stable max_surge: '1' maintenance_window: - is_disabled: true + is_enabled: true day_of_week: Sunday start_time: 2 end_time: 6 diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf index adc7136ab..5a5c322ac 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/main.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf @@ -63,7 +63,7 @@ module "k8scluster" { automatic_channel_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade ? var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade : null # Maintenance window configuration - maintenance_window_auto_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && !var.instance.spec.auto_upgrade_settings.maintenance_window.is_disabled ? { + maintenance_window_auto_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && var.instance.spec.auto_upgrade_settings.maintenance_window.is_enabled ? 
{ frequency = "Weekly" interval = 1 duration = var.instance.spec.auto_upgrade_settings.maintenance_window.end_time - var.instance.spec.auto_upgrade_settings.maintenance_window.start_time diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index b6e460aed..018c46777 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -16,7 +16,7 @@ variable "instance" { automatic_channel_upgrade = optional(string, "stable") max_surge = optional(string, "1") maintenance_window = object({ - is_disabled = optional(bool, true) + is_enabled = optional(bool, true) day_of_week = optional(string, "Sunday") start_time = optional(number, 2) end_time = optional(number, 6) From f97afa33ff3afbf53610f877131b5b95f54a8528 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 5 Aug 2025 18:56:46 +0530 Subject: [PATCH 34/36] made kubernetes version optional field --- modules/kubernetes_cluster/azure_aks/0.2/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf index 018c46777..7bcf9562a 100644 --- a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf +++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf @@ -6,7 +6,7 @@ variable "instance" { version = string spec = object({ cluster = object({ - kubernetes_version = string + kubernetes_version = optional(string, null) cluster_endpoint_public_access_cidrs = optional(list(string), ["0.0.0.0/0"]) cluster_enabled_log_types = optional(list(string), []) sku_tier = optional(string, "Free") From b81e8df7531ab45983d6ecaf4efd23a1e7536cef Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Wed, 6 Aug 2025 12:43:08 +0530 Subject: [PATCH 35/36] removed innersourcing changes --- .github/workflows/innersourcing/control_planes.json | 8 -------- 
.github/workflows/innersourcing/secrets.json | 3 --- 2 files changed, 11 deletions(-) delete mode 100644 .github/workflows/innersourcing/control_planes.json delete mode 100644 .github/workflows/innersourcing/secrets.json diff --git a/.github/workflows/innersourcing/control_planes.json b/.github/workflows/innersourcing/control_planes.json deleted file mode 100644 index d432e518b..000000000 --- a/.github/workflows/innersourcing/control_planes.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "facetsdemo": { - "Name": "facetsdemo", - "URL": "https://facetsdemo.console.facets.cloud", - "Username": "ishaan.kalra@facets.cloud", - "TokenRef": "FACETSDEMO_TOKEN" - } -} \ No newline at end of file diff --git a/.github/workflows/innersourcing/secrets.json b/.github/workflows/innersourcing/secrets.json deleted file mode 100644 index d1893d752..000000000 --- a/.github/workflows/innersourcing/secrets.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "FACETSDEMO_TOKEN": "f84ac169-4ce5-45d9-86c0-5c2e1fa473b5" -} \ No newline at end of file From 02e222adf9e4c9e98347c018ef7f6b325f6f7bd8 Mon Sep 17 00:00:00 2001 From: ishaankalra Date: Tue, 12 Aug 2025 13:29:24 +0530 Subject: [PATCH 36/36] removed unnecessary files --- control_planes.json | 8 -------- secrets.json | 3 --- test.sh | 32 -------------------------------- 3 files changed, 43 deletions(-) delete mode 100644 control_planes.json delete mode 100644 secrets.json delete mode 100755 test.sh diff --git a/control_planes.json b/control_planes.json deleted file mode 100644 index 7edcc50ed..000000000 --- a/control_planes.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "provided": { - "Name": "provided", - "URL": "https://facetsdemo.console.facets.cloud", - "Username": "***", - "TokenRef": "PROVIDED_TOKEN" - } -} \ No newline at end of file diff --git a/secrets.json b/secrets.json deleted file mode 100644 index bbcdb616d..000000000 --- a/secrets.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "PROVIDED_TOKEN": "b55f9bb3-a295-42bf-959c-a75464632d50" -} \ No newline at 
end of file diff --git a/test.sh b/test.sh deleted file mode 100755 index 3c93d5bd9..000000000 --- a/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Array of directories to check -dirs=("./modules" "./modules/kubernetes_cluster/aws_eks/0.3" "modules/kubernetes_cluster/aws_eks/0.3/aws-terraform-eks") - -# Array to store directories containing facets.yaml -facets_dirs=() - -for dir in "${dirs[@]}"; do - current_dir="$dir" - while [[ "$current_dir" != "/" && "$current_dir" != "." ]]; do - if [[ -f "$current_dir/facets.yaml" ]] && ls $current_dir/*.tf &> /dev/null; then - # Check if the directory is already in the facets_dirs array - if ! [[ " ${facets_dirs[@]} " =~ " ${current_dir} " ]]; then - # Add the directory to the facets_dirs array - facets_dirs+=("$current_dir") - fi - break - else - # Move up to the parent directory - current_dir=$(dirname "$current_dir") - fi - done - if [[ "$current_dir" == "/" || "$current_dir" == "." ]]; then - echo "No facets.yaml along with terraform files found in $dir or any of its parent directories" - fi -done - -# Perform the curl command for each directory in facets_dirs -for dir in "${facets_dirs[@]}"; do - curl -s https://facets-cloud.github.io/facets-schemas/scripts/module_register.sh | bash -s -- -c "$URL" -u "$USERNAME" -t "$TOKEN" -p "$dir" -r "${GITHUB_REF}" -done \ No newline at end of file