" -e ARM_SUBSCRIPTION_ID -e ARM_TENANT_ID -e ARM_CLIENT_ID -e ARM_CLIENT_SECRET mcr.microsoft.com/azterraform:latest make e2e-test
+```
+
+#### Prerequisites
+
+- [Docker](https://www.docker.com/community-edition#/download)
+
+## Authors
+
+Originally created by [Damien Caro](https://github.com/dcaro) and [Malte Lantin](https://github.com/n01d)
+
+## License
+
+[MIT](LICENSE)
+
+# Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Module Spec
+
+The following sections are generated by [terraform-docs](https://github.com/terraform-docs/terraform-docs) and [markdown-table-formatter](https://github.com/nvuillam/markdown-table-formatter), please **DO NOT MODIFY THEM MANUALLY!**
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.3 |
+| [azapi](#requirement\_azapi) | >=2.0, < 3.0 |
+| [azurerm](#requirement\_azurerm) | >= 3.107.0, < 4.0 |
+| [null](#requirement\_null) | >= 3.0 |
+| [time](#requirement\_time) | >= 0.5 |
+| [tls](#requirement\_tls) | >= 3.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [azapi](#provider\_azapi) | >=2.0, < 3.0 |
+| [azurerm](#provider\_azurerm) | >= 3.107.0, < 4.0 |
+| [null](#provider\_null) | >= 3.0 |
+| [time](#provider\_time) | >= 0.5 |
+| [tls](#provider\_tls) | >= 3.1 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [azapi_update_resource.aks_cluster_http_proxy_config_no_proxy](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource |
+| [azapi_update_resource.aks_cluster_post_create](https://registry.terraform.io/providers/Azure/azapi/latest/docs/resources/update_resource) | resource |
+| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource |
+| [azurerm_kubernetes_cluster_node_pool.node_pool_create_after_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource |
+| [azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource |
+| [azurerm_log_analytics_solution.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_solution) | resource |
+| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/log_analytics_workspace) | resource |
+| [azurerm_monitor_data_collection_rule.dcr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule) | resource |
+| [azurerm_monitor_data_collection_rule_association.dcra](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_data_collection_rule_association) | resource |
+| [azurerm_role_assignment.acr](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.application_gateway_byo_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.application_gateway_existing_vnet_network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.application_gateway_resource_group_reader](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.existing_application_gateway_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.network_contributor](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [azurerm_role_assignment.network_contributor_on_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/role_assignment) | resource |
+| [null_resource.http_proxy_config_no_proxy_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.kubernetes_cluster_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.kubernetes_version_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.pool_name_keeper](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [time_sleep.interval_before_cluster_update](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource |
+| [tls_private_key.ssh](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
+| [azurerm_client_config.this](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/client_config) | data source |
+| [azurerm_log_analytics_workspace.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/log_analytics_workspace) | data source |
+| [azurerm_resource_group.aks_rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source |
+| [azurerm_resource_group.ingress_gw](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source |
+| [azurerm_user_assigned_identity.cluster_identity](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/user_assigned_identity) | data source |
+| [azurerm_virtual_network.application_gateway_vnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/virtual_network) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [aci\_connector\_linux\_enabled](#input\_aci\_connector\_linux\_enabled) | Enable Virtual Node pool | `bool` | `false` | no |
+| [aci\_connector\_linux\_subnet\_name](#input\_aci\_connector\_linux\_subnet\_name) | (Optional) aci\_connector\_linux subnet name | `string` | `null` | no |
+| [admin\_username](#input\_admin\_username) | The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [agents\_availability\_zones](#input\_agents\_availability\_zones) | (Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created. | `list(string)` | `null` | no |
+| [agents\_count](#input\_agents\_count) | The number of Agents that should exist in the Agent Pool. Please set `agents_count` `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes. | `number` | `2` | no |
+| [agents\_labels](#input\_agents\_labels) | (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. | `map(string)` | `{}` | no |
+| [agents\_max\_count](#input\_agents\_max\_count) | Maximum number of nodes in a pool | `number` | `null` | no |
+| [agents\_max\_pods](#input\_agents\_max\_pods) | (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. | `number` | `null` | no |
+| [agents\_min\_count](#input\_agents\_min\_count) | Minimum number of nodes in a pool | `number` | `null` | no |
+| [agents\_pool\_drain\_timeout\_in\_minutes](#input\_agents\_pool\_drain\_timeout\_in\_minutes) | (Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created. | `number` | `null` | no |
+| [agents\_pool\_kubelet\_configs](#input\_agents\_pool\_kubelet\_configs) | list(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
    container\_log\_max\_line = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
})) | list(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool, true)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_line = optional(number)
pod_max_pid = optional(number)
})) | `[]` | no |
+| [agents\_pool\_linux\_os\_configs](#input\_agents\_pool\_linux\_os\_configs) | list(object({
sysctl\_configs = optional(list(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
    net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
    net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) The sysctl setting net.ipv4.tcp\_tw\_reuse. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
})), [])
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
    transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
})) | list(object({
sysctl_configs = optional(list(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
})), [])
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
})) | `[]` | no |
+| [agents\_pool\_max\_surge](#input\_agents\_pool\_max\_surge) | The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade. | `string` | `"10%"` | no |
+| [agents\_pool\_name](#input\_agents\_pool\_name) | The default Azure AKS agentpool (nodepool) name. | `string` | `"nodepool"` | no |
+| [agents\_pool\_node\_soak\_duration\_in\_minutes](#input\_agents\_pool\_node\_soak\_duration\_in\_minutes) | (Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0. | `number` | `0` | no |
+| [agents\_proximity\_placement\_group\_id](#input\_agents\_proximity\_placement\_group\_id) | (Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created. | `string` | `null` | no |
+| [agents\_size](#input\_agents\_size) | The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created. | `string` | `"Standard_D2s_v3"` | no |
+| [agents\_tags](#input\_agents\_tags) | (Optional) A mapping of tags to assign to the Node Pool. | `map(string)` | `{}` | no |
+| [agents\_type](#input\_agents\_type) | (Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets. | `string` | `"VirtualMachineScaleSets"` | no |
+| [api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | (Optional) The IP ranges to allow for incoming traffic to the server nodes. | `set(string)` | `null` | no |
+| [attached\_acr\_id\_map](#input\_attached\_acr\_id\_map) | Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created. | `map(string)` | `{}` | no |
+| [auto\_scaler\_profile\_balance\_similar\_node\_groups](#input\_auto\_scaler\_profile\_balance\_similar\_node\_groups) | Detect similar node groups and balance the number of nodes between them. Defaults to `false`. | `bool` | `false` | no |
+| [auto\_scaler\_profile\_empty\_bulk\_delete\_max](#input\_auto\_scaler\_profile\_empty\_bulk\_delete\_max) | Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`. | `number` | `10` | no |
+| [auto\_scaler\_profile\_enabled](#input\_auto\_scaler\_profile\_enabled) | Enable configuring the auto scaler profile | `bool` | `false` | no |
+| [auto\_scaler\_profile\_expander](#input\_auto\_scaler\_profile\_expander) | Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`. | `string` | `"random"` | no |
+| [auto\_scaler\_profile\_max\_graceful\_termination\_sec](#input\_auto\_scaler\_profile\_max\_graceful\_termination\_sec) | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. | `string` | `"600"` | no |
+| [auto\_scaler\_profile\_max\_node\_provisioning\_time](#input\_auto\_scaler\_profile\_max\_node\_provisioning\_time) | Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`. | `string` | `"15m"` | no |
+| [auto\_scaler\_profile\_max\_unready\_nodes](#input\_auto\_scaler\_profile\_max\_unready\_nodes) | Maximum Number of allowed unready nodes. Defaults to `3`. | `number` | `3` | no |
+| [auto\_scaler\_profile\_max\_unready\_percentage](#input\_auto\_scaler\_profile\_max\_unready\_percentage) | Maximum percentage of unready nodes; the cluster autoscaler will stop operations if this percentage is exceeded. Defaults to `45`. | `number` | `45` | no |
+| [auto\_scaler\_profile\_new\_pod\_scale\_up\_delay](#input\_auto\_scaler\_profile\_new\_pod\_scale\_up\_delay) | For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`. | `string` | `"10s"` | no |
+| [auto\_scaler\_profile\_scale\_down\_delay\_after\_add](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_add) | How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. | `string` | `"10m"` | no |
+| [auto\_scaler\_profile\_scale\_down\_delay\_after\_delete](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_delete) | How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. | `string` | `null` | no |
+| [auto\_scaler\_profile\_scale\_down\_delay\_after\_failure](#input\_auto\_scaler\_profile\_scale\_down\_delay\_after\_failure) | How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. | `string` | `"3m"` | no |
+| [auto\_scaler\_profile\_scale\_down\_unneeded](#input\_auto\_scaler\_profile\_scale\_down\_unneeded) | How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. | `string` | `"10m"` | no |
+| [auto\_scaler\_profile\_scale\_down\_unready](#input\_auto\_scaler\_profile\_scale\_down\_unready) | How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. | `string` | `"20m"` | no |
+| [auto\_scaler\_profile\_scale\_down\_utilization\_threshold](#input\_auto\_scaler\_profile\_scale\_down\_utilization\_threshold) | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. | `string` | `"0.5"` | no |
+| [auto\_scaler\_profile\_scan\_interval](#input\_auto\_scaler\_profile\_scan\_interval) | How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. | `string` | `"10s"` | no |
+| [auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_local\_storage) | If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`. | `bool` | `true` | no |
+| [auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods](#input\_auto\_scaler\_profile\_skip\_nodes\_with\_system\_pods) | If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`. | `bool` | `true` | no |
+| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | (Optional) Defines the automatic upgrade channel for the AKS cluster.
Possible values:
* `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
* `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**
By default, automatic upgrades are disabled.
More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster | `string` | `null` | no |
+| [azure\_policy\_enabled](#input\_azure\_policy\_enabled) | Enable Azure Policy Addon. | `bool` | `false` | no |
+| [brown\_field\_application\_gateway\_for\_ingress](#input\_brown\_field\_application\_gateway\_for\_ingress) | [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
* `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
* `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`. | object({
id = string
subnet_id = string
}) | `null` | no |
+| [client\_id](#input\_client\_id) | (Optional) The Client ID (appId) for the Service Principal used for the AKS deployment | `string` | `""` | no |
+| [client\_secret](#input\_client\_secret) | (Optional) The Client Secret (password) for the Service Principal used for the AKS deployment | `string` | `""` | no |
+| [cluster\_log\_analytics\_workspace\_name](#input\_cluster\_log\_analytics\_workspace\_name) | (Optional) The name of the Analytics workspace | `string` | `null` | no |
+| [cluster\_name](#input\_cluster\_name) | (Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns\_prefix if it is set) | `string` | `null` | no |
+| [cluster\_name\_random\_suffix](#input\_cluster\_name\_random\_suffix) | Whether to add a random suffix on the AKS cluster's name or not. The `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now (described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)); without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict. | `bool` | `false` | no |
+| [confidential\_computing](#input\_confidential\_computing) | (Optional) Enable Confidential Computing. | object({
sgx_quote_helper_enabled = bool
}) | `null` | no |
+| [cost\_analysis\_enabled](#input\_cost\_analysis\_enabled) | (Optional) Enable Cost Analysis. | `bool` | `false` | no |
+| [create\_monitor\_data\_collection\_rule](#input\_create\_monitor\_data\_collection\_rule) | Create monitor data collection rule resource for the AKS cluster. Defaults to `true`. | `bool` | `true` | no |
+| [create\_role\_assignment\_network\_contributor](#input\_create\_role\_assignment\_network\_contributor) | (Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster | `bool` | `false` | no |
+| [create\_role\_assignments\_for\_application\_gateway](#input\_create\_role\_assignments\_for\_application\_gateway) | (Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`. | `bool` | `true` | no |
+| [data\_collection\_settings](#input\_data\_collection\_settings) | `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
`namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
`namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
`container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1 | object({
data_collection_interval = string
namespace_filtering_mode_for_data_collection = string
namespaces_for_data_collection = list(string)
container_log_v2_enabled = bool
}) | {
"container_log_v2_enabled": true,
"data_collection_interval": "1m",
"namespace_filtering_mode_for_data_collection": "Off",
"namespaces_for_data_collection": [
"kube-system",
"gatekeeper-system",
"azure-arc"
]
} | no |
+| [default\_node\_pool\_fips\_enabled](#input\_default\_node\_pool\_fips\_enabled) | (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. | `bool` | `null` | no |
+| [disk\_encryption\_set\_id](#input\_disk\_encryption\_set\_id) | (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created. | `string` | `null` | no |
+| [dns\_prefix\_private\_cluster](#input\_dns\_prefix\_private\_cluster) | (Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix,var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [ebpf\_data\_plane](#input\_ebpf\_data\_plane) | (Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [enable\_auto\_scaling](#input\_enable\_auto\_scaling) | Enable node pool autoscaling | `bool` | `false` | no |
+| [enable\_host\_encryption](#input\_enable\_host\_encryption) | Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli | `bool` | `false` | no |
+| [enable\_node\_public\_ip](#input\_enable\_node\_public\_ip) | (Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false. | `bool` | `false` | no |
+| [green\_field\_application\_gateway\_for\_ingress](#input\_green\_field\_application\_gateway\_for\_ingress) | [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
* `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
* `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster. | object({
name = optional(string)
subnet_cidr = optional(string)
subnet_id = optional(string)
}) | `null` | no |
+| [http\_proxy\_config](#input\_http\_proxy\_config) | optional(object({
http\_proxy = (Optional) The proxy address to be used when communicating over HTTP.
https\_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
no\_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
trusted\_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
}))
Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift. | object({
http_proxy = optional(string)
https_proxy = optional(string)
no_proxy = optional(list(string))
trusted_ca = optional(string)
}) | `null` | no |
+| [identity\_ids](#input\_identity\_ids) | (Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster. | `list(string)` | `null` | no |
+| [identity\_type](#input\_identity\_type) | (Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well. | `string` | `"SystemAssigned"` | no |
+| [image\_cleaner\_enabled](#input\_image\_cleaner\_enabled) | (Optional) Specifies whether Image Cleaner is enabled. | `bool` | `false` | no |
+| [image\_cleaner\_interval\_hours](#input\_image\_cleaner\_interval\_hours) | (Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`. | `number` | `48` | no |
+| [interval\_before\_cluster\_update](#input\_interval\_before\_cluster\_update) | Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update. | `string` | `"30s"` | no |
+| [key\_vault\_secrets\_provider\_enabled](#input\_key\_vault\_secrets\_provider\_enabled) | (Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver | `bool` | `false` | no |
+| [kms\_enabled](#input\_kms\_enabled) | (Optional) Enable Azure KeyVault Key Management Service. | `bool` | `false` | no |
+| [kms\_key\_vault\_key\_id](#input\_kms\_key\_vault\_key\_id) | (Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier. | `string` | `null` | no |
+| [kms\_key\_vault\_network\_access](#input\_kms\_key\_vault\_network\_access) | (Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`. | `string` | `"Public"` | no |
+| [kubelet\_identity](#input\_kubelet\_identity) | - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
- `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created. | object({
client_id = optional(string)
object_id = optional(string)
user_assigned_identity_id = optional(string)
}) | `null` | no |
+| [kubernetes\_version](#input\_kubernetes\_version) | Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region | `string` | `null` | no |
+| [load\_balancer\_profile\_enabled](#input\_load\_balancer\_profile\_enabled) | (Optional) Enable a load\_balancer\_profile block. This can only be used when load\_balancer\_sku is set to `standard`. | `bool` | `false` | no |
+| [load\_balancer\_profile\_idle\_timeout\_in\_minutes](#input\_load\_balancer\_profile\_idle\_timeout\_in\_minutes) | (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. | `number` | `30` | no |
+| [load\_balancer\_profile\_managed\_outbound\_ip\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ip\_count) | (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive | `number` | `null` | no |
+| [load\_balancer\_profile\_managed\_outbound\_ipv6\_count](#input\_load\_balancer\_profile\_managed\_outbound\_ipv6\_count) | (Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed\_outbound\_ipv6\_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature | `number` | `null` | no |
+| [load\_balancer\_profile\_outbound\_ip\_address\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_address\_ids) | (Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer. | `set(string)` | `null` | no |
+| [load\_balancer\_profile\_outbound\_ip\_prefix\_ids](#input\_load\_balancer\_profile\_outbound\_ip\_prefix\_ids) | (Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer. | `set(string)` | `null` | no |
+| [load\_balancer\_profile\_outbound\_ports\_allocated](#input\_load\_balancer\_profile\_outbound\_ports\_allocated) | (Optional) Number of desired SNAT ports for each VM in the cluster's load balancer. Must be between `0` and `64000` inclusive. Defaults to `0` | `number` | `0` | no |
+| [load\_balancer\_sku](#input\_load\_balancer\_sku) | (Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created. | `string` | `"standard"` | no |
+| [local\_account\_disabled](#input\_local\_account\_disabled) | (Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information. | `bool` | `null` | no |
+| [location](#input\_location) | Location of cluster, if not defined it will be read from the resource-group | `string` | n/a | yes |
+| [log\_analytics\_solution](#input\_log\_analytics\_solution) | (Optional) Object which contains existing azurerm\_log\_analytics\_solution ID. Providing ID disables creation of azurerm\_log\_analytics\_solution. | object({
id = string
}) | `null` | no |
+| [log\_analytics\_workspace](#input\_log\_analytics\_workspace) | (Optional) Existing azurerm\_log\_analytics\_workspace to attach azurerm\_log\_analytics\_solution. Providing the config disables creation of azurerm\_log\_analytics\_workspace. | object({
id = string
name = string
location = optional(string)
resource_group_name = optional(string)
}) | `null` | no |
+| [log\_analytics\_workspace\_allow\_resource\_only\_permissions](#input\_log\_analytics\_workspace\_allow\_resource\_only\_permissions) | (Optional) Specifies if the Log Analytics Workspace allows users to access data associated with resources they have permission to view, without permission to the workspace. Defaults to `true`. | `bool` | `null` | no |
+| [log\_analytics\_workspace\_cmk\_for\_query\_forced](#input\_log\_analytics\_workspace\_cmk\_for\_query\_forced) | (Optional) Is Customer Managed Storage mandatory for query management? | `bool` | `null` | no |
+| [log\_analytics\_workspace\_daily\_quota\_gb](#input\_log\_analytics\_workspace\_daily\_quota\_gb) | (Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted. | `number` | `null` | no |
+| [log\_analytics\_workspace\_data\_collection\_rule\_id](#input\_log\_analytics\_workspace\_data\_collection\_rule\_id) | (Optional) The ID of the Data Collection Rule to use for this workspace. | `string` | `null` | no |
+| [log\_analytics\_workspace\_enabled](#input\_log\_analytics\_workspace\_enabled) | Enable the integration of azurerm\_log\_analytics\_workspace and azurerm\_log\_analytics\_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard | `bool` | `true` | no |
+| [log\_analytics\_workspace\_identity](#input\_log\_analytics\_workspace\_identity) | - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
- `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field. | object({
identity_ids = optional(set(string))
type = string
}) | `null` | no |
+| [log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled](#input\_log\_analytics\_workspace\_immediate\_data\_purge\_on\_30\_days\_enabled) | (Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days. | `bool` | `null` | no |
+| [log\_analytics\_workspace\_internet\_ingestion\_enabled](#input\_log\_analytics\_workspace\_internet\_ingestion\_enabled) | (Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`. | `bool` | `null` | no |
+| [log\_analytics\_workspace\_internet\_query\_enabled](#input\_log\_analytics\_workspace\_internet\_query\_enabled) | (Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`. | `bool` | `null` | no |
+| [log\_analytics\_workspace\_local\_authentication\_disabled](#input\_log\_analytics\_workspace\_local\_authentication\_disabled) | (Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`. | `bool` | `null` | no |
+| [log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day](#input\_log\_analytics\_workspace\_reservation\_capacity\_in\_gb\_per\_day) | (Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`. | `number` | `null` | no |
+| [log\_analytics\_workspace\_resource\_group\_name](#input\_log\_analytics\_workspace\_resource\_group\_name) | (Optional) Resource group name to create azurerm\_log\_analytics\_solution. | `string` | `null` | no |
+| [log\_analytics\_workspace\_sku](#input\_log\_analytics\_workspace\_sku) | The SKU (pricing level) of the Log Analytics workspace. For new subscriptions the SKU should be set to PerGB2018 | `string` | `"PerGB2018"` | no |
+| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period for the logs in days | `number` | `30` | no |
+| [maintenance\_window](#input\_maintenance\_window) | (Optional) Maintenance configuration of the managed cluster. | object({
allowed = optional(list(object({
day = string
hours = set(number)
})), [
]),
not_allowed = optional(list(object({
end = string
start = string
})), []),
}) | `null` | no |
+| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. | object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
}) | `null` | no |
+| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` -
- `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
- `duration` - (Required) The duration of the window for maintenance to run in hours.
- `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
- `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
- `start_date` - (Optional) The date on which the maintenance window begins to take effect.
- `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
- `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
- `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
---
`not_allowed` block supports the following:
- `end` - (Required) The end of a time span, formatted as an RFC3339 string.
- `start` - (Required) The start of a time span, formatted as an RFC3339 string. | object({
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(set(object({
end = string
start = string
})))
}) | `null` | no |
+| [microsoft\_defender\_enabled](#input\_microsoft\_defender\_enabled) | (Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`. | `bool` | `false` | no |
+| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_facilities) | Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog | `list(string)` | [
"auth",
"authpriv",
"cron",
"daemon",
"mark",
"kern",
"local0",
"local1",
"local2",
"local3",
"local4",
"local5",
"local6",
"local7",
"lpr",
"mail",
"news",
"syslog",
"user",
"uucp"
]
| no |
+| [monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels](#input\_monitor\_data\_collection\_rule\_data\_sources\_syslog\_levels) | List of syslog levels | `list(string)` | [
"Debug",
"Info",
"Notice",
"Warning",
"Error",
"Critical",
"Alert",
"Emergency"
]
| no |
+| [monitor\_data\_collection\_rule\_extensions\_streams](#input\_monitor\_data\_collection\_rule\_extensions\_streams) | An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr | `list(any)` | [
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-KubeEvents",
"Microsoft-KubePodInventory",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-KubeMonAgentEvents",
"Microsoft-InsightsMetrics",
"Microsoft-ContainerInventory",
"Microsoft-ContainerNodeInventory",
"Microsoft-Perf"
]
| no |
+| [monitor\_metrics](#input\_monitor\_metrics) | (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
object({
annotations\_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
labels\_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
}) | object({
annotations_allowed = optional(string)
labels_allowed = optional(string)
}) | `null` | no |
+| [msi\_auth\_for\_monitoring\_enabled](#input\_msi\_auth\_for\_monitoring\_enabled) | (Optional) Is managed identity authentication for monitoring enabled? | `bool` | `null` | no |
+| [nat\_gateway\_profile](#input\_nat\_gateway\_profile) | `nat_gateway_profile` block supports the following:
- `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
- `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive. | object({
idle_timeout_in_minutes = optional(number)
managed_outbound_ip_count = optional(number)
}) | `null` | no |
+| [net\_profile\_dns\_service\_ip](#input\_net\_profile\_dns\_service\_ip) | (Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created. | `string` | `null` | no |
+| [net\_profile\_outbound\_type](#input\_net\_profile\_outbound\_type) | (Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer. | `string` | `"loadBalancer"` | no |
+| [net\_profile\_pod\_cidr](#input\_net\_profile\_pod\_cidr) | (Optional) The CIDR to use for pod IP addresses. This field can only be set when network\_plugin is set to kubenet or network\_plugin is set to azure and network\_plugin\_mode is set to overlay. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [net\_profile\_pod\_cidrs](#input\_net\_profile\_pod\_cidrs) | (Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no |
+| [net\_profile\_service\_cidr](#input\_net\_profile\_service\_cidr) | (Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [net\_profile\_service\_cidrs](#input\_net\_profile\_service\_cidrs) | (Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created. | `list(string)` | `null` | no |
+| [network\_contributor\_role\_assigned\_subnet\_ids](#input\_network\_contributor\_role\_assigned\_subnet\_ids) | Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id | `map(string)` | `{}` | no |
+| [network\_data\_plane](#input\_network\_data\_plane) | (Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [network\_ip\_versions](#input\_network\_ip\_versions) | (Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created. | `list(string)` | `null` | no |
+| [network\_mode](#input\_network\_mode) | (Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [network\_plugin](#input\_network\_plugin) | Network plugin to use for networking. | `string` | `"kubenet"` | no |
+| [network\_plugin\_mode](#input\_network\_plugin\_mode) | (Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [network\_policy](#input\_network\_policy) | (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [node\_network\_profile](#input\_node\_network\_profile) | - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
- `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
---
An `allowed_host_ports` block supports the following:
- `port_start`: (Optional) Specifies the start of the port range.
- `port_end`: (Optional) Specifies the end of the port range.
- `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`. | object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}) | `null` | no |
+| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`. | `string` | `null` | no |
+| [node\_pools](#input\_node\_pools) | A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
map(object({
name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
node\_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
vm\_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
host\_group\_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
capacity\_reservation\_group\_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
custom\_ca\_trust\_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
enable\_auto\_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
enable\_host\_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
enable\_node\_public\_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
eviction\_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
gpu\_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
kubelet\_config = optional(object({
cpu\_manager\_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
cpu\_cfs\_quota\_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
cpu\_cfs\_quota\_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
image\_gc\_high\_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
image\_gc\_low\_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
topology\_manager\_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
allowed\_unsafe\_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
container\_log\_max\_size\_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
container\_log\_max\_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
pod\_max\_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
}))
linux\_os\_config = optional(object({
sysctl\_config = optional(object({
fs\_aio\_max\_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
fs\_file\_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
fs\_inotify\_max\_user\_watches = (Optional) The sysctl setting fs.inotify.max\_user\_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
fs\_nr\_open = (Optional) The sysctl setting fs.nr\_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
kernel\_threads\_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
net\_core\_netdev\_max\_backlog = (Optional) The sysctl setting net.core.netdev\_max\_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
net\_core\_optmem\_max = (Optional) The sysctl setting net.core.optmem\_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
net\_core\_rmem\_default = (Optional) The sysctl setting net.core.rmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_rmem\_max = (Optional) The sysctl setting net.core.rmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
net\_core\_wmem\_default = (Optional) The sysctl setting net.core.wmem\_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_core\_wmem\_max = (Optional) The sysctl setting net.core.wmem\_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_min = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_ip\_local\_port\_range\_max = (Optional) The sysctl setting net.ipv4.ip\_local\_port\_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
net\_ipv4\_neigh\_default\_gc\_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc\_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_fin\_timeout = (Optional) The sysctl setting net.ipv4.tcp\_fin\_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_intvl = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_probes = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_keepalive\_time = (Optional) The sysctl setting net.ipv4.tcp\_keepalive\_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_syn\_backlog = (Optional) The sysctl setting net.ipv4.tcp\_max\_syn\_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_max\_tw\_buckets = (Optional) The sysctl setting net.ipv4.tcp\_max\_tw\_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
net\_ipv4\_tcp\_tw\_reuse = (Optional) Is sysctl setting net.ipv4.tcp\_tw\_reuse enabled? Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_buckets = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
net\_netfilter\_nf\_conntrack\_max = (Optional) The sysctl setting net.netfilter.nf\_conntrack\_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
vm\_max\_map\_count = (Optional) The sysctl setting vm.max\_map\_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
vm\_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
vm\_vfs\_cache\_pressure = (Optional) The sysctl setting vm.vfs\_cache\_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
}))
transparent\_huge\_page\_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
transparent\_huge\_page\_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
swap\_file\_size\_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
}))
fips\_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
kubelet\_disk\_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
max\_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
max\_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
message\_of\_the\_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
min\_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
node\_network\_profile = optional(object({
node\_public\_ip\_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
application\_security\_group\_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
allowed\_host\_ports = optional(object({
port\_start = (Optional) Specifies the start of the port range.
port\_end = (Optional) Specifies the end of the port range.
protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
}))
}))
node\_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
node\_public\_ip\_prefix\_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
node\_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
orchestrator\_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
os\_disk\_size\_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
os\_disk\_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
os\_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
os\_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
pod\_subnet = optional(object({
id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
}))
priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
proximity\_placement\_group\_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
spot\_max\_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
scale\_down\_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
snapshot\_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
ultra\_ssd\_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
vnet\_subnet = optional(object({
id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
}))
upgrade\_settings = optional(object({
drain\_timeout\_in\_minutes = number
node\_soak\_duration\_in\_minutes = number
max\_surge = string
}))
windows\_profile = optional(object({
outbound\_nat\_enabled = optional(bool, true)
}))
workload\_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
create\_before\_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
})) | map(object({
name = string
node_count = optional(number)
tags = optional(map(string))
vm_size = string
host_group_id = optional(string)
capacity_reservation_group_id = optional(string)
custom_ca_trust_enabled = optional(bool)
enable_auto_scaling = optional(bool)
enable_host_encryption = optional(bool)
enable_node_public_ip = optional(bool)
eviction_policy = optional(string)
gpu_instance = optional(string)
kubelet_config = optional(object({
cpu_manager_policy = optional(string)
cpu_cfs_quota_enabled = optional(bool)
cpu_cfs_quota_period = optional(string)
image_gc_high_threshold = optional(number)
image_gc_low_threshold = optional(number)
topology_manager_policy = optional(string)
allowed_unsafe_sysctls = optional(set(string))
container_log_max_size_mb = optional(number)
container_log_max_files = optional(number)
pod_max_pid = optional(number)
}))
linux_os_config = optional(object({
sysctl_config = optional(object({
fs_aio_max_nr = optional(number)
fs_file_max = optional(number)
fs_inotify_max_user_watches = optional(number)
fs_nr_open = optional(number)
kernel_threads_max = optional(number)
net_core_netdev_max_backlog = optional(number)
net_core_optmem_max = optional(number)
net_core_rmem_default = optional(number)
net_core_rmem_max = optional(number)
net_core_somaxconn = optional(number)
net_core_wmem_default = optional(number)
net_core_wmem_max = optional(number)
net_ipv4_ip_local_port_range_min = optional(number)
net_ipv4_ip_local_port_range_max = optional(number)
net_ipv4_neigh_default_gc_thresh1 = optional(number)
net_ipv4_neigh_default_gc_thresh2 = optional(number)
net_ipv4_neigh_default_gc_thresh3 = optional(number)
net_ipv4_tcp_fin_timeout = optional(number)
net_ipv4_tcp_keepalive_intvl = optional(number)
net_ipv4_tcp_keepalive_probes = optional(number)
net_ipv4_tcp_keepalive_time = optional(number)
net_ipv4_tcp_max_syn_backlog = optional(number)
net_ipv4_tcp_max_tw_buckets = optional(number)
net_ipv4_tcp_tw_reuse = optional(bool)
net_netfilter_nf_conntrack_buckets = optional(number)
net_netfilter_nf_conntrack_max = optional(number)
vm_max_map_count = optional(number)
vm_swappiness = optional(number)
vm_vfs_cache_pressure = optional(number)
}))
transparent_huge_page_enabled = optional(string)
transparent_huge_page_defrag = optional(string)
swap_file_size_mb = optional(number)
}))
fips_enabled = optional(bool)
kubelet_disk_type = optional(string)
max_count = optional(number)
max_pods = optional(number)
message_of_the_day = optional(string)
mode = optional(string, "User")
min_count = optional(number)
node_network_profile = optional(object({
node_public_ip_tags = optional(map(string))
application_security_group_ids = optional(list(string))
allowed_host_ports = optional(list(object({
port_start = optional(number)
port_end = optional(number)
protocol = optional(string)
})))
}))
node_labels = optional(map(string))
node_public_ip_prefix_id = optional(string)
node_taints = optional(list(string))
orchestrator_version = optional(string)
os_disk_size_gb = optional(number)
os_disk_type = optional(string, "Managed")
os_sku = optional(string)
os_type = optional(string, "Linux")
pod_subnet = optional(object({
id = string
}), null)
priority = optional(string, "Regular")
proximity_placement_group_id = optional(string)
spot_max_price = optional(number)
scale_down_mode = optional(string, "Delete")
snapshot_id = optional(string)
ultra_ssd_enabled = optional(bool)
vnet_subnet = optional(object({
id = string
}), null)
upgrade_settings = optional(object({
drain_timeout_in_minutes = number
node_soak_duration_in_minutes = number
max_surge = string
}))
windows_profile = optional(object({
outbound_nat_enabled = optional(bool, true)
}))
workload_runtime = optional(string)
zones = optional(set(string))
create_before_destroy = optional(bool, true)
})) | `{}` | no |
+| [node\_resource\_group](#input\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | Enable or Disable the OIDC issuer URL. Defaults to false. | `bool` | `false` | no |
+| [oms\_agent\_enabled](#input\_oms\_agent\_enabled) | Enable OMS Agent Addon. | `bool` | `true` | no |
+| [only\_critical\_addons\_enabled](#input\_only\_critical\_addons\_enabled) | (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created. | `bool` | `null` | no |
+| [open\_service\_mesh\_enabled](#input\_open\_service\_mesh\_enabled) | Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). | `bool` | `null` | no |
+| [orchestrator\_version](#input\_orchestrator\_version) | Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region | `string` | `null` | no |
+| [os\_disk\_size\_gb](#input\_os\_disk\_size\_gb) | Disk size of nodes in GBs. | `number` | `50` | no |
+| [os\_disk\_type](#input\_os\_disk\_type) | The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created. | `string` | `"Managed"` | no |
+| [os\_sku](#input\_os\_sku) | (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [pod\_subnet](#input\_pod\_subnet) | object({
id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
}) | object({
id = string
}) | `null` | no |
+| [prefix](#input\_prefix) | (Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix` and `var.dns_prefix_private_cluster` can be specified. | `string` | `""` | no |
+| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | If true cluster API server will be exposed only on internal IP address and available only in cluster vnet. | `bool` | `false` | no |
+| [private\_cluster\_public\_fqdn\_enabled](#input\_private\_cluster\_public\_fqdn\_enabled) | (Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`. | `bool` | `false` | no |
+| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | (Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created. | `string` | `null` | no |
+| [public\_ssh\_key](#input\_public\_ssh\_key) | A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created. | `string` | `""` | no |
+| [rbac\_aad](#input\_rbac\_aad) | (Optional) Is Azure Active Directory integration enabled? | `bool` | `true` | no |
+| [rbac\_aad\_admin\_group\_object\_ids](#input\_rbac\_aad\_admin\_group\_object\_ids) | Object ID of groups with admin access. | `list(string)` | `null` | no |
+| [rbac\_aad\_azure\_rbac\_enabled](#input\_rbac\_aad\_azure\_rbac\_enabled) | (Optional) Is Role Based Access Control based on Azure AD enabled? | `bool` | `null` | no |
+| [rbac\_aad\_tenant\_id](#input\_rbac\_aad\_tenant\_id) | (Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. | `string` | `null` | no |
+| [resource\_group\_name](#input\_resource\_group\_name) | The existing resource group name to use | `string` | n/a | yes |
+| [role\_based\_access\_control\_enabled](#input\_role\_based\_access\_control\_enabled) | Enable Role Based Access Control. | `bool` | `false` | no |
+| [run\_command\_enabled](#input\_run\_command\_enabled) | (Optional) Whether to enable run command for the cluster or not. | `bool` | `true` | no |
+| [scale\_down\_mode](#input\_scale\_down\_mode) | (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created. | `string` | `"Delete"` | no |
+| [secret\_rotation\_enabled](#input\_secret\_rotation\_enabled) | Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false` | `bool` | `false` | no |
+| [secret\_rotation\_interval](#input\_secret\_rotation\_interval) | The interval to poll for secret rotation. This attribute is only set when `secret_rotation` is `true` and defaults to `2m` | `string` | `"2m"` | no |
+| [service\_mesh\_profile](#input\_service\_mesh\_profile) | `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
`internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
`external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`. | object({
mode = string
internal_ingress_gateway_enabled = optional(bool, true)
external_ingress_gateway_enabled = optional(bool, true)
}) | `null` | no |
+| [sku\_tier](#input\_sku\_tier) | The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium` | `string` | `"Free"` | no |
+| [snapshot\_id](#input\_snapshot\_id) | (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. | `string` | `null` | no |
+| [storage\_profile\_blob\_driver\_enabled](#input\_storage\_profile\_blob\_driver\_enabled) | (Optional) Is the Blob CSI driver enabled? Defaults to `false` | `bool` | `false` | no |
+| [storage\_profile\_disk\_driver\_enabled](#input\_storage\_profile\_disk\_driver\_enabled) | (Optional) Is the Disk CSI driver enabled? Defaults to `true` | `bool` | `true` | no |
+| [storage\_profile\_disk\_driver\_version](#input\_storage\_profile\_disk\_driver\_version) | (Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`. | `string` | `"v1"` | no |
+| [storage\_profile\_enabled](#input\_storage\_profile\_enabled) | Enable storage profile | `bool` | `false` | no |
+| [storage\_profile\_file\_driver\_enabled](#input\_storage\_profile\_file\_driver\_enabled) | (Optional) Is the File CSI driver enabled? Defaults to `true` | `bool` | `true` | no |
+| [storage\_profile\_snapshot\_controller\_enabled](#input\_storage\_profile\_snapshot\_controller\_enabled) | (Optional) Is the Snapshot Controller enabled? Defaults to `true` | `bool` | `true` | no |
+| [support\_plan](#input\_support\_plan) | The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`. | `string` | `"KubernetesOfficial"` | no |
+| [tags](#input\_tags) | Any tags that should be present on the AKS cluster resources | `map(string)` | `{}` | no |
+| [temporary\_name\_for\_rotation](#input\_temporary\_name\_for\_rotation) | (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. The `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation` | `string` | `null` | no |
+| [ultra\_ssd\_enabled](#input\_ultra\_ssd\_enabled) | (Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false. | `bool` | `false` | no |
+| [vnet\_subnet](#input\_vnet\_subnet) | object({
id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
}) | object({
id = string
}) | `null` | no |
+| [web\_app\_routing](#input\_web\_app\_routing) | object({
dns\_zone\_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
}) | object({
dns_zone_ids = list(string)
}) | `null` | no |
+| [workload\_autoscaler\_profile](#input\_workload\_autoscaler\_profile) | `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
`vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled. | object({
keda_enabled = optional(bool, false)
vertical_pod_autoscaler_enabled = optional(bool, false)
}) | `null` | no |
+| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | Enable or Disable Workload Identity. Defaults to false. | `bool` | `false` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [aci\_connector\_linux](#output\_aci\_connector\_linux) | The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource. |
+| [aci\_connector\_linux\_enabled](#output\_aci\_connector\_linux\_enabled) | Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource? |
+| [admin\_client\_certificate](#output\_admin\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. |
+| [admin\_client\_key](#output\_admin\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. |
+| [admin\_cluster\_ca\_certificate](#output\_admin\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. |
+| [admin\_host](#output\_admin\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host. |
+| [admin\_password](#output\_admin\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster. |
+| [admin\_username](#output\_admin\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster. |
+| [aks\_id](#output\_aks\_id) | The `azurerm_kubernetes_cluster`'s id. |
+| [aks\_name](#output\_aks\_name) | The `azurerm_kubernetes_cluster`'s name. |
+| [azure\_policy\_enabled](#output\_azure\_policy\_enabled) | The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) |
+| [azurerm\_log\_analytics\_workspace\_id](#output\_azurerm\_log\_analytics\_workspace\_id) | The id of the created Log Analytics workspace |
+| [azurerm\_log\_analytics\_workspace\_name](#output\_azurerm\_log\_analytics\_workspace\_name) | The name of the created Log Analytics workspace |
+| [azurerm\_log\_analytics\_workspace\_primary\_shared\_key](#output\_azurerm\_log\_analytics\_workspace\_primary\_shared\_key) | Specifies the workspace key of the log analytics workspace |
+| [client\_certificate](#output\_client\_certificate) | The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster. |
+| [client\_key](#output\_client\_key) | The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. |
+| [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster. |
+| [cluster\_fqdn](#output\_cluster\_fqdn) | The FQDN of the Azure Kubernetes Managed Cluster. |
+| [cluster\_identity](#output\_cluster\_identity) | The `azurerm_kubernetes_cluster`'s `identity` block. |
+| [cluster\_portal\_fqdn](#output\_cluster\_portal\_fqdn) | The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. |
+| [cluster\_private\_fqdn](#output\_cluster\_private\_fqdn) | The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster. |
+| [generated\_cluster\_private\_ssh\_key](#output\_generated\_cluster\_private\_ssh\_key) | The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format. |
+| [generated\_cluster\_public\_ssh\_key](#output\_generated\_cluster\_public\_ssh\_key) | The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations). |
+| [host](#output\_host) | The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host. |
+| [http\_application\_routing\_zone\_name](#output\_http\_application\_routing\_zone\_name) | The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing. |
+| [ingress\_application\_gateway](#output\_ingress\_application\_gateway) | The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block. |
+| [ingress\_application\_gateway\_enabled](#output\_ingress\_application\_gateway\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block? |
+| [key\_vault\_secrets\_provider](#output\_key\_vault\_secrets\_provider) | The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block. |
+| [key\_vault\_secrets\_provider\_enabled](#output\_key\_vault\_secrets\_provider\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block? |
+| [kube\_admin\_config\_raw](#output\_kube\_admin\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled. |
+| [kube\_config\_raw](#output\_kube\_config\_raw) | The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. |
+| [kubelet\_identity](#output\_kubelet\_identity) | The `azurerm_kubernetes_cluster`'s `kubelet_identity` block. |
+| [location](#output\_location) | The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created. |
+| [network\_profile](#output\_network\_profile) | The `azurerm_kubernetes_cluster`'s `network_profile` block |
+| [node\_resource\_group](#output\_node\_resource\_group) | The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. |
+| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster. |
+| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | The OIDC issuer URL that is associated with the cluster. |
+| [oms\_agent](#output\_oms\_agent) | The `azurerm_kubernetes_cluster`'s `oms_agent` argument. |
+| [oms\_agent\_enabled](#output\_oms\_agent\_enabled) | Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block? |
+| [open\_service\_mesh\_enabled](#output\_open\_service\_mesh\_enabled) | (Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about). |
+| [password](#output\_password) | The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster. |
+| [username](#output\_username) | The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster. |
+| [web\_app\_routing\_identity](#output\_web\_app\_routing\_identity) | The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object. |
+
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md
new file mode 100644
index 000000000..869fdfe2b
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf
new file mode 100644
index 000000000..7f368600b
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool.tf
@@ -0,0 +1,317 @@
+moved {
+ from = azurerm_kubernetes_cluster_node_pool.node_pool
+ to = azurerm_kubernetes_cluster_node_pool.node_pool_create_before_destroy
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" {
+  for_each = local.node_pools_create_before_destroy
+
+  kubernetes_cluster_id         = azurerm_kubernetes_cluster.main.id
+  name                          = "${each.value.name}${substr(md5(uuid()), 0, 4)}"
+  capacity_reservation_group_id = each.value.capacity_reservation_group_id
+  eviction_policy               = each.value.eviction_policy
+  fips_enabled                  = each.value.fips_enabled
+  gpu_instance                  = each.value.gpu_instance
+  host_group_id                 = each.value.host_group_id
+  kubelet_disk_type             = each.value.kubelet_disk_type
+  max_count                     = each.value.max_count
+  max_pods                      = each.value.max_pods
+  min_count                     = each.value.min_count
+  mode                          = each.value.mode
+  node_count                    = each.value.node_count
+  node_labels                   = each.value.node_labels
+  node_public_ip_prefix_id      = each.value.node_public_ip_prefix_id
+  node_taints                   = each.value.node_taints
+  orchestrator_version          = each.value.orchestrator_version
+  os_disk_size_gb               = each.value.os_disk_size_gb
+  os_disk_type                  = each.value.os_disk_type
+  os_sku                        = each.value.os_sku
+  os_type                       = each.value.os_type
+  pod_subnet_id                 = try(each.value.pod_subnet.id, null)
+  priority                      = each.value.priority
+  proximity_placement_group_id  = each.value.proximity_placement_group_id
+  scale_down_mode               = each.value.scale_down_mode
+  snapshot_id                   = each.value.snapshot_id
+  spot_max_price                = each.value.spot_max_price
+  tags                          = each.value.tags
+  ultra_ssd_enabled             = each.value.ultra_ssd_enabled
+  vm_size                       = each.value.vm_size
+  vnet_subnet_id                = try(each.value.vnet_subnet.id, null)
+  workload_runtime              = each.value.workload_runtime
+  zones                         = each.value.zones
+
+  dynamic "kubelet_config" {
+    for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"]
+
+    content {
+      allowed_unsafe_sysctls    = each.value.kubelet_config.allowed_unsafe_sysctls
+      container_log_max_line    = each.value.kubelet_config.container_log_max_files # NOTE(review): provider arg is named "_line" but carries the max-files value — confirm against variables.tf
+      container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb
+      cpu_cfs_quota_enabled     = each.value.kubelet_config.cpu_cfs_quota_enabled
+      cpu_cfs_quota_period      = each.value.kubelet_config.cpu_cfs_quota_period
+      cpu_manager_policy        = each.value.kubelet_config.cpu_manager_policy
+      image_gc_high_threshold   = each.value.kubelet_config.image_gc_high_threshold
+      image_gc_low_threshold    = each.value.kubelet_config.image_gc_low_threshold
+      pod_max_pid               = each.value.kubelet_config.pod_max_pid
+      topology_manager_policy   = each.value.kubelet_config.topology_manager_policy
+    }
+  }
+  dynamic "linux_os_config" {
+    for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"]
+
+    content {
+      swap_file_size_mb             = each.value.linux_os_config.swap_file_size_mb
+      transparent_huge_page_defrag  = each.value.linux_os_config.transparent_huge_page_defrag
+      transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled
+
+      dynamic "sysctl_config" {
+        for_each = each.value.linux_os_config.sysctl_config == null ? [] : ["sysctl_config"]
+
+        content {
+          fs_aio_max_nr                      = each.value.linux_os_config.sysctl_config.fs_aio_max_nr
+          fs_file_max                        = each.value.linux_os_config.sysctl_config.fs_file_max
+          fs_inotify_max_user_watches        = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches
+          fs_nr_open                         = each.value.linux_os_config.sysctl_config.fs_nr_open
+          kernel_threads_max                 = each.value.linux_os_config.sysctl_config.kernel_threads_max
+          net_core_netdev_max_backlog        = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog
+          net_core_optmem_max                = each.value.linux_os_config.sysctl_config.net_core_optmem_max
+          net_core_rmem_default              = each.value.linux_os_config.sysctl_config.net_core_rmem_default
+          net_core_rmem_max                  = each.value.linux_os_config.sysctl_config.net_core_rmem_max
+          net_core_somaxconn                 = each.value.linux_os_config.sysctl_config.net_core_somaxconn
+          net_core_wmem_default              = each.value.linux_os_config.sysctl_config.net_core_wmem_default
+          net_core_wmem_max                  = each.value.linux_os_config.sysctl_config.net_core_wmem_max
+          net_ipv4_ip_local_port_range_max   = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max
+          net_ipv4_ip_local_port_range_min   = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min
+          net_ipv4_neigh_default_gc_thresh1  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1
+          net_ipv4_neigh_default_gc_thresh2  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2
+          net_ipv4_neigh_default_gc_thresh3  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3
+          net_ipv4_tcp_fin_timeout           = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout
+          net_ipv4_tcp_keepalive_intvl       = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl
+          net_ipv4_tcp_keepalive_probes      = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes
+          net_ipv4_tcp_keepalive_time        = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time
+          net_ipv4_tcp_max_syn_backlog       = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog
+          net_ipv4_tcp_max_tw_buckets        = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets
+          net_ipv4_tcp_tw_reuse              = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse
+          net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets
+          net_netfilter_nf_conntrack_max     = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max
+          vm_max_map_count                   = each.value.linux_os_config.sysctl_config.vm_max_map_count
+          vm_swappiness                      = each.value.linux_os_config.sysctl_config.vm_swappiness
+          vm_vfs_cache_pressure              = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure
+        }
+      }
+    }
+  }
+  dynamic "node_network_profile" {
+    for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"]
+
+    content {
+      application_security_group_ids = each.value.node_network_profile.application_security_group_ids
+      node_public_ip_tags            = each.value.node_network_profile.node_public_ip_tags
+
+      dynamic "allowed_host_ports" {
+        for_each = each.value.node_network_profile.allowed_host_ports == null ? [] : each.value.node_network_profile.allowed_host_ports
+
+        content {
+          port_end   = allowed_host_ports.value.port_end
+          port_start = allowed_host_ports.value.port_start
+          protocol   = allowed_host_ports.value.protocol
+        }
+      }
+    }
+  }
+  dynamic "upgrade_settings" {
+    for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"]
+
+    content {
+      max_surge                     = each.value.upgrade_settings.max_surge
+      drain_timeout_in_minutes      = each.value.upgrade_settings.drain_timeout_in_minutes
+      node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes
+    }
+  }
+  dynamic "windows_profile" {
+    for_each = each.value.windows_profile == null ? [] : ["windows_profile"]
+
+    content {
+      outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
+    }
+  }
+
+  depends_on = [azapi_update_resource.aks_cluster_post_create]
+
+  lifecycle {
+    create_before_destroy = true
+    ignore_changes = [
+      name
+    ]
+    replace_triggered_by = [
+      null_resource.pool_name_keeper[each.key],
+    ]
+
+    precondition {
+      condition     = can(regex("^[a-z0-9]{1,8}$", each.value.name))
+      error_message = "A node pool name must consist of lowercase alphanumeric characters and have a maximum length of 8 characters (4 random chars are appended)."
+    }
+    precondition {
+      condition     = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
+      error_message = "With Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools."
+    }
+    precondition {
+      condition     = var.agents_type == "VirtualMachineScaleSets"
+      error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
+    }
+  }
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" {
+  for_each = local.node_pools_create_after_destroy
+
+  kubernetes_cluster_id         = azurerm_kubernetes_cluster.main.id
+  name                          = each.value.name
+  capacity_reservation_group_id = each.value.capacity_reservation_group_id
+  eviction_policy               = each.value.eviction_policy
+  fips_enabled                  = each.value.fips_enabled
+  host_group_id                 = each.value.host_group_id
+  kubelet_disk_type             = each.value.kubelet_disk_type
+  max_count                     = each.value.max_count
+  max_pods                      = each.value.max_pods
+  min_count                     = each.value.min_count
+  mode                          = each.value.mode
+  node_count                    = each.value.node_count
+  node_labels                   = each.value.node_labels
+  node_public_ip_prefix_id      = each.value.node_public_ip_prefix_id
+  node_taints                   = each.value.node_taints
+  orchestrator_version          = each.value.orchestrator_version
+  os_disk_size_gb               = each.value.os_disk_size_gb
+  os_disk_type                  = each.value.os_disk_type
+  os_sku                        = each.value.os_sku
+  os_type                       = each.value.os_type
+  pod_subnet_id                 = try(each.value.pod_subnet.id, null)
+  priority                      = each.value.priority
+  proximity_placement_group_id  = each.value.proximity_placement_group_id
+  scale_down_mode               = each.value.scale_down_mode
+  snapshot_id                   = each.value.snapshot_id
+  spot_max_price                = each.value.spot_max_price
+  tags                          = each.value.tags
+  ultra_ssd_enabled             = each.value.ultra_ssd_enabled
+  vm_size                       = each.value.vm_size
+  vnet_subnet_id                = try(each.value.vnet_subnet.id, null)
+  workload_runtime              = each.value.workload_runtime
+  zones                         = each.value.zones
+
+  dynamic "kubelet_config" {
+    for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"]
+
+    content {
+      allowed_unsafe_sysctls    = each.value.kubelet_config.allowed_unsafe_sysctls
+      container_log_max_line    = each.value.kubelet_config.container_log_max_files # NOTE(review): provider arg is named "_line" but carries the max-files value — confirm against variables.tf
+      container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb
+      cpu_cfs_quota_enabled     = each.value.kubelet_config.cpu_cfs_quota_enabled
+      cpu_cfs_quota_period      = each.value.kubelet_config.cpu_cfs_quota_period
+      cpu_manager_policy        = each.value.kubelet_config.cpu_manager_policy
+      image_gc_high_threshold   = each.value.kubelet_config.image_gc_high_threshold
+      image_gc_low_threshold    = each.value.kubelet_config.image_gc_low_threshold
+      pod_max_pid               = each.value.kubelet_config.pod_max_pid
+      topology_manager_policy   = each.value.kubelet_config.topology_manager_policy
+    }
+  }
+  dynamic "linux_os_config" {
+    for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"]
+
+    content {
+      swap_file_size_mb             = each.value.linux_os_config.swap_file_size_mb
+      transparent_huge_page_defrag  = each.value.linux_os_config.transparent_huge_page_defrag
+      transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled
+
+      dynamic "sysctl_config" {
+        for_each = each.value.linux_os_config.sysctl_config == null ? [] : ["sysctl_config"]
+
+        content {
+          fs_aio_max_nr                      = each.value.linux_os_config.sysctl_config.fs_aio_max_nr
+          fs_file_max                        = each.value.linux_os_config.sysctl_config.fs_file_max
+          fs_inotify_max_user_watches        = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches
+          fs_nr_open                         = each.value.linux_os_config.sysctl_config.fs_nr_open
+          kernel_threads_max                 = each.value.linux_os_config.sysctl_config.kernel_threads_max
+          net_core_netdev_max_backlog        = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog
+          net_core_optmem_max                = each.value.linux_os_config.sysctl_config.net_core_optmem_max
+          net_core_rmem_default              = each.value.linux_os_config.sysctl_config.net_core_rmem_default
+          net_core_rmem_max                  = each.value.linux_os_config.sysctl_config.net_core_rmem_max
+          net_core_somaxconn                 = each.value.linux_os_config.sysctl_config.net_core_somaxconn
+          net_core_wmem_default              = each.value.linux_os_config.sysctl_config.net_core_wmem_default
+          net_core_wmem_max                  = each.value.linux_os_config.sysctl_config.net_core_wmem_max
+          net_ipv4_ip_local_port_range_max   = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max
+          net_ipv4_ip_local_port_range_min   = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min
+          net_ipv4_neigh_default_gc_thresh1  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1
+          net_ipv4_neigh_default_gc_thresh2  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2
+          net_ipv4_neigh_default_gc_thresh3  = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3
+          net_ipv4_tcp_fin_timeout           = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout
+          net_ipv4_tcp_keepalive_intvl       = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl
+          net_ipv4_tcp_keepalive_probes      = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes
+          net_ipv4_tcp_keepalive_time        = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time
+          net_ipv4_tcp_max_syn_backlog       = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog
+          net_ipv4_tcp_max_tw_buckets        = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets
+          net_ipv4_tcp_tw_reuse              = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse
+          net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets
+          net_netfilter_nf_conntrack_max     = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max
+          vm_max_map_count                   = each.value.linux_os_config.sysctl_config.vm_max_map_count
+          vm_swappiness                      = each.value.linux_os_config.sysctl_config.vm_swappiness
+          vm_vfs_cache_pressure              = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure
+        }
+      }
+    }
+  }
+  dynamic "node_network_profile" {
+    for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"]
+
+    content {
+      node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags
+    }
+  }
+  dynamic "upgrade_settings" {
+    for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"]
+
+    content {
+      max_surge                     = each.value.upgrade_settings.max_surge
+      drain_timeout_in_minutes      = each.value.upgrade_settings.drain_timeout_in_minutes
+      node_soak_duration_in_minutes = each.value.upgrade_settings.node_soak_duration_in_minutes
+    }
+  }
+  dynamic "windows_profile" {
+    for_each = each.value.windows_profile == null ? [] : ["windows_profile"]
+
+    content {
+      outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
+    }
+  }
+
+  depends_on = [azapi_update_resource.aks_cluster_post_create]
+
+  lifecycle {
+    precondition {
+      condition     = can(regex("^[a-z0-9]{1,8}$", each.value.name))
+      error_message = "A node pool name must consist of lowercase alphanumeric characters and have a maximum length of 8 characters."
+    }
+    precondition {
+      condition     = var.network_plugin_mode != "overlay" || !can(regex("^Standard_DC[0-9]+s?_v2$", each.value.vm_size))
+      error_message = "With Azure CNI Overlay you can't use DCsv2-series virtual machines in node pools."
+    }
+    precondition {
+      condition     = var.agents_type == "VirtualMachineScaleSets"
+      error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
+    }
+  }
+}
+
+resource "null_resource" "pool_name_keeper" {
+ for_each = var.node_pools
+
+ triggers = {
+ pool_name = each.value.name
+ }
+
+ lifecycle {
+ precondition {
+ condition = !var.create_role_assignment_network_contributor || length(distinct(local.subnet_ids)) == length(local.subnet_ids)
+ error_message = "When `var.create_role_assignment_network_contributor` is `true`, you must set different subnet for different node pools, include default pool, otherwise you must set `var.create_role_assignment_network_contributor` to `false` and manage role assignments yourself."
+ }
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf
new file mode 100644
index 000000000..500f27ece
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/extra_node_pool_override.tf
@@ -0,0 +1,17 @@
+# tflint-ignore-file: azurerm_resource_tag
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" {
+ custom_ca_trust_enabled = each.value.custom_ca_trust_enabled
+ enable_auto_scaling = each.value.enable_auto_scaling
+ enable_host_encryption = each.value.enable_host_encryption
+ enable_node_public_ip = each.value.enable_node_public_ip
+ message_of_the_day = each.value.message_of_the_day
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" {
+ custom_ca_trust_enabled = each.value.custom_ca_trust_enabled
+ enable_auto_scaling = each.value.enable_auto_scaling
+ enable_host_encryption = each.value.enable_host_encryption
+ enable_node_public_ip = each.value.enable_node_public_ip
+ message_of_the_day = each.value.message_of_the_day
+}
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf
new file mode 100644
index 000000000..2b69dfe13
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/locals.tf
@@ -0,0 +1,74 @@
+locals {
+ # Abstract if auto_scaler_profile_scale_down_delay_after_delete is not set or null we should use the scan_interval.
+ auto_scaler_profile_scale_down_delay_after_delete = var.auto_scaler_profile_scale_down_delay_after_delete == null ? var.auto_scaler_profile_scan_interval : var.auto_scaler_profile_scale_down_delay_after_delete
+ # automatic upgrades are either:
+ # - null
+ # - patch, but then neither the kubernetes_version nor orchestrator_version must specify a patch number, where orchestrator_version may be also null
+ # - rapid/stable/node-image, but then the kubernetes_version and the orchestrator_version must be null
+ automatic_channel_upgrade_check = var.automatic_channel_upgrade == null ? true : (
+ (contains(["patch"], var.automatic_channel_upgrade) && can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.kubernetes_version)) && (can(regex("^[0-9]{1,}\\.[0-9]{1,}$", var.orchestrator_version)) || var.orchestrator_version == null)) ||
+ (contains(["rapid", "stable", "node-image"], var.automatic_channel_upgrade) && var.kubernetes_version == null && var.orchestrator_version == null)
+ )
+ cluster_name = try(coalesce(var.cluster_name, trim("${var.prefix}-aks", "-")), "aks")
+ # Abstract the decision whether to create an Analytics Workspace or not.
+ create_analytics_solution = var.log_analytics_workspace_enabled && var.log_analytics_solution == null
+ create_analytics_workspace = var.log_analytics_workspace_enabled && var.log_analytics_workspace == null
+ default_nodepool_subnet_segments = try(split("/", try(var.vnet_subnet.id, null)), [])
+ # Application Gateway ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/applicationGateways/myGateway1
+ existing_application_gateway_for_ingress_id = try(var.brown_field_application_gateway_for_ingress.id, null)
+ existing_application_gateway_resource_group_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : local.existing_application_gateway_segments_for_ingress[4]
+ existing_application_gateway_segments_for_ingress = var.brown_field_application_gateway_for_ingress == null ? null : split("/", local.existing_application_gateway_for_ingress_id)
+ existing_application_gateway_subnet_resource_group_name = try(local.existing_application_gateway_subnet_segments[4], null)
+ # Subnet ID: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1
+ existing_application_gateway_subnet_segments = try(split("/", var.brown_field_application_gateway_for_ingress.subnet_id), [])
+ existing_application_gateway_subnet_subscription_id_for_ingress = try(local.existing_application_gateway_subnet_segments[2], null)
+ existing_application_gateway_subnet_vnet_name = try(local.existing_application_gateway_subnet_segments[8], null)
+ existing_application_gateway_subscription_id_for_ingress = try(local.existing_application_gateway_segments_for_ingress[2], null)
+ ingress_application_gateway_enabled = local.use_brown_field_gw_for_ingress || local.use_green_field_gw_for_ingress
+ # Abstract the decision whether to use an Analytics Workspace supplied via vars, provision one ourselves or leave it null.
+ # This guarantees that local.log_analytics_workspace will contain a valid `id` and `name` IFF log_analytics_workspace_enabled
+ # is set to `true`.
+ log_analytics_workspace = var.log_analytics_workspace_enabled ? (
+ # The Log Analytics Workspace should be enabled:
+ var.log_analytics_workspace == null ? {
+ # `log_analytics_workspace_enabled` is `true` but `log_analytics_workspace` was not supplied.
+ # Create an `azurerm_log_analytics_workspace` resource and use that.
+ id = local.azurerm_log_analytics_workspace_id
+ name = local.azurerm_log_analytics_workspace_name
+ location = local.azurerm_log_analytics_workspace_location
+ resource_group_name = local.azurerm_log_analytics_workspace_resource_group_name
+ } : {
+ # `log_analytics_workspace` is supplied. Let's use that.
+ id = var.log_analytics_workspace.id
+ name = var.log_analytics_workspace.name
+ location = var.log_analytics_workspace.location
+ # `azurerm_log_analytics_workspace`'s id format: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1
+ resource_group_name = split("/", var.log_analytics_workspace.id)[4]
+ }
+ ) : null # Finally, the Log Analytics Workspace should be disabled.
+ node_pools_create_after_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy != true }
+ node_pools_create_before_destroy = { for k, p in var.node_pools : k => p if p.create_before_destroy == true }
+ private_dns_zone_name = try(reverse(split("/", var.private_dns_zone_id))[0], null)
+ query_datasource_for_log_analytics_workspace_location = var.log_analytics_workspace_enabled && (var.log_analytics_workspace != null ? var.log_analytics_workspace.location == null : false)
+ subnet_ids = [for _, s in local.subnets : s.id]
+ subnets = merge({ for k, v in merge(
+ [
+ for key, pool in var.node_pools : {
+ "${key}-vnet-subnet" : pool.vnet_subnet,
+ "${key}-pod-subnet" : pool.pod_subnet,
+ }
+ ]...) : k => v if v != null }, var.vnet_subnet == null ? {} : {
+ "vnet-subnet" : {
+ id = var.vnet_subnet.id
+ }
+ })
+ # subnet_ids = for id in local.potential_subnet_ids : id if id != null
+ use_brown_field_gw_for_ingress = var.brown_field_application_gateway_for_ingress != null
+ use_green_field_gw_for_ingress = var.green_field_application_gateway_for_ingress != null
+ valid_private_dns_zone_regexs = [
+ "private\\.[a-z0-9]+\\.azmk8s\\.io",
+ "privatelink\\.[a-z0-9]+\\.azmk8s\\.io",
+ "[a-zA-Z0-9\\-]{1,32}\\.private\\.[a-z0-9]+\\.azmk8s\\.io",
+ "[a-zA-Z0-9\\-]{1,32}\\.privatelink\\.[a-z0-9]+\\.azmk8s\\.io",
+ ]
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf
new file mode 100644
index 000000000..fe51625be
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/log_analytics.tf
@@ -0,0 +1,124 @@
+resource "azurerm_log_analytics_workspace" "main" {
+ count = local.create_analytics_workspace ? 1 : 0
+
+ location = var.location
+ name = try(coalesce(var.cluster_log_analytics_workspace_name, trim("${var.prefix}-workspace", "-")), "aks-workspace")
+ resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name)
+ allow_resource_only_permissions = var.log_analytics_workspace_allow_resource_only_permissions
+ cmk_for_query_forced = var.log_analytics_workspace_cmk_for_query_forced
+ daily_quota_gb = var.log_analytics_workspace_daily_quota_gb
+ data_collection_rule_id = var.log_analytics_workspace_data_collection_rule_id
+ immediate_data_purge_on_30_days_enabled = var.log_analytics_workspace_immediate_data_purge_on_30_days_enabled
+ internet_ingestion_enabled = var.log_analytics_workspace_internet_ingestion_enabled
+ internet_query_enabled = var.log_analytics_workspace_internet_query_enabled
+ local_authentication_disabled = var.log_analytics_workspace_local_authentication_disabled
+ reservation_capacity_in_gb_per_day = var.log_analytics_workspace_reservation_capacity_in_gb_per_day
+ retention_in_days = var.log_retention_in_days
+ sku = var.log_analytics_workspace_sku
+ tags = var.tags
+
+ dynamic "identity" {
+ for_each = var.log_analytics_workspace_identity == null ? [] : [var.log_analytics_workspace_identity]
+
+ content {
+ type = identity.value.type
+ identity_ids = identity.value.identity_ids
+ }
+ }
+
+ lifecycle {
+ precondition {
+ condition = can(coalesce(var.cluster_log_analytics_workspace_name, var.prefix))
+ error_message = "You must set one of `var.cluster_log_analytics_workspace_name` and `var.prefix` to create `azurerm_log_analytics_workspace.main`."
+ }
+ }
+}
+
+locals {
+ azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null)
+ azurerm_log_analytics_workspace_location = try(azurerm_log_analytics_workspace.main[0].location, null)
+ azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null)
+ azurerm_log_analytics_workspace_resource_group_name = try(azurerm_log_analytics_workspace.main[0].resource_group_name, null)
+}
+
+data "azurerm_log_analytics_workspace" "main" {
+ count = local.query_datasource_for_log_analytics_workspace_location ? 1 : 0
+
+ name = var.log_analytics_workspace.name
+ resource_group_name = local.log_analytics_workspace.resource_group_name
+}
+
+resource "azurerm_log_analytics_solution" "main" {
+ count = local.create_analytics_solution ? 1 : 0
+
+ location = coalesce(local.log_analytics_workspace.location, try(data.azurerm_log_analytics_workspace.main[0].location, null))
+ resource_group_name = local.log_analytics_workspace.resource_group_name
+ solution_name = "ContainerInsights"
+ workspace_name = local.log_analytics_workspace.name
+ workspace_resource_id = local.log_analytics_workspace.id
+ tags = var.tags
+
+ plan {
+ product = "OMSGallery/ContainerInsights"
+ publisher = "Microsoft"
+ }
+}
+
+locals {
+ dcr_location = try(coalesce(try(local.log_analytics_workspace.location, null), try(data.azurerm_log_analytics_workspace.main[0].location, null)), null)
+}
+
+resource "azurerm_monitor_data_collection_rule" "dcr" {
+ count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0
+
+ location = local.dcr_location
+ name = "MSCI-${local.dcr_location}-${azurerm_kubernetes_cluster.main.name}"
+ resource_group_name = var.resource_group_name
+ description = "DCR for Azure Monitor Container Insights"
+ tags = var.tags
+
+ data_flow {
+ destinations = [local.log_analytics_workspace.name]
+ streams = var.monitor_data_collection_rule_extensions_streams
+ }
+ data_flow {
+ destinations = [local.log_analytics_workspace.name]
+ streams = ["Microsoft-Syslog"]
+ }
+ destinations {
+ log_analytics {
+ name = local.log_analytics_workspace.name
+ workspace_resource_id = local.log_analytics_workspace.id
+ }
+ }
+ data_sources {
+ extension {
+ extension_name = "ContainerInsights"
+ name = "ContainerInsightsExtension"
+ streams = var.monitor_data_collection_rule_extensions_streams
+ extension_json = jsonencode({
+ "dataCollectionSettings" : {
+ interval = var.data_collection_settings.data_collection_interval
+ namespaceFilteringMode = var.data_collection_settings.namespace_filtering_mode_for_data_collection
+ namespaces = var.data_collection_settings.namespaces_for_data_collection
+ enableContainerLogV2 = var.data_collection_settings.container_log_v2_enabled
+ }
+ })
+ }
+ syslog {
+ facility_names = var.monitor_data_collection_rule_data_sources_syslog_facilities
+ log_levels = var.monitor_data_collection_rule_data_sources_syslog_levels
+ name = "sysLogsDataSource"
+ streams = ["Microsoft-Syslog"]
+ }
+ }
+}
+
+resource "azurerm_monitor_data_collection_rule_association" "dcra" {
+ count = local.create_analytics_workspace && var.oms_agent_enabled && var.create_monitor_data_collection_rule ? 1 : 0
+
+ target_resource_id = azurerm_kubernetes_cluster.main.id
+ data_collection_rule_id = azurerm_monitor_data_collection_rule.dcr[0].id
+ description = "Association of container insights data collection rule. Deleting this association will break the data collection for this AKS Cluster."
+ name = "ContainerInsightsExtension"
+}
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf
new file mode 100644
index 000000000..0a8dc8e59
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main.tf
@@ -0,0 +1,741 @@
+moved {
+ from = module.ssh-key.tls_private_key.ssh
+ to = tls_private_key.ssh[0]
+}
+
+resource "tls_private_key" "ssh" {
+ count = var.admin_username == null ? 0 : 1
+
+ algorithm = "RSA"
+ rsa_bits = 2048
+}
+
+resource "azurerm_kubernetes_cluster" "main" {
+ location = var.location
+ name = "${local.cluster_name}${var.cluster_name_random_suffix ? substr(md5(uuid()), 0, 4) : ""}"
+ resource_group_name = var.resource_group_name
+ azure_policy_enabled = var.azure_policy_enabled
+ cost_analysis_enabled = var.cost_analysis_enabled
+ disk_encryption_set_id = var.disk_encryption_set_id
+ dns_prefix = var.prefix
+ dns_prefix_private_cluster = var.dns_prefix_private_cluster
+ image_cleaner_enabled = var.image_cleaner_enabled
+ image_cleaner_interval_hours = var.image_cleaner_interval_hours
+ kubernetes_version = var.kubernetes_version
+ local_account_disabled = var.local_account_disabled
+ node_resource_group = var.node_resource_group
+ oidc_issuer_enabled = var.oidc_issuer_enabled
+ open_service_mesh_enabled = var.open_service_mesh_enabled
+ private_cluster_enabled = var.private_cluster_enabled
+ private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
+ private_dns_zone_id = var.private_dns_zone_id
+ role_based_access_control_enabled = var.role_based_access_control_enabled
+ run_command_enabled = var.run_command_enabled
+ sku_tier = var.sku_tier
+ support_plan = var.support_plan
+ tags = var.tags
+ workload_identity_enabled = var.workload_identity_enabled
+
+ dynamic "default_node_pool" {
+ for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
+
+ content {
+ name = var.agents_pool_name
+ enable_auto_scaling = var.enable_auto_scaling
+ enable_host_encryption = var.enable_host_encryption
+ enable_node_public_ip = var.enable_node_public_ip
+ fips_enabled = var.default_node_pool_fips_enabled
+ max_count = null
+ max_pods = var.agents_max_pods
+ min_count = null
+ node_count = var.agents_count
+ node_labels = var.agents_labels
+ only_critical_addons_enabled = var.only_critical_addons_enabled
+ orchestrator_version = var.orchestrator_version
+ os_disk_size_gb = var.os_disk_size_gb
+ os_disk_type = var.os_disk_type
+ os_sku = var.os_sku
+ pod_subnet_id = try(var.pod_subnet.id, null)
+ proximity_placement_group_id = var.agents_proximity_placement_group_id
+ scale_down_mode = var.scale_down_mode
+ snapshot_id = var.snapshot_id
+ tags = merge(var.tags, var.agents_tags)
+ temporary_name_for_rotation = var.temporary_name_for_rotation
+ type = var.agents_type
+ ultra_ssd_enabled = var.ultra_ssd_enabled
+ vm_size = var.agents_size
+ vnet_subnet_id = try(var.vnet_subnet.id, null)
+ zones = var.agents_availability_zones
+
+ dynamic "kubelet_config" {
+ for_each = var.agents_pool_kubelet_configs
+
+ content {
+ allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
+ container_log_max_line = kubelet_config.value.container_log_max_line
+ container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
+ cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
+ cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
+ cpu_manager_policy = kubelet_config.value.cpu_manager_policy
+ image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
+ image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
+ pod_max_pid = kubelet_config.value.pod_max_pid
+ topology_manager_policy = kubelet_config.value.topology_manager_policy
+ }
+ }
+ dynamic "linux_os_config" {
+ for_each = var.agents_pool_linux_os_configs
+
+ content {
+ swap_file_size_mb = linux_os_config.value.swap_file_size_mb
+ transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
+ transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
+
+ dynamic "sysctl_config" {
+ for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
+
+ content {
+ fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
+ fs_file_max = sysctl_config.value.fs_file_max
+ fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
+ fs_nr_open = sysctl_config.value.fs_nr_open
+ kernel_threads_max = sysctl_config.value.kernel_threads_max
+ net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
+ net_core_optmem_max = sysctl_config.value.net_core_optmem_max
+ net_core_rmem_default = sysctl_config.value.net_core_rmem_default
+ net_core_rmem_max = sysctl_config.value.net_core_rmem_max
+ net_core_somaxconn = sysctl_config.value.net_core_somaxconn
+ net_core_wmem_default = sysctl_config.value.net_core_wmem_default
+ net_core_wmem_max = sysctl_config.value.net_core_wmem_max
+ net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
+ net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
+ net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
+ net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
+ net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
+ net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
+ net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
+ net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
+ net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
+ net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
+ net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
+ net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
+ net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
+ net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
+ vm_max_map_count = sysctl_config.value.vm_max_map_count
+ vm_swappiness = sysctl_config.value.vm_swappiness
+ vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
+ }
+ }
+ }
+ }
+ dynamic "node_network_profile" {
+ for_each = var.node_network_profile == null ? [] : [var.node_network_profile]
+
+ content {
+ application_security_group_ids = node_network_profile.value.application_security_group_ids
+ node_public_ip_tags = node_network_profile.value.node_public_ip_tags
+
+ dynamic "allowed_host_ports" {
+ for_each = node_network_profile.value.allowed_host_ports == null ? [] : node_network_profile.value.allowed_host_ports
+
+ content {
+ port_end = allowed_host_ports.value.port_end
+ port_start = allowed_host_ports.value.port_start
+ protocol = allowed_host_ports.value.protocol
+ }
+ }
+ }
+ }
+ dynamic "upgrade_settings" {
+ for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
+
+ content {
+ max_surge = var.agents_pool_max_surge
+ drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
+ node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
+ }
+ }
+ }
+ }
+ dynamic "default_node_pool" {
+ for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
+
+ content {
+ name = var.agents_pool_name
+ enable_auto_scaling = var.enable_auto_scaling
+ enable_host_encryption = var.enable_host_encryption
+ enable_node_public_ip = var.enable_node_public_ip
+ fips_enabled = var.default_node_pool_fips_enabled
+ max_count = var.agents_max_count
+ max_pods = var.agents_max_pods
+ min_count = var.agents_min_count
+ node_labels = var.agents_labels
+ only_critical_addons_enabled = var.only_critical_addons_enabled
+ orchestrator_version = var.orchestrator_version
+ os_disk_size_gb = var.os_disk_size_gb
+ os_disk_type = var.os_disk_type
+ os_sku = var.os_sku
+ pod_subnet_id = try(var.pod_subnet.id, null)
+ proximity_placement_group_id = var.agents_proximity_placement_group_id
+ scale_down_mode = var.scale_down_mode
+ snapshot_id = var.snapshot_id
+ tags = merge(var.tags, var.agents_tags)
+ temporary_name_for_rotation = var.temporary_name_for_rotation
+ type = var.agents_type
+ ultra_ssd_enabled = var.ultra_ssd_enabled
+ vm_size = var.agents_size
+ vnet_subnet_id = try(var.vnet_subnet.id, null)
+ zones = var.agents_availability_zones
+
+ dynamic "kubelet_config" {
+ for_each = var.agents_pool_kubelet_configs
+
+ content {
+ allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
+ container_log_max_line = kubelet_config.value.container_log_max_line
+ container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
+ cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
+ cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
+ cpu_manager_policy = kubelet_config.value.cpu_manager_policy
+ image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
+ image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
+ pod_max_pid = kubelet_config.value.pod_max_pid
+ topology_manager_policy = kubelet_config.value.topology_manager_policy
+ }
+ }
+ dynamic "linux_os_config" {
+ for_each = var.agents_pool_linux_os_configs
+
+ content {
+ swap_file_size_mb = linux_os_config.value.swap_file_size_mb
+ transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
+ transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
+
+ dynamic "sysctl_config" {
+ for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
+
+ content {
+ fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
+ fs_file_max = sysctl_config.value.fs_file_max
+ fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
+ fs_nr_open = sysctl_config.value.fs_nr_open
+ kernel_threads_max = sysctl_config.value.kernel_threads_max
+ net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
+ net_core_optmem_max = sysctl_config.value.net_core_optmem_max
+ net_core_rmem_default = sysctl_config.value.net_core_rmem_default
+ net_core_rmem_max = sysctl_config.value.net_core_rmem_max
+ net_core_somaxconn = sysctl_config.value.net_core_somaxconn
+ net_core_wmem_default = sysctl_config.value.net_core_wmem_default
+ net_core_wmem_max = sysctl_config.value.net_core_wmem_max
+ net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
+ net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
+ net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
+ net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
+ net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
+ net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
+ net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
+ net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
+ net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
+ net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
+ net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
+ net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
+ net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
+ net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
+ vm_max_map_count = sysctl_config.value.vm_max_map_count
+ vm_swappiness = sysctl_config.value.vm_swappiness
+ vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
+ }
+ }
+ }
+ }
+ dynamic "upgrade_settings" {
+ for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
+
+ content {
+ max_surge = var.agents_pool_max_surge
+ drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
+ node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
+ }
+ }
+ }
+ }
+ dynamic "aci_connector_linux" {
+ for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : []
+
+ content {
+ subnet_name = var.aci_connector_linux_subnet_name
+ }
+ }
+ dynamic "api_server_access_profile" {
+ for_each = var.api_server_authorized_ip_ranges != null ? [
+ "api_server_access_profile"
+ ] : []
+
+ content {
+ authorized_ip_ranges = var.api_server_authorized_ip_ranges
+ }
+ }
+ dynamic "auto_scaler_profile" {
+ for_each = var.auto_scaler_profile_enabled ? ["default_auto_scaler_profile"] : []
+
+ content {
+ balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups
+ empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max
+ expander = var.auto_scaler_profile_expander
+ max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec
+ max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time
+ max_unready_nodes = var.auto_scaler_profile_max_unready_nodes
+ max_unready_percentage = var.auto_scaler_profile_max_unready_percentage
+ new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay
+ scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add
+ scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete
+ scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure
+ scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded
+ scale_down_unready = var.auto_scaler_profile_scale_down_unready
+ scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold
+ scan_interval = var.auto_scaler_profile_scan_interval
+ skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage
+ skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods
+ }
+ }
+ dynamic "azure_active_directory_role_based_access_control" {
+ for_each = var.role_based_access_control_enabled && var.rbac_aad ? ["rbac"] : []
+
+ content {
+ admin_group_object_ids = var.rbac_aad_admin_group_object_ids
+ azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled
+ managed = true
+ tenant_id = var.rbac_aad_tenant_id
+ }
+ }
+ dynamic "confidential_computing" {
+ for_each = var.confidential_computing == null ? [] : [var.confidential_computing]
+
+ content {
+ sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled
+ }
+ }
+ dynamic "http_proxy_config" {
+ for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"]
+
+ content {
+ http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)
+ https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy)
+ no_proxy = var.http_proxy_config.no_proxy
+ trusted_ca = var.http_proxy_config.trusted_ca
+ }
+ }
+ dynamic "identity" {
+ for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : []
+
+ content {
+ type = var.identity_type
+ identity_ids = var.identity_ids
+ }
+ }
+ dynamic "ingress_application_gateway" {
+ for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : []
+
+ content {
+ gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null)
+ gateway_name = try(var.green_field_application_gateway_for_ingress.name, null)
+ subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null)
+ subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null)
+ }
+ }
+ dynamic "key_management_service" {
+ for_each = var.kms_enabled ? ["key_management_service"] : []
+
+ content {
+ key_vault_key_id = var.kms_key_vault_key_id
+ key_vault_network_access = var.kms_key_vault_network_access
+ }
+ }
+ dynamic "key_vault_secrets_provider" {
+ for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : []
+
+ content {
+ secret_rotation_enabled = var.secret_rotation_enabled
+ secret_rotation_interval = var.secret_rotation_interval
+ }
+ }
+ dynamic "kubelet_identity" {
+ for_each = var.kubelet_identity == null ? [] : [var.kubelet_identity]
+
+ content {
+ client_id = kubelet_identity.value.client_id
+ object_id = kubelet_identity.value.object_id
+ user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id
+ }
+ }
+ dynamic "linux_profile" {
+ for_each = var.admin_username == null ? [] : ["linux_profile"]
+
+ content {
+ admin_username = var.admin_username
+
+ ssh_key {
+ key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "")
+ }
+ }
+ }
+ dynamic "maintenance_window" {
+ for_each = var.maintenance_window != null ? ["maintenance_window"] : []
+
+ content {
+ dynamic "allowed" {
+ for_each = var.maintenance_window.allowed
+
+ content {
+ day = allowed.value.day
+ hours = allowed.value.hours
+ }
+ }
+ dynamic "not_allowed" {
+ for_each = var.maintenance_window.not_allowed
+
+ content {
+ end = not_allowed.value.end
+ start = not_allowed.value.start
+ }
+ }
+ }
+ }
+ dynamic "maintenance_window_auto_upgrade" {
+ for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade]
+
+ content {
+ duration = maintenance_window_auto_upgrade.value.duration
+ frequency = maintenance_window_auto_upgrade.value.frequency
+ interval = maintenance_window_auto_upgrade.value.interval
+ day_of_month = maintenance_window_auto_upgrade.value.day_of_month
+ day_of_week = maintenance_window_auto_upgrade.value.day_of_week
+ start_date = maintenance_window_auto_upgrade.value.start_date
+ start_time = maintenance_window_auto_upgrade.value.start_time
+ utc_offset = maintenance_window_auto_upgrade.value.utc_offset
+ week_index = maintenance_window_auto_upgrade.value.week_index
+
+ dynamic "not_allowed" {
+ for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? [] : maintenance_window_auto_upgrade.value.not_allowed
+
+ content {
+ end = not_allowed.value.end
+ start = not_allowed.value.start
+ }
+ }
+ }
+ }
+ dynamic "maintenance_window_node_os" {
+ for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os]
+
+ content {
+ duration = maintenance_window_node_os.value.duration
+ frequency = maintenance_window_node_os.value.frequency
+ interval = maintenance_window_node_os.value.interval
+ day_of_month = maintenance_window_node_os.value.day_of_month
+ day_of_week = maintenance_window_node_os.value.day_of_week
+ start_date = maintenance_window_node_os.value.start_date
+ start_time = maintenance_window_node_os.value.start_time
+ utc_offset = maintenance_window_node_os.value.utc_offset
+ week_index = maintenance_window_node_os.value.week_index
+
+ dynamic "not_allowed" {
+ for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed
+
+ content {
+ end = not_allowed.value.end
+ start = not_allowed.value.start
+ }
+ }
+ }
+ }
+ dynamic "microsoft_defender" {
+ for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : []
+
+ content {
+ log_analytics_workspace_id = local.log_analytics_workspace.id
+ }
+ }
+ dynamic "monitor_metrics" {
+ for_each = var.monitor_metrics != null ? ["monitor_metrics"] : []
+
+ content {
+ annotations_allowed = var.monitor_metrics.annotations_allowed
+ labels_allowed = var.monitor_metrics.labels_allowed
+ }
+ }
+ network_profile {
+ network_plugin = var.network_plugin
+ dns_service_ip = var.net_profile_dns_service_ip
+ ebpf_data_plane = var.ebpf_data_plane
+ ip_versions = var.network_ip_versions
+ load_balancer_sku = var.load_balancer_sku
+ network_data_plane = var.network_data_plane
+ network_mode = var.network_mode
+ network_plugin_mode = var.network_plugin_mode
+ network_policy = var.network_policy
+ outbound_type = var.net_profile_outbound_type
+ pod_cidr = var.net_profile_pod_cidr
+ pod_cidrs = var.net_profile_pod_cidrs
+ service_cidr = var.net_profile_service_cidr
+ service_cidrs = var.net_profile_service_cidrs
+
+ dynamic "load_balancer_profile" {
+ for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [
+ "load_balancer_profile"
+ ] : []
+
+ content {
+ idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes
+ managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count
+ managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count
+ outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids
+ outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids
+ outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated
+ }
+ }
+ dynamic "nat_gateway_profile" {
+ for_each = var.nat_gateway_profile == null ? [] : [var.nat_gateway_profile]
+
+ content {
+ idle_timeout_in_minutes = nat_gateway_profile.value.idle_timeout_in_minutes
+ managed_outbound_ip_count = nat_gateway_profile.value.managed_outbound_ip_count
+ }
+ }
+ }
+ dynamic "oms_agent" {
+ for_each = (var.log_analytics_workspace_enabled && var.oms_agent_enabled) ? ["oms_agent"] : []
+
+ content {
+ log_analytics_workspace_id = local.log_analytics_workspace.id
+ msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled
+ }
+ }
+ dynamic "service_mesh_profile" {
+ for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"]
+
+ content {
+ mode = var.service_mesh_profile.mode
+ external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled
+ internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled
+ }
+ }
+ dynamic "service_principal" {
+ for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : []
+
+ content {
+ client_id = var.client_id
+ client_secret = var.client_secret
+ }
+ }
+ dynamic "storage_profile" {
+ for_each = var.storage_profile_enabled ? ["storage_profile"] : []
+
+ content {
+ blob_driver_enabled = var.storage_profile_blob_driver_enabled
+ disk_driver_enabled = var.storage_profile_disk_driver_enabled
+ disk_driver_version = var.storage_profile_disk_driver_version
+ file_driver_enabled = var.storage_profile_file_driver_enabled
+ snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled
+ }
+ }
+ dynamic "web_app_routing" {
+ for_each = var.web_app_routing == null ? [] : ["web_app_routing"]
+
+ content {
+ dns_zone_ids = var.web_app_routing.dns_zone_ids
+ }
+ }
+ dynamic "workload_autoscaler_profile" {
+ for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile]
+
+ content {
+ keda_enabled = workload_autoscaler_profile.value.keda_enabled
+ vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled
+ }
+ }
+
+ depends_on = [
+ null_resource.pool_name_keeper,
+ ]
+
+ lifecycle {
+ ignore_changes = [
+ http_application_routing_enabled,
+ http_proxy_config[0].no_proxy,
+ kubernetes_version,
+ # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource.
+ name,
+ ]
+ replace_triggered_by = [
+ null_resource.kubernetes_cluster_name_keeper.id
+ ]
+
+ precondition {
+ condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "")
+ error_message = "Either `client_id` and `client_secret` or `identity_type` must be set."
+ }
+ precondition {
+ # Why don't use var.identity_ids != null && length(var.identity_ids)>0 ? Because bool expression in Terraform is not short circuit so even var.identity_ids is null Terraform will still invoke length function with null and cause error. https://github.com/hashicorp/terraform/issues/24128
+ condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0)
+ error_message = "When `identity_type` is set to `UserAssigned`, `identity_ids` must be set as well."
+ }
+ precondition {
+ condition = var.identity_ids == null || var.client_id == ""
+ error_message = "Cannot set both `client_id` and `identity_ids`."
+ }
+ precondition {
+ condition = var.cost_analysis_enabled != true || (var.sku_tier == "Standard" || var.sku_tier == "Premium")
+ error_message = "`sku_tier` must be either `Standard` or `Premium` when cost analysis is enabled."
+ }
+ precondition {
+ condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled)
+ error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true."
+ }
+ precondition {
+ condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard")
+ error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`"
+ }
+ precondition {
+ condition = local.automatic_channel_upgrade_check
+ error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`."
+ }
+ precondition {
+ condition = !(var.kms_enabled && var.identity_type != "UserAssigned")
+ error_message = "KMS etcd encryption doesn't work with system-assigned managed identity."
+ }
+ precondition {
+ condition = !var.workload_identity_enabled || var.oidc_issuer_enabled
+ error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity"
+ }
+ precondition {
+ condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure"
+ error_message = "When network_plugin_mode is set to `overlay`, the network_plugin field can only be set to azure."
+ }
+ precondition {
+ condition = var.network_policy != "azure" || var.network_plugin == "azure"
+ error_message = "When network_policy is set to `azure`, the network_plugin field can only be set to `azure`."
+ }
+ precondition {
+ condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure"
+ error_message = "When ebpf_data_plane is set to cilium, the network_plugin field can only be set to azure."
+ }
+ precondition {
+ condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet != null
+ error_message = "When ebpf_data_plane is set to cilium, one of either network_plugin_mode = `overlay` or pod_subnet.id must be specified."
+ }
+ precondition {
+ condition = can(coalesce(var.cluster_name, var.prefix, var.dns_prefix_private_cluster))
+ error_message = "You must set one of `var.cluster_name`,`var.prefix`,`var.dns_prefix_private_cluster` to create `azurerm_kubernetes_cluster.main`."
+ }
+ precondition {
+ condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage"
+ error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`."
+ }
+ precondition {
+ condition = (var.kubelet_identity == null) || (
+ (var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0)
+ error_message = "When `kubelet_identity` is enabled - The `type` field in the `identity` block must be set to `UserAssigned` and `identity_ids` must be set."
+ }
+ precondition {
+ condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets"
+ error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes."
+ }
+ precondition {
+ condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null
+ error_message = "Either one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`."
+ }
+ precondition {
+ condition = var.prefix == null || var.dns_prefix_private_cluster == null
+ error_message = "Only one of `var.prefix` or `var.dns_prefix_private_cluster` can be specified."
+ }
+ precondition {
+ condition = var.dns_prefix_private_cluster == null || var.private_cluster_enabled
+ error_message = "When `dns_prefix_private_cluster` is set, `private_cluster_enabled` must be set to `true`."
+ }
+ precondition {
+ condition = var.dns_prefix_private_cluster == null || var.identity_type == "UserAssigned" || var.client_id != ""
+ error_message = "A user assigned identity or a service principal must be used when using a custom private dns zone"
+ }
+ precondition {
+ condition = var.private_dns_zone_id == null ? true : (anytrue([for r in local.valid_private_dns_zone_regexs : try(regex(r, local.private_dns_zone_name) == local.private_dns_zone_name, false)]))
+ error_message = "According to the [document](https://learn.microsoft.com/en-us/azure/aks/private-clusters?tabs=azure-portal#configure-a-private-dns-zone), the private DNS zone must be in one of the following formats: `privatelink.<region>.azmk8s.io`, `<subzone>.privatelink.<region>.azmk8s.io`, `private.<region>.azmk8s.io`, `<subzone>.private.<region>.azmk8s.io`"
+ }
+ }
+}
+
+resource "null_resource" "kubernetes_cluster_name_keeper" {
+ triggers = {
+ name = local.cluster_name
+ }
+}
+
+resource "null_resource" "kubernetes_version_keeper" {
+ triggers = {
+ version = var.kubernetes_version
+ }
+}
+
+resource "time_sleep" "interval_before_cluster_update" {
+ count = var.interval_before_cluster_update == null ? 0 : 1
+
+ create_duration = var.interval_before_cluster_update
+
+ depends_on = [
+ azurerm_kubernetes_cluster.main,
+ ]
+
+ lifecycle {
+ replace_triggered_by = [
+ null_resource.kubernetes_version_keeper.id,
+ ]
+ }
+}
+
+resource "azapi_update_resource" "aks_cluster_post_create" {
+ resource_id = azurerm_kubernetes_cluster.main.id
+ type = "Microsoft.ContainerService/managedClusters@2024-02-01"
+ body = {
+ properties = {
+ kubernetesVersion = var.kubernetes_version
+ }
+ }
+
+ depends_on = [
+ time_sleep.interval_before_cluster_update,
+ ]
+
+ lifecycle {
+ ignore_changes = all
+ replace_triggered_by = [null_resource.kubernetes_version_keeper.id]
+ }
+}
+
+resource "null_resource" "http_proxy_config_no_proxy_keeper" {
+ count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
+
+ triggers = {
+ http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "")
+ }
+}
+
+resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" {
+ count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
+
+ resource_id = azurerm_kubernetes_cluster.main.id
+ type = "Microsoft.ContainerService/managedClusters@2024-02-01"
+ body = {
+ properties = {
+ httpProxyConfig = {
+ noProxy = var.http_proxy_config.no_proxy
+ }
+ }
+ }
+
+ depends_on = [azapi_update_resource.aks_cluster_post_create]
+
+ lifecycle {
+ ignore_changes = all
+ replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id]
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf
new file mode 100644
index 000000000..a1f537658
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/main_override.tf
@@ -0,0 +1,6 @@
+# tflint-ignore-file: azurerm_resource_tag
+
+resource "azurerm_kubernetes_cluster" "main" {
+ automatic_channel_upgrade = var.automatic_channel_upgrade
+ node_os_channel_upgrade = var.node_os_channel_upgrade
+}
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf
new file mode 100644
index 000000000..e3d37ce76
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/outputs.tf
@@ -0,0 +1,231 @@
+output "aci_connector_linux" {
+ description = "The `aci_connector_linux` block of `azurerm_kubernetes_cluster` resource."
+ value = try(azurerm_kubernetes_cluster.main.aci_connector_linux[0], null)
+}
+
+output "aci_connector_linux_enabled" {
+ description = "Has `aci_connector_linux` been enabled on the `azurerm_kubernetes_cluster` resource?"
+ value = can(azurerm_kubernetes_cluster.main.aci_connector_linux[0])
+}
+
+output "admin_client_certificate" {
+ description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_certificate, "")
+}
+
+output "admin_client_key" {
+ description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].client_key, "")
+}
+
+output "admin_cluster_ca_certificate" {
+ description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].cluster_ca_certificate, "")
+}
+
+output "admin_host" {
+ description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. The Kubernetes cluster server host."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].host, "")
+}
+
+output "admin_password" {
+ description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A password or token used to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].password, "")
+}
+
+output "admin_username" {
+ description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_admin_config` block. A username used to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.kube_admin_config[0].username, "")
+}
+
+output "aks_id" {
+ description = "The `azurerm_kubernetes_cluster`'s id."
+ value = azurerm_kubernetes_cluster.main.id
+}
+
+output "aks_name" {
+ description = "The `azurerm_kubernetes_cluster`'s name."
+ value = azurerm_kubernetes_cluster.main.name
+}
+
+output "azure_policy_enabled" {
+ description = "The `azurerm_kubernetes_cluster`'s `azure_policy_enabled` argument. Should the Azure Policy Add-On be enabled? For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks)"
+ value = azurerm_kubernetes_cluster.main.azure_policy_enabled
+}
+
+output "azurerm_log_analytics_workspace_id" {
+ description = "The id of the created Log Analytics workspace"
+ value = try(azurerm_log_analytics_workspace.main[0].id, null)
+}
+
+output "azurerm_log_analytics_workspace_name" {
+ description = "The name of the created Log Analytics workspace"
+ value = try(azurerm_log_analytics_workspace.main[0].name, null)
+}
+
+output "azurerm_log_analytics_workspace_primary_shared_key" {
+ description = "Specifies the workspace key of the log analytics workspace"
+ sensitive = true
+ value = try(azurerm_log_analytics_workspace.main[0].primary_shared_key, null)
+}
+
+output "client_certificate" {
+ description = "The `client_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate
+}
+
+output "client_key" {
+ description = "The `client_key` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded private key used by clients to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].client_key
+}
+
+output "cluster_ca_certificate" {
+ description = "The `cluster_ca_certificate` in the `azurerm_kubernetes_cluster`'s `kube_config` block. Base64 encoded public CA certificate used as the root of trust for the Kubernetes cluster."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate
+}
+
+output "cluster_fqdn" {
+ description = "The FQDN of the Azure Kubernetes Managed Cluster."
+ value = azurerm_kubernetes_cluster.main.fqdn
+}
+
+output "cluster_identity" {
+ description = "The `azurerm_kubernetes_cluster`'s `identity` block."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.identity[0], null)
+}
+
+output "cluster_portal_fqdn" {
+ description = "The FQDN for the Azure Portal resources when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster."
+ value = azurerm_kubernetes_cluster.main.portal_fqdn
+}
+
+output "cluster_private_fqdn" {
+ description = "The FQDN for the Kubernetes Cluster when private link has been enabled, which is only resolvable inside the Virtual Network used by the Kubernetes Cluster."
+ value = azurerm_kubernetes_cluster.main.private_fqdn
+}
+
+output "generated_cluster_private_ssh_key" {
+ description = "The cluster will use this generated private key as ssh key when `var.public_ssh_key` is empty or null. Private key data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format."
+ sensitive = true
+ value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].private_key_pem : null) : null
+}
+
+output "generated_cluster_public_ssh_key" {
+ description = "The cluster will use this generated public key as ssh key when `var.public_ssh_key` is empty or null. The fingerprint of the public key data in OpenSSH MD5 hash format, e.g. `aa:bb:cc:....` Only available if the selected private key format is compatible, similarly to `public_key_openssh` and the [ECDSA P224 limitations](https://registry.terraform.io/providers/hashicorp/tls/latest/docs#limitations)."
+ value = try(azurerm_kubernetes_cluster.main.linux_profile[0], null) != null ? (var.public_ssh_key == "" || var.public_ssh_key == null ? tls_private_key.ssh[0].public_key_openssh : null) : null
+}
+
+output "host" {
+ description = "The `host` in the `azurerm_kubernetes_cluster`'s `kube_config` block. The Kubernetes cluster server host."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].host
+}
+
+output "http_application_routing_zone_name" {
+ description = "The `azurerm_kubernetes_cluster`'s `http_application_routing_zone_name` argument. The Zone Name of the HTTP Application Routing."
+ value = azurerm_kubernetes_cluster.main.http_application_routing_zone_name != null ? azurerm_kubernetes_cluster.main.http_application_routing_zone_name : ""
+}
+
+output "ingress_application_gateway" {
+ description = "The `azurerm_kubernetes_cluster`'s `ingress_application_gateway` block."
+ value = try(azurerm_kubernetes_cluster.main.ingress_application_gateway[0], null)
+}
+
+output "ingress_application_gateway_enabled" {
+ description = "Has the `azurerm_kubernetes_cluster` turned on `ingress_application_gateway` block?"
+ value = can(azurerm_kubernetes_cluster.main.ingress_application_gateway[0])
+}
+
+output "key_vault_secrets_provider" {
+ description = "The `azurerm_kubernetes_cluster`'s `key_vault_secrets_provider` block."
+ value = try(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0], null)
+}
+
+output "key_vault_secrets_provider_enabled" {
+ description = "Has the `azurerm_kubernetes_cluster` turned on `key_vault_secrets_provider` block?"
+ value = can(azurerm_kubernetes_cluster.main.key_vault_secrets_provider[0])
+}
+
+output "kube_admin_config_raw" {
+ description = "The `azurerm_kubernetes_cluster`'s `kube_admin_config_raw` argument. Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled and local accounts enabled."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_admin_config_raw
+}
+
+output "kube_config_raw" {
+ description = "The `azurerm_kubernetes_cluster`'s `kube_config_raw` argument. Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config_raw
+}
+
+output "kubelet_identity" {
+ description = "The `azurerm_kubernetes_cluster`'s `kubelet_identity` block."
+ value = azurerm_kubernetes_cluster.main.kubelet_identity
+}
+
+output "location" {
+ description = "The `azurerm_kubernetes_cluster`'s `location` argument. (Required) The location where the Managed Kubernetes Cluster should be created."
+ value = azurerm_kubernetes_cluster.main.location
+}
+
+output "network_profile" {
+ description = "The `azurerm_kubernetes_cluster`'s `network_profile` block"
+ value = azurerm_kubernetes_cluster.main.network_profile
+}
+
+output "node_resource_group" {
+ description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster."
+ value = azurerm_kubernetes_cluster.main.node_resource_group
+}
+
+output "node_resource_group_id" {
+ description = "The ID of the Resource Group containing the resources for this Managed Kubernetes Cluster."
+ value = azurerm_kubernetes_cluster.main.node_resource_group_id
+}
+
+output "oidc_issuer_url" {
+ description = "The OIDC issuer URL that is associated with the cluster."
+ value = azurerm_kubernetes_cluster.main.oidc_issuer_url
+}
+
+output "oms_agent" {
+ description = "The `azurerm_kubernetes_cluster`'s `oms_agent` argument."
+ value = try(azurerm_kubernetes_cluster.main.oms_agent[0], null)
+}
+
+output "oms_agent_enabled" {
+ description = "Has the `azurerm_kubernetes_cluster` turned on `oms_agent` block?"
+ value = can(azurerm_kubernetes_cluster.main.oms_agent[0])
+}
+
+output "open_service_mesh_enabled" {
+ description = "(Optional) Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)."
+ value = azurerm_kubernetes_cluster.main.open_service_mesh_enabled
+}
+
+output "password" {
+ description = "The `password` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A password or token used to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].password
+}
+
+output "username" {
+ description = "The `username` in the `azurerm_kubernetes_cluster`'s `kube_config` block. A username used to authenticate to the Kubernetes cluster."
+ sensitive = true
+ value = azurerm_kubernetes_cluster.main.kube_config[0].username
+}
+
+output "web_app_routing_identity" {
+ description = "The `azurerm_kubernetes_cluster`'s `web_app_routing_identity` block, it's type is a list of object."
+ value = try(azurerm_kubernetes_cluster.main.web_app_routing[0].web_app_routing_identity, [])
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf
new file mode 100644
index 000000000..e9601eaf0
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/role_assignments.tf
@@ -0,0 +1,126 @@
+resource "azurerm_role_assignment" "acr" {
+ for_each = var.attached_acr_id_map
+
+ principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id
+ scope = each.value
+ role_definition_name = "AcrPull"
+ skip_service_principal_aad_check = true
+}
+
+# /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/acceptanceTestResourceGroup1/providers/Microsoft.ManagedIdentity/userAssignedIdentities/testIdentity
+data "azurerm_user_assigned_identity" "cluster_identity" {
+ count = (var.client_id == "" || nonsensitive(var.client_secret) == "") && var.identity_type == "UserAssigned" ? 1 : 0
+
+ name = split("/", var.identity_ids[0])[8]
+ resource_group_name = split("/", var.identity_ids[0])[4]
+}
+
+# The AKS cluster identity has the Contributor role on the AKS node resource group (the auto-generated one, e.g. MC_myResourceGroup_myAKSCluster_eastus)
+# However when using a custom VNET, the AKS cluster identity needs the Network Contributor role on the VNET subnets
+# used by the system node pool and by any additional node pools.
+# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites
+# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites
+# https://github.com/Azure/terraform-azurerm-aks/issues/178
+resource "azurerm_role_assignment" "network_contributor" {
+ for_each = var.create_role_assignment_network_contributor && (var.client_id == "" || nonsensitive(var.client_secret) == "") ? local.subnets : {}
+
+ principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
+ scope = each.value.id
+ role_definition_name = "Network Contributor"
+
+ lifecycle {
+ precondition {
+ condition = length(var.network_contributor_role_assigned_subnet_ids) == 0
+ error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
+ }
+ }
+}
+
+resource "azurerm_role_assignment" "network_contributor_on_subnet" {
+ for_each = var.network_contributor_role_assigned_subnet_ids
+
+ principal_id = coalesce(try(data.azurerm_user_assigned_identity.cluster_identity[0].principal_id, azurerm_kubernetes_cluster.main.identity[0].principal_id), var.client_id)
+ scope = each.value
+ role_definition_name = "Network Contributor"
+
+ lifecycle {
+ precondition {
+ condition = !var.create_role_assignment_network_contributor
+ error_message = "Cannot set both of `var.create_role_assignment_network_contributor` and `var.network_contributor_role_assigned_subnet_ids`."
+ }
+ }
+}
+
+data "azurerm_client_config" "this" {}
+
+data "azurerm_virtual_network" "application_gateway_vnet" {
+ count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0
+
+ name = local.existing_application_gateway_subnet_vnet_name
+ resource_group_name = local.existing_application_gateway_subnet_resource_group_name
+}
+
+resource "azurerm_role_assignment" "application_gateway_existing_vnet_network_contributor" {
+ count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0
+
+ principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
+ scope = data.azurerm_virtual_network.application_gateway_vnet[0].id
+ role_definition_name = "Network Contributor"
+
+ lifecycle {
+ precondition {
+ condition = data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subnet_subscription_id_for_ingress
+ error_message = "Application Gateway's subnet must be in the same subscription, or `var.application_gateway_for_ingress.create_role_assignments` must be set to `false`."
+ }
+ }
+}
+
+resource "azurerm_role_assignment" "application_gateway_byo_vnet_network_contributor" {
+ count = var.create_role_assignments_for_application_gateway && local.use_green_field_gw_for_ingress ? 1 : 0
+
+ principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
+ scope = join("/", slice(local.default_nodepool_subnet_segments, 0, length(local.default_nodepool_subnet_segments) - 2))
+ role_definition_name = "Network Contributor"
+
+ lifecycle {
+ precondition {
+ condition = var.green_field_application_gateway_for_ingress == null || !(var.create_role_assignments_for_application_gateway && var.vnet_subnet == null)
+ error_message = "When `var.vnet_subnet` is `null`, you must set `var.create_role_assignments_for_application_gateway` to `false`, set `var.green_field_application_gateway_for_ingress` to `null`."
+ }
+ }
+}
+
+resource "azurerm_role_assignment" "existing_application_gateway_contributor" {
+ count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0
+
+ principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
+ scope = var.brown_field_application_gateway_for_ingress.id
+ role_definition_name = "Contributor"
+
+ lifecycle {
+ precondition {
+ condition = var.brown_field_application_gateway_for_ingress == null ? true : data.azurerm_client_config.this.subscription_id == local.existing_application_gateway_subscription_id_for_ingress
+ error_message = "Application Gateway must be in the same subscription, or `var.create_role_assignments_for_application_gateway` must be set to `false`."
+ }
+ }
+}
+
+data "azurerm_resource_group" "ingress_gw" {
+ count = var.create_role_assignments_for_application_gateway && local.use_brown_field_gw_for_ingress ? 1 : 0
+
+ name = local.existing_application_gateway_resource_group_for_ingress
+}
+
+data "azurerm_resource_group" "aks_rg" {
+ count = var.create_role_assignments_for_application_gateway ? 1 : 0
+
+ name = var.resource_group_name
+}
+
+resource "azurerm_role_assignment" "application_gateway_resource_group_reader" {
+ count = var.create_role_assignments_for_application_gateway && local.ingress_application_gateway_enabled ? 1 : 0
+
+ principal_id = azurerm_kubernetes_cluster.main.ingress_application_gateway[0].ingress_application_gateway_identity[0].object_id
+ scope = local.use_brown_field_gw_for_ingress ? data.azurerm_resource_group.ingress_gw[0].id : data.azurerm_resource_group.aks_rg[0].id
+ role_definition_name = "Reader"
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile
new file mode 100644
index 000000000..7f28c53a5
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/tfvmmakefile
@@ -0,0 +1,85 @@
+REMOTE_SCRIPT := "https://raw.githubusercontent.com/Azure/tfmod-scaffold/main/scripts"
+
+fmt:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fmt.sh" | bash
+
+fumpt:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumpt.sh" | bash
+
+gosec:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gosec.sh" | bash
+
+tffmt:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/tffmt.sh" | bash
+
+tffmtcheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-fmt.sh" | bash
+
+tfvalidatecheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terraform-validate.sh" | bash
+
+terrafmtcheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt-check.sh" | bash
+
+gofmtcheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gofmtcheck.sh" | bash
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/fumptcheck.sh" | bash
+
+golint:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-golangci-lint.sh" | bash
+
+tflint:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-tflint.sh" | bash
+
+lint: golint tflint gosec
+
+checkovcheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovcheck.sh" | bash
+
+checkovplancheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/checkovplancheck.sh" | bash
+
+fmtcheck: gofmtcheck tfvalidatecheck tffmtcheck terrafmtcheck
+
+pr-check: depscheck fmtcheck lint unit-test checkovcheck
+
+unit-test:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-unit-test.sh" | bash
+
+e2e-test:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-e2e-test.sh" | bash
+
+version-upgrade-test:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/version-upgrade-test.sh" | bash
+
+terrafmt:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/terrafmt.sh" | bash
+
+pre-commit: tffmt terrafmt depsensure fmt fumpt generate
+
+depsensure:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-ensure.sh" | bash
+
+depscheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/deps-check.sh" | bash
+
+generate:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/generate.sh" | bash
+
+gencheck:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/gencheck.sh" | bash
+
+yor-tag:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/yor-tag.sh" | bash
+
+autofix:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/autofix.sh" | bash
+
+test: fmtcheck
+ @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-gradually-deprecated.sh" | bash
+ @TEST=$(TEST) curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/run-test.sh" | bash
+
+build-test:
+ curl -H 'Cache-Control: no-cache, no-store' -sSL "$(REMOTE_SCRIPT)/build-test.sh" | bash
+
+.PHONY: fmt fmtcheck pr-check
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf
new file mode 120000
index 000000000..9cbc29686
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool.tf
@@ -0,0 +1 @@
+../extra_node_pool.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf
new file mode 100644
index 000000000..4ba39e77c
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/extra_node_pool_override.tf
@@ -0,0 +1,15 @@
+# tflint-ignore-file: azurerm_resource_tag
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_before_destroy" {
+ auto_scaling_enabled = each.value.enable_auto_scaling
+ host_encryption_enabled = each.value.enable_host_encryption
+ node_public_ip_enabled = each.value.enable_node_public_ip
+ temporary_name_for_rotation = each.value.temporary_name_for_rotation
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "node_pool_create_after_destroy" {
+ auto_scaling_enabled = each.value.enable_auto_scaling
+ host_encryption_enabled = each.value.enable_host_encryption
+ node_public_ip_enabled = each.value.enable_node_public_ip
+ temporary_name_for_rotation = each.value.temporary_name_for_rotation
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf
new file mode 120000
index 000000000..1b032e65b
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/locals.tf
@@ -0,0 +1 @@
+../locals.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf
new file mode 120000
index 000000000..639a396cd
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/log_analytics.tf
@@ -0,0 +1 @@
+../log_analytics.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf
new file mode 120000
index 000000000..6c481fa32
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main.tf
@@ -0,0 +1 @@
+../main.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf
new file mode 100644
index 000000000..49dc0a773
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/main_override.tf
@@ -0,0 +1,307 @@
+# tflint-ignore-file: azurerm_resource_tag
+
+resource "azurerm_kubernetes_cluster" "main" {
+ automatic_upgrade_channel = var.automatic_channel_upgrade
+ node_os_upgrade_channel = var.node_os_channel_upgrade
+
+ dynamic "default_node_pool" {
+ for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
+
+ content {
+ name = var.agents_pool_name
+ vm_size = var.agents_size
+ auto_scaling_enabled = var.enable_auto_scaling
+ fips_enabled = var.default_node_pool_fips_enabled
+ host_encryption_enabled = var.enable_host_encryption
+ max_count = null
+ max_pods = var.agents_max_pods
+ min_count = null
+ node_count = var.agents_count
+ node_labels = var.agents_labels
+ node_public_ip_enabled = var.enable_node_public_ip
+ only_critical_addons_enabled = var.only_critical_addons_enabled
+ orchestrator_version = var.orchestrator_version
+ os_disk_size_gb = var.os_disk_size_gb
+ os_disk_type = var.os_disk_type
+ os_sku = var.os_sku
+ pod_subnet_id = try(var.pod_subnet.id, null)
+ proximity_placement_group_id = var.agents_proximity_placement_group_id
+ scale_down_mode = var.scale_down_mode
+ snapshot_id = var.snapshot_id
+ tags = merge(var.tags, var.agents_tags)
+ temporary_name_for_rotation = var.temporary_name_for_rotation
+ type = var.agents_type
+ ultra_ssd_enabled = var.ultra_ssd_enabled
+ vnet_subnet_id = try(var.vnet_subnet.id, null)
+ zones = var.agents_availability_zones
+
+ dynamic "kubelet_config" {
+ for_each = var.agents_pool_kubelet_configs
+
+ content {
+ allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
+ container_log_max_line = kubelet_config.value.container_log_max_line
+ container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
+ cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
+ cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
+ cpu_manager_policy = kubelet_config.value.cpu_manager_policy
+ image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
+ image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
+ pod_max_pid = kubelet_config.value.pod_max_pid
+ topology_manager_policy = kubelet_config.value.topology_manager_policy
+ }
+ }
+ dynamic "linux_os_config" {
+ for_each = var.agents_pool_linux_os_configs
+
+ content {
+ swap_file_size_mb = linux_os_config.value.swap_file_size_mb
+ transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
+ transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
+
+ dynamic "sysctl_config" {
+ for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
+
+ content {
+ fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
+ fs_file_max = sysctl_config.value.fs_file_max
+ fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
+ fs_nr_open = sysctl_config.value.fs_nr_open
+ kernel_threads_max = sysctl_config.value.kernel_threads_max
+ net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
+ net_core_optmem_max = sysctl_config.value.net_core_optmem_max
+ net_core_rmem_default = sysctl_config.value.net_core_rmem_default
+ net_core_rmem_max = sysctl_config.value.net_core_rmem_max
+ net_core_somaxconn = sysctl_config.value.net_core_somaxconn
+ net_core_wmem_default = sysctl_config.value.net_core_wmem_default
+ net_core_wmem_max = sysctl_config.value.net_core_wmem_max
+ net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
+ net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
+ net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
+ net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
+ net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
+ net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
+ net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
+ net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
+ net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
+ net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
+ net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
+ net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
+ net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
+ net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
+ vm_max_map_count = sysctl_config.value.vm_max_map_count
+ vm_swappiness = sysctl_config.value.vm_swappiness
+ vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
+ }
+ }
+ }
+ }
+ dynamic "upgrade_settings" {
+ for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
+
+ content {
+ max_surge = var.agents_pool_max_surge
+ drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
+ node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
+ }
+ }
+ }
+ }
+ dynamic "default_node_pool" {
+ for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
+
+ content {
+ name = var.agents_pool_name
+ vm_size = var.agents_size
+ auto_scaling_enabled = var.enable_auto_scaling
+ fips_enabled = var.default_node_pool_fips_enabled
+ host_encryption_enabled = var.enable_host_encryption
+ max_count = var.agents_max_count
+ max_pods = var.agents_max_pods
+ min_count = var.agents_min_count
+ node_labels = var.agents_labels
+ node_public_ip_enabled = var.enable_node_public_ip
+ only_critical_addons_enabled = var.only_critical_addons_enabled
+ orchestrator_version = var.orchestrator_version
+ os_disk_size_gb = var.os_disk_size_gb
+ os_disk_type = var.os_disk_type
+ os_sku = var.os_sku
+ pod_subnet_id = try(var.pod_subnet.id, null)
+ proximity_placement_group_id = var.agents_proximity_placement_group_id
+ scale_down_mode = var.scale_down_mode
+ snapshot_id = var.snapshot_id
+ tags = merge(var.tags, var.agents_tags)
+ temporary_name_for_rotation = var.temporary_name_for_rotation
+ type = var.agents_type
+ ultra_ssd_enabled = var.ultra_ssd_enabled
+ vnet_subnet_id = try(var.vnet_subnet.id, null)
+ zones = var.agents_availability_zones
+
+ dynamic "kubelet_config" {
+ for_each = var.agents_pool_kubelet_configs
+
+ content {
+ allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
+ container_log_max_line = kubelet_config.value.container_log_max_line
+ container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
+ cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
+ cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
+ cpu_manager_policy = kubelet_config.value.cpu_manager_policy
+ image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
+ image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
+ pod_max_pid = kubelet_config.value.pod_max_pid
+ topology_manager_policy = kubelet_config.value.topology_manager_policy
+ }
+ }
+ dynamic "linux_os_config" {
+ for_each = var.agents_pool_linux_os_configs
+
+ content {
+ swap_file_size_mb = linux_os_config.value.swap_file_size_mb
+ transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
+ transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
+
+ dynamic "sysctl_config" {
+ for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
+
+ content {
+ fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
+ fs_file_max = sysctl_config.value.fs_file_max
+ fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
+ fs_nr_open = sysctl_config.value.fs_nr_open
+ kernel_threads_max = sysctl_config.value.kernel_threads_max
+ net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
+ net_core_optmem_max = sysctl_config.value.net_core_optmem_max
+ net_core_rmem_default = sysctl_config.value.net_core_rmem_default
+ net_core_rmem_max = sysctl_config.value.net_core_rmem_max
+ net_core_somaxconn = sysctl_config.value.net_core_somaxconn
+ net_core_wmem_default = sysctl_config.value.net_core_wmem_default
+ net_core_wmem_max = sysctl_config.value.net_core_wmem_max
+ net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
+ net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
+ net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
+ net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
+ net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
+ net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
+ net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
+ net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
+ net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
+ net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
+ net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
+ net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
+ net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
+ net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
+ vm_max_map_count = sysctl_config.value.vm_max_map_count
+ vm_swappiness = sysctl_config.value.vm_swappiness
+ vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
+ }
+ }
+ }
+ }
+ dynamic "upgrade_settings" {
+ for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
+
+ content {
+ max_surge = var.agents_pool_max_surge
+ drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes
+ node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes
+ }
+ }
+ }
+ }
+ dynamic "service_mesh_profile" {
+ for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"]
+
+ content {
+ mode = var.service_mesh_profile.mode
+ revisions = var.service_mesh_profile.revisions
+ external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled
+ internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled
+ }
+ }
+ dynamic "api_server_access_profile" {
+ for_each = var.api_server_authorized_ip_ranges != null ? [
+ "api_server_access_profile"
+ ] : []
+
+ content {
+ authorized_ip_ranges = var.api_server_authorized_ip_ranges
+ }
+ }
+ dynamic "azure_active_directory_role_based_access_control" {
+ for_each = var.role_based_access_control_enabled ? ["rbac"] : []
+
+ content {
+ admin_group_object_ids = var.rbac_aad_admin_group_object_ids
+ azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled
+ tenant_id = var.rbac_aad_tenant_id
+ }
+ }
+ network_profile {
+ network_plugin = var.network_plugin
+ dns_service_ip = var.net_profile_dns_service_ip
+ load_balancer_sku = var.load_balancer_sku
+ network_data_plane = var.ebpf_data_plane
+ network_plugin_mode = var.network_plugin_mode
+ network_policy = var.network_policy
+ outbound_type = var.net_profile_outbound_type
+ pod_cidr = var.net_profile_pod_cidr
+ service_cidr = var.net_profile_service_cidr
+
+ dynamic "load_balancer_profile" {
+ for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [
+ "load_balancer_profile"
+ ] : []
+
+ content {
+ idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes
+ managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count
+ managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count
+ outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids
+ outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids
+ outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated
+ }
+ }
+ }
+ dynamic "storage_profile" {
+ for_each = var.storage_profile_enabled ? ["storage_profile"] : []
+
+ content {
+ blob_driver_enabled = var.storage_profile_blob_driver_enabled
+ disk_driver_enabled = var.storage_profile_disk_driver_enabled
+ file_driver_enabled = var.storage_profile_file_driver_enabled
+ snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled
+ }
+ }
+
+ dynamic "upgrade_override" {
+ for_each = var.upgrade_override != null ? ["use_upgrade_override"] : []
+ content {
+ effective_until = var.upgrade_override.effective_until
+ force_upgrade_enabled = var.upgrade_override.force_upgrade_enabled
+ }
+
+ }
+
+ dynamic "web_app_routing" {
+ for_each = var.web_app_routing == null ? [] : ["web_app_routing"]
+
+ content {
+ dns_zone_ids = var.web_app_routing.dns_zone_ids
+ }
+ }
+
+ lifecycle {
+ prevent_destroy = true
+
+ ignore_changes = [
+ http_application_routing_enabled,
+ http_proxy_config[0].no_proxy,
+ kubernetes_version,
+ # we might have a random suffix in cluster's name so we have to ignore it here, but we've traced user supplied cluster name by `null_resource.kubernetes_cluster_name_keeper` so when the name is changed we'll recreate this resource.
+ name,
+ network_profile[0].load_balancer_profile[0].outbound_ip_address_ids,
+ network_profile[0].load_balancer_profile[0].outbound_ip_prefix_ids,
+ ]
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf
new file mode 120000
index 000000000..1a861df4d
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/outputs.tf
@@ -0,0 +1 @@
+../outputs.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf
new file mode 120000
index 000000000..705ff1c97
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/role_assignments.tf
@@ -0,0 +1 @@
+../role_assignments.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf
new file mode 100644
index 000000000..dee4388f6
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/v4_variables.tf
@@ -0,0 +1,11 @@
+variable "upgrade_override" {
+ type = object({
+ force_upgrade_enabled = bool
+ effective_until = optional(string)
+ })
+ default = null
+ description = <<-EOT
+ `force_upgrade_enabled` - (Required) Whether to force upgrade the cluster. Possible values are `true` or `false`.
+ `effective_until` - (Optional) Specifies the duration, in RFC 3339 format (e.g., `2025-10-01T13:00:00Z`), the upgrade_override values are effective. This field must be set for the `upgrade_override` values to take effect. The date-time must be within the next 30 days.
+ EOT
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf
new file mode 120000
index 000000000..3a65dccd2
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables.tf
@@ -0,0 +1 @@
+../variables.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf
new file mode 100644
index 000000000..30c4e22bc
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/variables_override.tf
@@ -0,0 +1,231 @@
+variable "node_pools" {
+ type = map(object({
+ name = string
+ node_count = optional(number)
+ tags = optional(map(string))
+ vm_size = string
+ host_group_id = optional(string)
+ capacity_reservation_group_id = optional(string)
+ custom_ca_trust_enabled = optional(bool)
+ enable_auto_scaling = optional(bool)
+ enable_host_encryption = optional(bool)
+ enable_node_public_ip = optional(bool)
+ eviction_policy = optional(string)
+ gpu_instance = optional(string)
+ kubelet_config = optional(object({
+ cpu_manager_policy = optional(string)
+ cpu_cfs_quota_enabled = optional(bool)
+ cpu_cfs_quota_period = optional(string)
+ image_gc_high_threshold = optional(number)
+ image_gc_low_threshold = optional(number)
+ topology_manager_policy = optional(string)
+ allowed_unsafe_sysctls = optional(set(string))
+ container_log_max_size_mb = optional(number)
+ container_log_max_files = optional(number)
+ pod_max_pid = optional(number)
+ }))
+ linux_os_config = optional(object({
+ sysctl_config = optional(object({
+ fs_aio_max_nr = optional(number)
+ fs_file_max = optional(number)
+ fs_inotify_max_user_watches = optional(number)
+ fs_nr_open = optional(number)
+ kernel_threads_max = optional(number)
+ net_core_netdev_max_backlog = optional(number)
+ net_core_optmem_max = optional(number)
+ net_core_rmem_default = optional(number)
+ net_core_rmem_max = optional(number)
+ net_core_somaxconn = optional(number)
+ net_core_wmem_default = optional(number)
+ net_core_wmem_max = optional(number)
+ net_ipv4_ip_local_port_range_min = optional(number)
+ net_ipv4_ip_local_port_range_max = optional(number)
+ net_ipv4_neigh_default_gc_thresh1 = optional(number)
+ net_ipv4_neigh_default_gc_thresh2 = optional(number)
+ net_ipv4_neigh_default_gc_thresh3 = optional(number)
+ net_ipv4_tcp_fin_timeout = optional(number)
+ net_ipv4_tcp_keepalive_intvl = optional(number)
+ net_ipv4_tcp_keepalive_probes = optional(number)
+ net_ipv4_tcp_keepalive_time = optional(number)
+ net_ipv4_tcp_max_syn_backlog = optional(number)
+ net_ipv4_tcp_max_tw_buckets = optional(number)
+ net_ipv4_tcp_tw_reuse = optional(bool)
+ net_netfilter_nf_conntrack_buckets = optional(number)
+ net_netfilter_nf_conntrack_max = optional(number)
+ vm_max_map_count = optional(number)
+ vm_swappiness = optional(number)
+ vm_vfs_cache_pressure = optional(number)
+ }))
+ transparent_huge_page_enabled = optional(string)
+ transparent_huge_page_defrag = optional(string)
+ swap_file_size_mb = optional(number)
+ }))
+ fips_enabled = optional(bool)
+ kubelet_disk_type = optional(string)
+ max_count = optional(number)
+ max_pods = optional(number)
+ message_of_the_day = optional(string)
+ mode = optional(string, "User")
+ min_count = optional(number)
+ node_network_profile = optional(object({
+ node_public_ip_tags = optional(map(string))
+ }))
+ node_labels = optional(map(string))
+ node_public_ip_prefix_id = optional(string)
+ node_taints = optional(list(string))
+ orchestrator_version = optional(string)
+ os_disk_size_gb = optional(number)
+ os_disk_type = optional(string, "Managed")
+ os_sku = optional(string)
+ os_type = optional(string, "Linux")
+ pod_subnet = optional(object({
+ id = string
+ }), null)
+ priority = optional(string, "Regular")
+ proximity_placement_group_id = optional(string)
+ spot_max_price = optional(number)
+ scale_down_mode = optional(string, "Delete")
+ snapshot_id = optional(string)
+ ultra_ssd_enabled = optional(bool)
+ vnet_subnet = optional(object({
+ id = string
+ }), null)
+ upgrade_settings = optional(object({
+ drain_timeout_in_minutes = number
+ node_soak_duration_in_minutes = number
+ max_surge = string
+ }))
+ windows_profile = optional(object({
+ outbound_nat_enabled = optional(bool, true)
+ }))
+ workload_runtime = optional(string)
+ zones = optional(set(string))
+ create_before_destroy = optional(bool, true)
+ temporary_name_for_rotation = optional(string)
+ }))
+ default = {}
+ description = <<-EOT
+ A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
+ map(object({
+ name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
+ node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
+ tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
+ vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
+ host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
+ capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
+ custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
+ enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
+ enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
+ enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
+ eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
+ gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
+ kubelet_config = optional(object({
+ cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
+ cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
+ cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
+ image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
+ allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
+ container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
+ container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
+ pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
+ }))
+ linux_os_config = optional(object({
+ sysctl_config = optional(object({
+ fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
+ fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
+ fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
+ fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
+ kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
+ net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
+ net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
+ net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
+ net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+ net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
+ vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
+ vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ }))
+ transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
+ transparent_huge_page_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
+ swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
+ }))
+ fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
+ kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
+ max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
+ max_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
+ message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
+ mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
+ min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
+ node_network_profile = optional(object({
+ node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
+ }))
+ node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
+ node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
+ node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
+ orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
+ os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
+ os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
+ os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
+ os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
+ pod_subnet = optional(object({
+ id = (Required) The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
+ }))
+ priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
+ proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
+ spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
+ scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
+ snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
+ ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
+ vnet_subnet = optional(object({
+ id = (Required) The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
+ }))
+ upgrade_settings = optional(object({
+ drain_timeout_in_minutes = number
+ node_soak_duration_in_minutes = number
+ max_surge = string
+ }))
+ windows_profile = optional(object({
+ outbound_nat_enabled = optional(bool, true)
+ }))
+ workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
+ zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
+ create_before_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflict. Defaults to `true`.
+ temporary_name_for_rotation = (Optional) Specifies the name of the temporary node pool used to cycle the node pool when one of the relevant properties are updated.
+ }))
+ EOT
+ nullable = false
+}
+
+variable "service_mesh_profile" {
+ type = object({
+ mode = string
+ revisions = list(string)
+ internal_ingress_gateway_enabled = optional(bool, true)
+ external_ingress_gateway_enabled = optional(bool, true)
+ })
+ default = null
+ description = <<-EOT
+ `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
+ `revisions` - (Required) Specify 1 or 2 Istio control plane revisions for managing minor upgrades using the canary upgrade process. For example, create the resource with `revisions` set to `["asm-1-20"]`, or leave it empty (the `revisions` will only be known after apply). To start the canary upgrade, change `revisions` to `["asm-1-20", "asm-1-21"]`. To roll back the canary upgrade, revert to `["asm-1-20"]`. To confirm the upgrade, change to `["asm-1-21"]`.
+ `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
+ `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`.
+ EOT
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf
new file mode 120000
index 000000000..8bd0ff140
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions.tf
@@ -0,0 +1 @@
+../versions.tf
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf
new file mode 100644
index 000000000..45d44abe3
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/v4/versions_override.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ azapi = {
+ source = "Azure/azapi"
+ version = ">=2.0, < 3.0"
+ }
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = ">= 4.16.0, < 5.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 3.1"
+ }
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf
new file mode 100644
index 000000000..c819f9b89
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/variables.tf
@@ -0,0 +1,1601 @@
+variable "location" {
+ type = string
+ description = "Location of cluster, if not defined it will be read from the resource-group"
+}
+
+variable "resource_group_name" {
+ type = string
+ description = "The existing resource group name to use"
+}
+
+variable "aci_connector_linux_enabled" {
+ type = bool
+ default = false
+ description = "Enable Virtual Node pool"
+}
+
+variable "aci_connector_linux_subnet_name" {
+ type = string
+ default = null
+ description = "(Optional) aci_connector_linux subnet name"
+}
+
+variable "admin_username" {
+ type = string
+ default = null
+ description = "The username of the local administrator to be created on the Kubernetes cluster. Set this variable to `null` to turn off the cluster's `linux_profile`. Changing this forces a new resource to be created."
+}
+
+variable "agents_availability_zones" {
+ type = list(string)
+ default = null
+ description = "(Optional) A list of Availability Zones across which the Node Pool should be spread. Changing this forces a new resource to be created."
+}
+
+variable "agents_count" {
+ type = number
+ default = 2
+ description = "The number of Agents that should exist in the Agent Pool. Please set `agents_count` to `null` while `enable_auto_scaling` is `true` to avoid possible `agents_count` changes."
+}
+
+variable "agents_labels" {
+ type = map(string)
+ default = {}
+ description = "(Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created."
+}
+
+variable "agents_max_count" {
+ type = number
+ default = null
+ description = "Maximum number of nodes in a pool"
+}
+
+variable "agents_max_pods" {
+ type = number
+ default = null
+ description = "(Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created."
+}
+
+variable "agents_min_count" {
+ type = number
+ default = null
+ description = "Minimum number of nodes in a pool"
+}
+
+variable "agents_pool_drain_timeout_in_minutes" {
+ type = number
+ default = null
+ description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created."
+}
+
+variable "agents_pool_kubelet_configs" {
+ type = list(object({
+ cpu_manager_policy = optional(string)
+ cpu_cfs_quota_enabled = optional(bool, true)
+ cpu_cfs_quota_period = optional(string)
+ image_gc_high_threshold = optional(number)
+ image_gc_low_threshold = optional(number)
+ topology_manager_policy = optional(string)
+ allowed_unsafe_sysctls = optional(set(string))
+ container_log_max_size_mb = optional(number)
+ container_log_max_line = optional(number)
+ pod_max_pid = optional(number)
+ }))
+ default = []
+ description = <<-EOT
+ list(object({
    cpu_manager_policy        = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`. Changing this forces a new resource to be created.
+ cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
+ cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
+ image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
+ allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
+ container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
    container_log_max_line    = (Optional) Specifies the maximum number of container log files that can be present for a container. Must be at least `2`. Changing this forces a new resource to be created.
+ pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
+ }))
+EOT
+ nullable = false
+}
+
+variable "agents_pool_linux_os_configs" {
+ type = list(object({
+ sysctl_configs = optional(list(object({
+ fs_aio_max_nr = optional(number)
+ fs_file_max = optional(number)
+ fs_inotify_max_user_watches = optional(number)
+ fs_nr_open = optional(number)
+ kernel_threads_max = optional(number)
+ net_core_netdev_max_backlog = optional(number)
+ net_core_optmem_max = optional(number)
+ net_core_rmem_default = optional(number)
+ net_core_rmem_max = optional(number)
+ net_core_somaxconn = optional(number)
+ net_core_wmem_default = optional(number)
+ net_core_wmem_max = optional(number)
+ net_ipv4_ip_local_port_range_min = optional(number)
+ net_ipv4_ip_local_port_range_max = optional(number)
+ net_ipv4_neigh_default_gc_thresh1 = optional(number)
+ net_ipv4_neigh_default_gc_thresh2 = optional(number)
+ net_ipv4_neigh_default_gc_thresh3 = optional(number)
+ net_ipv4_tcp_fin_timeout = optional(number)
+ net_ipv4_tcp_keepalive_intvl = optional(number)
+ net_ipv4_tcp_keepalive_probes = optional(number)
+ net_ipv4_tcp_keepalive_time = optional(number)
+ net_ipv4_tcp_max_syn_backlog = optional(number)
+ net_ipv4_tcp_max_tw_buckets = optional(number)
+ net_ipv4_tcp_tw_reuse = optional(bool)
+ net_netfilter_nf_conntrack_buckets = optional(number)
+ net_netfilter_nf_conntrack_max = optional(number)
+ vm_max_map_count = optional(number)
+ vm_swappiness = optional(number)
+ vm_vfs_cache_pressure = optional(number)
+ })), [])
+ transparent_huge_page_enabled = optional(string)
+ transparent_huge_page_defrag = optional(string)
+ swap_file_size_mb = optional(number)
+ }))
+ default = []
+ description = <<-EOT
+ list(object({
+ sysctl_configs = optional(list(object({
+ fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
+ fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
+ fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
+ fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
+ kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
+ net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
+ net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
+ net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
+ net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
      net_ipv4_ip_local_port_range_min   = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
      net_ipv4_ip_local_port_range_max   = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_tw_reuse = (Optional) The sysctl setting net.ipv4.tcp_tw_reuse. Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
+ vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
+ vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ })), [])
+ transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
    transparent_huge_page_defrag  = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
+ swap_file_size_mb = (Optional) Specifies the size of the swap file on each node in MB. Changing this forces a new resource to be created.
+ }))
+EOT
+ nullable = false
+}
+
+variable "agents_pool_max_surge" {
+ type = string
+ default = "10%"
+ description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade."
+}
+
+variable "agents_pool_name" {
+ type = string
+ default = "nodepool"
+ description = "The default Azure AKS agentpool (nodepool) name."
+ nullable = false
+}
+
+variable "agents_pool_node_soak_duration_in_minutes" {
+ type = number
+ default = 0
+ description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0."
+}
+
+variable "agents_proximity_placement_group_id" {
+ type = string
+ default = null
+ description = "(Optional) The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created."
+}
+
+variable "agents_size" {
+ type = string
+ default = "Standard_D2s_v3"
+ description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created."
+}
+
+variable "agents_tags" {
+ type = map(string)
+ default = {}
+ description = "(Optional) A mapping of tags to assign to the Node Pool."
+}
+
+variable "agents_type" {
+ type = string
+ default = "VirtualMachineScaleSets"
+ description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. Defaults to VirtualMachineScaleSets."
+}
+
+variable "api_server_authorized_ip_ranges" {
+ type = set(string)
+ default = null
+ description = "(Optional) The IP ranges to allow for incoming traffic to the server nodes."
+}
+
+variable "attached_acr_id_map" {
+ type = map(string)
+ default = {}
+ description = "Azure Container Registry ids that need an authentication mechanism with Azure Kubernetes Service (AKS). Map key must be static string as acr's name, the value is acr's resource id. Changing this forces some new resources to be created."
+ nullable = false
+}
+
+variable "auto_scaler_profile_balance_similar_node_groups" {
+ type = bool
+ default = false
+ description = "Detect similar node groups and balance the number of nodes between them. Defaults to `false`."
+}
+
+variable "auto_scaler_profile_empty_bulk_delete_max" {
+ type = number
+ default = 10
+ description = "Maximum number of empty nodes that can be deleted at the same time. Defaults to `10`."
+}
+
+variable "auto_scaler_profile_enabled" {
+ type = bool
+ default = false
+ description = "Enable configuring the auto scaler profile"
+ nullable = false
+}
+
+variable "auto_scaler_profile_expander" {
+ type = string
+ default = "random"
+ description = "Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`."
+
+ validation {
+ condition = contains(["least-waste", "most-pods", "priority", "random"], var.auto_scaler_profile_expander)
+ error_message = "Must be either `least-waste`, `most-pods`, `priority` or `random`."
+ }
+}
+
+variable "auto_scaler_profile_max_graceful_termination_sec" {
+ type = string
+ default = "600"
+ description = "Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`."
+}
+
+variable "auto_scaler_profile_max_node_provisioning_time" {
+ type = string
+ default = "15m"
+ description = "Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`."
+}
+
+variable "auto_scaler_profile_max_unready_nodes" {
+ type = number
+ default = 3
+ description = "Maximum Number of allowed unready nodes. Defaults to `3`."
+}
+
+variable "auto_scaler_profile_max_unready_percentage" {
+ type = number
+ default = 45
+ description = "Maximum percentage of unready nodes the cluster autoscaler will stop if the percentage is exceeded. Defaults to `45`."
+}
+
+variable "auto_scaler_profile_new_pod_scale_up_delay" {
+ type = string
+ default = "10s"
+ description = "For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`."
+}
+
+variable "auto_scaler_profile_scale_down_delay_after_add" {
+ type = string
+ default = "10m"
+ description = "How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`."
+}
+
+variable "auto_scaler_profile_scale_down_delay_after_delete" {
+ type = string
+ default = null
+ description = "How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`."
+}
+
+variable "auto_scaler_profile_scale_down_delay_after_failure" {
+ type = string
+ default = "3m"
+ description = "How long after scale down failure that scale down evaluation resumes. Defaults to `3m`."
+}
+
+variable "auto_scaler_profile_scale_down_unneeded" {
+ type = string
+ default = "10m"
+ description = "How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`."
+}
+
+variable "auto_scaler_profile_scale_down_unready" {
+ type = string
+ default = "20m"
+ description = "How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`."
+}
+
+variable "auto_scaler_profile_scale_down_utilization_threshold" {
+ type = string
+ default = "0.5"
+ description = "Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`."
+}
+
+variable "auto_scaler_profile_scan_interval" {
+ type = string
+ default = "10s"
+ description = "How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`."
+}
+
+variable "auto_scaler_profile_skip_nodes_with_local_storage" {
+ type = bool
+ default = true
+ description = "If `true` cluster autoscaler will never delete nodes with pods with local storage, for example, EmptyDir or HostPath. Defaults to `true`."
+}
+
+variable "auto_scaler_profile_skip_nodes_with_system_pods" {
+ type = bool
+ default = true
+ description = "If `true` cluster autoscaler will never delete nodes with pods from kube-system (except for DaemonSet or mirror pods). Defaults to `true`."
+}
+
+variable "automatic_channel_upgrade" {
+ type = string
+ default = null
+ description = <<-EOT
+ (Optional) Defines the automatic upgrade channel for the AKS cluster.
+ Possible values:
+ * `"patch"`: Automatically upgrades to the latest patch version within the specified minor version in `kubernetes_version`. **If using "patch", `kubernetes_version` must be set only up to the minor version (e.g., "1.29").**
+ * `"stable"`, `"rapid"`, `"node-image"`: Automatically upgrade without requiring `kubernetes_version`. **If using one of these values, both `kubernetes_version` and `orchestrator_version` must be `null`.**
+
+ By default, automatic upgrades are disabled.
+ More information: https://learn.microsoft.com/en-us/azure/aks/auto-upgrade-cluster
+ EOT
+
+ validation {
+ condition = var.automatic_channel_upgrade == null ? true : contains([
+ "patch", "stable", "rapid", "node-image"
+ ], var.automatic_channel_upgrade)
+ error_message = "`automatic_channel_upgrade`'s possible values are `patch`, `stable`, `rapid` or `node-image`."
+ }
+}
+
+variable "azure_policy_enabled" {
+ type = bool
+ default = false
+ description = "Enable Azure Policy Addon."
+}
+
+variable "brown_field_application_gateway_for_ingress" {
+ type = object({
+ id = string
+ subnet_id = string
+ })
+ default = null
+ description = <<-EOT
+ [Definition of `brown_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-existing)
+ * `id` - (Required) The ID of the Application Gateway that be used as cluster ingress.
+ * `subnet_id` - (Required) The ID of the Subnet which the Application Gateway is connected to. Must be set when `create_role_assignments` is `true`.
+ EOT
+}
+
+variable "client_id" {
+ type = string
+ default = ""
+ description = "(Optional) The Client ID (appId) for the Service Principal used for the AKS deployment"
+ nullable = false
+}
+
+variable "client_secret" {
+ type = string
+ default = ""
+ description = "(Optional) The Client Secret (password) for the Service Principal used for the AKS deployment"
+ nullable = false
+ sensitive = true
+}
+
+variable "cluster_log_analytics_workspace_name" {
+ type = string
+ default = null
+ description = "(Optional) The name of the Analytics workspace"
+}
+
+variable "cluster_name" {
+ type = string
+ default = null
+ description = "(Optional) The name for the AKS resources created in the specified Azure Resource Group. This variable overwrites the 'prefix' var (The 'prefix' var will still be applied to the dns_prefix if it is set)"
+}
+
+variable "cluster_name_random_suffix" {
+ type = bool
+ default = false
+ description = "Whether to add a random suffix on AKS cluster's name or not. `azurerm_kubernetes_cluster` resource defined in this module is `create_before_destroy = true` implicitly now (described [here](https://github.com/Azure/terraform-azurerm-aks/issues/389)), without this random suffix we'll not be able to recreate this cluster directly due to the naming conflict."
+ nullable = false
+}
+
+variable "confidential_computing" {
+ type = object({
+ sgx_quote_helper_enabled = bool
+ })
+ default = null
+ description = "(Optional) Enable Confidential Computing."
+}
+
+variable "cost_analysis_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Enable Cost Analysis."
+}
+
+variable "create_monitor_data_collection_rule" {
+ type = bool
+ default = true
+ description = "Create monitor data collection rule resource for the AKS cluster. Defaults to `true`."
+ nullable = false
+}
+
+variable "create_role_assignment_network_contributor" {
+ type = bool
+ default = false
+ description = "(Deprecated) Create a role assignment for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster"
+ nullable = false
+}
+
+variable "create_role_assignments_for_application_gateway" {
+ type = bool
+ default = true
+ description = "(Optional) Whether to create the corresponding role assignments for application gateway or not. Defaults to `true`."
+ nullable = false
+}
+
+variable "data_collection_settings" {
+ type = object({
+ data_collection_interval = string
+ namespace_filtering_mode_for_data_collection = string
+ namespaces_for_data_collection = list(string)
+ container_log_v2_enabled = bool
+ })
+ default = {
+ data_collection_interval = "1m"
+ namespace_filtering_mode_for_data_collection = "Off"
+ namespaces_for_data_collection = ["kube-system", "gatekeeper-system", "azure-arc"]
+ container_log_v2_enabled = true
+ }
+ description = <<-EOT
+ `data_collection_interval` - Determines how often the agent collects data. Valid values are 1m - 30m in 1m intervals. Default is 1m.
+ `namespace_filtering_mode_for_data_collection` - Can be 'Include', 'Exclude', or 'Off'. Determines how namespaces are filtered for data collection.
+ `namespaces_for_data_collection` - List of Kubernetes namespaces for data collection based on the filtering mode.
+ `container_log_v2_enabled` - Flag to enable the ContainerLogV2 schema for collecting logs.
+ See more details: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=cli#configure-dcr-with-azure-portal-1
+ EOT
+}
+
+variable "default_node_pool_fips_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created."
+}
+
+variable "disk_encryption_set_id" {
+ type = string
+ default = null
+ description = "(Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/azure/aks/azure-disk-customer-managed-keys). Changing this forces a new resource to be created."
+}
+
+variable "dns_prefix_private_cluster" {
+ type = string
+ default = null
+ description = "(Optional) Specifies the DNS prefix to use with private clusters. Only one of `var.prefix` and `var.dns_prefix_private_cluster` can be specified. Changing this forces a new resource to be created."
+}
+
+variable "ebpf_data_plane" {
+ type = string
+ default = null
+ description = "(Optional) Specifies the eBPF data plane used for building the Kubernetes network. Possible value is `cilium`. Changing this forces a new resource to be created."
+}
+
+variable "enable_auto_scaling" {
+ type = bool
+ default = false
+ description = "Enable node pool autoscaling"
+}
+
+variable "enable_host_encryption" {
+ type = bool
+ default = false
+ description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli"
+}
+
+variable "enable_node_public_ip" {
+ type = bool
+ default = false
+ description = "(Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false."
+}
+
+variable "green_field_application_gateway_for_ingress" {
+ type = object({
+ name = optional(string)
+ subnet_cidr = optional(string)
+ subnet_id = optional(string)
+ })
+ default = null
+ description = <<-EOT
+ [Definition of `green_field`](https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-ingress-controller-add-on-new)
+ * `name` - (Optional) The name of the Application Gateway to be used or created in the Nodepool Resource Group, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
+ * `subnet_cidr` - (Optional) The subnet CIDR to be used to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
+ * `subnet_id` - (Optional) The ID of the subnet on which to create an Application Gateway, which in turn will be integrated with the ingress controller of this Kubernetes Cluster.
+EOT
+
+ validation {
+ condition = var.green_field_application_gateway_for_ingress == null ? true : (can(coalesce(var.green_field_application_gateway_for_ingress.subnet_id, var.green_field_application_gateway_for_ingress.subnet_cidr)))
+ error_message = "One of `subnet_cidr` and `subnet_id` must be specified."
+ }
+}
+
+variable "http_proxy_config" {
+ type = object({
+ http_proxy = optional(string)
+ https_proxy = optional(string)
+ no_proxy = optional(list(string))
+ trusted_ca = optional(string)
+ })
+ default = null
+ description = <<-EOT
+ optional(object({
+ http_proxy = (Optional) The proxy address to be used when communicating over HTTP.
+ https_proxy = (Optional) The proxy address to be used when communicating over HTTPS.
+ no_proxy = (Optional) The list of domains that will not use the proxy for communication. Note: If you specify the `default_node_pool.0.vnet_subnet_id`, be sure to include the Subnet CIDR in the `no_proxy` list. Note: You may wish to use Terraform's `ignore_changes` functionality to ignore the changes to this field.
+ trusted_ca = (Optional) The base64 encoded alternative CA certificate content in PEM format.
+ }))
+ Once you have set only one of `http_proxy` and `https_proxy`, this config would be used for both `http_proxy` and `https_proxy` to avoid a configuration drift.
+EOT
+
+ validation {
+ condition = var.http_proxy_config == null ? true : can(coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy))
+ error_message = "`http_proxy` and `https_proxy` cannot be both empty."
+ }
+}
+
+variable "identity_ids" {
+ type = list(string)
+ default = null
+ description = "(Optional) Specifies a list of User Assigned Managed Identity IDs to be assigned to this Kubernetes Cluster."
+}
+
+variable "identity_type" {
+ type = string
+ default = "SystemAssigned"
+ description = "(Optional) The type of identity used for the managed cluster. Conflicts with `client_id` and `client_secret`. Possible values are `SystemAssigned` and `UserAssigned`. If `UserAssigned` is set, an `identity_ids` must be set as well."
+
+ validation {
+ condition = var.identity_type == "SystemAssigned" || var.identity_type == "UserAssigned"
+ error_message = "`identity_type`'s possible values are `SystemAssigned` and `UserAssigned`"
+ }
+}
+
+variable "image_cleaner_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Specifies whether Image Cleaner is enabled."
+}
+
+variable "image_cleaner_interval_hours" {
+ type = number
+ default = 48
+ description = "(Optional) Specifies the interval in hours when images should be cleaned up. Defaults to `48`."
+}
+
+variable "interval_before_cluster_update" {
+ type = string
+ default = "30s"
+ description = "Interval before cluster kubernetes version update, defaults to `30s`. Set this variable to `null` would disable interval before cluster kubernetes version update."
+}
+
+variable "key_vault_secrets_provider_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Whether to use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster. For more details: https://docs.microsoft.com/en-us/azure/aks/csi-secrets-store-driver"
+ nullable = false
+}
+
+variable "kms_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Enable Azure KeyVault Key Management Service."
+ nullable = false
+}
+
+variable "kms_key_vault_key_id" {
+ type = string
+ default = null
+ description = "(Optional) Identifier of Azure Key Vault key. When Azure Key Vault key management service is enabled, this field is required and must be a valid key identifier."
+}
+
+variable "kms_key_vault_network_access" {
+ type = string
+ default = "Public"
+ description = "(Optional) Network Access of Azure Key Vault. Possible values are: `Private` and `Public`."
+
+ validation {
+ condition = contains(["Private", "Public"], var.kms_key_vault_network_access)
+ error_message = "Possible values are `Private` and `Public`"
+ }
+}
+
+variable "kubelet_identity" {
+ type = object({
+ client_id = optional(string)
+ object_id = optional(string)
+ user_assigned_identity_id = optional(string)
+ })
+ default = null
+ description = <<-EOT
+ - `client_id` - (Optional) The Client ID of the user-defined Managed Identity to be assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
  - `object_id` - (Optional) The Object ID of the user-defined Managed Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
+ - `user_assigned_identity_id` - (Optional) The ID of the User Assigned Identity assigned to the Kubelets. If not specified a Managed Identity is created automatically. Changing this forces a new resource to be created.
+EOT
+}
+
+variable "kubernetes_version" {
+ type = string
+ default = null
+ description = "Specify which Kubernetes release to use. The default used is the latest Kubernetes version available in the region"
+}
+
+variable "load_balancer_profile_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Enable a load_balancer_profile block. This can only be used when load_balancer_sku is set to `standard`."
+ nullable = false
+}
+
+variable "load_balancer_profile_idle_timeout_in_minutes" {
+ type = number
+ default = 30
+ description = "(Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive."
+}
+
+variable "load_balancer_profile_managed_outbound_ip_count" {
+ type = number
+ default = null
+ description = "(Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive"
+}
+
+variable "load_balancer_profile_managed_outbound_ipv6_count" {
+ type = number
+ default = null
+ description = "(Optional) The desired number of IPv6 outbound IPs created and managed by Azure for the cluster load balancer. Must be in the range of `1` to `100` (inclusive). The default value is `0` for single-stack and `1` for dual-stack. Note: managed_outbound_ipv6_count requires dual-stack networking. To enable dual-stack networking the Preview Feature Microsoft.ContainerService/AKS-EnableDualStack needs to be enabled and the Resource Provider re-registered, see the documentation for more information. https://learn.microsoft.com/en-us/azure/aks/configure-kubenet-dual-stack?tabs=azure-cli%2Ckubectl#register-the-aks-enabledualstack-preview-feature"
+}
+
+variable "load_balancer_profile_outbound_ip_address_ids" {
+ type = set(string)
+ default = null
+ description = "(Optional) The ID of the Public IP Addresses which should be used for outbound communication for the cluster load balancer."
+}
+
+variable "load_balancer_profile_outbound_ip_prefix_ids" {
+ type = set(string)
+ default = null
+ description = "(Optional) The ID of the outbound Public IP Address Prefixes which should be used for the cluster load balancer."
+}
+
+variable "load_balancer_profile_outbound_ports_allocated" {
+ type = number
+ default = 0
+ description = "(Optional) Number of desired SNAT ports for each VM in the cluster's load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`."
+}
+
+variable "load_balancer_sku" {
+ type = string
+ default = "standard"
+ description = "(Optional) Specifies the SKU of the Load Balancer used for this Kubernetes Cluster. Possible values are `basic` and `standard`. Defaults to `standard`. Changing this forces a new kubernetes cluster to be created."
+
+ validation {
+ condition = contains(["basic", "standard"], var.load_balancer_sku)
+ error_message = "Possible values are `basic` and `standard`"
+ }
+}
+
+variable "local_account_disabled" {
+ type = bool
+ default = null
+ description = "(Optional) - If `true` local accounts will be disabled. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/managed-aad#disable-local-accounts) for more information."
+}
+
+variable "log_analytics_solution" {
+ type = object({
+ id = string
+ })
+ default = null
+ description = "(Optional) Object which contains existing azurerm_log_analytics_solution ID. Providing ID disables creation of azurerm_log_analytics_solution."
+
+ validation {
+ condition = var.log_analytics_solution == null ? true : var.log_analytics_solution.id != null && var.log_analytics_solution.id != ""
+ error_message = "`var.log_analytics_solution` must be `null` or an object with a valid `id`."
+ }
+}
+
+variable "log_analytics_workspace" {
+ type = object({
+ id = string
+ name = string
+ location = optional(string)
+ resource_group_name = optional(string)
+ })
+ default = null
+ description = "(Optional) Existing azurerm_log_analytics_workspace to attach azurerm_log_analytics_solution. Providing the config disables creation of azurerm_log_analytics_workspace."
+}
+
+variable "log_analytics_workspace_allow_resource_only_permissions" {
+ type = bool
+ default = null
+ description = "(Optional) Specifies if the log Analytics Workspace allows users to access data associated with resources they have permission to view, without permission to the workspace. Defaults to `true`."
+}
+
+variable "log_analytics_workspace_cmk_for_query_forced" {
+ type = bool
+ default = null
+ description = "(Optional) Is Customer Managed Storage mandatory for query management?"
+}
+
+variable "log_analytics_workspace_daily_quota_gb" {
+ type = number
+ default = null
+ description = "(Optional) The workspace daily quota for ingestion in GB. Defaults to -1 (unlimited) if omitted."
+}
+
+variable "log_analytics_workspace_data_collection_rule_id" {
+ type = string
+ default = null
+ description = "(Optional) The ID of the Data Collection Rule to use for this workspace."
+}
+
+variable "log_analytics_workspace_enabled" {
+ type = bool
+ default = true
+ description = "Enable the integration of azurerm_log_analytics_workspace and azurerm_log_analytics_solution: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard"
+ nullable = false
+}
+
+variable "log_analytics_workspace_identity" {
+ type = object({
+ identity_ids = optional(set(string))
+ type = string
+ })
+ default = null
+ description = <<-EOT
+ - `identity_ids` - (Optional) Specifies a list of user managed identity ids to be assigned. Required if `type` is `UserAssigned`.
+ - `type` - (Required) Specifies the identity type of the Log Analytics Workspace. Possible values are `SystemAssigned` (where Azure will generate a Service Principal for you) and `UserAssigned` where you can specify the Service Principal IDs in the `identity_ids` field.
+EOT
+}
+
+variable "log_analytics_workspace_immediate_data_purge_on_30_days_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Whether to remove the data in the Log Analytics Workspace immediately after 30 days."
+}
+
+variable "log_analytics_workspace_internet_ingestion_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Should the Log Analytics Workspace support ingestion over the Public Internet? Defaults to `true`."
+}
+
+variable "log_analytics_workspace_internet_query_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Should the Log Analytics Workspace support querying over the Public Internet? Defaults to `true`."
+}
+
+variable "log_analytics_workspace_local_authentication_disabled" {
+ type = bool
+ default = null
+ description = "(Optional) Specifies if the log Analytics workspace should enforce authentication using Azure AD. Defaults to `false`."
+}
+
+variable "log_analytics_workspace_reservation_capacity_in_gb_per_day" {
+ type = number
+ default = null
+ description = "(Optional) The capacity reservation level in GB for this workspace. Possible values are `100`, `200`, `300`, `400`, `500`, `1000`, `2000` and `5000`."
+}
+
+variable "log_analytics_workspace_resource_group_name" {
+ type = string
+ default = null
+ description = "(Optional) Resource group name to create azurerm_log_analytics_solution."
+}
+
+variable "log_analytics_workspace_sku" {
+ type = string
+ default = "PerGB2018"
+ description = "The SKU (pricing level) of the Log Analytics workspace. For new subscriptions the SKU should be set to PerGB2018"
+}
+
+variable "log_retention_in_days" {
+ type = number
+ default = 30
+ description = "The retention period for the logs in days"
+}
+
+variable "maintenance_window" {
+ type = object({
+ allowed = optional(list(object({
+ day = string
+ hours = set(number)
+ })), [
+ ]),
+ not_allowed = optional(list(object({
+ end = string
+ start = string
+ })), []),
+ })
+ default = null
+ description = "(Optional) Maintenance configuration of the managed cluster."
+}
+
+variable "maintenance_window_auto_upgrade" {
+ type = object({
+ day_of_month = optional(number)
+ day_of_week = optional(string)
+ duration = number
+ frequency = string
+ interval = number
+ start_date = optional(string)
+ start_time = optional(string)
+ utc_offset = optional(string)
+ week_index = optional(string)
+ not_allowed = optional(set(object({
+ end = string
+ start = string
+ })))
+ })
+ default = null
+ description = <<-EOT
+ - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
+ - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
+ - `duration` - (Required) The duration of the window for maintenance to run in hours.
+ - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
+ - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
+ - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
+ - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
+ - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
+ - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
+
+ ---
+ `not_allowed` block supports the following:
+ - `end` - (Required) The end of a time span, formatted as an RFC3339 string.
+ - `start` - (Required) The start of a time span, formatted as an RFC3339 string.
+EOT
+}
+
+variable "maintenance_window_node_os" {
+ type = object({
+ day_of_month = optional(number)
+ day_of_week = optional(string)
+ duration = number
+ frequency = string
+ interval = number
+ start_date = optional(string)
+ start_time = optional(string)
+ utc_offset = optional(string)
+ week_index = optional(string)
+ not_allowed = optional(set(object({
+ end = string
+ start = string
+ })))
+ })
+ default = null
+ description = <<-EOT
+ - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
+ - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency.
+ - `duration` - (Required) The duration of the window for maintenance to run in hours.
+ - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`.
+ - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based.
+ - `start_date` - (Optional) The date on which the maintenance window begins to take effect.
+ - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`.
+ - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance.
+ - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`.
+
+ ---
+ `not_allowed` block supports the following:
+ - `end` - (Required) The end of a time span, formatted as an RFC3339 string.
+ - `start` - (Required) The start of a time span, formatted as an RFC3339 string.
+EOT
+}
+
+variable "microsoft_defender_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Is Microsoft Defender on the cluster enabled? Requires `var.log_analytics_workspace_enabled` to be `true` to set this variable to `true`."
+ nullable = false
+}
+
+variable "monitor_data_collection_rule_data_sources_syslog_facilities" {
+ type = list(string)
+ default = ["auth", "authpriv", "cron", "daemon", "mark", "kern", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", "lpr", "mail", "news", "syslog", "user", "uucp"]
+ description = "Syslog supported facilities as documented here: https://learn.microsoft.com/en-us/azure/azure-monitor/agents/data-sources-syslog"
+}
+
+variable "monitor_data_collection_rule_data_sources_syslog_levels" {
+ type = list(string)
+ default = ["Debug", "Info", "Notice", "Warning", "Error", "Critical", "Alert", "Emergency"]
+ description = "List of syslog levels"
+}
+
+variable "monitor_data_collection_rule_extensions_streams" {
+ type = list(any)
+ default = ["Microsoft-ContainerLog", "Microsoft-ContainerLogV2", "Microsoft-KubeEvents", "Microsoft-KubePodInventory", "Microsoft-KubeNodeInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", "Microsoft-KubeMonAgentEvents", "Microsoft-InsightsMetrics", "Microsoft-ContainerInventory", "Microsoft-ContainerNodeInventory", "Microsoft-Perf"]
+ description = "An array of container insights table streams. See documentation in DCR for a list of the valid streams and their corresponding table: https://learn.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-data-collection-configure?tabs=portal#stream-values-in-dcr"
+}
+
+variable "monitor_metrics" {
+ type = object({
+ annotations_allowed = optional(string)
+ labels_allowed = optional(string)
+ })
+ default = null
+ description = <<-EOT
+ (Optional) Specifies a Prometheus add-on profile for the Kubernetes Cluster
+ object({
+ annotations_allowed = "(Optional) Specifies a comma-separated list of Kubernetes annotation keys that will be used in the resource's labels metric."
+ labels_allowed = "(Optional) Specifies a Comma-separated list of additional Kubernetes label keys that will be used in the resource's labels metric."
+ })
+EOT
+}
+
+variable "msi_auth_for_monitoring_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Is managed identity authentication for monitoring enabled?"
+}
+
+variable "nat_gateway_profile" {
+ type = object({
+ idle_timeout_in_minutes = optional(number)
+ managed_outbound_ip_count = optional(number)
+ })
+ default = null
+ description = <<-EOT
+ `nat_gateway_profile` block supports the following:
+ - `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the managed nat gateway. Must be between `4` and `120` inclusive. Defaults to `4`.
+ - `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the managed nat gateway. Must be between `1` and `100` inclusive.
+EOT
+}
+
+variable "net_profile_dns_service_ip" {
+ type = string
+ default = null
+ description = "(Optional) IP address within the Kubernetes service address range that will be used by cluster service discovery (kube-dns). Changing this forces a new resource to be created."
+}
+
+variable "net_profile_outbound_type" {
+ type = string
+ default = "loadBalancer"
+ description = "(Optional) The outbound (egress) routing method which should be used for this Kubernetes Cluster. Possible values are loadBalancer and userDefinedRouting. Defaults to loadBalancer."
+}
+
+variable "net_profile_pod_cidr" {
+ type = string
+ default = null
+ description = " (Optional) The CIDR to use for pod IP addresses. This field can only be set when network_plugin is set to kubenet or network_plugin is set to azure and network_plugin_mode is set to overlay. Changing this forces a new resource to be created."
+}
+
+variable "net_profile_pod_cidrs" {
+ type = list(string)
+ default = null
+ description = "(Optional) A list of CIDRs to use for pod IP addresses. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created."
+}
+
+variable "net_profile_service_cidr" {
+ type = string
+ default = null
+ description = "(Optional) The Network Range used by the Kubernetes service. Changing this forces a new resource to be created."
+}
+
+variable "net_profile_service_cidrs" {
+ type = list(string)
+ default = null
+ description = "(Optional) A list of CIDRs to use for Kubernetes services. For single-stack networking a single IPv4 CIDR is expected. For dual-stack networking an IPv4 and IPv6 CIDR are expected. Changing this forces a new resource to be created."
+}
+
+variable "network_contributor_role_assigned_subnet_ids" {
+ type = map(string)
+ default = {}
+ description = "Create role assignments for the AKS Service Principal to be a Network Contributor on the subnets used for the AKS Cluster, key should be static string, value should be subnet's id"
+ nullable = false
+}
+
+variable "network_data_plane" {
+ type = string
+ default = null
+ description = "(Optional) Specifies the data plane used for building the Kubernetes network. Possible values are `azure` and `cilium`. Defaults to `azure`. Changing this forces a new resource to be created."
+}
+
+variable "network_ip_versions" {
+ type = list(string)
+ default = null
+ description = "(Optional) Specifies a list of IP versions the Kubernetes Cluster will use to assign IP addresses to its nodes and pods. Possible values are `IPv4` and/or `IPv6`. `IPv4` must always be specified. Changing this forces a new resource to be created."
+}
+
+variable "network_mode" {
+ type = string
+ default = null
+ description = "(Optional) Network mode to be used with Azure CNI. Possible values are `bridge` and `transparent`. Changing this forces a new resource to be created."
+}
+
+variable "network_plugin" {
+ type = string
+ default = "kubenet"
+ description = "Network plugin to use for networking."
+ nullable = false
+}
+
+variable "network_plugin_mode" {
+ type = string
+ default = null
+ description = "(Optional) Specifies the network plugin mode used for building the Kubernetes network. Possible value is `overlay`. Changing this forces a new resource to be created."
+}
+
+variable "network_policy" {
+ type = string
+ default = null
+ description = " (Optional) Sets up network policy to be used with Azure CNI. Network policy allows us to control the traffic flow between pods. Currently supported values are calico and azure. Changing this forces a new resource to be created."
+}
+
+variable "node_network_profile" {
+ type = object({
+ node_public_ip_tags = optional(map(string))
+ application_security_group_ids = optional(list(string))
+ allowed_host_ports = optional(list(object({
+ port_start = optional(number)
+ port_end = optional(number)
+ protocol = optional(string)
+ })))
+ })
+ default = null
+ description = <<-EOT
+ - `node_public_ip_tags`: (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
+ - `application_security_group_ids`: (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
+---
+ An `allowed_host_ports` block supports the following:
+ - `port_start`: (Optional) Specifies the start of the port range.
+ - `port_end`: (Optional) Specifies the end of the port range.
+ - `protocol`: (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
+EOT
+}
+
+variable "node_os_channel_upgrade" {
+ type = string
+ default = null
+ description = " (Optional) The upgrade channel for this Kubernetes Cluster Nodes' OS Image. Possible values are `Unmanaged`, `SecurityPatch`, `NodeImage` and `None`."
+}
+
+variable "node_pools" {
+ type = map(object({
+ name = string
+ node_count = optional(number)
+ tags = optional(map(string))
+ vm_size = string
+ host_group_id = optional(string)
+ capacity_reservation_group_id = optional(string)
+ custom_ca_trust_enabled = optional(bool)
+ enable_auto_scaling = optional(bool)
+ enable_host_encryption = optional(bool)
+ enable_node_public_ip = optional(bool)
+ eviction_policy = optional(string)
+ gpu_instance = optional(string)
+ kubelet_config = optional(object({
+ cpu_manager_policy = optional(string)
+ cpu_cfs_quota_enabled = optional(bool)
+ cpu_cfs_quota_period = optional(string)
+ image_gc_high_threshold = optional(number)
+ image_gc_low_threshold = optional(number)
+ topology_manager_policy = optional(string)
+ allowed_unsafe_sysctls = optional(set(string))
+ container_log_max_size_mb = optional(number)
+ container_log_max_files = optional(number)
+ pod_max_pid = optional(number)
+ }))
+ linux_os_config = optional(object({
+ sysctl_config = optional(object({
+ fs_aio_max_nr = optional(number)
+ fs_file_max = optional(number)
+ fs_inotify_max_user_watches = optional(number)
+ fs_nr_open = optional(number)
+ kernel_threads_max = optional(number)
+ net_core_netdev_max_backlog = optional(number)
+ net_core_optmem_max = optional(number)
+ net_core_rmem_default = optional(number)
+ net_core_rmem_max = optional(number)
+ net_core_somaxconn = optional(number)
+ net_core_wmem_default = optional(number)
+ net_core_wmem_max = optional(number)
+ net_ipv4_ip_local_port_range_min = optional(number)
+ net_ipv4_ip_local_port_range_max = optional(number)
+ net_ipv4_neigh_default_gc_thresh1 = optional(number)
+ net_ipv4_neigh_default_gc_thresh2 = optional(number)
+ net_ipv4_neigh_default_gc_thresh3 = optional(number)
+ net_ipv4_tcp_fin_timeout = optional(number)
+ net_ipv4_tcp_keepalive_intvl = optional(number)
+ net_ipv4_tcp_keepalive_probes = optional(number)
+ net_ipv4_tcp_keepalive_time = optional(number)
+ net_ipv4_tcp_max_syn_backlog = optional(number)
+ net_ipv4_tcp_max_tw_buckets = optional(number)
+ net_ipv4_tcp_tw_reuse = optional(bool)
+ net_netfilter_nf_conntrack_buckets = optional(number)
+ net_netfilter_nf_conntrack_max = optional(number)
+ vm_max_map_count = optional(number)
+ vm_swappiness = optional(number)
+ vm_vfs_cache_pressure = optional(number)
+ }))
+ transparent_huge_page_enabled = optional(string)
+ transparent_huge_page_defrag = optional(string)
+ swap_file_size_mb = optional(number)
+ }))
+ fips_enabled = optional(bool)
+ kubelet_disk_type = optional(string)
+ max_count = optional(number)
+ max_pods = optional(number)
+ message_of_the_day = optional(string)
+ mode = optional(string, "User")
+ min_count = optional(number)
+ node_network_profile = optional(object({
+ node_public_ip_tags = optional(map(string))
+ application_security_group_ids = optional(list(string))
+ allowed_host_ports = optional(list(object({
+ port_start = optional(number)
+ port_end = optional(number)
+ protocol = optional(string)
+ })))
+ }))
+ node_labels = optional(map(string))
+ node_public_ip_prefix_id = optional(string)
+ node_taints = optional(list(string))
+ orchestrator_version = optional(string)
+ os_disk_size_gb = optional(number)
+ os_disk_type = optional(string, "Managed")
+ os_sku = optional(string)
+ os_type = optional(string, "Linux")
+ pod_subnet = optional(object({
+ id = string
+ }), null)
+ priority = optional(string, "Regular")
+ proximity_placement_group_id = optional(string)
+ spot_max_price = optional(number)
+ scale_down_mode = optional(string, "Delete")
+ snapshot_id = optional(string)
+ ultra_ssd_enabled = optional(bool)
+ vnet_subnet = optional(object({
+ id = string
+ }), null)
+ upgrade_settings = optional(object({
+ drain_timeout_in_minutes = number
+ node_soak_duration_in_minutes = number
+ max_surge = string
+ }))
+ windows_profile = optional(object({
+ outbound_nat_enabled = optional(bool, true)
+ }))
+ workload_runtime = optional(string)
+ zones = optional(set(string))
+ create_before_destroy = optional(bool, true)
+ }))
+ default = {}
+ description = <<-EOT
+ A map of node pools that need to be created and attached on the Kubernetes cluster. The key of the map can be the name of the node pool, and the key must be static string. The value of the map is a `node_pool` block as defined below:
+ map(object({
+ name = (Required) The name of the Node Pool which should be created within the Kubernetes Cluster. Changing this forces a new resource to be created. A Windows Node Pool cannot have a `name` longer than 6 characters. A random suffix of 4 characters is always added to the name to avoid clashes during recreates.
+ node_count = (Optional) The initial number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` (inclusive) for user pools and between `1` and `1000` (inclusive) for system pools and must be a value in the range `min_count` - `max_count`.
+ tags = (Optional) A mapping of tags to assign to the resource. At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) until this is fixed in the AKS API.
+ vm_size = (Required) The SKU which should be used for the Virtual Machines used in this Node Pool. Changing this forces a new resource to be created.
+ host_group_id = (Optional) The fully qualified resource ID of the Dedicated Host Group to provision virtual machines from. Changing this forces a new resource to be created.
+ capacity_reservation_group_id = (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.
+ custom_ca_trust_enabled = (Optional) Specifies whether to trust a Custom CA. This requires that the Preview Feature `Microsoft.ContainerService/CustomCATrustPreview` is enabled and the Resource Provider is re-registered, see [the documentation](https://learn.microsoft.com/en-us/azure/aks/custom-certificate-authority) for more information.
+ enable_auto_scaling = (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler).
+ enable_host_encryption = (Optional) Should the nodes in this Node Pool have host encryption enabled? Changing this forces a new resource to be created.
+ enable_node_public_ip = (Optional) Should each node have a Public IP Address? Changing this forces a new resource to be created.
+ eviction_policy = (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. An Eviction Policy can only be configured when `priority` is set to `Spot` and will default to `Delete` unless otherwise specified.
+ gpu_instance = (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created.
+ kubelet_config = optional(object({
+ cpu_manager_policy = (Optional) Specifies the CPU Manager policy to use. Possible values are `none` and `static`, Changing this forces a new resource to be created.
+ cpu_cfs_quota_enabled = (Optional) Is CPU CFS quota enforcement for containers enabled? Changing this forces a new resource to be created.
+ cpu_cfs_quota_period = (Optional) Specifies the CPU CFS quota period value. Changing this forces a new resource to be created.
+ image_gc_high_threshold = (Optional) Specifies the percent of disk usage above which image garbage collection is always run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ image_gc_low_threshold = (Optional) Specifies the percent of disk usage lower than which image garbage collection is never run. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ topology_manager_policy = (Optional) Specifies the Topology Manager policy to use. Possible values are `none`, `best-effort`, `restricted` or `single-numa-node`. Changing this forces a new resource to be created.
+ allowed_unsafe_sysctls = (Optional) Specifies the allow list of unsafe sysctls command or patterns (ending in `*`). Changing this forces a new resource to be created.
+ container_log_max_size_mb = (Optional) Specifies the maximum size (e.g. 10MB) of container log file before it is rotated. Changing this forces a new resource to be created.
+ container_log_max_files = (Optional) Specifies the maximum number of container log files that can be present for a container. must be at least 2. Changing this forces a new resource to be created.
+ pod_max_pid = (Optional) Specifies the maximum number of processes per pod. Changing this forces a new resource to be created.
+ }))
+ linux_os_config = optional(object({
+ sysctl_config = optional(object({
+ fs_aio_max_nr = (Optional) The sysctl setting fs.aio-max-nr. Must be between `65536` and `6553500`. Changing this forces a new resource to be created.
+ fs_file_max = (Optional) The sysctl setting fs.file-max. Must be between `8192` and `12000500`. Changing this forces a new resource to be created.
+ fs_inotify_max_user_watches = (Optional) The sysctl setting fs.inotify.max_user_watches. Must be between `781250` and `2097152`. Changing this forces a new resource to be created.
+ fs_nr_open = (Optional) The sysctl setting fs.nr_open. Must be between `8192` and `20000500`. Changing this forces a new resource to be created.
+ kernel_threads_max = (Optional) The sysctl setting kernel.threads-max. Must be between `20` and `513785`. Changing this forces a new resource to be created.
+ net_core_netdev_max_backlog = (Optional) The sysctl setting net.core.netdev_max_backlog. Must be between `1000` and `3240000`. Changing this forces a new resource to be created.
+ net_core_optmem_max = (Optional) The sysctl setting net.core.optmem_max. Must be between `20480` and `4194304`. Changing this forces a new resource to be created.
+ net_core_rmem_default = (Optional) The sysctl setting net.core.rmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_rmem_max = (Optional) The sysctl setting net.core.rmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_somaxconn = (Optional) The sysctl setting net.core.somaxconn. Must be between `4096` and `3240000`. Changing this forces a new resource to be created.
+ net_core_wmem_default = (Optional) The sysctl setting net.core.wmem_default. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_core_wmem_max = (Optional) The sysctl setting net.core.wmem_max. Must be between `212992` and `134217728`. Changing this forces a new resource to be created.
+ net_ipv4_ip_local_port_range_min = (Optional) The sysctl setting net.ipv4.ip_local_port_range min value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+ net_ipv4_ip_local_port_range_max = (Optional) The sysctl setting net.ipv4.ip_local_port_range max value. Must be between `1024` and `60999`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh1 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh1. Must be between `128` and `80000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh2 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh2. Must be between `512` and `90000`. Changing this forces a new resource to be created.
+ net_ipv4_neigh_default_gc_thresh3 = (Optional) The sysctl setting net.ipv4.neigh.default.gc_thresh3. Must be between `1024` and `100000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_fin_timeout = (Optional) The sysctl setting net.ipv4.tcp_fin_timeout. Must be between `5` and `120`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_intvl = (Optional) The sysctl setting net.ipv4.tcp_keepalive_intvl. Must be between `10` and `75`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_probes = (Optional) The sysctl setting net.ipv4.tcp_keepalive_probes. Must be between `1` and `15`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_keepalive_time = (Optional) The sysctl setting net.ipv4.tcp_keepalive_time. Must be between `30` and `432000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_syn_backlog = (Optional) The sysctl setting net.ipv4.tcp_max_syn_backlog. Must be between `128` and `3240000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_max_tw_buckets = (Optional) The sysctl setting net.ipv4.tcp_max_tw_buckets. Must be between `8000` and `1440000`. Changing this forces a new resource to be created.
+ net_ipv4_tcp_tw_reuse = (Optional) Is sysctl setting net.ipv4.tcp_tw_reuse enabled? Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_buckets = (Optional) The sysctl setting net.netfilter.nf_conntrack_buckets. Must be between `65536` and `147456`. Changing this forces a new resource to be created.
+ net_netfilter_nf_conntrack_max = (Optional) The sysctl setting net.netfilter.nf_conntrack_max. Must be between `131072` and `1048576`. Changing this forces a new resource to be created.
+ vm_max_map_count = (Optional) The sysctl setting vm.max_map_count. Must be between `65530` and `262144`. Changing this forces a new resource to be created.
+ vm_swappiness = (Optional) The sysctl setting vm.swappiness. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ vm_vfs_cache_pressure = (Optional) The sysctl setting vm.vfs_cache_pressure. Must be between `0` and `100`. Changing this forces a new resource to be created.
+ }))
+ transparent_huge_page_enabled = (Optional) Specifies the Transparent Huge Page enabled configuration. Possible values are `always`, `madvise` and `never`. Changing this forces a new resource to be created.
+ transparent_huge_page_defrag = (Optional) Specifies the defrag configuration for Transparent Huge Page. Possible values are `always`, `defer`, `defer+madvise`, `madvise` and `never`. Changing this forces a new resource to be created.
+ swap_file_size_mb = (Optional) Specifies the size of swap file on each node in MB. Changing this forces a new resource to be created.
+ }))
+ fips_enabled = (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created. FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview).
+ kubelet_disk_type = (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`.
+ max_count = (Optional) The maximum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be greater than or equal to `min_count`.
+ max_pods = (Optional) The maximum number of pods that can run on each agent node in this Node Pool. Changing this forces a new resource to be created.
+ message_of_the_day = (Optional) A base64-encoded string which will be written to /etc/motd after decoding. This allows customization of the message of the day for Linux nodes. It cannot be specified for Windows nodes and must be a static string (i.e. will be printed raw and not executed as a script). Changing this forces a new resource to be created.
+ mode = (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`.
+ min_count = (Optional) The minimum number of nodes which should exist within this Node Pool. Valid values are between `0` and `1000` and must be less than or equal to `max_count`.
+ node_network_profile = optional(object({
+ node_public_ip_tags = (Optional) Specifies a mapping of tags to the instance-level public IPs. Changing this forces a new resource to be created.
+ application_security_group_ids = (Optional) A list of Application Security Group IDs which should be associated with this Node Pool.
+ allowed_host_ports = optional(object({
+ port_start = (Optional) Specifies the start of the port range.
+ port_end = (Optional) Specifies the end of the port range.
+ protocol = (Optional) Specifies the protocol of the port range. Possible values are `TCP` and `UDP`.
+ }))
+ }))
+ node_labels = (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool.
+ node_public_ip_prefix_id = (Optional) Resource ID for the Public IP Addresses Prefix for the nodes in this Node Pool. `enable_node_public_ip` should be `true`. Changing this forces a new resource to be created.
+ node_taints = (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created.
+ orchestrator_version = (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). AKS does not require an exact patch version to be specified, minor version aliases such as `1.22` are also supported. - The minor version's latest GA patch is automatically chosen in that case. More details can be found in [the documentation](https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#alias-minor-version). This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first.
+ os_disk_size_gb = (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
+ os_disk_type = (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
+ os_sku = (Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created.
+ os_type = (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
+ pod_subnet = optional(object({
+ id = The ID of the Subnet where the pods in the Node Pool should exist. Changing this forces a new resource to be created.
+ }))
+ priority = (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
+ proximity_placement_group_id = (Optional) The ID of the Proximity Placement Group where the Virtual Machine Scale Set that powers this Node Pool will be placed. Changing this forces a new resource to be created. When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/azure/aks/spot-node-pool).
+ spot_max_price = (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. This field can only be configured when `priority` is set to `Spot`.
+ scale_down_mode = (Optional) Specifies how the node pool should deal with scaled-down nodes. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.
+ snapshot_id = (Optional) The ID of the Snapshot which should be used to create this Node Pool. Changing this forces a new resource to be created.
+ ultra_ssd_enabled = (Optional) Used to specify whether the UltraSSD is enabled in the Node Pool. Defaults to `false`. See [the documentation](https://docs.microsoft.com/azure/aks/use-ultra-disks) for more information. Changing this forces a new resource to be created.
+ vnet_subnet = optional(object({
+ id = The ID of the Subnet where this Node Pool should exist. Changing this forces a new resource to be created. A route table must be configured on this Subnet.
+ }))
+ upgrade_settings = optional(object({
+ drain_timeout_in_minutes = number
+ node_soak_duration_in_minutes = number
+ max_surge = string
+ }))
+ windows_profile = optional(object({
+ outbound_nat_enabled = optional(bool, true)
+ }))
+ workload_runtime = (Optional) Used to specify the workload runtime. Allowed values are `OCIContainer` and `WasmWasi`. WebAssembly System Interface node pools are in Public Preview - more information and details on how to opt into the preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-wasi-node-pools)
+ zones = (Optional) Specifies a list of Availability Zones in which this Kubernetes Cluster Node Pool should be located. Changing this forces a new Kubernetes Cluster Node Pool to be created.
+ create_before_destroy = (Optional) Create a new node pool before destroying the old one when Terraform must update an argument that cannot be updated in-place. Setting this argument to `true` will add a random suffix to the pool's name to avoid conflicts. Defaults to `true`.
+ }))
+ EOT
+ nullable = false
+}
+
+variable "node_resource_group" {
+ type = string
+ default = null
+ description = "The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. Changing this forces a new resource to be created."
+}
+
+variable "oidc_issuer_enabled" {
+ type = bool
+ default = false
+ description = "Enable or Disable the OIDC issuer URL. Defaults to false."
+}
+
+variable "oms_agent_enabled" {
+ type = bool
+ default = true
+ description = "Enable OMS Agent Addon."
+ nullable = false
+}
+
+variable "only_critical_addons_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created."
+}
+
+variable "open_service_mesh_enabled" {
+ type = bool
+ default = null
+ description = "Is Open Service Mesh enabled? For more details, please visit [Open Service Mesh for AKS](https://docs.microsoft.com/azure/aks/open-service-mesh-about)."
+}
+
+variable "orchestrator_version" {
+ type = string
+ default = null
+ description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region"
+}
+
+variable "os_disk_size_gb" {
+ type = number
+ default = 50
+ description = "Disk size of nodes in GBs."
+}
+
+variable "os_disk_type" {
+ type = string
+ default = "Managed"
+ description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created."
+ nullable = false
+}
+
+variable "os_sku" {
+ type = string
+ default = null
+ description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created."
+}
+
+variable "pod_subnet" {
+ type = object({
+ id = string
+ })
+ default = null
+ description = <<-EOT
+ object({
+ id = The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created.
+ })
+EOT
+}
+
+variable "prefix" {
+ type = string
+ default = ""
+  description = "(Optional) The prefix for the resources created in the specified Azure Resource Group. Omitting this variable requires both `var.cluster_log_analytics_workspace_name` and `var.cluster_name` have been set. Only one of `var.prefix` and `var.dns_prefix_private_cluster` can be specified."
+}
+
+variable "private_cluster_enabled" {
+ type = bool
+ default = false
+ description = "If true cluster API server will be exposed only on internal IP address and available only in cluster vnet."
+}
+
+variable "private_cluster_public_fqdn_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Specifies whether a Public FQDN for this Private Cluster should be added. Defaults to `false`."
+}
+
+variable "private_dns_zone_id" {
+ type = string
+ default = null
+ description = "(Optional) Either the ID of Private DNS Zone which should be delegated to this Cluster, `System` to have AKS manage this or `None`. In case of `None` you will need to bring your own DNS server and set up resolving, otherwise cluster will have issues after provisioning. Changing this forces a new resource to be created."
+}
+
+variable "public_ssh_key" {
+ type = string
+ default = ""
+ description = "A custom ssh key to control access to the AKS cluster. Changing this forces a new resource to be created."
+}
+
+variable "rbac_aad" {
+ type = bool
+ default = true
+ description = "(Optional) Is Azure Active Directory integration enabled?"
+ nullable = false
+}
+
+variable "rbac_aad_admin_group_object_ids" {
+ type = list(string)
+ default = null
+ description = "Object ID of groups with admin access."
+}
+
+variable "rbac_aad_azure_rbac_enabled" {
+ type = bool
+ default = null
+ description = "(Optional) Is Role Based Access Control based on Azure AD enabled?"
+}
+
+variable "rbac_aad_tenant_id" {
+ type = string
+ default = null
+ description = "(Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used."
+}
+
+variable "role_based_access_control_enabled" {
+ type = bool
+ default = false
+ description = "Enable Role Based Access Control."
+ nullable = false
+}
+
+variable "run_command_enabled" {
+ type = bool
+ default = true
+ description = "(Optional) Whether to enable run command for the cluster or not."
+}
+
+variable "scale_down_mode" {
+ type = string
+ default = "Delete"
+ description = "(Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created."
+}
+
+variable "secret_rotation_enabled" {
+ type = bool
+ default = false
+ description = "Is secret rotation enabled? This variable is only used when `key_vault_secrets_provider_enabled` is `true` and defaults to `false`"
+ nullable = false
+}
+
+variable "secret_rotation_interval" {
+ type = string
+ default = "2m"
+  description = "The interval to poll for secret rotation. This attribute is only used when `secret_rotation_enabled` is `true` and defaults to `2m`."
+ nullable = false
+}
+
+variable "service_mesh_profile" {
+ type = object({
+ mode = string
+ internal_ingress_gateway_enabled = optional(bool, true)
+ external_ingress_gateway_enabled = optional(bool, true)
+ })
+ default = null
+ description = <<-EOT
+ `mode` - (Required) The mode of the service mesh. Possible value is `Istio`.
+ `internal_ingress_gateway_enabled` - (Optional) Is Istio Internal Ingress Gateway enabled? Defaults to `true`.
+ `external_ingress_gateway_enabled` - (Optional) Is Istio External Ingress Gateway enabled? Defaults to `true`.
+ EOT
+}
+
+variable "sku_tier" {
+ type = string
+ default = "Free"
+ description = "The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free`, `Standard` and `Premium`"
+
+ validation {
+ condition = contains(["Free", "Standard", "Premium"], var.sku_tier)
+ error_message = "The SKU Tier must be either `Free`, `Standard` or `Premium`. `Paid` is no longer supported since AzureRM provider v3.51.0."
+ }
+}
+
+variable "snapshot_id" {
+ type = string
+ default = null
+ description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property."
+}
+
+variable "storage_profile_blob_driver_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Is the Blob CSI driver enabled? Defaults to `false`"
+}
+
+variable "storage_profile_disk_driver_enabled" {
+ type = bool
+ default = true
+ description = "(Optional) Is the Disk CSI driver enabled? Defaults to `true`"
+}
+
+variable "storage_profile_disk_driver_version" {
+ type = string
+ default = "v1"
+ description = "(Optional) Disk CSI Driver version to be used. Possible values are `v1` and `v2`. Defaults to `v1`."
+}
+
+variable "storage_profile_enabled" {
+ type = bool
+ default = false
+ description = "Enable storage profile"
+ nullable = false
+}
+
+variable "storage_profile_file_driver_enabled" {
+ type = bool
+ default = true
+ description = "(Optional) Is the File CSI driver enabled? Defaults to `true`"
+}
+
+variable "storage_profile_snapshot_controller_enabled" {
+ type = bool
+ default = true
+ description = "(Optional) Is the Snapshot Controller enabled? Defaults to `true`"
+}
+
+variable "support_plan" {
+ type = string
+ default = "KubernetesOfficial"
+ description = "The support plan which should be used for this Kubernetes Cluster. Possible values are `KubernetesOfficial` and `AKSLongTermSupport`."
+
+ validation {
+ condition = contains(["KubernetesOfficial", "AKSLongTermSupport"], var.support_plan)
+ error_message = "The support plan must be either `KubernetesOfficial` or `AKSLongTermSupport`."
+ }
+}
+
+variable "tags" {
+ type = map(string)
+ default = {}
+ description = "Any tags that should be present on the AKS cluster resources"
+}
+
+variable "temporary_name_for_rotation" {
+ type = string
+ default = null
+  description = "(Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. The `var.agents_size` argument is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`."
+}
+
+variable "ultra_ssd_enabled" {
+ type = bool
+ default = false
+ description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false."
+}
+
+variable "vnet_subnet" {
+ type = object({
+ id = string
+ })
+ default = null
+ description = <<-EOT
+ object({
+ id = The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
+ })
+EOT
+}
+
+variable "web_app_routing" {
+ type = object({
+ dns_zone_ids = list(string)
+ })
+ default = null
+ description = <<-EOT
+ object({
+ dns_zone_ids = "(Required) Specifies the list of the DNS Zone IDs in which DNS entries are created for applications deployed to the cluster when Web App Routing is enabled. If not using Bring-Your-Own DNS zones this property should be set to an empty list."
+ })
+EOT
+}
+
+variable "workload_autoscaler_profile" {
+ type = object({
+ keda_enabled = optional(bool, false)
+ vertical_pod_autoscaler_enabled = optional(bool, false)
+ })
+ default = null
+ description = <<-EOT
+ `keda_enabled` - (Optional) Specifies whether KEDA Autoscaler can be used for workloads.
+ `vertical_pod_autoscaler_enabled` - (Optional) Specifies whether Vertical Pod Autoscaler should be enabled.
+EOT
+}
+
+variable "workload_identity_enabled" {
+ type = bool
+ default = false
+ description = "Enable or Disable Workload Identity. Defaults to false."
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf
new file mode 100644
index 000000000..c9d2fe8f1
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/k8scluster/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_version = ">= 1.3"
+
+ required_providers {
+ azapi = {
+ source = "Azure/azapi"
+ version = ">=2.0, < 3.0"
+ }
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = ">= 3.107.0, < 4.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0"
+ }
+ time = {
+ source = "hashicorp/time"
+ version = ">= 0.5"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 3.1"
+ }
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/locals.tf b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf
new file mode 100644
index 000000000..7db3ec326
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/locals.tf
@@ -0,0 +1,51 @@
+locals {
+ # Generate cluster name
+ name = module.name.name
+
+ # Extract spec configurations
+ metadata = lookup(var.instance, "metadata", {})
+ spec = lookup(var.instance, "spec", {})
+
+ # Cluster configuration
+ cluster_config = lookup(local.spec, "cluster", {})
+
+ # Node pools configuration
+ node_pools_config = lookup(local.spec, "node_pools", {})
+ system_np_config = lookup(local.node_pools_config, "system_np", {})
+
+ # Auto-upgrade configuration
+ auto_upgrade_config = lookup(local.spec, "auto_upgrade_settings", {})
+ maintenance_window = lookup(local.auto_upgrade_config, "maintenance_window", {})
+
+ # Features configuration
+ features_config = lookup(local.spec, "features", {})
+
+ # Tags configuration
+ tags_config = lookup(local.spec, "tags", {})
+
+ # Computed values for the cluster
+ kubernetes_version = lookup(local.cluster_config, "kubernetes_version", "1.31")
+ sku_tier = lookup(local.cluster_config, "sku_tier", "Free")
+
+ # Node pool computed values
+ node_count = lookup(local.system_np_config, "node_count", 1)
+ instance_type = lookup(local.system_np_config, "instance_type", "Standard_D2_v4")
+ max_pods = lookup(local.system_np_config, "max_pods", 30)
+ os_disk_size_gb = lookup(local.system_np_config, "os_disk_size_gb", 50)
+ enable_auto_scaling = lookup(local.system_np_config, "enable_auto_scaling", false)
+
+ # Auto-upgrade computed values
+ enable_auto_upgrade = lookup(local.auto_upgrade_config, "enable_auto_upgrade", true)
+ automatic_channel_upgrade = lookup(local.auto_upgrade_config, "automatic_channel_upgrade", "stable")
+ max_surge = lookup(local.auto_upgrade_config, "max_surge", "1")
+
+ # Maintenance window computed values
+ maintenance_window_disabled = lookup(local.maintenance_window, "is_disabled", true)
+ maintenance_day_of_week = lookup(local.maintenance_window, "day_of_week", "SUN")
+ maintenance_start_time = lookup(local.maintenance_window, "start_time", 2)
+ maintenance_end_time = lookup(local.maintenance_window, "end_time", 6)
+
+ # Network access computed values
+ cluster_endpoint_public_access = lookup(local.cluster_config, "cluster_endpoint_public_access", true)
+ cluster_endpoint_public_access_cidrs = lookup(local.cluster_config, "cluster_endpoint_public_access_cidrs", ["0.0.0.0/0"])
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/main.tf b/modules/kubernetes_cluster/azure_aks/0.2/main.tf
new file mode 100644
index 000000000..5a5c322ac
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/main.tf
@@ -0,0 +1,138 @@
+# Generate a unique name for the AKS cluster
+module "name" {
+ source = "github.com/Facets-cloud/facets-utility-modules//name"
+ environment = var.environment
+ limit = 63
+ resource_name = var.instance_name
+ resource_type = "k8s"
+ globally_unique = true
+}
+
+# Create the AKS cluster using the locally modified Azure module
+module "k8scluster" {
+ source = "./k8scluster/v4"
+
+ # Required variables
+ resource_group_name = var.inputs.network_details.attributes.resource_group_name
+ location = var.inputs.network_details.attributes.region
+
+ # Basic cluster configuration
+ cluster_name = local.name
+ prefix = local.name
+ node_resource_group = "MC_${local.name}"
+
+ # Kubernetes version - only set when auto-upgrade is disabled or using patch channel
+ kubernetes_version = (
+ var.instance.spec.auto_upgrade_settings.enable_auto_upgrade &&
+ contains(["stable", "rapid", "node-image"], var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade)
+ ) ? null : var.instance.spec.cluster.kubernetes_version
+
+ # SKU tier
+ sku_tier = var.instance.spec.cluster.sku_tier
+
+ # Network configuration
+ network_plugin = "azure"
+ network_policy = "calico"
+ vnet_subnet = {
+ id = var.inputs.network_details.attributes.private_subnet_ids[0]
+ }
+ net_profile_service_cidr = "10.254.0.0/16"
+ net_profile_dns_service_ip = "10.254.0.254"
+
+ # Public cluster configuration - always enabled
+ private_cluster_enabled = false
+ api_server_authorized_ip_ranges = var.instance.spec.cluster.cluster_endpoint_public_access_cidrs
+
+ # Node pool configuration
+ agents_count = var.instance.spec.node_pools.system_np.node_count
+ agents_size = var.instance.spec.node_pools.system_np.instance_type
+ agents_max_pods = var.instance.spec.node_pools.system_np.max_pods
+ os_disk_size_gb = var.instance.spec.node_pools.system_np.os_disk_size_gb
+ agents_availability_zones = var.inputs.network_details.attributes.availability_zones
+ agents_pool_name = "system"
+
+ # Auto-scaling configuration
+ enable_auto_scaling = var.instance.spec.node_pools.system_np.enable_auto_scaling
+ agents_min_count = var.instance.spec.node_pools.system_np.enable_auto_scaling ? var.instance.spec.node_pools.system_np.node_count : null
+ agents_max_count = var.instance.spec.node_pools.system_np.enable_auto_scaling ? 10 : null
+
+ # System node pool - mark it as system mode
+ only_critical_addons_enabled = true
+
+ # Auto-upgrade configuration
+ automatic_channel_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade ? var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade : null
+
+ # Maintenance window configuration
+ maintenance_window_auto_upgrade = var.instance.spec.auto_upgrade_settings.enable_auto_upgrade && var.instance.spec.auto_upgrade_settings.maintenance_window.is_enabled ? {
+ frequency = "Weekly"
+ interval = 1
+ duration = var.instance.spec.auto_upgrade_settings.maintenance_window.end_time - var.instance.spec.auto_upgrade_settings.maintenance_window.start_time
+ day_of_week = var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week
+ start_time = format("%02d:00", var.instance.spec.auto_upgrade_settings.maintenance_window.start_time)
+ utc_offset = "+00:00"
+ } : null
+
+ # Node surge configuration for upgrades
+ agents_pool_max_surge = var.instance.spec.auto_upgrade_settings.max_surge
+
+ # Enable Azure Policy
+ azure_policy_enabled = true
+
+ # Enable workload identity and OIDC issuer
+ workload_identity_enabled = true
+ oidc_issuer_enabled = true
+
+ # Enable monitoring if log analytics workspace is provided
+ log_analytics_workspace_enabled = var.inputs.network_details.attributes.log_analytics_workspace_id != null
+ log_analytics_workspace = var.inputs.network_details.attributes.log_analytics_workspace_id != null ? {
+ id = var.inputs.network_details.attributes.log_analytics_workspace_id
+ name = split("/", var.inputs.network_details.attributes.log_analytics_workspace_id)[8]
+ } : null
+
+ # Enable AKS cluster logging
+ log_analytics_solution = var.inputs.network_details.attributes.log_analytics_workspace_id != null && length(var.instance.spec.cluster.cluster_enabled_log_types) > 0 ? {
+ enabled = true
+ id = var.inputs.network_details.attributes.log_analytics_workspace_id
+ log_analytics_workspace_id = var.inputs.network_details.attributes.log_analytics_workspace_id
+ log_retention_in_days = 30
+ } : null
+
+ # Auto-scaler profile configuration
+ auto_scaler_profile_enabled = var.instance.spec.node_pools.system_np.enable_auto_scaling
+ auto_scaler_profile_balance_similar_node_groups = false
+ auto_scaler_profile_expander = "random"
+ auto_scaler_profile_max_graceful_termination_sec = "600"
+ auto_scaler_profile_max_node_provisioning_time = "15m"
+ auto_scaler_profile_max_unready_nodes = 3
+ auto_scaler_profile_max_unready_percentage = 45
+ auto_scaler_profile_new_pod_scale_up_delay = "10s"
+ auto_scaler_profile_scale_down_delay_after_add = "10m"
+ auto_scaler_profile_scale_down_delay_after_delete = "10s"
+ auto_scaler_profile_scale_down_delay_after_failure = "3m"
+ auto_scaler_profile_scan_interval = "10s"
+ auto_scaler_profile_scale_down_unneeded = "10m"
+ auto_scaler_profile_scale_down_unready = "20m"
+ auto_scaler_profile_scale_down_utilization_threshold = "0.5"
+ auto_scaler_profile_empty_bulk_delete_max = 10
+ auto_scaler_profile_skip_nodes_with_local_storage = true
+ auto_scaler_profile_skip_nodes_with_system_pods = true
+
+ # Node labels for system node pool
+ agents_labels = {
+ "facets.cloud/node-type" = "system"
+ "managed-by" = "facets"
+ }
+
+ # Tags
+ tags = merge(
+ var.environment.cloud_tags,
+ var.instance.spec.tags != null ? var.instance.spec.tags : {}
+ )
+
+ # Azure AD and RBAC configuration
+ rbac_aad = true
+ rbac_aad_azure_rbac_enabled = true
+
+ # Keep local accounts enabled for compatibility with client certificate auth
+ local_account_disabled = false
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf
new file mode 100644
index 000000000..06c25ddb5
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/outputs.tf
@@ -0,0 +1,27 @@
+locals {
+ output_attributes = {
+ oidc_issuer_url = module.k8scluster.oidc_issuer_url
+ cluster_id = module.k8scluster.aks_id
+ cluster_name = module.k8scluster.aks_name
+ cluster_fqdn = module.k8scluster.cluster_fqdn
+ cluster_private_fqdn = module.k8scluster.cluster_private_fqdn
+ cluster_endpoint = module.k8scluster.host
+ cluster_location = module.k8scluster.location
+ kubernetes_version = var.instance.spec.cluster.kubernetes_version
+ node_resource_group = module.k8scluster.node_resource_group
+ resource_group_name = var.inputs.network_details.attributes.resource_group_name
+ cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate)
+ client_certificate = base64decode(module.k8scluster.client_certificate)
+ client_key = base64decode(module.k8scluster.client_key)
+ secrets = ["client_key", "client_certificate", "cluster_ca_certificate"]
+ }
+ output_interfaces = {
+ kubernetes = {
+ host = module.k8scluster.host
+ client_key = base64decode(module.k8scluster.client_key)
+ client_certificate = base64decode(module.k8scluster.client_certificate)
+ cluster_ca_certificate = base64decode(module.k8scluster.cluster_ca_certificate)
+ secrets = ["client_key", "client_certificate", "cluster_ca_certificate"]
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/variables.tf b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf
new file mode 100644
index 000000000..7bcf9562a
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/variables.tf
@@ -0,0 +1,193 @@
+variable "instance" {
+ description = "The instance configuration for the AKS cluster"
+ type = object({
+ kind = string
+ flavor = string
+ version = string
+ spec = object({
+ cluster = object({
+ kubernetes_version = optional(string, null)
+ cluster_endpoint_public_access_cidrs = optional(list(string), ["0.0.0.0/0"])
+ cluster_enabled_log_types = optional(list(string), [])
+ sku_tier = optional(string, "Free")
+ })
+ auto_upgrade_settings = object({
+ enable_auto_upgrade = optional(bool, true)
+ automatic_channel_upgrade = optional(string, "stable")
+ max_surge = optional(string, "1")
+ maintenance_window = object({
+ is_enabled = optional(bool, true)
+ day_of_week = optional(string, "Sunday")
+ start_time = optional(number, 2)
+ end_time = optional(number, 6)
+ })
+ })
+ node_pools = object({
+ system_np = object({
+ enabled = optional(bool, true)
+ node_count = optional(number, 1)
+ instance_type = optional(string, "Standard_D2_v4")
+ max_pods = optional(number, 30)
+ os_disk_size_gb = optional(number, 50)
+ enable_auto_scaling = optional(bool, false)
+ })
+ })
+ tags = optional(map(string), {})
+ })
+ })
+
+ validation {
+ condition = contains(["Free", "Standard"], var.instance.spec.cluster.sku_tier)
+ error_message = "SKU tier must be one of: Free, Standard."
+ }
+
+ validation {
+ condition = var.instance.spec.node_pools.system_np.node_count >= 1 && var.instance.spec.node_pools.system_np.node_count <= 1000
+ error_message = "System node pool node_count must be between 1 and 1000."
+ }
+
+ validation {
+ condition = var.instance.spec.node_pools.system_np.max_pods >= 10 && var.instance.spec.node_pools.system_np.max_pods <= 250
+ error_message = "System node pool max_pods must be between 10 and 250."
+ }
+
+ validation {
+ condition = var.instance.spec.node_pools.system_np.os_disk_size_gb >= 30 && var.instance.spec.node_pools.system_np.os_disk_size_gb <= 2048
+ error_message = "System node pool os_disk_size_gb must be between 30 and 2048."
+ }
+
+ validation {
+    condition     = can(regex("^[0-9]+%?$", var.instance.spec.auto_upgrade_settings.max_surge))
+ error_message = "Max surge must be a number or percentage (e.g., 1, 33%)."
+ }
+
+ validation {
+ condition = contains([
+ "rapid", "regular", "stable", "patch", "node-image", "none"
+ ], var.instance.spec.auto_upgrade_settings.automatic_channel_upgrade)
+ error_message = "Automatic channel upgrade must be one of: rapid, regular, stable, patch, node-image, none."
+ }
+
+ validation {
+ condition = contains([
+ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
+ ], var.instance.spec.auto_upgrade_settings.maintenance_window.day_of_week)
+ error_message = "Maintenance window day_of_week must be one of: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday."
+ }
+
+ validation {
+ condition = (
+ var.instance.spec.auto_upgrade_settings.maintenance_window.start_time >= 0 &&
+ var.instance.spec.auto_upgrade_settings.maintenance_window.start_time <= 23
+ )
+ error_message = "Maintenance window start_time must be between 0 and 23."
+ }
+
+ validation {
+ condition = (
+ var.instance.spec.auto_upgrade_settings.maintenance_window.end_time >= 0 &&
+ var.instance.spec.auto_upgrade_settings.maintenance_window.end_time <= 23
+ )
+ error_message = "Maintenance window end_time must be between 0 and 23."
+ }
+
+ validation {
+ condition = (
+ var.instance.spec.auto_upgrade_settings.maintenance_window.end_time >
+ var.instance.spec.auto_upgrade_settings.maintenance_window.start_time
+ )
+ error_message = "Maintenance window end_time must be greater than start_time."
+ }
+}
+
+variable "instance_name" {
+ description = "The architectural name for the resource as added in the Facets blueprint designer."
+ type = string
+
+ validation {
+ condition = length(var.instance_name) > 0 && length(var.instance_name) <= 63
+ error_message = "Instance name must be between 1 and 63 characters long."
+ }
+
+ validation {
+ condition = can(regex("^[a-z0-9-]+$", var.instance_name))
+ error_message = "Instance name must contain only lowercase letters, numbers, and hyphens."
+ }
+}
+
+variable "environment" {
+ description = "An object containing details about the environment."
+ type = object({
+ name = string
+ unique_name = string
+ cloud_tags = optional(map(string), {})
+ })
+
+ validation {
+ condition = length(var.environment.name) > 0
+ error_message = "Environment name cannot be empty."
+ }
+
+ validation {
+ condition = length(var.environment.unique_name) > 0
+ error_message = "Environment unique_name cannot be empty."
+ }
+}
+
+variable "inputs" {
+ description = "A map of inputs requested by the module developer."
+ type = object({
+ network_details = object({
+ attributes = object({
+ vnet_id = string
+ region = string
+ resource_group_name = string
+ availability_zones = list(string)
+ private_subnet_ids = list(string)
+ public_subnet_ids = list(string)
+ log_analytics_workspace_id = optional(string, null)
+ })
+ })
+ cloud_account = object({
+ attributes = object({
+ subscription_id = string
+ tenant_id = string
+ })
+ })
+ })
+
+ validation {
+ condition = length(var.inputs.network_details.attributes.vnet_id) > 0
+ error_message = "VNet ID cannot be empty."
+ }
+
+ validation {
+ condition = length(var.inputs.network_details.attributes.region) > 0
+ error_message = "Region cannot be empty."
+ }
+
+ validation {
+ condition = length(var.inputs.network_details.attributes.resource_group_name) > 0
+ error_message = "Resource group name cannot be empty."
+ }
+
+ validation {
+ condition = length(var.inputs.network_details.attributes.availability_zones) > 0
+ error_message = "At least one availability zone must be specified."
+ }
+
+ validation {
+ condition = length(var.inputs.network_details.attributes.private_subnet_ids) > 0
+ error_message = "At least one private subnet ID must be specified."
+ }
+
+ validation {
+ condition = length(var.inputs.cloud_account.attributes.subscription_id) > 0
+ error_message = "Azure subscription ID cannot be empty."
+ }
+
+ validation {
+ condition = length(var.inputs.cloud_account.attributes.tenant_id) > 0
+ error_message = "Azure tenant ID cannot be empty."
+ }
+}
diff --git a/modules/kubernetes_cluster/azure_aks/0.2/versions.tf b/modules/kubernetes_cluster/azure_aks/0.2/versions.tf
new file mode 100644
index 000000000..ee853a09f
--- /dev/null
+++ b/modules/kubernetes_cluster/azure_aks/0.2/versions.tf
@@ -0,0 +1,7 @@
+terraform {
+ required_providers {
+ azapi = {
+ source = "Azure/azapi"
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/network/azure_vpc/0.2/facets.yaml b/modules/network/azure_vpc/0.2/facets.yaml
new file mode 100644
index 000000000..1ea9c54c6
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/facets.yaml
@@ -0,0 +1,294 @@
+intent: network
+flavor: azure_network
+version: '0.2'
+description: Creates an Azure Virtual Network with configurable public subnets, private
+ subnets, database subnets, and specialized subnets across availability zones
+clouds:
+- azure
+inputs:
+ cloud_account:
+ type: '@outputs/cloud_account'
+ displayName: Cloud Account
+    description: The Azure Cloud Account where the Virtual Network will be created
+ optional: false
+ providers:
+ - azurerm
+spec:
+ type: object
+ properties:
+ vnet_cidr:
+ type: string
+ title: VNet CIDR Block
+ description: CIDR block for the Virtual Network (e.g., 10.0.0.0/16)
+ pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}$
+ x-ui-overrides-only: true
+ x-ui-error-message: CIDR must be a valid IP block (e.g., 10.0.0.0/16)
+ x-ui-placeholder: 10.0.0.0/16
+ region:
+ type: string
+ title: Azure Region
+ description: Azure region where the VNet will be created
+ x-ui-overrides-only: true
+ availability_zones:
+ type: array
+ title: Availability Zones
+ description: List of availability zones to use for subnets (e.g., ["1", "2",
+ "3"])
+ x-ui-overrides-only: true
+ items:
+ type: string
+ minItems: 1
+ maxItems: 3
+ use_fixed_cidr_allocation:
+ type: boolean
+ title: Use Fixed CIDR Allocation
+ description: Use predefined CIDR ranges instead of dynamic allocation
+ default: false
+ public_subnets:
+ type: object
+ title: Public Subnets Configuration
+ properties:
+ count_per_az:
+ type: number
+ title: Public Subnets per AZ
+ description: Number of public subnets to create in each availability zone
+ minimum: 0
+ maximum: 3
+ default: 1
+ subnet_size:
+ type: string
+ title: Public Subnet Size
+ description: Number of IP addresses in each public subnet
+ enum:
+ - '256'
+ - '512'
+ - '1024'
+ - '2048'
+ - '4096'
+ - '8192'
+ default: '256'
+ required:
+ - count_per_az
+ - subnet_size
+ private_subnets:
+ type: object
+ title: Private Subnets Configuration
+ properties:
+ count_per_az:
+ type: number
+ title: Private Subnets per AZ
+ description: Number of private subnets to create in each availability zone
+ minimum: 1
+ maximum: 3
+ default: 1
+ subnet_size:
+ type: string
+ title: Private Subnet Size
+ description: Number of IP addresses in each private subnet
+ enum:
+ - '256'
+ - '512'
+ - '1024'
+ - '2048'
+ - '4096'
+ - '8192'
+ default: '1024'
+ required:
+ - count_per_az
+ - subnet_size
+ database_subnets:
+ type: object
+ title: Database Subnets Configuration
+ description: Configure dedicated subnets for database resources
+ properties:
+ count_per_az:
+ type: number
+ title: Database Subnets per AZ
+ description: Number of database subnets to create in each availability zone
+ minimum: 0
+ maximum: 3
+ default: 1
+ subnet_size:
+ type: string
+ title: Database Subnet Size
+ description: Number of IP addresses in each database subnet
+ enum:
+ - '256'
+ - '512'
+ - '1024'
+ - '2048'
+ - '4096'
+ - '8192'
+ default: '256'
+ required:
+ - count_per_az
+ - subnet_size
+ enable_gateway_subnet:
+ type: boolean
+ title: Enable Gateway Subnet
+ description: Create subnet for VPN/ExpressRoute gateways
+ default: false
+ enable_cache_subnet:
+ type: boolean
+ title: Enable Cache Subnet
+ description: Create subnet for Redis and other caching services
+ default: false
+ enable_functions_subnet:
+ type: boolean
+ title: Enable Functions Subnet
+ description: Create subnet for Azure Functions with VNet integration
+ default: false
+ enable_private_link_service_subnet:
+ type: boolean
+ title: Enable Private Link Service Subnet
+ description: Create subnet for hosting Private Link Services
+ default: false
+ enable_aks:
+ type: boolean
+ title: Enable AKS Integration
+ description: Configure subnets for Azure Kubernetes Service integration
+ default: false
+ nat_gateway:
+ type: object
+ title: NAT Gateway Configuration
+ properties:
+ strategy:
+ type: string
+ title: NAT Gateway Strategy
+ description: Choose whether to create one NAT Gateway or one per availability
+ zone
+ enum:
+ - single
+ - per_az
+ default: single
+ required:
+ - strategy
+ private_endpoints:
+ type: object
+ title: Private Endpoints Configuration
+ description: Configure private endpoints for Azure services to improve security
+ and reduce data transfer costs
+ x-ui-toggle: true
+ properties:
+ enable_storage:
+ type: boolean
+ title: Enable Storage Private Endpoint
+ description: Create private endpoint for Azure Storage (Blob)
+ default: true
+ enable_sql:
+ type: boolean
+ title: Enable SQL Private Endpoint
+ description: Create private endpoint for Azure SQL Database
+ default: true
+ enable_keyvault:
+ type: boolean
+ title: Enable Key Vault Private Endpoint
+ description: Create private endpoint for Azure Key Vault
+ default: true
+ enable_acr:
+ type: boolean
+ title: Enable ACR Private Endpoint
+ description: Create private endpoint for Azure Container Registry
+ default: true
+ enable_aks:
+ type: boolean
+ title: Enable AKS Private Endpoint
+ description: Create private endpoint for Azure Kubernetes Service
+ default: false
+ enable_cosmos:
+ type: boolean
+ title: Enable Cosmos DB Private Endpoint
+ description: Create private endpoint for Azure Cosmos DB
+ default: false
+ enable_servicebus:
+ type: boolean
+ title: Enable Service Bus Private Endpoint
+ description: Create private endpoint for Azure Service Bus
+ default: false
+ enable_eventhub:
+ type: boolean
+ title: Enable Event Hub Private Endpoint
+ description: Create private endpoint for Azure Event Hub
+ default: false
+ enable_monitor:
+ type: boolean
+ title: Enable Monitor Private Endpoint
+ description: Create private endpoint for Azure Monitor
+ default: false
+ enable_cognitive:
+ type: boolean
+ title: Enable Cognitive Services Private Endpoint
+ description: Create private endpoint for Azure Cognitive Services
+ default: false
+ tags:
+ type: object
+ title: Additional Tags
+ description: Optional additional tags to apply to all VNet resources. These
+ will be merged with the standard environment tags.
+ x-ui-yaml-editor: true
+ required:
+ - vnet_cidr
+ - region
+ - availability_zones
+ - public_subnets
+ - private_subnets
+ - database_subnets
+ - nat_gateway
+outputs:
+ default:
+ type: '@facets/azure-network-details'
+sample:
+ kind: network
+ flavor: azure_network
+ version: '0.2'
+ spec:
+ vnet_cidr: 10.0.0.0/16
+ region: centralindia
+ availability_zones:
+ - '1'
+ - '2'
+ - '3'
+ use_fixed_cidr_allocation: false
+ public_subnets:
+ count_per_az: 1
+ subnet_size: '256'
+ private_subnets:
+ count_per_az: 1
+ subnet_size: '1024'
+ database_subnets:
+ count_per_az: 1
+ subnet_size: '256'
+ enable_gateway_subnet: false
+ enable_cache_subnet: false
+ enable_functions_subnet: false
+ enable_private_link_service_subnet: false
+ enable_aks: false
+ nat_gateway:
+ strategy: single
+ private_endpoints:
+ enable_storage: true
+ enable_sql: true
+ enable_keyvault: true
+ enable_acr: true
+ enable_aks: false
+ enable_cosmos: false
+ enable_servicebus: false
+ enable_eventhub: false
+ enable_monitor: false
+ enable_cognitive: false
+ tags:
+ Environment: production
+ Project: main-infrastructure
+iac:
+ validated_files:
+ - main.tf
+ - variables.tf
+ - locals.tf
+ - network.tf
+ - subnets.tf
+ - nat-gateway.tf
+ - routing.tf
+ - security-groups.tf
+ - private-endpoints.tf
+ - outputs.tf
+disable_state_reference_on_selective_release: true
\ No newline at end of file
diff --git a/modules/network/azure_vpc/0.2/locals.tf b/modules/network/azure_vpc/0.2/locals.tf
new file mode 100644
index 000000000..5c77c0aeb
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/locals.tf
@@ -0,0 +1,206 @@
+#########################################################################
+# Local Values and Calculations #
+#########################################################################
+
+locals {
+ # Private endpoint DNS zone mappings
+ private_dns_zones = {
+ enable_storage = "privatelink.blob.core.windows.net"
+ enable_sql = "privatelink.database.windows.net"
+ enable_keyvault = "privatelink.vaultcore.azure.net"
+ enable_acr = "privatelink.azurecr.io"
+ enable_aks = "privatelink.${var.instance.spec.region}.azmk8s.io"
+ enable_cosmos = "privatelink.documents.azure.com"
+ enable_servicebus = "privatelink.servicebus.windows.net"
+ enable_eventhub = "privatelink.servicebus.windows.net"
+ enable_monitor = "privatelink.monitor.azure.com"
+ enable_cognitive = "privatelink.cognitiveservices.azure.com"
+ }
+
+ private_endpoints_enabled = {
+ for k, v in var.instance.spec.private_endpoints : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true
+ }
+
+ # Calculate subnet mask from IP count
+ subnet_mask_map = {
+ "256" = 24 # /24 = 256 IPs
+ "512" = 23 # /23 = 512 IPs
+ "1024" = 22 # /22 = 1024 IPs
+ "2048" = 21 # /21 = 2048 IPs
+ "4096" = 20 # /20 = 4096 IPs
+ "8192" = 19 # /19 = 8192 IPs
+ }
+
+ # Use fixed CIDR allocation like the original (optional)
+ use_fixed_cidrs = lookup(var.instance.spec, "use_fixed_cidr_allocation", false)
+
+ # Fixed CIDR allocation (similar to original logic)
+ fixed_private_subnets = local.use_fixed_cidrs ? [for i in range(4) : cidrsubnet(var.instance.spec.vnet_cidr, 4, i)] : []
+ fixed_public_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 12), cidrsubnet(var.instance.spec.vnet_cidr, 4, 14), cidrsubnet(var.instance.spec.vnet_cidr, 4, 15)] : []
+ fixed_database_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 4), cidrsubnet(var.instance.spec.vnet_cidr, 4, 5)] : []
+ fixed_gateway_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 4, 6)] : []
+ fixed_cache_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 112)] : []
+ fixed_functions_subnets = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 113)] : []
+ fixed_private_link_subnet = local.use_fixed_cidrs ? [cidrsubnet(var.instance.spec.vnet_cidr, 8, 114)] : []
+
+ vnet_prefix_length = tonumber(split("/", var.instance.spec.vnet_cidr)[1])
+
+ public_subnet_newbits = local.subnet_mask_map[var.instance.spec.public_subnets.subnet_size] - local.vnet_prefix_length
+ private_subnet_newbits = local.subnet_mask_map[var.instance.spec.private_subnets.subnet_size] - local.vnet_prefix_length
+ database_subnet_newbits = local.subnet_mask_map[var.instance.spec.database_subnets.subnet_size] - local.vnet_prefix_length
+
+ # Calculate total number of subnets needed (only for dynamic allocation)
+ public_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0
+ private_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az : 0
+ database_total_subnets = !local.use_fixed_cidrs ? length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az : 0
+
+ # Specialized subnets (always use fixed allocation for these)
+ gateway_subnets_enabled = lookup(var.instance.spec, "enable_gateway_subnet", false)
+ cache_subnets_enabled = lookup(var.instance.spec, "enable_cache_subnet", false)
+ functions_subnets_enabled = lookup(var.instance.spec, "enable_functions_subnet", false)
+ private_link_svc_enabled = lookup(var.instance.spec, "enable_private_link_service_subnet", false)
+
+ # Create list of newbits for cidrsubnets function (dynamic allocation only)
+ subnet_newbits = !local.use_fixed_cidrs ? concat(
+ var.instance.spec.public_subnets.count_per_az > 0 ? [
+ for i in range(local.public_total_subnets) : local.public_subnet_newbits
+ ] : [],
+ [for i in range(local.private_total_subnets) : local.private_subnet_newbits],
+ [for i in range(local.database_total_subnets) : local.database_subnet_newbits]
+ ) : []
+
+ # Generate all subnet CIDRs using cidrsubnets function - this prevents overlaps (dynamic allocation)
+ all_subnet_cidrs = !local.use_fixed_cidrs && length(local.subnet_newbits) > 0 ? cidrsubnets(var.instance.spec.vnet_cidr, local.subnet_newbits...) : []
+
+ # Extract subnet CIDRs by type (dynamic allocation)
+ public_subnet_cidrs = !local.use_fixed_cidrs && var.instance.spec.public_subnets.count_per_az > 0 ? slice(
+ local.all_subnet_cidrs,
+ 0,
+ local.public_total_subnets
+ ) : local.fixed_public_subnets
+
+ private_subnet_cidrs = !local.use_fixed_cidrs ? slice(
+ local.all_subnet_cidrs,
+ var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets : 0,
+ var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets
+ ) : local.fixed_private_subnets
+
+ database_subnet_cidrs = !local.use_fixed_cidrs ? slice(
+ local.all_subnet_cidrs,
+ var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets : local.private_total_subnets,
+ var.instance.spec.public_subnets.count_per_az > 0 ? local.public_total_subnets + local.private_total_subnets + local.database_total_subnets : local.private_total_subnets + local.database_total_subnets
+ ) : local.fixed_database_subnets
+
+ # Create subnet mappings with AZ and CIDR
+ public_subnets = var.instance.spec.public_subnets.count_per_az > 0 ? (
+ local.use_fixed_cidrs ? [
+ for i, cidr in local.public_subnet_cidrs : {
+ az_index = i % length(var.instance.spec.availability_zones)
+ subnet_index = floor(i / length(var.instance.spec.availability_zones))
+ az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)]
+ cidr_block = cidr
+ }
+ ] : flatten([
+ for az_index, az in var.instance.spec.availability_zones : [
+ for subnet_index in range(var.instance.spec.public_subnets.count_per_az) : {
+ az_index = az_index
+ subnet_index = subnet_index
+ az = az
+ cidr_block = local.public_subnet_cidrs[az_index * var.instance.spec.public_subnets.count_per_az + subnet_index]
+ }
+ ]
+ ])
+ ) : []
+
+ private_subnets = local.use_fixed_cidrs ? [
+ for i, cidr in local.private_subnet_cidrs : {
+ az_index = i % length(var.instance.spec.availability_zones)
+ subnet_index = floor(i / length(var.instance.spec.availability_zones))
+ az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)]
+ cidr_block = cidr
+ }
+ ] : flatten([
+ for az_index, az in var.instance.spec.availability_zones : [
+ for subnet_index in range(var.instance.spec.private_subnets.count_per_az) : {
+ az_index = az_index
+ subnet_index = subnet_index
+ az = az
+ cidr_block = local.private_subnet_cidrs[az_index * var.instance.spec.private_subnets.count_per_az + subnet_index]
+ }
+ ]
+ ])
+
+ database_subnets = local.use_fixed_cidrs ? [
+ for i, cidr in local.database_subnet_cidrs : {
+ az_index = i % length(var.instance.spec.availability_zones)
+ subnet_index = floor(i / length(var.instance.spec.availability_zones))
+ az = var.instance.spec.availability_zones[i % length(var.instance.spec.availability_zones)]
+ cidr_block = cidr
+ }
+ ] : flatten([
+ for az_index, az in var.instance.spec.availability_zones : [
+ for subnet_index in range(var.instance.spec.database_subnets.count_per_az) : {
+ az_index = az_index
+ subnet_index = subnet_index
+ az = az
+ cidr_block = local.database_subnet_cidrs[az_index * var.instance.spec.database_subnets.count_per_az + subnet_index]
+ }
+ ]
+ ])
+
+ # Specialized subnets (always use fixed allocation)
+ gateway_subnets = local.gateway_subnets_enabled ? [
+ for i, cidr in local.fixed_gateway_subnet : {
+ subnet_index = i
+ cidr_block = cidr
+ }
+ ] : []
+
+ cache_subnets = local.cache_subnets_enabled ? [
+ for i, cidr in local.fixed_cache_subnet : {
+ subnet_index = i
+ cidr_block = cidr
+ }
+ ] : []
+
+ functions_subnets = local.functions_subnets_enabled ? [
+ for i, cidr in local.fixed_functions_subnets : {
+ subnet_index = i
+ cidr_block = cidr
+ }
+ ] : []
+
+ private_link_service_subnets = local.private_link_svc_enabled ? [
+ for i, cidr in local.fixed_private_link_subnet : {
+ subnet_index = i
+ cidr_block = cidr
+ }
+ ] : []
+
+ # Private endpoints configuration with defaults
+ private_endpoints = var.instance.spec.private_endpoints != null ? var.instance.spec.private_endpoints : {
+ enable_storage = true
+ enable_sql = true
+ enable_keyvault = true
+ enable_acr = true
+ enable_aks = false
+ enable_cosmos = false
+ enable_servicebus = false
+ enable_eventhub = false
+ enable_monitor = false
+ enable_cognitive = false
+ }
+
+ # Resource naming prefix
+ name_prefix = "${var.environment.unique_name}-${var.instance_name}"
+
+ # Common tags
+ common_tags = merge(
+ var.environment.cloud_tags,
+ lookup(var.instance.spec, "tags", {}),
+ {
+ Name = local.name_prefix
+ Environment = var.environment.name
+ }
+ )
+}
diff --git a/modules/network/azure_vpc/0.2/main.tf b/modules/network/azure_vpc/0.2/main.tf
new file mode 100644
index 000000000..51b6b1398
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/main.tf
@@ -0,0 +1,33 @@
+#########################################################################
+# Terraform Module Structure #
+# #
+# ── Guidance for Code Generators / AI Tools ────────────────────────── #
+# #
+# • This main.tf file serves as the module's entry point and overview. #
+# #
+# • All logic has been organized into separate files for maintainability:#
+# - locals.tf: Local values and calculations #
+# - network.tf: Core network infrastructure (RG, VNet) #
+# - subnets.tf: All subnet resources #
+# - nat-gateway.tf: NAT Gateway and associations #
+# - routing.tf: Route tables and routing configuration #
+# - security-groups.tf: Network Security Groups #
+# - private-endpoints.tf: Private DNS zones and endpoints #
+# #
+# • This structure improves readability, maintainability, and #
+# makes it easier for teams to work on specific components. #
+# #
+#########################################################################
+
+# This Azure VPC module creates a comprehensive virtual network infrastructure
+# with support for multiple subnet types, NAT gateways, private endpoints,
+# and flexible CIDR allocation strategies.
+#
+# Key Features:
+# - Dynamic or fixed CIDR allocation
+# - Multiple subnet types (public, private, database, specialized)
+# - NAT Gateway with per-AZ or shared strategies
+# - Private endpoints with DNS integration
+# - Comprehensive routing and security group configuration
+#
+# All resources are defined in their respective files for better organization.
diff --git a/modules/network/azure_vpc/0.2/nat-gateway.tf b/modules/network/azure_vpc/0.2/nat-gateway.tf
new file mode 100644
index 000000000..8a67cf284
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/nat-gateway.tf
@@ -0,0 +1,77 @@
+#########################################################################
+# NAT Gateway Resources #
+#########################################################################
+
+# Public IP for NAT Gateway
+resource "azurerm_public_ip" "nat_gateway" {
+ for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? {
+ for az in var.instance.spec.availability_zones : az => az
+ } : var.instance.spec.public_subnets.count_per_az > 0 ? {
+ single = var.instance.spec.availability_zones[0]
+ } : {}
+
+ name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-pip-${each.key}" : "${local.name_prefix}-natgw-pip"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+ allocation_method = "Static"
+ sku = "Standard"
+ zones = [each.value]
+
+ tags = local.common_tags
+
+ lifecycle {
+ ignore_changes = [name]
+ }
+}
+
+# NAT Gateway
+resource "azurerm_nat_gateway" "main" {
+ for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? {
+ for az in var.instance.spec.availability_zones : az => az
+ } : var.instance.spec.public_subnets.count_per_az > 0 ? {
+ single = var.instance.spec.availability_zones[0]
+ } : {}
+
+ name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-natgw-${each.key}" : "${local.name_prefix}-natgw"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+ sku_name = "Standard"
+ idle_timeout_in_minutes = 10
+ zones = [each.value]
+
+ tags = local.common_tags
+
+ lifecycle {
+ ignore_changes = [name]
+ }
+}
+
+# Associate Public IP with NAT Gateway
+resource "azurerm_nat_gateway_public_ip_association" "main" {
+ for_each = azurerm_nat_gateway.main
+
+ nat_gateway_id = each.value.id
+ public_ip_address_id = azurerm_public_ip.nat_gateway[each.key].id
+}
+
+# Associate NAT Gateway with Private Subnets
+resource "azurerm_subnet_nat_gateway_association" "private" {
+ for_each = {
+ for k, v in azurerm_subnet.private : k => v
+ if var.instance.spec.public_subnets.count_per_az > 0
+ }
+
+ subnet_id = each.value.id
+ nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_nat_gateway.main[split("-", each.key)[0]].id : azurerm_nat_gateway.main["single"].id
+}
+
+# Associate NAT Gateway with Functions Subnets
+resource "azurerm_subnet_nat_gateway_association" "functions" {
+ for_each = {
+ for k, v in azurerm_subnet.functions : k => v
+ if var.instance.spec.public_subnets.count_per_az > 0
+ }
+
+ subnet_id = each.value.id
+  nat_gateway_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_nat_gateway.main[var.instance.spec.availability_zones[0]].id : azurerm_nat_gateway.main["single"].id # Functions use the first-AZ NAT Gateway in per_az mode, else the single one
+}
diff --git a/modules/network/azure_vpc/0.2/network.tf b/modules/network/azure_vpc/0.2/network.tf
new file mode 100644
index 000000000..81767c918
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/network.tf
@@ -0,0 +1,29 @@
+#########################################################################
+# Core Network Infrastructure #
+#########################################################################
+
+# Resource Group
+resource "azurerm_resource_group" "main" {
+ name = "${local.name_prefix}-rg"
+ location = var.instance.spec.region
+
+ tags = local.common_tags
+
+ lifecycle {
+ prevent_destroy = true
+ }
+}
+
+# Virtual Network
+resource "azurerm_virtual_network" "main" {
+ name = "${local.name_prefix}-vnet"
+ address_space = [var.instance.spec.vnet_cidr]
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ tags = local.common_tags
+
+ lifecycle {
+ prevent_destroy = true
+ }
+}
diff --git a/modules/network/azure_vpc/0.2/outputs.tf b/modules/network/azure_vpc/0.2/outputs.tf
new file mode 100644
index 000000000..ba52b6999
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/outputs.tf
@@ -0,0 +1,26 @@
+locals {
+ output_attributes = {
+ resource_group_id = azurerm_resource_group.main.id
+ resource_group_name = azurerm_resource_group.main.name
+ vnet_id = azurerm_virtual_network.main.id
+ vnet_name = azurerm_virtual_network.main.name
+ vnet_cidr_block = var.instance.spec.vnet_cidr
+ region = azurerm_resource_group.main.location
+ availability_zones = var.instance.spec.availability_zones
+ nat_gateway_ids = values(azurerm_nat_gateway.main)[*].id
+ nat_gateway_public_ip_ids = values(azurerm_public_ip.nat_gateway)[*].id
+ public_subnet_ids = values(azurerm_subnet.public)[*].id
+ private_subnet_ids = values(azurerm_subnet.private)[*].id
+ database_subnet_ids = values(azurerm_subnet.database)[*].id
+ gateway_subnet_ids = values(azurerm_subnet.gateway)[*].id
+ cache_subnet_ids = values(azurerm_subnet.cache)[*].id
+ functions_subnet_ids = values(azurerm_subnet.functions)[*].id
+ private_link_service_subnet_ids = values(azurerm_subnet.private_link_service)[*].id
+ default_security_group_id = azurerm_network_security_group.allow_all_default.id
+ private_endpoints_security_group_id = try(azurerm_network_security_group.vpc_endpoints[0].id, null)
+ storage_private_endpoint_id = try(azurerm_private_endpoint.storage[0].id, null)
+ storage_account_id = try(azurerm_storage_account.example[0].id, null)
+ }
+ output_interfaces = {
+ }
+}
\ No newline at end of file
diff --git a/modules/network/azure_vpc/0.2/private-endpoints.tf b/modules/network/azure_vpc/0.2/private-endpoints.tf
new file mode 100644
index 000000000..b919fd269
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/private-endpoints.tf
@@ -0,0 +1,67 @@
+#########################################################################
+# Private DNS and Private Endpoints #
+#########################################################################
+
+# Private DNS Zone for Private Endpoints
+# One zone per endpoint flag that is enabled (keys look like "enable_storage").
+resource "azurerm_private_dns_zone" "private_endpoints" {
+ for_each = {
+ # Null-guard: spec.private_endpoints is optional with no default, so it is
+ # null when omitted and the for-expression would fail at plan time.
+ # NOTE(review): the fallback "privatelink.${k}.azure.com" produces names like
+ # "privatelink.enable_storage.azure.com" — confirm local.private_dns_zones
+ # covers every flag key so the fallback is never actually used.
+ for k, v in (var.instance.spec.private_endpoints == null ? {} : var.instance.spec.private_endpoints) : k => lookup(local.private_dns_zones, k, "privatelink.${k}.azure.com") if v == true
+ }
+
+ name = each.value
+ resource_group_name = azurerm_resource_group.main.name
+
+ # Consistency fix: every other resource in this module is tagged with
+ # local.common_tags; tagging with raw spec tags dropped the shared tags.
+ tags = local.common_tags
+}
+
+# Link Private DNS Zone to VNet
+# Required so resources inside the VNet resolve privatelink names.
+resource "azurerm_private_dns_zone_virtual_network_link" "private_endpoints" {
+ for_each = azurerm_private_dns_zone.private_endpoints
+
+ name = "${local.name_prefix}-${each.key}-dns-link"
+ resource_group_name = azurerm_resource_group.main.name
+ private_dns_zone_name = each.value.name
+ virtual_network_id = azurerm_virtual_network.main.id
+ # Auto-registration is for VM hostname records only; endpoint records are
+ # written by the private_dns_zone_group on each private endpoint instead.
+ registration_enabled = false
+
+ tags = local.common_tags
+}
+
+# Example Storage Account (for demonstration of private endpoint)
+resource "azurerm_storage_account" "example" {
+ count = try(local.private_endpoints.enable_storage, false) ? 1 : 0
+
+ # Storage account names must be 3-24 lowercase alphanumerics; strip the
+ # separators out of the prefix and truncate.
+ name = substr(replace(replace(lower(local.name_prefix), "-", ""), "_", ""), 0, 20)
+ resource_group_name = azurerm_resource_group.main.name
+ location = azurerm_resource_group.main.location
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+
+ # Disable public access: the account is reachable only through the private
+ # endpoint defined below. (The original comment had no matching argument,
+ # leaving the account publicly reachable.)
+ public_network_access_enabled = false
+
+ tags = local.common_tags
+}
+
+# Private Endpoint for Storage Account
+resource "azurerm_private_endpoint" "storage" {
+ count = try(local.private_endpoints.enable_storage, false) ? 1 : 0
+
+ name = "${local.name_prefix}-storage-pe"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+ # values() orders by key, so this lands in the lexically-first private subnet.
+ subnet_id = values(azurerm_subnet.private)[0].id
+
+ private_service_connection {
+ name = "${local.name_prefix}-storage-psc"
+ private_connection_resource_id = azurerm_storage_account.example[0].id
+ subresource_names = ["blob"]
+ is_manual_connection = false
+ }
+
+ private_dns_zone_group {
+ name = "storage-dns-zone-group"
+ # NOTE(review): the referenced zone's for_each reads
+ # var.instance.spec.private_endpoints while this count reads
+ # local.private_endpoints — confirm the two always agree, otherwise this
+ # lookup of key "enable_storage" can fail.
+ private_dns_zone_ids = [azurerm_private_dns_zone.private_endpoints["enable_storage"].id]
+ }
+
+ tags = local.common_tags
+}
diff --git a/modules/network/azure_vpc/0.2/routing.tf b/modules/network/azure_vpc/0.2/routing.tf
new file mode 100644
index 000000000..148ac7394
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/routing.tf
@@ -0,0 +1,66 @@
+#########################################################################
+# Route Tables and Routing #
+#########################################################################
+
+# Route Table for Public Subnets
+# No custom routes are defined here; Azure's default system routes apply.
+resource "azurerm_route_table" "public" {
+ count = var.instance.spec.public_subnets.count_per_az > 0 ? 1 : 0
+
+ name = "${local.name_prefix}-public-rt"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ tags = local.common_tags
+}
+
+# Associate Route Table with Public Subnets
+resource "azurerm_subnet_route_table_association" "public" {
+ # Public subnets only exist when count_per_az > 0, which is exactly when
+ # the route table above is created, so indexing [0] below is safe.
+ for_each = azurerm_subnet.public
+
+ subnet_id = each.value.id
+ route_table_id = azurerm_route_table.public[0].id
+}
+
+# Route Table for Private Subnets
+# "per_az" strategy: one route table per availability zone (keyed by AZ).
+# "single" strategy: one shared table (key "single") — but only when public
+# subnets exist; otherwise no private route table is created at all.
+resource "azurerm_route_table" "private" {
+ for_each = var.instance.spec.nat_gateway.strategy == "per_az" ? {
+ for az in var.instance.spec.availability_zones : az => az
+ } : var.instance.spec.public_subnets.count_per_az > 0 ? {
+ single = "1"
+ } : {}
+
+ name = var.instance.spec.nat_gateway.strategy == "per_az" ? "${local.name_prefix}-private-rt-${each.key}" : "${local.name_prefix}-private-rt"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ tags = local.common_tags
+}
+
+# Associate Route Table with Private Subnets
+resource "azurerm_subnet_route_table_association" "private" {
+ # Guard: with strategy == "single" and zero public subnets, no private
+ # route table exists (see azurerm_route_table.private), and the original
+ # expression failed at plan time on the missing "single" key. Skip the
+ # association in that case so the subnets fall back to system routes.
+ for_each = length(azurerm_route_table.private) > 0 ? azurerm_subnet.private : {}
+
+ subnet_id = each.value.id
+ # Private subnet keys are "<az>-<index>", so the AZ is the first segment.
+ route_table_id = var.instance.spec.nat_gateway.strategy == "per_az" ? azurerm_route_table.private[split("-", each.key)[0]].id : azurerm_route_table.private["single"].id
+}
+
+# Route Table for Database Subnets (isolated)
+# One table per AZ, created even when no database subnets are configured;
+# only the association below depends on the subnets actually existing.
+resource "azurerm_route_table" "database" {
+ for_each = {
+ for az in var.instance.spec.availability_zones : az => az
+ }
+
+ name = "${local.name_prefix}-database-rt-${each.key}"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ tags = local.common_tags
+}
+
+# Associate Route Table with Database Subnets
+resource "azurerm_subnet_route_table_association" "database" {
+ for_each = azurerm_subnet.database
+
+ subnet_id = each.value.id
+ # Database subnet keys are "<az>-<index>"; the AZ is the first segment.
+ route_table_id = azurerm_route_table.database[split("-", each.key)[0]].id
+}
diff --git a/modules/network/azure_vpc/0.2/security-groups.tf b/modules/network/azure_vpc/0.2/security-groups.tf
new file mode 100644
index 000000000..0b9e90dc7
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/security-groups.tf
@@ -0,0 +1,127 @@
+#########################################################################
+# Network Security Groups #
+#########################################################################
+
+# Network Security Group - Allow all within VNet (similar to original logic)
+# NOTE(review): despite the "allow all" name, the single inbound rule is
+# TCP-only (protocol = "Tcp"); UDP/ICMP from the VNet are not explicitly
+# allowed by this rule — confirm that matches the original intent.
+resource "azurerm_network_security_group" "allow_all_default" {
+ name = "${local.name_prefix}-allow-all-default-nsg"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ security_rule {
+ name = "AllowVnetInbound"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = var.instance.spec.vnet_cidr
+ destination_address_prefix = "*"
+ description = "Allowing connection from within vnet"
+ }
+
+ tags = merge(local.common_tags, {
+ Terraform = "true"
+ })
+
+ # Keep the resource if it has been renamed out-of-band.
+ lifecycle {
+ ignore_changes = [name]
+ }
+}
+
+# Security Group for VPC Endpoints (keep existing for private endpoints)
+# Created only when at least one private-endpoint flag is enabled.
+resource "azurerm_network_security_group" "vpc_endpoints" {
+ count = anytrue([
+ try(local.private_endpoints.enable_storage, false),
+ try(local.private_endpoints.enable_sql, false),
+ try(local.private_endpoints.enable_keyvault, false),
+ try(local.private_endpoints.enable_acr, false),
+ try(local.private_endpoints.enable_aks, false),
+ try(local.private_endpoints.enable_cosmos, false),
+ try(local.private_endpoints.enable_servicebus, false),
+ try(local.private_endpoints.enable_eventhub, false),
+ try(local.private_endpoints.enable_monitor, false),
+ try(local.private_endpoints.enable_cognitive, false)
+ ]) ? 1 : 0
+
+ name = "${local.name_prefix}-private-endpoints-nsg"
+ location = azurerm_resource_group.main.location
+ resource_group_name = azurerm_resource_group.main.name
+
+ # Inbound: HTTPS from inside the VNet only.
+ security_rule {
+ name = "AllowHTTPS"
+ priority = 1001
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "443"
+ source_address_prefix = var.instance.spec.vnet_cidr
+ destination_address_prefix = "*"
+ }
+
+ # Outbound: unrestricted. Priorities are scoped per direction in Azure,
+ # so reusing 1001 for Inbound and Outbound does not conflict.
+ security_rule {
+ name = "AllowOutbound"
+ priority = 1001
+ direction = "Outbound"
+ access = "Allow"
+ protocol = "*"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ tags = local.common_tags
+}
+
+# Network Security Groups for Subnets - Apply the allow-all NSG to all subnets
+# One association resource per subnet family; each iterates its subnet map,
+# so empty families (e.g. gateway disabled) simply produce no associations.
+resource "azurerm_subnet_network_security_group_association" "public" {
+ for_each = azurerm_subnet.public
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "private" {
+ for_each = azurerm_subnet.private
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "database" {
+ for_each = azurerm_subnet.database
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "gateway" {
+ for_each = azurerm_subnet.gateway
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "cache" {
+ for_each = azurerm_subnet.cache
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "functions" {
+ for_each = azurerm_subnet.functions
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
+
+resource "azurerm_subnet_network_security_group_association" "private_link_service" {
+ for_each = azurerm_subnet.private_link_service
+
+ subnet_id = each.value.id
+ network_security_group_id = azurerm_network_security_group.allow_all_default.id
+}
diff --git a/modules/network/azure_vpc/0.2/subnets.tf b/modules/network/azure_vpc/0.2/subnets.tf
new file mode 100644
index 000000000..392f74274
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/subnets.tf
@@ -0,0 +1,174 @@
+#########################################################################
+# Subnet Resources #
+#########################################################################
+
+# Public Subnets
+resource "azurerm_subnet" "public" {
+ # Keys are "<az>-<index>"; no public subnets when count_per_az == 0.
+ for_each = var.instance.spec.public_subnets.count_per_az > 0 ? {
+ for subnet in local.public_subnets :
+ "${subnet.az}-${subnet.subnet_index}" => subnet
+ } : {}
+
+ name = "${local.name_prefix}-public-${each.value.az}-${each.value.subnet_index + 1}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [delegation, service_endpoints, name]
+ }
+}
+
+# Private Subnets
+resource "azurerm_subnet" "private" {
+ # Keys are "<az>-<index>"; at least one per AZ per the variable validation.
+ for_each = {
+ for subnet in local.private_subnets :
+ "${subnet.az}-${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-private-${each.value.az}-${each.value.subnet_index + 1}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Delegate subnet to specific services if needed
+ # Emitted only when enable_aks is set; applied to every private subnet.
+ dynamic "delegation" {
+ for_each = var.instance.spec.enable_aks ? [1] : []
+ content {
+ name = "aks-delegation"
+ service_delegation {
+ name = "Microsoft.ContainerService/managedClusters"
+ actions = [
+ "Microsoft.Network/virtualNetworks/subnets/join/action",
+ ]
+ }
+ }
+ }
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [delegation, service_endpoints, name]
+ }
+}
+
+# Database Subnets
+resource "azurerm_subnet" "database" {
+ for_each = {
+ for subnet in local.database_subnets :
+ "${subnet.az}-${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-database-${each.value.az}-${each.value.subnet_index + 1}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Configure private endpoint network policies
+ private_endpoint_network_policies = "Disabled"
+
+ # Delegate to SQL services
+ # NOTE(review): the delegation is MySQL-flexible-server specific; a
+ # delegated subnet is tied to that service — confirm other database
+ # engines are not expected to use these subnets.
+ delegation {
+ name = "sql-delegation"
+ service_delegation {
+ name = "Microsoft.DBforMySQL/flexibleServers"
+ actions = [
+ "Microsoft.Network/virtualNetworks/subnets/join/action",
+ "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action",
+ "Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action"
+ ]
+ }
+ }
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [service_endpoints, delegation, name]
+ }
+}
+
+# Gateway Subnets (for VPN/ExpressRoute gateways)
+# NOTE(review): Azure virtual network gateways require a subnet literally
+# named "GatewaySubnet"; this prefixed custom name cannot host an actual
+# VPN/ExpressRoute gateway — confirm the intended use of these subnets.
+resource "azurerm_subnet" "gateway" {
+ for_each = {
+ for subnet in local.gateway_subnets :
+ "${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-gateway-subnet-${each.value.subnet_index}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [delegation, service_endpoints, name]
+ }
+}
+
+# Cache Subnets (for Redis and other caching services)
+# Keyed by subnet index only (not per-AZ, unlike public/private/database).
+resource "azurerm_subnet" "cache" {
+ for_each = {
+ for subnet in local.cache_subnets :
+ "${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-cache-subnet-${each.value.subnet_index}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [delegation, service_endpoints, name]
+ }
+}
+
+# Functions Subnets (dedicated for Azure Functions)
+resource "azurerm_subnet" "functions" {
+ for_each = {
+ for subnet in local.functions_subnets :
+ "${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-functions-subnet-${each.value.subnet_index}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Configure private endpoint network policies
+ private_endpoint_network_policies = "Disabled"
+
+ # Delegate to Azure Functions
+ # serverFarms delegation is what App Service / Functions VNet integration
+ # requires — presumably that is the consumer here; confirm with callers.
+ delegation {
+ name = "functions-delegation"
+ service_delegation {
+ name = "Microsoft.Web/serverFarms"
+ actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"]
+ }
+ }
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [service_endpoints, delegation, name]
+ }
+}
+
+# Private Link Service Subnets
+resource "azurerm_subnet" "private_link_service" {
+ for_each = {
+ for subnet in local.private_link_service_subnets :
+ "${subnet.subnet_index}" => subnet
+ }
+
+ name = "${local.name_prefix}-pls-subnet-${each.value.subnet_index}"
+ resource_group_name = azurerm_resource_group.main.name
+ virtual_network_name = azurerm_virtual_network.main.name
+ address_prefixes = [each.value.cidr_block]
+ service_endpoints = ["Microsoft.Storage"]
+
+ # Configure private link service network policies (disabled for Private Link Service)
+ # Hosting a Private Link Service requires these policies to be off.
+ private_link_service_network_policies_enabled = false
+
+ # Tolerate out-of-band changes to these attributes instead of reverting them.
+ lifecycle {
+ ignore_changes = [service_endpoints, name]
+ }
+}
diff --git a/modules/network/azure_vpc/0.2/variables.tf b/modules/network/azure_vpc/0.2/variables.tf
new file mode 100644
index 000000000..fa75696b6
--- /dev/null
+++ b/modules/network/azure_vpc/0.2/variables.tf
@@ -0,0 +1,242 @@
+#########################################################################
+# Facets Module Variables #
+# #
+# Auto-injected variables that every Facets module receives #
+#########################################################################
+
+# NOTE(review): presumably feeds local.name_prefix used throughout the
+# module's resource names — confirm in main.tf.
+variable "instance_name" {
+ description = "The architectural name for the resource as added in the Facets blueprint designer."
+ type = string
+
+ validation {
+ condition = can(regex("^[a-zA-Z0-9_-]+$", var.instance_name))
+ error_message = "Instance name must contain only alphanumeric characters, hyphens, and underscores."
+ }
+}
+
+variable "environment" {
+ description = "An object containing details about the environment."
+ type = object({
+ name = string
+ unique_name = string
+ cloud_tags = map(string)
+ })
+
+ # The type constraint already guarantees these attributes exist, so the
+ # previous can(var.environment.name) checks were vacuously true. Validate
+ # something meaningful instead: the names must be non-empty.
+ validation {
+ condition = length(var.environment.name) > 0 && length(var.environment.unique_name) > 0
+ error_message = "Environment name and unique_name must be non-empty strings."
+ }
+}
+
+# Opaque wiring from other modules; intentionally untyped.
+variable "inputs" {
+ description = "A map of inputs requested by the module developer."
+ type = any
+ default = {}
+}
+
+#########################################################################
+# Instance Configuration Schema #
+# #
+# Comprehensive validation for all module specifications #
+#########################################################################
+
+variable "instance" {
+ description = "The resource instance configuration"
+ type = object({
+ spec = object({
+ # Core VNet Configuration
+ vnet_cidr = string
+ region = string
+ availability_zones = list(string)
+
+ # Optional CIDR Strategy
+ use_fixed_cidr_allocation = optional(bool, false)
+
+ # Public Subnets Configuration
+ public_subnets = object({
+ count_per_az = number
+ subnet_size = string
+ })
+
+ # Private Subnets Configuration
+ private_subnets = object({
+ count_per_az = number
+ subnet_size = string
+ })
+
+ # Database Subnets Configuration
+ database_subnets = object({
+ count_per_az = number
+ subnet_size = string
+ })
+
+ # Specialized Subnet Toggles
+ enable_gateway_subnet = optional(bool, false)
+ enable_cache_subnet = optional(bool, false)
+ enable_functions_subnet = optional(bool, false)
+ enable_private_link_service_subnet = optional(bool, false)
+ enable_aks = optional(bool, false)
+
+ # NAT Gateway Configuration
+ nat_gateway = object({
+ strategy = string
+ })
+
+ # Private Endpoints Configuration
+ # NOTE: optional with no default — this attribute is null when omitted,
+ # so every consumer must null-guard before iterating it.
+ private_endpoints = optional(object({
+ enable_storage = optional(bool, true)
+ enable_sql = optional(bool, true)
+ enable_keyvault = optional(bool, true)
+ enable_acr = optional(bool, true)
+ enable_aks = optional(bool, false)
+ enable_cosmos = optional(bool, false)
+ enable_servicebus = optional(bool, false)
+ enable_eventhub = optional(bool, false)
+ enable_monitor = optional(bool, false)
+ enable_cognitive = optional(bool, false)
+ }))
+
+ # Additional Tags
+ tags = optional(map(string), {})
+ })
+ })
+
+ #########################################################################
+ # VNet CIDR Validation #
+ #########################################################################
+ validation {
+ condition = can(cidrhost(var.instance.spec.vnet_cidr, 0))
+ error_message = "VNet CIDR must be a valid CIDR block (e.g., 10.0.0.0/16)."
+ }
+
+ validation {
+ condition = can(regex("^([0-9]{1,3}\\.){3}[0-9]{1,3}/[0-9]{1,2}$", var.instance.spec.vnet_cidr))
+ error_message = "VNet CIDR must follow the format x.x.x.x/xx (e.g., 10.0.0.0/16)."
+ }
+
+ #########################################################################
+ # Region Validation #
+ #########################################################################
+ validation {
+ condition = length(var.instance.spec.region) > 0
+ error_message = "Azure region cannot be empty."
+ }
+
+ # NOTE(review): this list mixes deployable regions with geography and
+ # stage aliases (e.g. "global", "europe", "eastusstage") — confirm those
+ # aliases are actually valid deployment targets for this module.
+ validation {
+ condition = contains([
+ "eastus", "eastus2", "southcentralus", "westus2", "westus3", "australiaeast",
+ "southeastasia", "northeurope", "swedencentral", "uksouth", "westeurope",
+ "centralus", "southafricanorth", "centralindia", "eastasia", "japaneast",
+ "koreacentral", "canadacentral", "francecentral", "germanywestcentral",
+ "norwayeast", "switzerlandnorth", "uaenorth", "brazilsouth", "eastus2euap",
+ "qatarcentral", "centralusstage", "eastusstage", "eastus2stage", "northcentralusstage",
+ "southcentralusstage", "westusstage", "westus2stage", "asia", "asiapacific",
+ "australia", "brazil", "canada", "europe", "france", "germany", "global",
+ "india", "japan", "korea", "norway", "singapore", "southafrica", "switzerland",
+ "uae", "uk", "unitedstates"
+ ], var.instance.spec.region)
+ error_message = "Region must be a valid Azure region name."
+ }
+
+ #########################################################################
+ # Availability Zones Validation #
+ #########################################################################
+ validation {
+ condition = length(var.instance.spec.availability_zones) >= 1 && length(var.instance.spec.availability_zones) <= 3
+ error_message = "Availability zones must contain between 1 and 3 zones."
+ }
+
+ validation {
+ condition = alltrue([
+ for zone in var.instance.spec.availability_zones :
+ contains(["1", "2", "3"], zone)
+ ])
+ error_message = "Availability zones must be \"1\", \"2\", or \"3\"."
+ }
+
+ #########################################################################
+ # Public Subnets Validation #
+ #########################################################################
+ validation {
+ condition = var.instance.spec.public_subnets.count_per_az >= 0 && var.instance.spec.public_subnets.count_per_az <= 3
+ error_message = "Public subnets count per AZ must be between 0 and 3."
+ }
+
+ validation {
+ condition = contains([
+ "256", "512", "1024", "2048", "4096", "8192"
+ ], var.instance.spec.public_subnets.subnet_size)
+ error_message = "Public subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192."
+ }
+
+ #########################################################################
+ # Private Subnets Validation #
+ #########################################################################
+ validation {
+ condition = var.instance.spec.private_subnets.count_per_az >= 1 && var.instance.spec.private_subnets.count_per_az <= 3
+ error_message = "Private subnets count per AZ must be between 1 and 3."
+ }
+
+ validation {
+ condition = contains([
+ "256", "512", "1024", "2048", "4096", "8192"
+ ], var.instance.spec.private_subnets.subnet_size)
+ error_message = "Private subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192."
+ }
+
+ #########################################################################
+ # Database Subnets Validation #
+ #########################################################################
+ validation {
+ condition = var.instance.spec.database_subnets.count_per_az >= 0 && var.instance.spec.database_subnets.count_per_az <= 3
+ error_message = "Database subnets count per AZ must be between 0 and 3."
+ }
+
+ validation {
+ condition = contains([
+ "256", "512", "1024", "2048", "4096", "8192"
+ ], var.instance.spec.database_subnets.subnet_size)
+ error_message = "Database subnet size must be one of: 256, 512, 1024, 2048, 4096, 8192."
+ }
+
+ #########################################################################
+ # NAT Gateway Strategy Validation #
+ #########################################################################
+ validation {
+ condition = contains([
+ "single", "per_az"
+ ], var.instance.spec.nat_gateway.strategy)
+ error_message = "NAT Gateway strategy must be either 'single' or 'per_az'."
+ }
+
+ #########################################################################
+ # Logical Validations #
+ #########################################################################
+ validation {
+ condition = var.instance.spec.public_subnets.count_per_az > 0 || var.instance.spec.nat_gateway.strategy == "single"
+ error_message = "NAT Gateway requires at least one public subnet when using 'per_az' strategy."
+ }
+
+ # NOTE: the cap below counts only public/private/database subnets; the
+ # specialized subnets (gateway, cache, functions, PLS) are not included.
+ validation {
+ condition = (
+ (var.instance.spec.public_subnets.count_per_az > 0 ?
+ length(var.instance.spec.availability_zones) * var.instance.spec.public_subnets.count_per_az : 0) +
+ length(var.instance.spec.availability_zones) * var.instance.spec.private_subnets.count_per_az +
+ length(var.instance.spec.availability_zones) * var.instance.spec.database_subnets.count_per_az
+ ) <= 20
+ error_message = "Total number of subnets across all types and AZs cannot exceed 20."
+ }
+
+ validation {
+ condition = !var.instance.spec.enable_aks || var.instance.spec.private_subnets.count_per_az > 0
+ error_message = "AKS integration requires at least one private subnet per AZ."
+ }
+
+ #########################################################################
+ # CIDR Size Validation #
+ #########################################################################
+ validation {
+ condition = tonumber(split("/", var.instance.spec.vnet_cidr)[1]) <= 24
+ error_message = "VNet CIDR prefix must be /24 or larger (smaller number) to accommodate all configured subnets."
+ }
+}