Terraform Azure Kubernetes node pool recreates every time Terraform is applied


I am trying to create an Azure Kubernetes cluster with one additional node pool. Please find the snippets below:


resource "azurerm_kubernetes_cluster" "k8s_cluster" {
  lifecycle {
    ignore_changes = [
      default_node_pool
    ]
    prevent_destroy = false
  }

  key_vault_secrets_provider {
    secret_rotation_enabled = true
  }

  private_cluster_enabled = true
  name                    = var.cluster_name
  location                = var.location
  resource_group_name     = var.rg_name
  dns_prefix              = var.dns_prefix
  kubernetes_version      = var.kubernetes_version
  # node_resource_group = var.resource_group_name

  default_node_pool {
    name                  = var.default_node_pool.name
    node_count            = var.default_node_pool.node_count
    max_count             = var.default_node_pool.max_count
    min_count             = var.default_node_pool.min_count
    vm_size               = var.default_node_pool.vm_size
    os_disk_size_gb       = var.default_node_pool.os_disk_size_gb
    vnet_subnet_id        = var.vnet_subnet_id
    max_pods              = var.default_node_pool.max_pods
    type                  = var.default_node_pool.agent_pool_type
    enable_node_public_ip = var.default_node_pool.enable_node_public_ip
    enable_auto_scaling   = var.default_node_pool.enable_auto_scaling

    tags = merge(var.common_tags)
  }

  linux_profile {
    admin_username = var.admin_username
    ssh_key {
      key_data = file("${path.module}/${var.ssh_public_key}")
    }
  }

  identity {
    type = var.identity
  }

  network_profile {
    network_plugin    = var.network_plugin    #azure
    network_policy    = var.network_policy    #"azure"
    load_balancer_sku = var.load_balancer_sku #"standard"
    # pod_cidr  = var.pod_cidr | When network_plugin is set to azure - the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set.
  }

  tags = merge({ "Name" : var.cluster_name }, var.common_tags)
}


data "azurerm_kubernetes_service_versions" "current" {
  location = "Germany West Central"
}
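
Note that this data source is declared but never referenced in the snippets above. If the intent is to track the latest supported version instead of hard-coding kubernetes_version and orchestrator_version, it would typically be wired in like this (a sketch only, not part of the original module):

# Inside azurerm_kubernetes_cluster.k8s_cluster:
kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version

# Inside azurerm_kubernetes_cluster_node_pool.gp_nodes:
orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version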

# Node Pool
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  zones                 = var.np_availability_zones  #[1, 2, 3]
  enable_auto_scaling   = var.np_enable_auto_scaling #true
  kubernetes_cluster_id = azurerm_kubernetes_cluster.k8s_cluster.id
  node_count            = var.np_node_count
  max_count             = var.np_max_count
  min_count             = var.np_min_count
  mode                  = var.np_mode 
  name                  = var.np_name 
  orchestrator_version  = "1.22.15"  
  os_disk_size_gb       = 30
  os_type               = var.np_os_type  
  vm_size               = var.np_vm_size  
  priority              = var.np_priority
  node_labels           = merge({ "Name" : var.np_name }, var.common_tags)

  lifecycle {
    ignore_changes = [
      kubernetes_cluster_id
    ]
  }

  #pod_subnet_id = ""
  tags = merge(
    { "Name" : var.np_name },
  var.common_tags)
}

But on every terraform plan/apply I get the following change, which forces the node pool to be re-created. Can someone help me understand why this is happening?

# module.aks_cluster.azurerm_kubernetes_cluster_node_pool.gp_nodes must be replaced
-/+ resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
      - custom_ca_trust_enabled = false -> null
      - enable_host_encryption  = false -> null
      - enable_node_public_ip   = false -> null
      - fips_enabled            = false -> null
      ~ id                      = "/subscriptions/<SOME-VALUE>/resourceGroups/shared-rg/providers/Microsoft.ContainerService/managedClusters/test-cluster/agentPools/gpnodes" -> (known after apply)
      ~ kubelet_disk_type       = "OS" -> (known after apply)
      - max_count               = 0 -> null
      ~ max_pods                = 30 -> (known after apply)
      - min_count               = 0 -> null
        name                    = "gpnodes"
      - node_taints             = [] -> null
      ~ os_sku                  = "Ubuntu" -> (known after apply)
        tags                    = {
            "Name"        = "test-cluster"
            "developedby" = "jana"
            "environment" = "test"
            "managedby"   = "devops"
        }
      - vnet_subnet_id          = "/subscriptions/<SOME-VALUE>/resourceGroups/shared-rg/providers/Microsoft.Network/virtualNetworks/shared-network/subnets/aks-subnet-test" -> null # forces replacement
        # (15 unchanged attributes hidden)
    }
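
The subnet the existing pool is actually attached to can also be read back from the state, which helps pinpoint the attribute (resource address taken from the plan output above; the grep is only a filter):

terraform state show 'module.aks_cluster.azurerm_kubernetes_cluster_node_pool.gp_nodes' | grep vnet_subnet_id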

CodePudding user response:

I was able to fix this issue by setting vnet_subnet_id in the azurerm_kubernetes_cluster_node_pool as below. Because the cluster's default_node_pool specifies a vnet_subnet_id, Azure places the additional pool in that same subnet, but my node pool configuration left the attribute unset; on every plan Terraform therefore tried to change it from the real subnet ID back to null, and since vnet_subnet_id forces replacement, the pool was re-created.

# Node Pool
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  . . .
  vnet_subnet_id        = var.vnet_subnet_id
  . . .
}
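
For clarity, here is a fuller sketch of that same resource with the one added line, everything else unchanged (variable names as in the question's module):

# Node pool pinned to the same subnet as the default node pool, so the planned
# value no longer drifts from the real subnet ID back to null.
resource "azurerm_kubernetes_cluster_node_pool" "gp_nodes" {
  kubernetes_cluster_id = azurerm_kubernetes_cluster.k8s_cluster.id
  name                  = var.np_name
  vm_size               = var.np_vm_size
  vnet_subnet_id        = var.vnet_subnet_id # same subnet as default_node_pool
  # ... remaining arguments exactly as in the question ...
}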

CodePudding user response:

azurerm_kubernetes_cluster_node_pool manages an additional node pool within a Kubernetes cluster; the pool is attached to a subnet in a VNet via its vnet_subnet_id.

Here is the configuration I used to replicate the setup; the cluster and node pool can be used in conjunction with a VNet and subnet.

data "azurerm_resource_group" "example" {
  name     = "v-swarna-mindtree"
}
data "azuread_client_config" "current" {}


resource "azurerm_virtual_network" "example" {
  name                = "example-network"
  location            = data.azurerm_resource_group.example.location
  resource_group_name = data.azurerm_resource_group.example.name
  address_space       = ["10.0.0.0/16"]
  dns_servers         = ["10.0.0.4", "10.0.0.5"]

  subnet {
    name           = "subnet1"
    address_prefix = "10.0.1.0/24"
  }

 tags = {
    environment = "Production"
  }
}

resource "azurerm_kubernetes_cluster" "cluster" {
   name                    = "swarnademo-cluster"
  location                = data.azurerm_resource_group.example.location
  resource_group_name     = data.azurerm_resource_group.example.name
  dns_prefix              = "swarnaexampleaks"
  kubernetes_version      = "1.22.11"

  default_node_pool {
        name       = "linux"
        node_count = 6
        vm_size    = "Standard_D4s_v3"
        max_pods   = 200
    }
    network_profile {
        network_plugin = "azure"
    }

    service_principal {
        client_id     = "8f12c4f7-5250-4454-90ba-654ac9ead9d2"
        client_secret = "EAX8Q~HXyRRR38q75bOHZpOjyEsQmMmoPx_DJbb7"
    }

}

resource "azurerm_kubernetes_cluster_node_pool" "system" {
    name                  = "sys"
    kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id
    vm_size               = "Standard_D2s_v3"
    node_count            = 2
    os_type = "Linux"
}
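
Note that this replication does not set vnet_subnet_id on either pool. To actually pin both pools to the example subnet (and avoid the drift described in the question), the subnet ID has to be referenced explicitly; a minimal sketch, assuming the inline subnet block above is replaced by a standalone azurerm_subnet resource:

# Sketch: manage the subnet as its own resource (instead of the inline
# "subnet" block inside azurerm_virtual_network) so its ID can be referenced.
resource "azurerm_subnet" "subnet1" {
  name                 = "subnet1"
  resource_group_name  = data.azurerm_resource_group.example.name
  virtual_network_name = azurerm_virtual_network.example.name
  address_prefixes     = ["10.0.1.0/24"]
}

# Then, in the default_node_pool block and in
# azurerm_kubernetes_cluster_node_pool.system:
#   vnet_subnet_id = azurerm_subnet.subnet1.id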

The provider file is as follows:

terraform {
  required_version = "~>1.3.3"
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">=3.0.0"
    }
  }
}

provider "azurerm" {
  features {}
  skip_provider_registration = true
}

Step 2:
Run the below commands to deploy the infrastructure:

terraform plan 
terraform apply -auto-approve


Step 3:
Running the same commands again, we can observe that only the subnet is refreshed; the cluster node pool is not replaced.
Plan reference:

Apply reference:


