In this post I’ll show you how to set up Workload Identity in an AKS cluster using Terraform, and then deploy a pod with the Azure CLI that you’ll use to log in to Azure.

Long story short: once workload identity is configured and enabled, Kubernetes will inject the three environment variables needed to log in with the Azure CLI:

  • AZURE_FEDERATED_TOKEN_FILE
  • AZURE_CLIENT_ID
  • AZURE_TENANT_ID

You can then log in with the Azure CLI using the following command:

az login --federated-token "$(cat "$AZURE_FEDERATED_TOKEN_FILE")" --service-principal -u "$AZURE_CLIENT_ID" -t "$AZURE_TENANT_ID"
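
As a quick sanity check once the pod described later in this post is running, you can list the injected variables. The token file path in the comments below is the one the workload identity webhook typically mounts; treat the exact values as illustrative:

env | grep '^AZURE_'
# AZURE_CLIENT_ID=<client id of the managed identity>
# AZURE_TENANT_ID=<your tenant id>
# AZURE_FEDERATED_TOKEN_FILE=/var/run/secrets/azure/tokens/azure-identity-token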

Now let’s go step by step:

Using Terraform to create an AKS cluster with workload identity

Create variables.tf with the following contents

# Resource Group Name
variable "resource_group_name" {
  default = "aks-workload-identity"
}

# Location of the services
variable "location" {
  default = "West Europe"
}

# Name of the AKS cluster
variable "cluster_name" {
  default = "aks-cfm"
}

# DNS prefix for the AKS cluster
variable "dns_prefix" {
  default = "aks-cfm"
}

# Log Analytics Workspace Name
variable "log_workspace_name" {
  default = "aks-cfm-logs"
}

# Managed Identity Name
variable "managed_identity_name" {
  default = "aks-workload-identity"
}
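
These defaults are just a starting point; you can override any of them at plan or apply time without touching the file. For example (the values below are placeholders):

terraform apply -var="location=North Europe" -var="cluster_name=my-aks"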

Create providers.tf with the following contents

terraform {
  required_version = ">= 1.1.8"

  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "3.44.1"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.18.0"
    }
  }
}

provider "azurerm" {
  features {}
}

# Configuring the kubernetes provider with credentials from the AKS cluster
provider "kubernetes" {
  host                   = azurerm_kubernetes_cluster.k8s.kube_config.0.host
  client_certificate     = base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate)
  client_key             = base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.client_key)
  cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.k8s.kube_config.0.cluster_ca_certificate)
}
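
One thing to keep in mind: the kubernetes provider is configured from attributes of the azurerm_kubernetes_cluster resource, so it only becomes usable once the cluster exists. If the first apply ever fails because of this ordering, a common workaround is a targeted apply to create the cluster first (a sketch, not something this setup always needs):

terraform apply -target=azurerm_kubernetes_cluster.k8s
terraform apply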

Create main.tf with the following contents

data "azurerm_subscription" "current" {}

data "azurerm_client_config" "current" {}

# Create Resource Group
resource "azurerm_resource_group" "rg" {
  name     = var.resource_group_name
  location = var.location
}
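
The two data sources read the subscription and tenant of whatever credentials the azurerm provider is using. If you want to double-check which context that is before applying (assuming you authenticate via the Azure CLI), this should match what Terraform sees:

az account show --query "{subscriptionId: id, tenantId: tenantId}" -o table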

Create mi.tf with the following contents

# Create Managed Identity
resource "azurerm_user_assigned_identity" "mi" {
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location

  name = var.managed_identity_name
}

# Assign the Reader role to the Managed Identity
resource "azurerm_role_assignment" "reader" {
  scope                = azurerm_resource_group.rg.id
  role_definition_name = "Reader"
  principal_id         = azurerm_user_assigned_identity.mi.principal_id
}

# Federate the Managed Identity with the Kubernetes Service Account
resource "azurerm_federated_identity_credential" "federation" {
  name                = "aks-workload-identity"
  resource_group_name = azurerm_resource_group.rg.name
  audience            = ["api://AzureADTokenExchange"]
  issuer              = azurerm_kubernetes_cluster.k8s.oidc_issuer_url
  parent_id           = azurerm_user_assigned_identity.mi.id
  subject             = "system:serviceaccount:default:workload-identity-test-account"
}

Note: A federated identity credential is created to associate the managed identity with the workload-identity-test-account Service Account inside the AKS cluster.
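
After the stack is applied, you can verify the federation from the Azure side; the federated-credential subcommand requires a reasonably recent Azure CLI:

az identity federated-credential list \
  --identity-name aks-workload-identity \
  --resource-group aks-workload-identity \
  -o table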

Create log_analytics.tf with the following contents

resource "azurerm_log_analytics_workspace" "logs" {
  name                = var.log_workspace_name
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
  sku                 = "PerGB2018"
  retention_in_days   = 30
}
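
Once created, you can confirm the workspace and grab its id (the same value the oms_agent block in aks.tf references via Terraform) with:

az monitor log-analytics workspace show \
  --resource-group aks-workload-identity \
  --workspace-name aks-cfm-logs \
  --query id -o tsv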

Create vnet.tf with the following contents

resource "azurerm_virtual_network" "vnet" {
  name                = "aks-vnet"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
  address_space       = ["10.0.0.0/16"]
}

resource "azurerm_subnet" "aks-subnet" {
  name                 = "aks-subnet"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = ["10.0.1.0/24"]
}

Create aks.tf with the following contents

# Deploy Kubernetes
resource "azurerm_kubernetes_cluster" "k8s" {
  name                              = var.cluster_name
  location                          = azurerm_resource_group.rg.location
  resource_group_name               = azurerm_resource_group.rg.name
  dns_prefix                        = var.dns_prefix
  oidc_issuer_enabled               = true
  workload_identity_enabled         = true
  role_based_access_control_enabled = true

  oms_agent {
    log_analytics_workspace_id = azurerm_log_analytics_workspace.logs.id
  }

  default_node_pool {
    name                = "default"
    node_count          = 2
    vm_size             = "Standard_D2s_v3"
    os_disk_size_gb     = 30
    os_disk_type        = "Ephemeral"
    vnet_subnet_id      = azurerm_subnet.aks-subnet.id
    max_pods            = 15
    enable_auto_scaling = false
  }

  # Using Managed Identity
  identity {
    type = "SystemAssigned"
  }

  network_profile {
    # service_cidr is the range internal cluster services get their IPs from.
    # It must not overlap with any other range in your network environment,
    # including on-premises ranges reachable via ExpressRoute or a
    # Site-to-Site VPN connection.
    service_cidr = "172.0.0.0/16"
    # dns_service_ip should be the .10 address of the service IP range.
    dns_service_ip = "172.0.0.10"
    # docker_bridge_cidr lets the AKS nodes communicate with the underlying
    # management platform. It must not fall within the cluster's virtual
    # network range or overlap with other ranges in use on your network.
    docker_bridge_cidr = "172.17.0.1/16"
    network_plugin     = "azure"
    network_policy     = "calico"
  }
}

data "azurerm_resource_group" "node_resource_group" {
  name = azurerm_kubernetes_cluster.k8s.node_resource_group
}

# Assign the Contributor role to the AKS kubelet identity on the node
# resource group (a narrower role such as Virtual Machine Contributor
# may also be enough)
resource "azurerm_role_assignment" "kubelet_contributor" {
  scope                = data.azurerm_resource_group.node_resource_group.id
  role_definition_name = "Contributor"
  principal_id         = azurerm_kubernetes_cluster.k8s.kubelet_identity[0].object_id
}

# Assign the Network Contributor role to the cluster identity on the vnet
resource "azurerm_role_assignment" "kubelet_network_contributor" {
  scope                = azurerm_virtual_network.vnet.id
  role_definition_name = "Network Contributor"
  principal_id         = azurerm_kubernetes_cluster.k8s.identity[0].principal_id
}

# Create Service Account with the azure.workload.identity/client-id annotation
resource "kubernetes_service_account" "default" {
  metadata {
    name      = "workload-identity-test-account"
    namespace = "default"
    annotations = {
      "azure.workload.identity/client-id" = azurerm_user_assigned_identity.mi.client_id
    }
    labels = {
      "azure.workload.identity/use" = "true"
    }
  }
}

Note: The cluster has oidc_issuer_enabled set to true, and the service account is associated with the managed identity created in the previous step via the azure.workload.identity/client-id annotation.
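
If you want to see the issuer that the federated credential trusts, the cluster's OIDC issuer URL can be read back after deployment; it should match the issuer attribute used in mi.tf:

az aks show --resource-group aks-workload-identity --name aks-cfm \
  --query "oidcIssuerProfile.issuerUrl" -o tsv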

Deploy the cluster

Run the following commands:

terraform init
terraform apply -auto-approve
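
When the apply finishes, a quick way to confirm both features are on is to query the cluster. The exact property paths can vary slightly between API versions, so treat this as a sketch:

az aks show --resource-group aks-workload-identity --name aks-cfm \
  --query "{oidcIssuer: oidcIssuerProfile.enabled, workloadIdentity: securityProfile.workloadIdentity.enabled}" \
  -o table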

Deploy a pod with Azure CLI

Create a pod.yaml with the following contents

apiVersion: v1
kind: Pod
metadata:
  name: az-cli
  namespace: default
  labels:
    azure.workload.identity/use: "true"
spec:
  serviceAccountName: workload-identity-test-account
  containers:
    - name: az-cli
      image: mcr.microsoft.com/azure-cli
      ports:
        - containerPort: 80
      command:
        - sh
        - -c
        - sleep 1d

Note: The pod uses the service account created in the previous step, and the label azure.workload.identity/use is set to "true" to enable workload identity.

Deploy the Azure CLI pod

Run the following commands:

az aks get-credentials --resource-group aks-workload-identity --name aks-cfm
kubectl apply -f pod.yaml
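
Before exec-ing in, you can also confirm that the webhook mutated the pod, i.e. that the AZURE_* variables and the projected token volume were injected:

kubectl get pod az-cli -o yaml | grep -i -A1 azure
kubectl exec az-cli -- env | grep '^AZURE_'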

Test Azure CLI with Workload Identity

Exec into the pod

Run the following command:

kubectl exec -it az-cli -- /bin/bash

Login with Azure CLI

Run the following command:

az login --federated-token "$(cat "$AZURE_FEDERATED_TOKEN_FILE")" --service-principal -u "$AZURE_CLIENT_ID" -t "$AZURE_TENANT_ID"

Note: Once workload identity is enabled, Kubernetes injects the three environment variables needed to log in with the Azure CLI.
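
Since the managed identity only has the Reader role on the resource group, a read should succeed while a write gets denied. A minimal check (the group name comes from the variables above, and the failing call is intentional):

# Should succeed: Reader allows reads on the resource group
az group show --name aks-workload-identity -o table

# Should fail with AuthorizationFailed: Reader does not allow writes
az group create --name this-should-fail --location westeurope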

If everything went well, you should now be able to use the Azure CLI with the permissions granted to the managed identity.

Please find the complete samples here

Hope it helps!
