Ravindra Singh for AWS Community Builders


Create an EKS Cluster Using Terraform

In this blog, we'll explore how to create an EKS cluster using a Terraform module, including setting up a node group, ECR, ACM, and other core components.

Amazon EKS (Elastic Kubernetes Service) provides a managed Kubernetes service that makes it easy to run Kubernetes on AWS without needing to manage your own control plane.

GitHub link: https://github.com/ravindrasinghh/Kubernetes-Playlist

Prerequisites
Before you begin, ensure that you have the following tools installed:

  • Terraform: Install Terraform by following the official installation guide.
  • AWS CLI: Install and configure the AWS CLI by following the AWS CLI installation guide.
  • kubectl: Install kubectl for interacting with your EKS cluster by following the kubectl installation guide.

Let’s Begin😎
👉🏻 **What are Terraform modules?**
A Terraform module is a set of organized configuration files within a specific directory. These modules bundle together resources focused on a particular task, helping to minimize the amount of code you need to write for similar infrastructure components.
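For example, a minimal module call looks like this (the module and inputs below are illustrative, not part of this project):

module "network" {
  source  = "terraform-aws-modules/vpc/aws" # a published module from the Terraform Registry
  version = "5.0.0"

  # Inputs are simply the variables the module exposes
  name = "demo-vpc"
  cidr = "10.0.0.0/16"
}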

Here is the structure for that:
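Lesson1/
├── acm.tf           # ACM certificate with DNS validation
├── data.tf          # AWS caller identity lookup
├── ecr.tf           # ECR repositories
├── eks.tf           # EKS cluster, node groups, and the VPC CNI IAM role
├── locals.tf        # per-workspace configuration lookups
├── output.tf        # cluster, OIDC, and certificate outputs
├── provider.tf      # backend and provider configuration
├── terraform.tfvars # environment definitions
└── variables.tf     # input variables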

Step 1: Clone the Repository
🧑🏻‍💻git clone https://github.com/ravindrasinghh/Kubernetes-Playlist.git
👨🏻‍💻cd Kubernetes-Playlist/Lesson1/

Step 2: Initialize and Apply Terraform

  1. terraform init: initializes the working directory and downloads the required providers and modules.

  2. terraform plan: previews the resources Terraform will create or change.

  3. terraform apply: provisions the EKS cluster and its supporting resources.

👉🏻acm.tf

module "acm_backend" {
  source      = "terraform-aws-modules/acm/aws"
  version     = "4.0.1"
  domain_name = "codedevops.cloud"
  subject_alternative_names = [
    "*.codedevops.cloud"
  ]
  zone_id             = data.aws_route53_zone.main.id
  validation_method   = "DNS"
  wait_for_validation = true
  tags = {
    Name = "${local.project}-${local.env}-backend-validation"
  }
}

data "aws_route53_zone" "main" {
  name = "codedevops.cloud." # Ensure the domain name ends with a dot

}
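The module exposes the validated certificate ARN (see output.tf below), which downstream resources can consume. As a sketch, with a hypothetical ALB and target group that are not part of this lesson:

resource "aws_lb_listener" "https" {
  load_balancer_arn = aws_lb.ingress.arn # hypothetical ALB defined elsewhere
  port              = 443
  protocol          = "HTTPS"
  certificate_arn   = module.acm_backend.acm_certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.app.arn # hypothetical target group
  }
}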

👉🏻data.tf

data "aws_caller_identity" "current" {}

👉🏻ecr.tf

resource "aws_ecr_repository" "foo" {
  for_each             = toset(local.ecr_names)
  name                 = each.key
  image_tag_mutability = "MUTABLE"
  tags = {
    Name = "${local.project}-${local.env}-ecr"
  }
}
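After applying, you can push images to the new repository. A typical session, using this project's account ID, region, and repository name (the myapp image is a placeholder):

aws ecr get-login-password --region ap-south-1 | \
  docker login --username AWS --password-stdin 434605749312.dkr.ecr.ap-south-1.amazonaws.com
docker tag myapp:latest 434605749312.dkr.ecr.ap-south-1.amazonaws.com/codedevops:latest
docker push 434605749312.dkr.ecr.ap-south-1.amazonaws.com/codedevops:latest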

👉🏻eks.tf


module "eks" {
  source                                 = "terraform-aws-modules/eks/aws"
  version                                = "20.13.1"
  cluster_name                           = local.cluster_name
  cluster_version                        = local.cluster_version
  cluster_enabled_log_types              = local.cluster_enabled_log_types
  cloudwatch_log_group_retention_in_days = 30
  cluster_endpoint_public_access         = true

  cluster_addons = {
    coredns = {
      most_recent                 = true
      resolve_conflicts_on_create = "OVERWRITE"
      configuration_values        = jsonencode(local.coredns_config)
    }
    kube-proxy = {
      most_recent = true
    }
    vpc-cni = {
      most_recent              = true
      service_account_role_arn = aws_iam_role.vpc_cni.arn
    }
  }

  vpc_id     = local.vpc_id
  subnet_ids = local.public_subnet_ids
  eks_managed_node_group_defaults = {
    # Placeholder default; each node group in terraform.tfvars sets its own
    # instance type, so m6a.large is never actually used.
    instance_types = ["m6a.large"]
  }

  eks_managed_node_groups = local.eks_managed_node_groups

  cluster_security_group_additional_rules = local.cluster_security_group_additional_rules

  enable_cluster_creator_admin_permissions = false

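  # Build one access entry per IAM principal from locals.tf, binding each
  # principal to a cluster-scoped access policy (viewer or admin).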
  access_entries = {
    for k in local.eks_access_entries : k.username => {
      kubernetes_groups = []
      principal_arn     = k.username
      policy_associations = {
        single = {
          policy_arn = k.access_policy
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }
  tags = local.default_tags
}
# IAM role for the VPC CNI add-on, assumed via IRSA (IAM Roles for Service Accounts)
resource "aws_iam_role" "vpc_cni" {
  name               = "${local.prefix}-vpc-cni"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "${module.eks.oidc_provider_arn}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${module.eks.oidc_provider}:sub": "system:serviceaccount:kube-system:aws-node"
        }
      }
    }
  ]
}
EOF
}
resource "aws_iam_role_policy_attachment" "vpc_cni" {
  role       = aws_iam_role.vpc_cni.name
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"

}

👉🏻locals.tf


data "aws_eks_cluster_auth" "eks" {
  name = module.eks.cluster_name
}

locals {
  environment                             = terraform.workspace
  k8s_info                                = lookup(var.environments, local.environment)
  cluster_name                            = lookup(local.k8s_info, "cluster_name")
  region                                  = lookup(local.k8s_info, "region")
  env                                     = lookup(local.k8s_info, "env")
  vpc_id                                  = lookup(local.k8s_info, "vpc_id")
  vpc_cidr                                = lookup(local.k8s_info, "vpc_cidr")
  public_subnet_ids                       = lookup(local.k8s_info, "public_subnet_ids")
  cluster_version                         = lookup(local.k8s_info, "cluster_version")
  cluster_enabled_log_types               = lookup(local.k8s_info, "cluster_enabled_log_types")
  eks_managed_node_groups                 = lookup(local.k8s_info, "eks_managed_node_groups")
  cluster_security_group_additional_rules = lookup(local.k8s_info, "cluster_security_group_additional_rules")
  coredns_config                          = lookup(local.k8s_info, "coredns_config")
  ecr_names                               = lookup(local.k8s_info, "ecr_names")

  prefix             = "${local.project}-${local.environment}-${var.region}"
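  # Expand {group => {user_arn = [...]}} from terraform.tfvars into one
  # {username, access_policy, group} object per IAM principal.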
  eks_access_entries = flatten([for k, v in local.k8s_info.eks_access_entries : [for s in v.user_arn : { username = s, access_policy = lookup(local.eks_access_policy, k), group = k }]])

  eks_access_policy = {
    viewer = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy",
    admin  = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
  }
  project    = "codedevops"
  account_id = data.aws_caller_identity.current.account_id
  default_tags = {
    environment = local.environment
    managed_by  = "terraform"
    project     = local.project
  }
}
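Because locals.tf keys everything off terraform.workspace, the active workspace selects which environment block in terraform.tfvars is used. For example (the staging workspace is hypothetical; only default exists in this lesson):

terraform workspace list           # "default" is the only workspace initially
terraform workspace new staging    # would select a matching "staging" block
terraform workspace select default # switch back before applying this lesson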

👉🏻output.tf

output "cluster_name" {
  value       = module.eks.cluster_name
  description = "The name of the created EKS cluster."
}

output "cluster_version" {
  value       = module.eks.cluster_version
  description = "The version of Kubernetes running on the EKS cluster."
}

output "cluster_endpoint" {
  value       = module.eks.cluster_endpoint
  description = "The endpoint for the EKS Kubernetes API server."
}

output "access_entries" {
  value = module.eks.access_entries
}

output "oidc_provider" {
  value = module.eks.oidc_provider
}

output "oidc_provider_arn" {
  value = module.eks.oidc_provider_arn

}
output "acm_certificate_arn" {
  value = module.acm_backend.acm_certificate_arn

}
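After terraform apply completes, any of these outputs can be read back from the CLI:

terraform output cluster_endpoint
terraform output -raw acm_certificate_arn # -raw prints the value without quotes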

👉🏻provider.tf

terraform {
  required_version = ">= 0.15.0"

  backend "s3" {
    bucket = "devsecops-backend-codedevops"
    key    = "secops-dev.tfstate"
    region = "ap-south-1"
  }

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.29.0"
    }
    random = {
      source  = "hashicorp/random"
      version = ">= 3.6.0"
    }
    template = {
      source  = "hashicorp/template"
      version = ">= 2.2.0"
    }
  }
}
provider "aws" {
  region              = var.region
  allowed_account_ids = ["434605749312"]

  default_tags {
    tags = local.default_tags
  }
}
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  token                  = data.aws_eks_cluster_auth.eks.token

}
provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
    token                  = data.aws_eks_cluster_auth.eks.token
  }
}

👉🏻terraform.tfvars

environments = {
  default = {
    # Global variables
    cluster_name                   = "codedevops-cluster"
    env                            = "default"
    region                         = "ap-south-1"
    vpc_id                         = "vpc-02af529e05c41b6bb"
    vpc_cidr                       = "10.0.0.0/16"
    public_subnet_ids              = ["subnet-09aeb297a112767b2", "subnet-0e25e76fb4326ce99"]
    cluster_version                = "1.29"
    cluster_endpoint_public_access = true
    ecr_names                      = ["codedevops"]

    # EKS variables
    eks_managed_node_groups = {
      generalworkload-v4 = {
        min_size       = 1
        max_size       = 1
        desired_size   = 1
        instance_types = ["m5a.xlarge"]
        capacity_type  = "SPOT"
        disk_size      = 60
        ebs_optimized  = true
        iam_role_additional_policies = {
          ssm_access        = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
          cloudwatch_access = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
          service_role_ssm  = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
          default_policy    = "arn:aws:iam::aws:policy/AmazonSSMManagedEC2InstanceDefaultPolicy"
        }
      }
    }
    cluster_security_group_additional_rules = {}

    # EKS Cluster Logging
    cluster_enabled_log_types = ["audit"]
    eks_access_entries = {
      viewer = {
        user_arn = []
      }
      admin = {
        user_arn = ["arn:aws:iam::434605749312:root"]
      }
    }
    # EKS Addons variables 
    coredns_config = {
      replicaCount = 1
    }
  }

}
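Since environments is a map keyed by workspace name, adding another environment means adding a sibling key next to default (a sketch with placeholder values):

environments = {
  default = {
    # ...the block shown above
  }
  staging = {
    cluster_name = "codedevops-cluster-staging" # placeholder
    env          = "staging"
    region       = "ap-south-1"
    # ...the remaining keys mirror the default block
  }
}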

👉🏻variables.tf

variable "region" {
  type        = any
  default     = "ap-south-1"
  description = "value of the region where the resources will be created"
}

variable "environments" {
  type = any

  description = "The environment configuration"
}

Step 3: Access Your EKS Cluster
Once the cluster is created, you can configure kubectl to interact with your EKS cluster using the following command:

aws eks update-kubeconfig --region ap-south-1 --name codedevops-cluster

You can then verify the cluster connectivity:

kubectl get nodes
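
Since the cluster also installs the coredns, kube-proxy, and vpc-cni add-ons, it is worth confirming their pods are healthy:

kubectl get pods -n kube-system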

Troubleshooting
If you encounter any issues, refer to the Terraform documentation or raise an issue in this repository.

🏴‍☠️
source link: https://github.com/ravindrasinghh/Kubernetes-Playlist/tree/master

If you prefer a video tutorial, there is also one to help guide you through creating the EKS cluster using Terraform.
