I have one Deployment configuration and one Service configuration:
Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dashboard-backend-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: dashboard-backend
  template:
    metadata:
      labels:
        app: dashboard-backend
    spec:
      containers:
        - name: dashboard-backend
          image: $BACKEND_IMAGE
          imagePullPolicy: Always
          env:
            - name: NODE_ENV
              value: $NODE_ENV
            - name: PORT
              value: '3000'
            - name: ACCESS_TOKEN_JWT_KEY
              value: $ACCESS_TOKEN_JWT_KEY
            - name: REFRESH_TOKEN_JWT_KEY
              value: $REFRESH_TOKEN_JWT_KEY
            - name: GOOGLE_OAUTH_CLIENT_ID
              value: $GOOGLE_OAUTH_CLIENT_ID
            - name: GOOGLE_OAUTH_CLIENT_SECRET
              value: $GOOGLE_OAUTH_CLIENT_SECRET
            - name: GOOGLE_OAUTH_REDIRECT_URI
              value: $GOOGLE_OAUTH_REDIRECT_URI
            - name: GH_OAUTH_CLIENT_ID
              value: $GH_OAUTH_CLIENT_ID
            - name: GH_OAUTH_CLIENT_SECRET
              value: $GH_OAUTH_CLIENT_SECRET
            - name: GITHUB_OAUTH_REDIRECT_URI
              value: $GITHUB_OAUTH_REDIRECT_URI
            - name: MIXPANEL_TOKEN
              value: $MIXPANEL_TOKEN
            - name: FRONTEND_URL
              value: $FRONTEND_URL
            - name: CLI_TOKEN_JWT_KEY
              value: $CLI_TOKEN_JWT_KEY
            - name: DATABASE_URL
              value: $DATABASE_URL
Service
apiVersion: v1
kind: Service
metadata:
  name: backend-service
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: $SSL_CERTIFICATE_ARN
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
spec:
  selector:
    app: dashboard-backend
  type: LoadBalancer
  ports:
    - name: https
      protocol: TCP
      port: 3000
      targetPort: 3000
I have an AWS EKS cluster configured. I run this command (with kubectl, of course, "connected" to my AWS EKS cluster):
kubectl apply -f=./k8s/backend-deployment.yaml -f=./k8s/backend-service.yaml
Output of command:
Using kubectl version: Client Version: v1.26.0
Kustomize Version: v4.5.7
Using aws-iam-authenticator version: {"Version":"0.6.2","Commit":"..."}
deployment.apps/dashboard-backend-deployment unchanged
service/backend-service unchanged
When I open the Load Balancers section of the EC2 service in the AWS console, there are no load balancers at all. Why?
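For what it's worth, the Service's status and events can be inspected with standard kubectl commands; if load balancer provisioning fails, the reason usually shows up in the Events section (these commands are generic, nothing specific to my setup):
kubectl get service backend-service        # EXTERNAL-IP stays <pending> if no load balancer was created
kubectl describe service backend-service   # check the Events section for provisioning errors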
These are the Terraform files I used to deploy my cluster:
eks-cluster:
data "aws_iam_policy_document" "eks_cluster_policy" {
version = "2012-10-17"
statement {
actions = ["sts:AssumeRole"]
effect = "Allow"
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "aws_iam_role" "cluster" {
name = "${var.project}-Cluster-Role"
assume_role_policy = data.aws_iam_policy_document.eks_cluster_policy.json
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-cluster-iam-role",
}
)
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.cluster.name
}
resource "aws_eks_cluster" "main" {
name = "${var.project}-cluster"
role_arn = aws_iam_role.cluster.arn
version = "1.24"
vpc_config {
subnet_ids = flatten([aws_subnet.public[*].id, aws_subnet.private[*].id])
endpoint_private_access = true
endpoint_public_access = true
public_access_cidrs = ["0.0.0.0/0"]
}
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-cluster",
}
)
depends_on = [
aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy
]
}
resource "aws_security_group" "eks_cluster" {
name = "${var.project}-cluster-sg"
description = "Cluster communication with worker nodes"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-cluster-sg"
}
)
}
resource "aws_security_group_rule" "cluster_inbound" {
description = "Allow worker nodes to communicate with the cluster API Server"
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "cluster_outbound" {
description = "Allow cluster API Server to communicate with the worker nodes"
from_port = 1024
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 65535
type = "egress"
}
EKS WORKER NODES
data "aws_iam_policy_document" "eks_node_policy" {
version = "2012-10-17"
statement {
actions = ["sts:AssumeRole"]
effect = "Allow"
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role" "node" {
name = "${var.project}-Worker-Role"
assume_role_policy = data.aws_iam_policy_document.eks_node_policy.json
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-node-iam-role",
}
)
}
resource "aws_iam_role_policy_attachment" "node_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.node.name
}
resource "aws_iam_role_policy_attachment" "node_AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.node.name
}
resource "aws_iam_role_policy_attachment" "node_AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.node.name
}
resource "aws_eks_node_group" "main" {
cluster_name = aws_eks_cluster.main.name
node_group_name = var.project
node_role_arn = aws_iam_role.node.arn
subnet_ids = aws_subnet.private[*].id
scaling_config {
desired_size = 1
max_size = 2
min_size = 1
}
ami_type = "AL2_x86_64"
capacity_type = "ON_DEMAND"
disk_size = 20
instance_types = ["t3.small"]
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-node-group",
}
)
depends_on = [
aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
]
}
resource "aws_security_group" "eks_nodes" {
name = "${var.project}-node-sg"
description = "Security group for all nodes in the cluster"
vpc_id = aws_vpc.main.id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-node-sg"
"kubernetes.io/cluster/${var.project}-cluster" = "owned"
}
)
}
resource "aws_security_group_rule" "nodes_internal" {
description = "Allow nodes to communicate with each other"
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.eks_nodes.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "nodes_cluster_inbound" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
protocol = "tcp"
security_group_id = aws_security_group.eks_nodes.id
source_security_group_id = aws_security_group.eks_cluster.id
to_port = 65535
type = "ingress"
}
VPC
resource "aws_vpc" "main" {
cidr_block = var.vpc_cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = merge(
var.tags,
{
Name = "${var.project}-vpc",
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
}
)
}
resource "aws_subnet" "public" {
count = var.availability_zones_count
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, count.index)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = merge(
var.tags,
{
Name = "${var.project}-public-subnet",
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
"kubernetes.io/role/elb" = 1
}
)
map_public_ip_on_launch = true
}
resource "aws_subnet" "private" {
count = var.availability_zones_count
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, count.index var.availability_zones_count)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = merge(
var.tags,
{
Name = "${var.project}-private-sg"
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
)
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-igw",
}
)
depends_on = [aws_vpc.main]
}
resource "aws_route_table" "primary" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
tags = merge(
var.tags,
{
Name = "${var.project}-primary-route-table",
}
)
}
resource "aws_route_table_association" "internet_access" {
count = var.availability_zones_count
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.primary.id
}
resource "aws_eip" "main" {
vpc = true
tags = merge(
var.tags,
{
Name = "${var.project}-ngw-ip"
}
)
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public[0].id
tags = merge(
var.tags,
{
Name = "${var.project}-ngw"
}
)
}
resource "aws_route" "main" {
route_table_id = aws_vpc.main.default_route_table_id
nat_gateway_id = aws_nat_gateway.main.id
destination_cidr_block = "0.0.0.0/0"
}
resource "aws_security_group" "public_sg" {
name = "${var.project}-Public-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-Public-sg",
}
)
}
resource "aws_security_group_rule" "sg_ingress_public_443" {
security_group_id = aws_security_group.public_sg.id
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "sg_ingress_public_80" {
security_group_id = aws_security_group.public_sg.id
type = "ingress"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "sg_egress_public" {
security_group_id = aws_security_group.public_sg.id
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group" "data_plane_sg" {
name = "${var.project}-Worker-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-Worker-sg",
}
)
}
resource "aws_security_group_rule" "nodes" {
description = "Allow nodes to communicate with each other"
security_group_id = aws_security_group.data_plane_sg.id
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 0), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 1), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "nodes_inbound" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
security_group_id = aws_security_group.data_plane_sg.id
type = "ingress"
from_port = 1025
to_port = 65535
protocol = "tcp"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "node_outbound" {
security_group_id = aws_security_group.data_plane_sg.id
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group" "control_plane_sg" {
name = "${var.project}-ControlPlane-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-ControlPlane-sg",
}
)
}
resource "aws_security_group_rule" "control_plane_inbound" {
security_group_id = aws_security_group.control_plane_sg.id
type = "ingress"
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 0), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 1), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "control_plane_outbound" {
security_group_id = aws_security_group.control_plane_sg.id
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
There are more files, of course, but I'm not sure they are relevant.
CodePudding user response:
You need to install the aws-load-balancer-controller.
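As a sketch, the controller is typically installed with Helm, assuming the IAM role and service account for it (IRSA) have already been set up as described in the AWS docs; replace <your-cluster-name> with your cluster name:
helm repo add eks https://aws.github.io/eks-charts
helm repo update
# Install the controller into kube-system, reusing a pre-created service account
helm install aws-load-balancer-controller eks/aws-load-balancer-controller \
  --namespace kube-system \
  --set clusterName=<your-cluster-name> \
  --set serviceAccount.create=false \
  --set serviceAccount.name=aws-load-balancer-controller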
CodePudding user response:
In order to create load balancers automatically based on Service and Ingress resources, you need to deploy the aws-load-balancer-controller in your EKS cluster.
AWS also has official documentation here.
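For example (an illustrative sketch, not a drop-in config), once the controller is running, a Service of type LoadBalancer is typically handed to it by adding the controller's documented annotations; the exact values depend on your requirements:
metadata:
  annotations:
    # Hand this Service to the AWS Load Balancer Controller (provisions an NLB)
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing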
Disclaimer: I have created the below-mentioned repo.
You can also find Terraform code to deploy and use the aws-load-balancer-controller in the ishuar/terraform-eks GitHub repo, along with an EKS module, for practical reference.
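Since your cluster is already managed with Terraform, the controller can also be installed from Terraform via the Helm provider. This is only a minimal sketch (not taken from the repo above); it assumes the Helm provider is configured against your cluster and that the IRSA role and the aws-load-balancer-controller service account already exist:
resource "helm_release" "aws_load_balancer_controller" {
  name       = "aws-load-balancer-controller"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-load-balancer-controller"
  namespace  = "kube-system"

  # Cluster name taken from your existing aws_eks_cluster.main resource
  set {
    name  = "clusterName"
    value = aws_eks_cluster.main.name
  }

  # Assumes the service account was created separately with an IRSA role attached
  set {
    name  = "serviceAccount.create"
    value = "false"
  }
  set {
    name  = "serviceAccount.name"
    value = "aws-load-balancer-controller"
  }
}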