# --- Interactive environment for the EKS workshop jump host ---
export PS1="\n[\u@\h \W]$ "
cd /home/ssm-user
# NOTE(review): 'sudo su' opens a child root shell; the exports below are
# copy-paste steps meant to run inside that shell, not before it.
sudo su
# Discover the region from the EC2 instance-identity document (IMDSv1).
# (A hard-coded region here would be clobbered by this lookup anyway.)
export AWS_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')
export CLUSTER_NAME=eks-test
export NODEGROUP_NAME=$CLUSTER_NAME-nodegroup
export EKS_VERSION=1.20
export TOMCAT_DOWNLOAD_URL=https://github.com/xuemark/workshop-eks-new/raw/master/tomcat.tar.gz
export AWS_ACCOUNTID=$(aws sts get-caller-identity | jq -r '.Account')
export ECR_URL=$AWS_ACCOUNTID.dkr.ecr.$AWS_REGION.amazonaws.com/mytomcat:1
# Workshop convention: the S3 bucket is named after this instance's ID.
export S3_BUCKET=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
echo -e "S3 BUCKET:\n$S3_BUCKET"
# --- Add ENV to ~/.bash_profile ---
# Persist the workshop environment into ~/.bash_profile so every new
# login shell picks it up automatically.
cd /home/ssm-user
sudo su
export AWS_ACCOUNTID=$(aws sts get-caller-identity | jq -r '.Account')
export AWS_REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')
# Unquoted EOF delimiter: $AWS_REGION, $AWS_ACCOUNTID and the $(curl ...)
# below expand NOW, so the profile stores resolved values. Escaped
# \$CLUSTER_NAME is written literally and expands at login time, keeping
# NODEGROUP_NAME consistent with the interactive setup above
# (eks-test-nodegroup, not plain "nodegroup").
cat <<EOF > ~/.bash_profile
export PS1="\n[\u@\h \W]$ "
export AWS_REGION=$AWS_REGION
export CLUSTER_NAME=eks-test
export NODEGROUP_NAME=\$CLUSTER_NAME-nodegroup
export EKS_VERSION=1.20
export TOMCAT_DOWNLOAD_URL=https://github.com/xuemark/workshop-eks-new/raw/master/tomcat.tar.gz
export AWS_ACCOUNTID=$AWS_ACCOUNTID
export ECR_URL=$AWS_ACCOUNTID.dkr.ecr.$AWS_REGION.amazonaws.com/mytomcat:1
export S3_BUCKET=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
EOF
source ~/.bash_profile
# --- Create EKS Nodegroup Script - x86 ---
# Create the x86 managed nodegroup: t3.medium, 10 GiB volumes, autoscaling
# between 2 and 5 nodes, placed in private subnets.
eksctl create nodegroup \
  --cluster "${CLUSTER_NAME}" \
  --region "${AWS_REGION}" \
  --version "${EKS_VERSION}" \
  --name "${NODEGROUP_NAME}" \
  --instance-name "${CLUSTER_NAME}-${NODEGROUP_NAME}-managed" \
  --node-type t3.medium \
  --node-volume-size 10 \
  --nodes 2 --nodes-min 2 --nodes-max 5 \
  --node-private-networking \
  --managed \
  --alb-ingress-access --asg-access --full-ecr-access
# --- (optional) Create the EKS cluster itself ---
# (optional) Create the control plane only (no nodegroup) inside existing
# private subnets. Replace the <PrivateSubnetN> placeholders with real
# subnet IDs before running — the command is not runnable as written.
eksctl create cluster --name $CLUSTER_NAME --region $AWS_REGION --version $EKS_VERSION --without-nodegroup --vpc-private-subnets <PrivateSubnet1>,<PrivateSubnet2>,<PrivateSubnet3>
# --- (optional) Create EKS Nodegroup Script - ARM ---
# Bring the core add-ons (CoreDNS, kube-proxy, aws-node CNI) in line with
# the cluster version before adding nodes.
for addon in coredns kube-proxy aws-node; do
  eksctl utils "update-${addon}" --approve --cluster "${CLUSTER_NAME}" --region "${AWS_REGION}"
done
# Create the ARM (Graviton) managed nodegroup: t4g.medium, 10 GiB volumes,
# autoscaling between 2 and 5 nodes, placed in private subnets.
eksctl create nodegroup \
  --cluster "${CLUSTER_NAME}" \
  --region "${AWS_REGION}" \
  --version "${EKS_VERSION}" \
  --name "${NODEGROUP_NAME}-arm" \
  --instance-name "${CLUSTER_NAME}-${NODEGROUP_NAME}-arm-managed" \
  --node-type t4g.medium \
  --node-volume-size 10 \
  --nodes 2 --nodes-min 2 --nodes-max 5 \
  --node-private-networking \
  --managed \
  --alb-ingress-access --asg-access --full-ecr-access
# Point kubectl at the new cluster, then confirm the nodes registered.
aws eks update-kubeconfig --name "${CLUSTER_NAME}" --region "${AWS_REGION}"
kubectl get node
# Expected output:
#   NAME                                             STATUS   ROLES    AGE     VERSION
#   ip-10-10-5-168.ap-southeast-1.compute.internal   Ready    <none>   2m4s    v1.18.8-eks-7c9bda
#   ip-10-10-6-53.ap-southeast-1.compute.internal    Ready    <none>   2m14s   v1.18.8-eks-7c9bda
# Look up the cluster's security group, its VPC, and that VPC's CIDR block.
# NOTE(review): the tag filter assumes exactly one matching security group;
# multiple matches would yield a multi-line value — confirm for this account.
SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \
  --filters "Name=tag:aws:eks:cluster-name,Values=$CLUSTER_NAME" \
  --region "$AWS_REGION" | jq -r '.SecurityGroups[].GroupId')
VPC_ID=$(aws eks describe-cluster --name "$CLUSTER_NAME" --region "$AWS_REGION" \
  | jq -r '.cluster.resourcesVpcConfig.vpcId')
VPC_CIDR=$(aws ec2 describe-vpcs --vpc-ids "$VPC_ID" --region "$AWS_REGION" \
  | jq -r '.Vpcs[].CidrBlock')
# Open HTTP/HTTPS from within the VPC, plus the Kubernetes NodePort range
# (30000-32767) from the VPC and — workshop convenience only — from anywhere.
# Same four rules, same order, as issuing the commands individually.
while read -r port cidr; do
  aws ec2 authorize-security-group-ingress \
    --group-id "$SECURITY_GROUP_ID" \
    --region "$AWS_REGION" \
    --protocol tcp \
    --port "$port" \
    --cidr "$cidr"
done <<EOF
80 $VPC_CIDR
443 $VPC_CIDR
30000-32767 $VPC_CIDR
30000-32767 0.0.0.0/0
EOF