Commit b3b1caf8 authored by Chris Merrett

Initial commit

.DS_Store
.terraform/
config-map-aws-auth.yaml
kubeconfig
image: alpine:latest

variables:
  TERRAFORM_URL: "https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip"

before_script:
  - apk update && apk add ca-certificates && update-ca-certificates && apk add openssl
  - wget -O /tmp/terraform.zip $TERRAFORM_URL
  - unzip /tmp/terraform.zip -d /usr/local/bin

test:
  script: terraform init && terraform validate -check-variables=false
resource "aws_eks_cluster" "cluster" {
name = "${var.cluster_name}"
role_arn = "${aws_iam_role.masters.arn}"
vpc_config {
security_group_ids = ["${aws_security_group.masters.id}"]
subnet_ids = ["${var.private_subnets}"]
}
depends_on = [
"aws_iam_role_policy_attachment.masters_cluster_policy",
"aws_iam_role_policy_attachment.masters_service_policy",
]
}
resource "aws_security_group" "masters" {
name = "${var.cluster_name}-eks-masters"
description = "Cluster communication with EKS worker nodes"
vpc_id = "${var.vpc_id}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "${var.cluster_name}-eks-masters"
}
}
resource "aws_security_group_rule" "masters_api_allow_from_workers" {
description = "Allow an EKS workers pods to communicate with the EKS cluster API Server"
from_port = 443
protocol = "tcp"
security_group_id = "${aws_security_group.masters.id}"
source_security_group_id = "${aws_security_group.workers.id}"
to_port = 443
type = "ingress"
}
resource "aws_iam_role" "masters" {
name = "${var.cluster_name}-eks-masters"
assume_role_policy = "${data.aws_iam_policy_document.masters.json}"
}
resource "aws_iam_role_policy_attachment" "masters_cluster_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.masters.name}"
}
resource "aws_iam_role_policy_attachment" "masters_service_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.masters.name}"
}
data "aws_region" "current" {}
data "aws_iam_policy_document" "masters" {
statement {
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = [
"eks.amazonaws.com",
]
}
}
}
data "aws_ami" "eks_worker" {
filter {
name = "name"
values = ["eks-worker-*"]
}
most_recent = true
owners = ["602401143452"] # Amazon
}
data "aws_iam_policy_document" "workers" {
statement {
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = [
"ec2.amazonaws.com",
]
}
}
}
data "template_file" "kubeconfig" {
template = "${file("${path.module}/templates/kubeconfig.tpl")}"
vars {
cluster_name = "${var.cluster_name}"
endpoint = "${aws_eks_cluster.cluster.endpoint}"
region = "${data.aws_region.current.name}"
cluster_auth_base64 = "${aws_eks_cluster.cluster.certificate_authority.0.data}"
}
}
data "template_file" "config_map_aws_auth" {
template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
vars {
role_arn = "${aws_iam_role.workers.arn}"
}
}
data "template_file" "workers_userdata" {
template = "${file("${path.module}/templates/userdata.tpl")}"
vars {
region = "${data.aws_region.current.name}"
cluster_name = "${var.cluster_name}"
endpoint = "${aws_eks_cluster.cluster.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.cluster.certificate_authority.0.data}"
max_pod_count = "${var.max_pods_per_worker}"
alt_dns_cluster_ip = "${var.alt_dns_cluster_ip}"
additional_userdata = "${var.additional_userdata}"
}
}
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "./kubeconfig"
}
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "./config-map-aws-auth.yaml"
}
resource "null_resource" "configure_kubectl" {
provisioner "local-exec" {
command = "kubectl apply -f ./config-map-aws-auth.yaml --kubeconfig ./kubeconfig"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
}
}
provider "null" {}
provider "template" {}
output "cluster_id" {
description = "The name/id of the EKS cluster"
value = "${aws_eks_cluster.cluster.id}"
}
output "cluster_certificate_authority_data" {
description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster"
value = "${aws_eks_cluster.cluster.certificate_authority.0.data}"
}
output "cluster_endpoint" {
description = "The endpoint for your EKS Kubernetes API"
value = "${aws_eks_cluster.cluster.endpoint}"
}
output "cluster_version" {
description = "The Kubernetes server version for the EKS cluster"
value = "${aws_eks_cluster.cluster.version}"
}
output "masters_sg_id" {
description = "Security group ID attached to the EKS cluster masters"
value = "${aws_security_group.masters.id}"
}
output "workers_sg_id" {
description = "Security group ID attached to the EKS cluster workers"
value = "${aws_security_group.workers.id}"
}
output "worker_iam_role_name" {
description = "IAM role name attached to EKS cluster workers"
value = "${aws_iam_role.workers.name}"
}
output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this EKS cluster."
value = "${data.template_file.config_map_aws_auth.rendered}"
}
output "kubeconfig" {
description = "kubectl config file contents for this EKS cluster."
value = "${data.template_file.kubeconfig.rendered}"
}
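The outputs above can be passed straight through by a calling configuration. The lines below are an illustrative sketch only, assuming this module has been instantiated under the hypothetical name "eks" (see the usage example at the end of this commit):

# Hypothetical pass-through outputs in a calling configuration; the module
# name "eks" is illustrative and not part of this commit.
output "eks_cluster_endpoint" {
  value = "${module.eks.cluster_endpoint}"
}

output "eks_kubeconfig" {
  value = "${module.eks.kubeconfig}"
}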
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ${role_arn}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
apiVersion: v1
preferences: {}
kind: Config

clusters:
- cluster:
    server: ${endpoint}
    certificate-authority-data: ${cluster_auth_base64}
  name: ${cluster_name}

contexts:
- context:
    cluster: ${cluster_name}
    user: ${cluster_name}-aws
  name: ${cluster_name}

current-context: ${cluster_name}

users:
- name: ${cluster_name}-aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
        - "token"
        - "-i"
        - "${cluster_name}"
#!/bin/bash -xe
# Certificate Authority config
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${cluster_auth_base64}" | base64 -d >$CA_CERTIFICATE_FILE_PATH
# Authentication
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${region},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,${max_pod_count},g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
# DNS cluster configuration
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]]; then DNS_CLUSTER_IP=${alt_dns_cluster_ip}; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
# start services
systemctl daemon-reload
systemctl restart kubelet kube-proxy
# Install cron
yum -y install crontabs && \
chkconfig crond on && \
service crond start
# Add Steamhaus user
adduser steamhaus && \
passwd -l steamhaus && \
mkdir -p /home/steamhaus/bin && \
mkdir -p /home/steamhaus/.ssh && \
chmod 700 /home/steamhaus/.ssh
# Ensure Steamhaus user has full passwordless root
echo "steamhaus ALL=(ALL)NOPASSWD:ALL" > /etc/sudoers.d/steamhaus && \
chmod 440 /etc/sudoers.d/steamhaus
# Create Steamhaus SSH credential update script
wget --no-check-certificate "https://gist.githubusercontent.com/chrisfu/87b642951aadafa62b99/raw/sh_pubkey_update.sh" -O /home/steamhaus/bin/sh_pubkey_update.sh && \
sed -i 's/`whoami`/steamhaus/g' /home/steamhaus/bin/sh_pubkey_update.sh && \
chmod 755 /home/steamhaus/bin/sh_pubkey_update.sh
# Perform first-time update of Steamhaus SSH credentials and, if the first run
# succeeds, add the update script to cron to run at 1am every morning
/home/steamhaus/bin/sh_pubkey_update.sh && \
echo "0 1 * * * /home/steamhaus/bin/sh_pubkey_update.sh" > /var/spool/cron/steamhaus
# Ensure everything within Steamhaus home directory is owned by Steamhaus
chown -R steamhaus: /home/steamhaus/
# Allow user supplied userdata code
${additional_userdata}
variable "cluster_name" {
description = "Name for the EKS cluster"
}
variable "cluster_version" {
description = "Kubernetes version to use for the EKS cluster"
default = "1.10"
}
variable "vpc_id" {
description = "VPC ID to use for cluster - ensure VPC is properly enabled for EKS using tf_mod_aws_vpc"
}
variable "private_subnets" {
description = "Private subnets for the EKS master and workers to be present within - requires the use of NAT gateways!"
type = "list"
}
variable "additional_userdata" {
description = "Additional userdata in bash form to be executed at first boot for EKS workers"
default = ""
}
variable "desired_workers" {
description = "Desired number of EKS worker nodes"
default = 3
}
variable "min_workers" {
description = "Minimum number of EKS worker nodes"
default = 1
}
variable "max_workers" {
description = "Maximum number of EKS worker nodes"
default = 3
}
variable "ebs_optimized_workers" {
description = "Optionally enable EBS optimization for EKS worker nodes"
default = true
}
variable "instance_type" {
description = "Instance type for EKS worker nodes"
default = "m4.large"
}
variable "max_pods_per_worker" {
description = "Maximum number of pods per EKS worker node - see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI"
default = "20"
}
variable "alt_dns_cluster_ip" {
description = "Alternate DNS cluster IP address on different (non 10.x.x.x) range - this is a fallback"
default = "172.20.0.10"
}
resource "aws_launch_configuration" "workers" {
associate_public_ip_address = false
ebs_optimized = "${var.ebs_optimized_workers}"
iam_instance_profile = "${aws_iam_instance_profile.workers.name}"
image_id = "${data.aws_ami.eks_worker.id}"
instance_type = "${var.instance_type}"
name_prefix = "${var.cluster_name}-eks-workers"
security_groups = ["${aws_security_group.workers.id}"]
user_data_base64 = "${base64encode(data.template_file.workers_userdata.rendered)}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_group" "workers" {
desired_capacity = "${var.desired_workers}"
launch_configuration = "${aws_launch_configuration.workers.id}"
max_size = "${var.max_workers}"
min_size = "${var.min_workers}"
name = "${var.cluster_name}-eks-workers"
vpc_zone_identifier = ["${var.private_subnets}"]
tag {
key = "Name"
value = "${var.cluster_name}-eks-workers"
propagate_at_launch = true
}
tag {
key = "kubernetes.io/cluster/${var.cluster_name}"
value = "owned"
propagate_at_launch = true
}
}
resource "aws_security_group" "workers" {
name = "${var.cluster_name}-eks-workers"
description = "Security group for all EKS worker nodes in the cluster"
vpc_id = "${var.vpc_id}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = "${
map(
"Name", "${var.cluster_name}-eks-workers",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
)
}"
}
resource "aws_security_group_rule" "workers_ingress" {
description = "Allow EKS worker nodes to communicate with each other"
from_port = 0
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.workers.id}"
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "workers_allow_masters" {
description = "Allow EKS worker node Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.masters.id}"
to_port = 65535
type = "ingress"
}
resource "aws_iam_role" "workers" {
name = "${var.cluster_name}-eks-workers"
assume_role_policy = "${data.aws_iam_policy_document.workers.json}"
}
resource "aws_iam_role_policy_attachment" "workers_node_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_cni_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_ecr_registry" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_instance_profile" "workers" {
name = "${var.cluster_name}-eks-workers-instance-profile"
role = "${aws_iam_role.workers.name}"
}
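Taken together, the files in this commit form a reusable EKS module. Below is a minimal usage sketch in the same Terraform 0.11 syntax; the module name, source path and network values are hypothetical placeholders and not part of this commit:

# Hypothetical caller configuration - the module name, source path, VPC ID and
# subnet IDs below are placeholders only.
module "eks" {
  source = "./modules/tf_mod_aws_eks" # hypothetical local path to this module

  cluster_name    = "example"
  vpc_id          = "vpc-0123456789abcdef0"                # placeholder VPC ID
  private_subnets = ["subnet-aaaa1111", "subnet-bbbb2222"] # placeholder private subnet IDs

  # Optional overrides; defaults are declared in the variables above
  instance_type   = "m4.large"
  desired_workers = 3
}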