# k8s-terraform/main.tf
locals {
  old-k8s-subnets-ids = [
    # module.aws-network-from-scratch.subnet.id,
    module.aws-network-existing-managed.subnet-by-name["subnet_1"].id,
    module.aws-network-existing-managed.subnet-by-name["subnet_3"].id,
  ]
  new-k8s-subnets-ids = [
    module.aws-network-existing-mss-dev.subnet-by-name["mssdev Subnet 1"].id,
    module.aws-network-existing-mss-dev.subnet-by-name["mssdev Subnet 3"].id,
  ]
  nfs-subnets = [
    # module.aws-network-from-scratch.subnet,
    module.aws-network-existing-managed.subnet-by-name["subnet_4"],
  ]
  aws-managed-security-group-id = module.aws-network-existing-managed.default-sg.id
  aws-mss-dev-security-group-id = module.aws-network-existing-mss-dev.default-sg.id
  # aws-managed-security-group-id = module.aws-network-from-scratch.default-security-group.id
  # The names of these nodes are created by:
  #   "{var.vm-name-prefix}-{name from nodes-config}-{number}"
  # The full name must be under 16 characters. This appears to be an Active
  # Directory limitation: the "realm join" command failed when the name was
  # too long. It did not report a reason for the failure, but name length was
  # surmised because the name looked truncated when attempting to join.
  #
  # NOTE: This naming restriction only matters if you plan on joining these
  # VMs to Active Directory. See the commented 'check' sketch after this
  # locals block.
  nodes-config = {
    # TODO if the above comment about the name length is true, then this name
    # is too long. IMPORTANT! If you change this then you need to change the
    # k8s ansible role that assigns roles based on whether or not 'master' is
    # in the name.
"k8s-master" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.t2-medium-4gib-2vcpu
subnet-ids = local.old-k8s-subnets-ids
security-groups = [local.aws-managed-security-group-id]
num = 1
},
"k8s-wrkr" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
subnet-ids = local.old-k8s-subnets-ids
security-groups = [local.aws-managed-security-group-id]
disk-size = 32
num = 2
},
"worker" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
subnet-ids = local.old-k8s-subnets-ids
security-groups = [local.aws-managed-security-group-id]
disk-size = 64
num = 3
},
"tst-master" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.t2-medium-4gib-2vcpu
subnet-ids = local.old-k8s-subnets-ids
security-groups = [local.aws-managed-security-group-id]
num = 0
},
"tst-wrkr" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
subnet-ids = local.old-k8s-subnets-ids
security-groups = [local.aws-managed-security-group-id]
disk-size = 32
num = 0
},
"rancher-master" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
subnet-ids = local.new-k8s-subnets-ids
security-groups = [local.aws-mss-dev-security-group-id]
disk-size = 64
num = 1
},
"rancher-worker" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
subnet-ids = local.new-k8s-subnets-ids
security-groups = [local.aws-mss-dev-security-group-id]
disk-size = 64
num = 2
},
"dev" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.c5a-xlarge-amd-8gib-4vcpu
# subnet-ids = [module.aws-network-from-scratch.subnet.id]
subnet-ids = [module.aws-network-existing-managed.subnet-by-name["subnet_4"].id]
security-groups = [local.aws-managed-security-group-id]
disk-size = 32
num = 2
},
"nfs" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.t3a-medium-amd-4gib-2vcpu
# subnet-ids = [module.aws-network-from-scratch.subnet.id]
subnet-ids = [module.aws-network-existing-managed.subnet-by-name["subnet_4"].id]
security-groups = [local.aws-managed-security-group-id]
num = 1
num-disks = 1
      # TODO Remove this; the preferred method is to use 'disks' and
      # 'disk-mounts' as shown below.
      zfs-disk-size = 10
    },
"proxy" = {
base-image = module.aws-ami-constants.ubuntu-ami
aws-ec2-type = module.ec2-types.t2-micro-1gib-1vcpu
# subnet-ids = [module.aws-network-from-scratch.subnet.id]
subnet-ids = [module.aws-network-existing-managed.subnet-by-name["subnet_4"].id]
security-groups = [local.aws-managed-security-group-id]
private-ips = [var.aws-proxy-private-ip]
num = 1
},
}
  disks = {
    "zfs-64g" = {
      num               = 1,
      size              = 64
      availability_zone = local.nfs-subnets[0].availability_zone
    },
    "zfs-256g" = {
      num               = 1,
      size              = 256
      availability_zone = local.nfs-subnets[0].availability_zone
    },
  }
  disk-mounts = [
    {
      # TODO make this attach field work.
      attach        = false
      ec2-id        = module.nodes["nfs"].nodes[0].id,
      disk-group    = "zfs-64g"
      # TODO also make sure that getting drive letters (or whatever) still
      # works. Did it ever work?
      # TODO We need a map of drive letters to device paths in Linux. This
      # gets a little more complicated because t2 instances map to /dev/xvdg
      # while t3a instance types map to /dev/nvme0n1, where 0 is the nth drive
      # attached. The nth drive does not seem to map to a drive letter (i.e.
      # drive letter "f" does not map to 6); it simply increments by 1 for
      # each drive attached, regardless of the drive letter set. See the note
      # after the volume attachments below.
      drive-letters = ["g", "h", "i"]
    },
    {
      # TODO make this attach field work.
      attach        = false
      ec2-id        = module.nodes["nfs"].nodes[0].id,
      disk-group    = "zfs-256g"
      # Same TODOs and drive-letter caveats as the "zfs-64g" mount above.
      drive-letters = ["j", "k", "l"]
    },
  ]
  # This is only needed for libvirt.
  install-qemu-agent = false
}
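# A hedged sketch (not part of the original config): a Terraform >= 1.5
# 'check' block that would flag any generated node name longer than the
# 15-character limit described in the naming comment above. cfg.num stands in
# for the widest numeric suffix, which is close enough for a plan-time check.
# check "node-name-length" {
#   assert {
#     condition = alltrue([
#       for name, cfg in local.nodes-config :
#       length("${var.vm-name-prefix}-${name}-${cfg.num}") <= 15
#     ])
#     error_message = "A node name exceeds 15 characters; 'realm join' may fail."
#   }
# }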
################################################################################
# cloud-init
################################################################################
module "cloud-init-config" {
for_each = local.nodes-config
source = "./modules/cloud-init-config"
cloud-init-template = "${path.module}/cloud_init.cfg"
hostname-prefix = "${var.vm-name-prefix}-${each.key}"
num = each.value.num
root-admin-passwd = var.root-admin-passwd
root-admin-pub-key = var.root-admin-pub-key
install-qemu-agent = local.install-qemu-agent
}
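# For illustration: with a hypothetical vm-name-prefix of "dev", the
# "k8s-wrkr" entry (num = 2) yields user-datas for hostnames "dev-k8s-wrkr-0"
# and "dev-k8s-wrkr-1" (assuming the module numbers nodes from zero).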
################################################################################
# aws
# To use the aws module, uncomment the aws modules/resources and comment out the
# libvirt modules/resources.
################################################################################
# This module sets the ec2-types constants.
module "ec2-types" {
source = "./modules/ec2-types"
}
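# For reference, a minimal sketch of what ./modules/ec2-types presumably
# exposes, with output names inferred from usage in this file (the real
# module may differ):
# output "t2-micro-1gib-1vcpu"       { value = "t2.micro" }
# output "t2-small-2gib-1vcpu"       { value = "t2.small" }
# output "t2-medium-4gib-2vcpu"      { value = "t2.medium" }
# output "t3a-medium-amd-4gib-2vcpu" { value = "t3a.medium" }
# output "c5a-xlarge-amd-8gib-4vcpu" { value = "c5a.xlarge" }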
# This module sets the AWS AMI constants.
module "aws-ami-constants" {
source = "./modules/aws-ami-constants"
}
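# Likewise, a hedged sketch of ./modules/aws-ami-constants; the output names
# are inferred from usage below, and the pinned AMI IDs are not shown in this
# file:
# output "ubuntu-ami"       { value = "ami-..." } # pinned Ubuntu AMI
# output "win-srv-2019-ami" { value = "ami-..." } # pinned Windows Server 2019 AMI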
# This module will grab the latest AMI for a variety of distros. Uncomment to
# get a list of the latest AMIs for our supported distros.
# module "aws-amis" {
# source = "./modules/aws-amis"
# }
# output "amis" {
# value = module.aws-amis.amis
# }
################################################################################
# AWS Networking
# Use the two module sources below to set up the AWS network.
# aws-network-from-scratch builds the AWS network from scratch.
# aws-network-existing queries AWS for an existing VPC.
################################################################################
# module "aws-network-from-scratch" {
# source = "./modules/aws-network-from-scratch"
# name-prefix = var.vm-name-prefix
# vpc-cidr-block = var.aws-vpc-cidr-block
# subnet-cidr-block = var.aws-subnet-cidr-block
# admin-ips = var.admin-ips
# }
module "aws-network-existing-managed" {
source = "./modules/aws-network-existing"
default-vpc-name = var.aws-existing-managed-vpc-name
default-security-group-name = var.aws-existing-managed-sg-name
existing-subnet-names = var.aws-existing-managed-subnet-names
}
module "aws-network-existing-mss-dev" {
source = "./modules/aws-network-existing"
default-vpc-name = var.aws-existing-mss-dev-vpc-name
default-security-group-name = var.aws-existing-mss-dev-sg-name
existing-subnet-names = var.aws-existing-mss-dev-subnet-names
}
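# For reference (inferred from usage elsewhere in this file, not shown here):
# the aws-network-existing module appears to expose at least
#   subnet-by-name - map of subnet name => subnet object (.id, .availability_zone)
#   default-sg     - the default security group object (.id)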
################################################################################
# This key pair is not actually used; keys are added to the nodes via
# cloud-init instead. We add it here only so that the key shows up in the AWS
# console.
resource "aws_key_pair" "key" {
  key_name   = "${var.vm-name-prefix}-key"
  public_key = var.root-admin-pub-key
  tags = {
    Name = "${var.vm-name-prefix}-key"
  }
}
module "disks" {
for_each = local.disks
source = "./modules/aws-disks"
availability_zone = each.value.availability_zone
size = each.value.size
num = each.value.num
prefix = each.key
}
module "disk-mounts" {
source = "./modules/aws-disk-mounts"
disks = module.disks[element(local.disk-mounts, count.index).disk-group].disks
ec2-id = element(local.disk-mounts, count.index).ec2-id
drive-letters = try(element(local.disk-mounts, count.index).drive-letters, null)
count = length(local.disk-mounts)
}
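# A hedged alternative (not in the original): the same wiring keyed by index
# with for_each, which avoids re-creating later mounts when the list order
# changes:
# module "disk-mounts" {
#   for_each      = { for i, m in local.disk-mounts : tostring(i) => m }
#   source        = "./modules/aws-disk-mounts"
#   disks         = module.disks[each.value.disk-group].disks
#   ec2-id        = each.value.ec2-id
#   drive-letters = try(each.value.drive-letters, null)
# }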
# TODO REM remove if unused.
resource "aws_ebs_volume" "zfs" {
# TODO REM look at types.
availability_zone = local.nfs-subnets[0].availability_zone
size = local.nodes-config["nfs"].zfs-disk-size
encrypted = true
count = local.nodes-config["nfs"].num-disks
tags = {
Name = "zfs-disk-${count.index}"
}
}
resource "aws_volume_attachment" "mount-nfs-volume" {
device_name = "/dev/sd${element(var.aws-zfs-drive-letters, count.index)}"
instance_id = module.nodes["nfs"].nodes[0].id
count = local.nodes-config["nfs"].num-disks
volume_id = element(aws_ebs_volume.zfs, count.index).id
}
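# NOTE (added observation): on Nitro instance types (t3a, c5a) the requested
# /dev/sdX device_name is not preserved inside the guest; EBS volumes show up
# as /dev/nvme<N>n1 in attachment order. A hypothetical helper for the
# Xen-based (t2) case only, illustrative name:
# locals {
#   xvd-by-letter = { for l in var.aws-zfs-drive-letters : l => "/dev/xvd${l}" }
# }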
output "zfs-drive-letters" {
value = aws_volume_attachment.mount-nfs-volume.*.device_name
}
module "nodes" {
for_each = local.nodes-config
source = "./modules/aws-nodes"
ec2-instance-type = each.value.aws-ec2-type
ami = each.value.base-image
subnet-ids = each.value.subnet-ids
private-ips = try(each.value.private-ips, [])
security-group-ids = each.value.security-groups
user-datas = lookup(module.cloud-init-config, each.key, null).user-datas
disk-size = try(each.value.disk-size, null)
num-nodes = each.value.num
name-prefix = "${var.vm-name-prefix}-${each.key}"
# TODO add a input for the key so that it will show up as the key in the aws
# console.
}
# TODO an attempt to create a Windows machine.
# module "nodes-win" {
#   source             = "./modules/aws-nodes"
#   ec2-instance-type  = module.ec2-types.t2-small-2gib-1vcpu
#   ami                = module.aws-ami-constants.win-srv-2019-ami
#   subnet-ids         = [module.aws-network-existing-managed.subnet-by-name["subnet_2"].id]
#   private-ips        = []
#   security-group-ids = [local.aws-managed-security-group-id]
#   # TODO REM need to figure out how to not pass a user data.
#   user-datas         = [null]
#   num-nodes          = 1
#   name-prefix        = "${var.vm-name-prefix}-win-test"
# }
################################################################################
# end aws
################################################################################
################################################################################
# libvirt
# To use the libvirt module, uncomment the libvirt modules/resources and comment
# out the aws modules/resources.
################################################################################
# provider "libvirt" {
# uri = var.libvirt-connection-url
# }
#
# module "libvirt-images" {
# source = "./modules/libvirt-images"
# }
#
# module "nodes" {
# for_each = local.nodes-config
# source = "./modules/libvirt-nodes"
# pool-name = libvirt_pool.images.name
# name-prefix = "${var.vm-name-prefix}-${each.key}"
# num-nodes = each.value.num
# node-memory = var.node-memory
# node-vcpus = var.node-vcpus
# node-disk-size = var.libvirt-node-disk-size
# base-image = each.value.base-image
# network-name = var.libvirt-network-name
# root-admin-passwd = var.root-admin-passwd
# root-admin-pub-key = var.root-admin-pub-key
# libvirt-connection-url = var.libvirt-connection-url
# user-datas = lookup(module.cloud-init-config, each.key, null).user-datas
# }
#
# resource "libvirt_pool" "images" {
# name = var.disk-image-pool-name
# type = "dir"
# path = var.disk-image-dir
# }
################################################################################
# end libvirt
################################################################################
# This outputs a map of group => { hostname => ip }.
# TODO A 'names' output needs to be added to libvirt.
output "groups_hostnames_ips" {
  value = { for type, node in module.nodes : type => zipmap(node.names, node.ips) }
}
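# For illustration only (hypothetical prefix and addresses), the shape is
# roughly:
#   {
#     "k8s-master" = { "dev-k8s-master-0" = "10.0.1.10" }
#     "k8s-wrkr"   = { "dev-k8s-wrkr-0" = "10.0.1.11", "dev-k8s-wrkr-1" = "10.0.1.12" }
#   }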
# This outputs a map of group => { hostname => private_ip }.
# TODO Figure out what to do about private_ips for libvirt.
output "groups_hostnames_private_ips" {
  value = { for type, node in module.nodes : type => zipmap(node.names, node.private_ips) }
}