@@ -1,15 +1,172 @@
locals {
  old-k8s-subnets-ids = [
    # module.aws-network-from-scratch.subnet.id,
    module.aws-network-existing-managed.subnet-by-name["subnet_1"].id,
    module.aws-network-existing-managed.subnet-by-name["subnet_3"].id,
  ]
  new-k8s-subnets-ids = [
    module.aws-network-existing-mss-dev.subnet-by-name["mssdev Subnet 1"].id,
    module.aws-network-existing-mss-dev.subnet-by-name["mssdev Subnet 3"].id,
  ]
  nfs-subnets = [
    # module.aws-network-from-scratch.subnet,
    module.aws-network-existing-managed.subnet-by-name["subnet_4"],
  ]
  aws-managed-security-group-id = module.aws-network-existing-managed.default-sg.id
  aws-mss-dev-security-group-id = module.aws-network-existing-mss-dev.default-sg.id
  # aws-managed-security-group-id = module.aws-network-from-scratch.default-security-group.id
  # The name of each node is built as:
  #   "{var.vm-name-prefix}-{name from nodes-config}-{number}"
  # The full name must be < 16 characters. This is likely a limitation of
  # Active Directory: the "realm join" command failed when the name was too
  # long. It did not report a reason for the failure, but the name appeared to
  # be getting truncated during the join attempt, so that is the surmised
  # cause. (A commented-out sketch for checking this limit follows the
  # nodes-config map below.)
  #
  # NOTE: This naming restriction only matters if you plan on joining these
  # VMs to Active Directory.
  nodes-config = {
    "master" = {
      base-image = var.ubuntu-ami
      num = 1
    # TODO If the above comment about the name length is true, then this name
    # is too long. IMPORTANT! If you change this name you also need to change
    # the k8s ansible role that assigns roles based on whether or not 'master'
    # is in the name.
" k8s-master " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . t2 - medium -4 gib -2 vcpu
subnet - ids = local . old - k8s - subnets - ids
security - groups = [ local . aws - managed - security - group - id ]
num = 1
} ,
" k8s-wrkr " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
subnet - ids = local . old - k8s - subnets - ids
security - groups = [ local . aws - managed - security - group - id ]
disk - size = 32
num = 2
} ,
" worker " = {
base - image = var . ubuntu - ami
num = 2
}
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
subnet - ids = local . old - k8s - subnets - ids
security - groups = [ local . aws - managed - security - group - id ]
disk - size = 64
num = 3
} ,
" tst-master " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . t2 - medium -4 gib -2 vcpu
subnet - ids = local . old - k8s - subnets - ids
security - groups = [ local . aws - managed - security - group - id ]
num = 0
} ,
" tst-wrkr " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
subnet - ids = local . old - k8s - subnets - ids
security - groups = [ local . aws - managed - security - group - id ]
disk - size = 32
num = 0
} ,
" rancher-master " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
subnet - ids = local . new - k8s - subnets - ids
security - groups = [ local . aws - mss - dev - security - group - id ]
disk - size = 64
num = 1
} ,
" rancher-worker " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
subnet - ids = local . new - k8s - subnets - ids
security - groups = [ local . aws - mss - dev - security - group - id ]
disk - size = 64
num = 2
} ,
" dev " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . c5a - xlarge - amd -8 gib -4 vcpu
# subnet - ids = [ module . aws - network - from - scratch . subnet . id ]
subnet - ids = [ module . aws - network - existing - managed . subnet - by - name [ " subnet_4 " ] . id ]
security - groups = [ local . aws - managed - security - group - id ]
disk - size = 32
num = 2
} ,
" nfs " = {
base - image = module . aws - ami - constants . ubuntu - ami
aws - ec2 - type = module . ec2 - types . t3a - medium - amd -4 gib -2 vcpu
# subnet - ids = [ module . aws - network - from - scratch . subnet . id ]
subnet - ids = [ module . aws - network - existing - managed . subnet - by - name [ " subnet_4 " ] . id ]
security - groups = [ local . aws - managed - security - group - id ]
num = 1
num - disks = 1
      # TODO Remove this; the preferred method is to use 'disks' and
      # 'disk-mounts' as shown below.
      zfs-disk-size = 10
    },
    "proxy" = {
      base-image = module.aws-ami-constants.ubuntu-ami
      aws-ec2-type = module.ec2-types.t2-micro-1gib-1vcpu
      # subnet-ids = [module.aws-network-from-scratch.subnet.id]
      subnet-ids = [module.aws-network-existing-managed.subnet-by-name["subnet_4"].id]
      security-groups = [local.aws-managed-security-group-id]
      private-ips = [var.aws-proxy-private-ip]
      num = 1
    },
  }
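  # A hedged sketch (not part of the original config): expanding the full node
  # names makes the < 16 character Active Directory limit easy to check, e.g.
  # via `terraform console` and `local.node-full-names`, or by wrapping the
  # same expression in a top-level `check` block (Terraform >= 1.5). The
  # node-full-names name and the 0-based numbering are assumptions.
  # node-full-names = flatten([
  #   for name, cfg in local.nodes-config : [
  #     for i in range(cfg.num) : "${var.vm-name-prefix}-${name}-${i}"
  #   ]
  # ])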
  disks = {
    "zfs-64g" = {
      num = 1,
      size = 64
      availability_zone = local.nfs-subnets[0].availability_zone
    },
    "zfs-256g" = {
      num = 1,
      size = 256
      availability_zone = local.nfs-subnets[0].availability_zone
    },
  }
  disk-mounts = [
    {
      # TODO make this attach field work.
      attach = false
      ec2-id = module.nodes["nfs"].nodes[0].id,
      disk-group = "zfs-64g"
      # TODO also make sure that getting drive letters (or whatever) still
      # works. Did it ever work?
      # TODO We need a map of drive letters to device paths in Linux. This
      # gets a little more complicated because the t2 instances map a volume
      # to /dev/xvdg (by drive letter) while the t3a instance types map it to
      # /dev/nvme0n1, where 0 is the nth drive connected. The nth drive does
      # not seem to follow the drive letter, i.e. drive letter "f" does not
      # map to 6; it simply increments by 1 for each drive attached,
      # regardless of the drive letter set. (A commented-out mapping sketch
      # follows this disk-mounts list.)
      drive-letters = ["g", "h", "i"]
    },
    {
      # TODO make this attach field work.
      attach = false
      ec2-id = module.nodes["nfs"].nodes[0].id,
      disk-group = "zfs-256g"
      # TODO Same drive-letter / device-path caveats as the "zfs-64g" entry
      # above.
      drive-letters = ["j", "k", "l"]
    },
  ]
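  # A hedged sketch (assumption, not existing code) of the device-path map the
  # TODOs above ask for: on Xen instance types (e.g. t2) a volume attached as
  # /dev/sd<letter> shows up as /dev/xvd<letter>, while on Nitro types (t3a,
  # c5a) volumes enumerate as /dev/nvme<N>n1 in attach order (nvme0n1 is
  # usually the root volume, and the index keeps counting across disk groups
  # attached to the same instance).
  # disk-mount-device-paths = {
  #   for mount in local.disk-mounts : mount.disk-group => [
  #     for idx, letter in mount.drive-letters : {
  #       xen-path   = "/dev/xvd${letter}"
  #       nitro-path = "/dev/nvme${idx + 1}n1"
  #     }
  #   ]
  # }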
  # This is only needed for libvirt.
  install-qemu-agent = false
}
@@ -34,6 +191,16 @@ module "cloud-init-config" {
# libvirt modules/resources.
########################################################################################
# This module sets the ec2-types constants.
module "ec2-types" {
  source = "./modules/ec2-types"
}
# This module sets the AWS AMI constants.
module "aws-ami-constants" {
  source = "./modules/aws-ami-constants"
}
# This module will grab the latest AMI for a variety of distros. Uncomment to
# get a list of the latest AMIs for our supported distros.
# module "aws-amis" {
@@ -45,7 +212,7 @@ module "cloud-init-config" {
########################################################################################
# AWS Networking
# Use of the 2 modules below to create resources for the AWS network.
# Use the 2 modules below to create resources for the AWS network.
# aws-network-from-scratch will build the AWS network from scratch.
# aws-network-existing will query AWS for an existing VPC.
########################################################################################
@@ -58,10 +225,18 @@ module "cloud-init-config" {
#   admin-ips = var.admin-ips
# }
module "aws-network-existing" {
module "aws-network-existing-managed" {
  source = "./modules/aws-network-existing"
  default-vpc-name = var.aws-existing-managed-vpc-name
  default-security-group-name = var.aws-existing-managed-sg-name
  existing-subnet-names = var.aws-existing-managed-subnet-names
}
module "aws-network-existing-mss-dev" {
  source = "./modules/aws-network-existing"
  default-vpc-name = var.aws-existing-vpc-name
  default-security-group-name = var.aws-existing-sg-name
  default-vpc-name = var.aws-existing-mss-dev-vpc-name
  default-security-group-name = var.aws-existing-mss-dev-sg-name
  existing-subnet-names = var.aws-existing-mss-dev-subnet-names
}
########################################################################################
@@ -76,18 +251,76 @@ resource "aws_key_pair" "key" {
}
}
module " disks " {
for_each = local . disks
source = " ./modules/aws-disks "
availability_zone = each . value . availability_zone
size = each . value . size
num = each . value . num
prefix = each . key
}
module " disk-mounts " {
source = " ./modules/aws-disk-mounts "
disks = module . disks [ element ( local . disk - mounts , count . index ) . disk - group ] . disks
ec2 - id = element ( local . disk - mounts , count . index ) . ec2 - id
drive - letters = try ( element ( local . disk - mounts , count . index ) . drive - letters , null )
count = length ( local . disk - mounts )
}
# TODO REM remove if unused.
resource "aws_ebs_volume" "zfs" {
  # TODO REM look at types.
  availability_zone = local.nfs-subnets[0].availability_zone
  size = local.nodes-config["nfs"].zfs-disk-size
  encrypted = true
  count = local.nodes-config["nfs"].num-disks
  tags = {
    Name = "zfs-disk-${count.index}"
  }
}
resource " aws_volume_attachment " " mount-nfs-volume " {
device_name = " /dev/sd ${ element ( var . aws - zfs - drive - letters , count . index ) } "
instance_id = module . nodes [ " nfs " ] . nodes [ 0 ] . id
count = local . nodes - config [ " nfs " ] . num - disks
volume_id = element ( aws_ebs_volume . zfs , count . index ) . id
}
output " zfs-drive-letters " {
value = aws_volume_attachment . mount - nfs - volume . * . device_name
}
module " nodes " {
for_each = local . nodes - config
source = " ./modules/aws-nodes "
ec2 - instance - type = each . value . aws - ec2 - type
ami = each . value . base - image
ec2 - instance - type = var . aws - ec2 - instance - type
subnet - id = module . aws - network - existing . k8s - subnets - ids [ 0 ]
security - group - ids = [ module . aws - network - existing . default - sg . id ]
subnet- ids = each . value . subnet - ids
private- ips = try ( each . value . private - ips , [ ] )
security - group - ids = each . value . security - groups
user - datas = lookup ( module . cloud - init - config , each . key , null ) . user - data s
disk - size = try ( each . value . disk - size , null )
num - nodes = each . value . num
name - prefix = " ${ var . vm - name - prefix } - ${ each . key } "
  # TODO Add an input for the key so that it shows up as the key pair in the
  # AWS console. (A hedged sketch follows this block.)
}
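# A hedged sketch for the key TODO above (key-name is an assumed input, not an
# existing one): add a key-name variable to ./modules/aws-nodes, pass it to
# aws_instance.key_name inside the module, and then set it here, e.g.
#   key-name = aws_key_pair.key.key_name
# so the instances show the key pair in the AWS console.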
# TODO An attempt to create a Windows machine.
# module "nodes-win" {
#   source = "./modules/aws-nodes"
#   ec2-instance-type = module.ec2-types.t2-small-2gib-1vcpu
#   ami = module.aws-ami-constants.win-srv-2019-ami
#   subnet-ids = [module.aws-network-existing-managed.subnet-by-name["subnet_2"].id]
#   private-ips = []
#   security-group-ids = [local.aws-managed-security-group-id]
#   # TODO REM need to figure out how to not pass a user data.
#   user-datas = [null]
#   num-nodes = 1
#   name-prefix = "${var.vm-name-prefix}-win-test"
# }
########################################################################################
# end aws
########################################################################################
@@ -101,7 +334,11 @@ module "nodes" {
# provider " libvirt " {
# uri = var . libvirt - connection - url
# }
#
#
# module " libvirt-images " {
# source = " ./modules/libvirt-images "
# }
#
# module " nodes " {
# for_each = local . nodes - config
# source = " ./modules/libvirt-nodes "
@@ -118,7 +355,7 @@ module "nodes" {
#   libvirt-connection-url = var.libvirt-connection-url
#   user-datas = lookup(module.cloud-init-config, each.key, null).user-datas
# }
#
#
# resource "libvirt_pool" "images" {
#   name = var.disk-image-pool-name
#   type = "dir"