k8s-nodes initial work

master
shnee 4 years ago
parent 2c0f1ff60e
commit e9df4cd1d6

@ -0,0 +1,17 @@
Dependencies
----------------------------------------
TODO REM add libvirt provider
The libvirt provider depends on mkisofs.
Set security_driver = "none" in /etc/libvirt/qemu.conf on an Ubuntu host; see the GitHub issue:
https://github.com/dmacvicar/terraform-provider-libvirt/issues/546
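Host-side setup sketch for the above, assuming an Ubuntu/Debian libvirt host (package names and the qemu.conf path may differ on other distros):
```shell
# The provider shells out to mkisofs to build the cloud-init ISO. On Ubuntu the
# genisoimage package is the usual source; if no mkisofs ends up on the PATH,
# a symlink to genisoimage is a common workaround (assumption, not verified here).
sudo apt-get install -y genisoimage
command -v mkisofs >/dev/null || sudo ln -s "$(command -v genisoimage)" /usr/local/bin/mkisofs

# Work around the permission issue from the GitHub link above by disabling the
# qemu security driver, then restart libvirtd.
echo 'security_driver = "none"' | sudo tee -a /etc/libvirt/qemu.conf
sudo systemctl restart libvirtd
```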
Other
----------------------------------------
Create a password hash.
```shell
python3 -c 'import crypt; print(crypt.crypt("test", crypt.mksalt(crypt.METHOD_SHA512)))'
```
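If the Python crypt module is unavailable (it was removed from the standard library in Python 3.13), mkpasswd from the whois package is an alternative, assuming it is installed:
```shell
mkpasswd --method=sha-512 test
```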

@ -0,0 +1,32 @@
#cloud-config
# vim: syntax=yaml
users:
  - name: admin
    # If we don't suppress the user group then cloud-init will fail because
    # there is already an admin group in the Ubuntu base image.
    no_user_group: true
    groups: users, admin, sudo
    shell: /usr/bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    ssh_authorized_keys:
      - ${admin-pub-key}
  - name: root
    ssh_authorized_keys:
      - ${admin-pub-key}

ssh_pwauth: true
disable_root: false

chpasswd:
  list:
    - root:${admin-passwd}
    - admin:${admin-passwd}
  expire: false

hostname: ${hostname}

# Use this when it's determined that we need a bigger disk image.
# This must be used in conjunction with 'size' in 'libvirt_volume'
# growpart:
#   mode: auto
#   devices: ['/']
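A quick sanity check that this config actually ran on a node (a sketch; `<node-ip>` comes from the Terraform outputs further down):
```shell
# cloud-init reports "status: done" once all modules have finished.
ssh admin@<node-ip> cloud-init status --long
# The admin user should have passwordless sudo per the 'sudo' line above.
ssh admin@<node-ip> sudo id -u
```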

@ -0,0 +1,34 @@
# A CIDR block ending in '/32' equates to a single IP address; '0.0.0.0/0'
# equates to any IP address.
admin-ips = [ "8.8.8.8/32", "0.0.0.0/0" ]
disk-image-dir = "/path/to/disk/pool/"
libvirt-connection-url = "qemu+ssh://<user>@<host>/system"
master-nodes = 1
worker-nodes = 2
node-memory = 2048
node-vcpus = 2
# 1 GiB, 1 vCPU; the only free-tier option.
# This one won't work for k8s because kubeadm requires at least 2 vCPUs.
aws-ec2-instance-type = "t2.micro"
# 4 GiB, 2 vcpus
# aws-ec2-instance-type = "t2.medium"
# AWS Amazon Linux 2 AMI (HVM), SSD Volume Type - Oregon - 2021.11.11 - free
# base-image = "ami-00be885d550dcee43"
# AWS Amazon Linux 2 AMI (HVM), SSD Volume Type - us-east-2 - 2021.11.12 - free
base-image = "ami-0dd0ccab7e2801812"
# base-image = "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64-disk-kvm.img"
# From https://cloud.centos.org/centos/7/images/, dated 2020-11-12 06:52
# base-image = "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2009.qcow2"
# Password hash created with:
# python3 -c 'import crypt; print(crypt.crypt("linux", crypt.mksalt(crypt.METHOD_SHA512)))'
# where "linux" is the password.
root-admin-passwd = "$6$fiLRWvGQkdK.MnZA$Co9NkA5ruuBUA389JzmKJiC8gKRohmyM09AFnVBOD7ErZnxK4RHMUlKvYg1HSgwaCXTl7H/q1svoeQeUfgc6f0"
root-admin-pub-key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDfDcjMFmWd6qy9KIlnIHNbEfeNLHC885UUH3jGwESmMTpFfPUn01t9hq5GGaFDrBR55VgdKebAv2JSVl209+r3tE5XxUX5/s2Pu3o2283PiZhA+D18skL7fzaolygOY8mxi9CZSDFia//lLbqT/OE45VGahVBRtda4gmjrade0XRKqjJUCkIo6huG9Ub6yP4gFtFU/C1rRvQo0hqT/imsMYU0Q5XzrKVWv3CpzA7EIQq8llU0fRGMuXWYYOXznPeqqf5BTbWhMWUXVS0o7Cz+zvbxwq1dOR1qHbJ8Vrkt30Cz5QEd159dIM3LHCtOHnveeOpkFo0RqkhQdpZM+2cKzESvivGNGP9h+PrSjcveADxVwDHcxguumUyM012M3yR8cK9KY+GqW5jPdAs13yXGTG4OWiQKeKEgX910l/FndhQi0tSpSEhIlfcEpa3k3P8RrhKJbwiRgR7Qvus4R/KU+lx4OiOr4RKyPQJobC0i0/bvqkw+UHWp4U0Hqivjsb6k= admin"
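Typical workflow with these values (a sketch; assumes the file is saved as terraform.tfvars in the root module so Terraform loads it automatically):
```shell
terraform init    # downloads the aws (and, if enabled, libvirt) providers
terraform plan    # terraform.tfvars is picked up automatically by name
terraform apply
```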

@ -0,0 +1,19 @@
#!/bin/sh
# Use eval "$(./get-vm-ips.sh)" to set env vars for the node IPs.
terraform refresh > /dev/null
IPS_JSON="$(terraform show -json | jq '.values.outputs')"
echo "$IPS_JSON" | \
jq '."master-ips".value[]' | \
nl -v 0 | \
awk '{print "export MASTER" $1 "=" $2}' | \
sed 's/"//g'
echo "$IPS_JSON" | \
jq '."worker-ips".value[]' | \
nl -v 0 | \
awk '{print "export WORKER" $1 "=" $2}' | \
sed 's/"//g'
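Example usage (assumes the stack has already been applied and jq is installed):
```shell
eval "$(./get-vm-ips.sh)"   # exports MASTER0, WORKER0, WORKER1, ...
ssh admin@"$MASTER0"        # log in to the first master as the cloud-init admin user
```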

@ -0,0 +1,144 @@
terraform {
required_version = ">= 0.13"
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = "0.6.11"
}
}
}
################################################################################
# cloud-init
################################################################################
data "template_file" "master-node-user-datas" {
template = file("${path.module}/cloud_init.cfg")
vars = {
admin-passwd = var.root-admin-passwd
admin-pub-key = var.root-admin-pub-key
hostname = "${var.vm-name-prefix}-master-${count.index}"
}
count = var.master-nodes
}
data "template_file" "worker-node-user-datas" {
template = file("${path.module}/cloud_init.cfg")
vars = {
admin-passwd = var.root-admin-passwd
admin-pub-key = var.root-admin-pub-key
hostname = "${var.vm-name-prefix}-worker-${count.index}"
}
count = var.worker-nodes
}
################################################################################
# aws
# To use the aws module, uncomment the aws modules/resources and comment out the
# libvirt modules/resources.
################################################################################
provider "aws" {
region = "us-east-2"
}
module "aws-network" {
source = "./modules/aws-network"
name-prefix = var.vm-name-prefix
vpc-cidr-block = var.aws-vpc-cidr-block
subnet-cidr-block = var.aws-subnet-cidr-block
admin-ips = var.admin-ips
}
# This key pair is not actually used. Keys are added to the nodes via cloud-init
# instead. We just add it here so that the key shows up in the AWS console.
resource "aws_key_pair" "key" {
key_name = "${var.vm-name-prefix}-key"
public_key = var.root-admin-pub-key
tags = {
Name = "${var.vm-name-prefix}-key"
}
}
module "master-nodes" {
source = "./modules/aws-nodes"
ami = var.base-image
ec2-instance-type = var.aws-ec2-instance-type
subnet-id = module.aws-network.subnet.id
security-group-ids = [module.aws-network.default-security-group.id]
user-datas = data.template_file.master-node-user-datas
num-nodes = var.master-nodes
name-prefix = "${var.vm-name-prefix}-master"
}
module "worker-nodes" {
source = "./modules/aws-nodes"
ami = var.base-image
ec2-instance-type = var.aws-ec2-instance-type
subnet-id = module.aws-network.subnet.id
security-group-ids = [module.aws-network.default-security-group.id]
user-datas = data.template_file.worker-node-user-datas
num-nodes = var.worker-nodes
name-prefix = "${var.vm-name-prefix}-worker"
}
################################################################################
# end aws
################################################################################
################################################################################
# libvirt
# To use the libvirt module, uncomment the libvirt modules/resources and comment
# out the aws modules/resources.
################################################################################
# provider "libvirt" {
# uri = var.libvirt-connection-url
# }
#
# module "master-nodes" {
# source = "./modules/libvirt-nodes"
# pool-name = libvirt_pool.images.name
# name-prefix = "${var.vm-name-prefix}-master"
# num-nodes = var.master-nodes
# node-memory = var.node-memory
# node-vcpus = var.node-vcpus
# base-image = var.base-image
# root-admin-passwd = var.root-admin-passwd
# root-admin-pub-key = var.root-admin-pub-key
# libvirt-connection-url = var.libvirt-connection-url
# user-datas = data.template_file.master-node-user-datas
# }
#
# module "worker-nodes" {
# source = "./modules/libvirt-nodes"
# pool-name = libvirt_pool.images.name
# name-prefix = "${var.vm-name-prefix}-worker"
# num-nodes = var.worker-nodes
# node-memory = var.node-memory
# node-vcpus = var.node-vcpus
# base-image = var.base-image
# root-admin-passwd = var.root-admin-passwd
# root-admin-pub-key = var.root-admin-pub-key
# libvirt-connection-url = var.libvirt-connection-url
# user-datas = data.template_file.worker-node-user-datas
# }
#
# resource "libvirt_pool" "images" {
# name = var.disk-image-pool-name
# type = "dir"
# path = var.disk-image-dir
# }
################################################################################
# end libvirt
################################################################################
# TODO REM move to other file?
output "master-ips" {
value = module.master-nodes.ips
}
output "worker-ips" {
value = module.worker-nodes.ips
}
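The same IPs are also available straight from these outputs without the helper script (standard Terraform CLI):
```shell
terraform output master-ips
terraform output -json worker-ips | jq -r '.[]'
```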

@ -0,0 +1,67 @@
resource "aws_vpc" "vpc" {
cidr_block = var.vpc-cidr-block
tags = {
Name = "${var.name-prefix}-vpc"
}
}
resource "aws_subnet" "subnet" {
vpc_id = aws_vpc.vpc.id
cidr_block = var.subnet-cidr-block
# availability_zone = var.avail_zone
tags = {
Name = "${var.name-prefix}-subnet"
}
}
resource "aws_default_security_group" "sg" {
vpc_id = aws_vpc.vpc.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = var.admin-ips
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
prefix_list_ids = []
}
tags = {
Name = "${var.name-prefix}-ssh-from-admins--sg"
}
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "${var.name-prefix}-igw"
}
}
resource "aws_default_route_table" "route-table" {
default_route_table_id = aws_vpc.vpc.main_route_table_id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
# default route, mapping VPC CIDR block to "local", created implicitly and
# cannot be specified.
tags = {
Name = "${var.name-prefix}-route-table"
}
}
# Associate subnet with Route Table
resource "aws_route_table_association" "a-rtb-subnet" {
subnet_id = aws_subnet.subnet.id
route_table_id = aws_default_route_table.route-table.id
}
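One way to double-check that SSH ingress is limited to the admin-ips list (a sketch; assumes a configured AWS CLI and the default "k8s-tf" prefix):
```shell
aws ec2 describe-security-groups \
  --filters "Name=tag:Name,Values=k8s-tf-*" \
  --query 'SecurityGroups[].IpPermissions'
```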

@ -0,0 +1,11 @@
output "vpc" {
value = aws_vpc.vpc
}
output "subnet" {
value = aws_subnet.subnet
}
output "default-security-group" {
value = aws_default_security_group.sg
}

@ -0,0 +1,23 @@
variable "admin-ips" {
description = "A list of ips or cidr blocks that are allowed to connect to the nodes."
type = list(string)
}
variable "name-prefix" {
default = "tf"
description = "This prefix will be used in all the names of the resources creates in our AWS network."
type = string
}
variable "subnet-cidr-block" {
default = "10.0.1.0/24"
description = "The address space to be used for this subnet."
type = string
}
variable "vpc-cidr-block" {
default = "10.0.0.0/16"
description = "The address space to be used for out networks VPC."
type = string
}

@ -0,0 +1,14 @@
resource "aws_instance" "nodes" {
ami = var.ami
instance_type = var.ec2-instance-type
# key_name = aws_key_pair.debug1.key_name
associate_public_ip_address = true
subnet_id = var.subnet-id
vpc_security_group_ids = var.security-group-ids
user_data = element(var.user-datas.*.rendered, count.index)
count = var.num-nodes
tags = {
Name = "${var.name-prefix}-${count.index}"
}
}
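To list the public IPs AWS assigned to these instances outside of Terraform (a sketch; assumes a configured AWS CLI and the default name prefixes):
```shell
aws ec2 describe-instances \
  --filters "Name=tag:Name,Values=k8s-tf-master-*,k8s-tf-worker-*" \
            "Name=instance-state-name,Values=running" \
  --query 'Reservations[].Instances[].PublicIpAddress' \
  --output text
```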

@ -0,0 +1,3 @@
output "ips" {
value = aws_instance.nodes.*.public_ip
}

@ -0,0 +1,36 @@
variable "ami" {
description = "The AWS AMI to be used for all the nodes"
type = string
}
variable "ec2-instance-type" {
default = "t2.micro"
description = "The AWS instance type to use for all nodes."
type = string
}
variable "name-prefix" {
default = "tf-node"
description = "This prefix will be applied to all names created by this module."
type = string
}
variable "num-nodes" {
default = 1
description = "The number of nodes to create from the given input parameters."
type = number
}
variable "user-datas" {
description = "A list of cloud-init configs that get applied to their corresponding node."
}
variable "subnet-id" {
description = "The ID of the subnet that all the nodes will be added to."
type = string
}
variable "security-group-ids" {
description = "A list of security group IDs to be applied to all the nodes."
type = list(string)
}

@ -0,0 +1,74 @@
terraform {
required_version = ">= 0.13"
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = "0.6.11"
}
}
}
provider "libvirt" {
uri = var.libvirt-connection-url
}
resource "libvirt_volume" "node-images" {
name = "${var.name-prefix}-${count.index}"
pool = var.pool-name
source = var.base-image
count = var.num-nodes
format = "qcow2"
}
data "template_file" "network-config" {
template = file("${path.module}/network_config.cfg")
}
resource "libvirt_cloudinit_disk" "node-inits" {
name = "${var.name-prefix}-${count.index}-init"
user_data = element(var.user-datas.*.rendered, count.index)
network_config = data.template_file.network-config.rendered
pool = var.pool-name
count = var.num-nodes
}
resource "libvirt_domain" "nodes" {
count = var.num-nodes
name = "${var.name-prefix}-${count.index}"
memory = var.node-memory
vcpu = var.node-vcpus
cloudinit = element(libvirt_cloudinit_disk.node-inits.*.id, count.index)
network_interface {
network_name = "default"
hostname = "${var.name-prefix}-${count.index}"
wait_for_lease = true
}
# IMPORTANT: this is a known issue with cloud images; since they expect a serial
# console, we need to provide one.
# https://bugs.launchpad.net/cloud-images/+bug/1573095
console {
type = "pty"
target_port = "0"
target_type = "serial"
}
console {
type = "pty"
target_type = "virtio"
target_port = "1"
}
disk {
volume_id = element(libvirt_volume.node-images.*.id, count.index)
}
graphics {
type = "spice"
listen_type = "address"
autoport = true
}
}
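When the libvirt path is enabled, the domains and their DHCP leases can be checked from the hypervisor side (a sketch; the URI matches libvirt-connection-url and the domain name depends on name-prefix):
```shell
virsh -c qemu+ssh://<user>@<host>/system list --all
virsh -c qemu+ssh://<user>@<host>/system domifaddr <name-prefix>-0
```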

@ -0,0 +1,4 @@
version: 2
ethernets:
  ens3:
    dhcp4: true
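On a booted node this should leave a single DHCP address on ens3, which can be confirmed with (interface name taken from the config above):
```shell
ip -4 addr show dev ens3
```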

@ -0,0 +1,4 @@
output "ips" {
value = libvirt_domain.nodes.*.network_interface.0.addresses.0
}

@ -0,0 +1,46 @@
variable "base-image" {
default = "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64-disk-kvm.img"
description = "The base image to be used for all nodes."
}
variable "libvirt-connection-url" {
description = "The libvirt connection URI, ie. qemu+ssh://<user>@<host>/system"
}
variable "name-prefix" {
default = "k8s-node"
description = "This will be a prefix for all resource names, ie. domains will be created suck as \"k8s-node-2\"."
}
variable "node-memory" {
default = "2048"
description = "The amount of memory to be used for all the nodes."
type = number
}
variable "node-vcpus" {
default = "2"
description = "The amount of vcpus to be used for all the nodes."
type = number
}
variable "user-datas" {
description = "A list of cloud-init configs that get applied to their corresponding node."
}
variable "num-nodes" {
description = "The number of nodes to create with this config."
}
variable "pool-name" {
default = "default"
description = "The name of the pool to put all disk images in."
}
variable "root-admin-passwd" {
description = "This will be the password for the root and admin user. The format of this can by any format accepted by cloud-init's chpasswd module."
}
variable "root-admin-pub-key" {
description = "The public key to be added to authorized_keys for the root and admin accounts."
}

@ -0,0 +1,73 @@
variable "admin-ips" {
description = "A list of ips or cidr blocks that are allowed to connect to the nodes."
type = list(string)
}
variable "aws-ec2-instance-type" {
default = "t2.micro"
description = "The AWS instance type to use for all nodes."
}
variable "aws-subnet-cidr-block" {
default = "10.0.1.0/24"
description = "The address space to be used for this subnet."
}
variable "aws-vpc-cidr-block" {
default = "10.0.0.0/16"
description = "The address space to be used for the VPC that all the AWS nodes will be in."
}
variable "disk-image-dir" {
description = "This is the location on the KVM hypervisor host where all the disk images will be kept."
}
variable "disk-image-pool-name" {
default = "k8s-tf-images"
description = "The name of the disk pool where all the images will be kept."
}
variable "libvirt-connection-url" {
description = "The libvirt connection URI, ie. qemu+ssh://<user>@<host>/system"
}
variable "node-memory" {
default = "2048"
description = "The amount of memory to be used for all the nodes."
type = number
}
variable "node-vcpus" {
default = "2"
description = "The amount of vcpus to be used for all the nodes."
type = number
}
variable "root-admin-passwd" {
description = "This will be the password for the root and admin user. The format of this can by any format accepted by cloud-init's chpasswd module."
}
variable "root-admin-pub-key" {
description = "The public key to be added to authorized_keys for the root and admin accounts."
}
variable "master-nodes" {
default = 1
description = "The number of master nodes to create."
type = number
}
variable "worker-nodes" {
default = 2
description = "The number of worker nodes to create."
type = number
}
variable "base-image" {
default = "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64-disk-kvm.img"
}
variable "vm-name-prefix" {
default = "k8s-tf"
description = "This prefix will appear before all VM names and hostnames, ie. k8s-tf-master-0."
}