Init Commit
parent 1c0bb08c9f
commit c6e5a56ba6

@@ -0,0 +1,16 @@
# check_defined is a function that will error if the passed in variable is
# undefined.
#
# usage:
# install:
#     $(call check_defined, var1)
#     $(call check_defined, var2)
#
# Copied this from https://gist.github.com/bbl/bf4bf5875d0c705c4cd78d264f98a8b1
check_defined = \
    $(strip $(foreach 1,$1, \
        $(call __check_defined,$1,$(strip $(value 2)))))
__check_defined = \
    $(if $(value $1),, \
        $(error Undefined $1$(if $2, ($2))))

@@ -0,0 +1,473 @@

################################################################################
# includes
################################################################################

# Not sure why this was needed. Without this dummy target the include was
# causing the `check-%` target to get hit.
check-defined.mk: ;@:

include common/makefiles/check-defined.mk

################################################################################
# end includes
################################################################################

################################################################################
# environment variables
################################################################################

# The location of the ansible inventory file.
INVENTORY ?= inventory

# Variables for some of the common docker images we use for tests.
UBUNTU_20_04_DOCKER_IMAGE ?= docker.shnee.net/docker-hub/library/ubuntu:20.04
UBUNTU_22_04_DOCKER_IMAGE ?= docker.shnee.net/docker-hub/library/ubuntu:22.04
MOLECULE_DOCKER_IMAGES ?= $(UBUNTU_22_04_DOCKER_IMAGE)

# The number of platforms to run a molecule scenario on in parallel. This means
# that if MOLECULE_DOCKER_IMAGES is set to 'ubi7 ubi8 fedora' and
# CONCURRENT_MOLECULE_PLATFORMS is set to 2, then the first iteration of the
# scenario will be run with 'ubi7 ubi8' and a second iteration will be run with
# 'fedora'.
CONCURRENT_MOLECULE_PLATFORMS ?= 999

# This variable will hold a subset of MOLECULE_DOCKER_IMAGES that holds the
# platforms to be used for the current iteration of the molecule scenario.
CURRENT_MOLECULE_PLATFORMS ?=

# You can set this to a file containing the password for your vaults so you
# don't have to enter the password every time. This variable should not be
# committed with a value.
VAULT_PASSWD_FILE ?=

ifdef VAULT_PASSWD_FILE
VAULT_PASSWD_ENV = ANSIBLE_VAULT_PASSWORD_FILE=$(VAULT_PASSWD_FILE)
endif

REPO_ENV_VARS ?=
ENV_VARS = PY_COLORS=1 \
           ANSIBLE_FORCE_COLOR=1 \
           MOLECULE_PLATFORMS="$(call molecule-platforms)" \
           $(VAULT_PASSWD_ENV) \
           $(REPO_ENV_VARS)

# This target can be used to set environment variables in your local
# environment. Run this to set the necessary environment variables:
#     eval $(make -s env-vars)
#
# TODO VAULT_PASSWD_ENV and REPO_ENV_VARS can't be handled like the other
# environment variables. For VAULT_PASSWD_ENV you have to conditionally do
# the 'export' if the variable is defined. For REPO_ENV_VARS you have to
# iterate over each key-value pair in the variable.
.PHONY: env-vars
env-vars:
	$(ENV_VARS) sh -c 'printf "\n\
	export PY_COLORS=$$PY_COLORS\n\
	export ANSIBLE_FORCE_COLOR=$$ANSIBLE_FORCE_COLOR\n\
	export MOLECULE_PLATFORMS=\"$$MOLECULE_PLATFORMS\"\n\
	"'

# TODO update comment
# The pip packages to install when doing offline pypi installs.
PIP_PKGS ?= ansible ansible-lint docker

# This makefile will attempt to use sudo when needed. In case you're running on
# a system without sudo, unset this variable.
#
# The '-i' flag is really only needed to use the SCL sandbox stuff, but it
# doesn't hurt to leave it on all the time?
SUDO ?= sudo -i

ROLE_NAME ?= $(shell basename $$PWD | sed 's/_/-/g')

################################################################################
# end environment variables
################################################################################

# The URL to use for libvirt.
# TODO REM don't commit this specific qemu host.
# LIBVIRT_DEFAULT_URI ?= qemu:///system
LIBVIRT_DEFAULT_URI ?= qemu+ssh://shnee@rita/system

VIRSH ?= virsh --connect $(LIBVIRT_DEFAULT_URI)

################################################################################
# functions
################################################################################

run-molecule = $(ENV_VARS) LIBVIRT_DEFAULT_URI=$(LIBVIRT_DEFAULT_URI) \
               MOLECULE_DOCKER_IMAGE=$(MSS_CI_DOCKER_IMAGE) \
               molecule $1 -s $2 $(DEBUG_ARGS)

# This returns a json formatted string to be used as a value for the `platforms`
# field in a `molecule.yml` docker scenario.
molecule-platforms = $(shell \
    printf "[ " && \
    for image in $(MOLECULE_DOCKER_IMAGES); do \
        printf $(call docker-image-to-molecule-platform-json,$$image)", "; \
    done | sed 's/, $$//' && \
    printf " ]" \
)
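# A rough illustration of the output (assuming MOLECULE_DOCKER_IMAGES holds only
# the ubuntu:22.04 image above and ROLE_NAME happens to be 'example-role'; both
# values are just assumed examples), the call expands, via
# docker-image-to-molecule-platform-json below, to something like:
#   [ { 'name': 'example-role-molecule-ubuntu',
#       'image': 'docker.shnee.net/docker-hub/library/ubuntu:22.04',
#       'pre_build_image': true } ]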

get-num-of-molecule-docker-images = $(shell \
    echo $(MOLECULE_DOCKER_IMAGES) | wc --words)
get-num-of-molecule-iterations = $(shell \
    echo $$(( ( $(call get-num-of-molecule-docker-images) / \
                $(CONCURRENT_MOLECULE_PLATFORMS) ) + \
              ( ( $(call get-num-of-molecule-docker-images) % \
                  $(CONCURRENT_MOLECULE_PLATFORMS) ) != 0 ) )))
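# For example, with 3 images in MOLECULE_DOCKER_IMAGES and
# CONCURRENT_MOLECULE_PLATFORMS=2, this works out to (3 / 2) + ((3 % 2) != 0),
# i.e. 1 + 1 = 2 iterations: integer division plus one extra iteration when
# there is a remainder.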

# TODO relies on 'iter' being set by caller.
get-current-molecule-platforms = \
    export START=$$(( $(CONCURRENT_MOLECULE_PLATFORMS) * $$iter )) && \
    export STOP=$$(( $$START + $(CONCURRENT_MOLECULE_PLATFORMS) )) && \
    export INDEX=0 && \
    export CURR_PLATFORMS="" && \
    for image in $(MOLECULE_DOCKER_IMAGES); do \
        if [ $$INDEX -ge $$START ] && [ $$INDEX -lt $$STOP ]; then \
            CURR_PLATFORMS="$${CURR_PLATFORMS}$$image " ; \
        fi ; \
        INDEX=$$(( $$INDEX+1 )) ; \
    done && \
    echo $$CURR_PLATFORMS
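# For example (using the 'ubi7 ubi8 fedora' list from the comment above with
# CONCURRENT_MOLECULE_PLATFORMS=2), iter=0 selects indexes 0-1 ('ubi7 ubi8')
# and iter=1 selects index 2 ('fedora').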

# Returns a json formatted string representing a molecule platform. This
# function takes a single parameter that is the docker image to be used. This
# function derives the 'name' field from the docker image. It strips the path
# and tag to give it a name, ie. github/user/cool-image:tag becomes
# 'cool-image'.
#
# NOTE: Single quotes are fine when the string is loaded by ansible, however, if
# the string is to be used in a jinja template combined with the from_json
# filter then you must use double quotes. To ensure that the double quotes
# persist you have to do a lot of escaping. Each single quote needs to be
# replaced by `\\\\\"'.
# TODO REM this isn't good enough, we probably need to incorporate the tag in
# the name as well.
docker-image-to-molecule-platform-json = \
    "{ 'name': '$(ROLE_NAME)-molecule-$(call \
        molecule-platform-to-image-name-sed-command,$1)', \
       'image': '$1', \
       'pre_build_image': true }"

molecule-platform-to-image-name-sed-command = $$( echo $1 | \
    sed 's/^.*\///' | sed 's/:.*$$//' )
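# For example, when the surrounding shell command runs, the substitution
# produced by
#   $(call molecule-platform-to-image-name-sed-command,$(UBUNTU_22_04_DOCKER_IMAGE))
# evaluates to 'ubuntu': the first sed strips the registry path and the second
# strips the ':22.04' tag.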

run-molecule-per-docker-image = \
    for image in $(MOLECULE_DOCKER_IMAGES) ; do \
        $(call run-molecule,$1,$2,$$image) ; done

get-scenarios = \
    $(shell find molecule -name molecule.yml | \
        sed 's/molecule\/\([^\/]*\).*/\1/g')

scenario-targets = \
    $(shell export PREFIXES="test- create- destroy- converge- verify- \
                             idempotence- check-" && \
        for prefix in $$PREFIXES ; do \
            for scen in $(SCENARIOS) ; do \
                echo $$prefix$$scen ; done ; done && \
        export PREFIXES="login- list-snapshots- apply-snapshot- \
                         revert-snapshot- list-current-snapshot- \
                         delete-snapshot-" && \
        for prefix in $$PREFIXES ; do \
            for scen in $(VAGRANT_SCENARIOS) ; do \
                echo $$prefix$$scen ; done ; done )

scenario-targets-wrapped = \
    $(shell echo $(call scenario-targets) | \
        fmt -w 79 | \
        sed 's/\(.\)\?$$/\1:/g' )

print-scenario-targets:
	@echo $(call scenario-targets) | \
		sed 's/ /\n/g' | \
		awk '{ print $$1 ":" }'

print-scenario-targets-wrapped:
	@echo $(call scenario-targets) | \
		fmt -w 79 | \
		sed 's/\(.\)\?$$/\1:/g'

# Returns the molecule driver used by the passed in scenario, ie.
#     $(call get-driver,default)
# returns: docker
get-driver = \
    $(shell molecule list -f plain 2> /dev/null | \
        grep "^\([-[:alnum:]]\+\s\+\)\{3\}$1" | \
        awk '{ print $$2 }' )

get-driver-scenarios = \
    $(shell $(ENV_VARS) molecule list -f plain 2> /dev/null | \
        grep "^\([-[:alnum:]]\+\s\+\)$1" | \
        awk '{ print $$4 }' | \
        sort | \
        uniq)

get-scenario-molecule-yaml-file = $(shell echo molecule/$1/molecule.yml)

get-scenario-platforms = \
    $(shell cat $(call get-scenario-molecule-yaml-file,$1) | \
        yq '.platforms[] | .name' | \
        sed 's/"//g' )

get-scenario-first-platform = \
    $(shell echo $(call get-scenario-platforms,$1) | \
        awk '{print $$1}' )

list-scenario-snapshots = \
    $(shell echo $(call get-scenario-platforms,$1) | \
        awk '{ print "$(VIRSH) snapshot-list --tree $1_" $$1 }' )

space-separated-list-to-json-string-array = \
    $(shell echo $1 | \
        sed 's/\(\S\+\)/"\1"/g' | \
        sed 's/ /, /g' | \
        awk '{ print "[ " $$0 " ]" }' )
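# For example, $(call space-separated-list-to-json-string-array,foo bar)
# expands to: [ "foo", "bar" ]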

define driver-scenario-check
	@export SCEN=$1 && \
	export EXP_DRIVER=$2 && \
	export DRIVER=$(call get-driver,$1) && \
	if ! [ $$DRIVER = $$EXP_DRIVER ]; then \
		echo "ERROR: Expecting driver $$EXP_DRIVER, but scenario $$SCEN uses $$DRIVER."; \
		exit 1; \
	fi
endef

define get-vm-name
$(shell $(call driver-scenario-check,$1,vagrant) && \
    echo $1_$2 )
endef

################################################################################
# End Functions
################################################################################

SCENARIOS := $(call get-scenarios)
VAGRANT_SCENARIOS := $(call get-driver-scenarios,vagrant)

DRIVER=

DEBUG_ARGS =

################################################################################
# Targets
################################################################################

list-scenarios:
	@echo $(SCENARIOS)

# TODO This can probably be changed to '--debug' flag to molecule.
# TODO this also only works with molecule because of the '--'
debug:
	$(eval DEBUG_ARGS=-- -vvv)

################################################################################
# Run multiple tests at once.
################################################################################

.PHONY: test test-all test-all-docker test-all-ec2 test-all-vagrant
test: test-all

test-all: test-all-docker test-all-ec2 test-all-vagrant

test-all-docker:
	for scenario in $(call get-driver-scenarios,docker) ; do \
		$(MAKE) test-$$scenario ; done
test-all-ec2:
	for scenario in $(call get-driver-scenarios,ec2) ; do \
		$(MAKE) test-$$scenario ; done
test-all-vagrant:
	for scenario in $(call get-driver-scenarios,vagrant) ; do \
		$(MAKE) test-$$scenario ; done

################################################################################
# End Run multiple tests at once.
################################################################################

lint:
	$(ENV_VARS) molecule lint

verify-scenario-%:
	@# If the '%' is not in $(SCENARIOS) then this is not a valid scenario,
	@# therefore we exit with an error message.
	@export FOUND=0 && \
	for scen in $(SCENARIOS) ; do \
		if [ $$scen = $* ]; then \
			export FOUND=1 ; fi ; done && \
	if [ $$FOUND = 0 ]; then \
		echo "ERROR: $* is not a valid scenario. Valid scenarios are: \
			$(SCENARIOS)"; \
		exit 1 ; fi

test-%: verify-scenario-%
	# $(call run-molecule,test,$*)
	@for (( iter=0; \
	        iter<$(call get-num-of-molecule-iterations); \
	        iter++ )); do \
		make MOLECULE_DOCKER_IMAGES="$$($(call get-current-molecule-platforms))" iter-test-$*; \
		if [ $$? != 0 ]; then \
			exit 1; \
		fi ; \
	done

iter-test-%: verify-scenario-%
	$(call run-molecule,test,$*)
create-%: verify-scenario-%
	$(call run-molecule,create,$*)
converge-%: verify-scenario-%
	$(call run-molecule,converge,$*)
idempotence-%: verify-scenario-%
	$(call run-molecule,idempotence,$*)
check-%: verify-scenario-%
	$(call run-molecule,check,$*)
verify-%: verify-scenario-%
	$(call run-molecule,verify,$*)
destroy-%: verify-scenario-%
	$(call run-molecule,destroy,$*)

status:
	$(ENV_VARS) molecule list

list-vms:
	$(VIRSH) list

login-%: verify-scenario-%
	@$(call driver-scenario-check,$*,vagrant)
	molecule login -s $*

list-snapshots-%: verify-scenario-%
	$(call driver-scenario-check,$*,vagrant)
	$(call list-scenario-snapshots,$*)

list-current-snapshot-%: verify-scenario-%
	$(call driver-scenario-check,$*,vagrant)
	$(VIRSH) snapshot-info --current \
		$*_$(call get-scenario-first-platform,$*)

# TODO It looks like apply, revert, and delete could be combined.
apply-snapshot-%: verify-scenario-%
	@$(call driver-scenario-check,$*,vagrant)
	@if [ -z "$(SNAP_NAME)" ]; then \
		echo "ERROR: You must define SNAP_NAME when running \
			apply-snapshot. Got SNAP_NAME: $(SNAP_NAME)."; \
		exit 1; \
	fi
	for host in $(call get-scenario-platforms,$*) ; do \
		$(VIRSH) snapshot-create-as $*_$$host $(SNAP_NAME) \"$(SNAP_DESC)\"; \
	done

revert-snapshot-%: verify-scenario-%
	@$(call driver-scenario-check,$*,vagrant)
	@if [ -z "$(SNAP_NAME)" ]; then \
		echo "ERROR: You must define SNAP_NAME when running \
			revert-snapshot. Got SNAP_NAME: $(SNAP_NAME)."; \
		exit 1; \
	fi
	for host in $(call get-scenario-platforms,$*) ; do \
		$(VIRSH) snapshot-revert $*_$$host $(SNAP_NAME) ; \
	done

delete-snapshot-%: verify-scenario-%
	@$(call driver-scenario-check,$*,vagrant)
	@if [ -z "$(SNAP_NAME)" ]; then \
		echo "ERROR: You must define SNAP_NAME when running \
			delete-snapshot. Got SNAP_NAME: $(SNAP_NAME)."; \
		exit 1; \
	fi
	for host in $(call get-scenario-platforms,$*) ; do \
		$(VIRSH) snapshot-delete $*_$$host $(SNAP_NAME) ; \
	done

# TODO REM these are really just the constant args for the templates.
ANSIBLE_CONST_ARGS ?= all \
                      -i localhost, \
                      -m template \
                      --connection=local

# TODO make a target that updates all files that make generates to update all of
# those files.
# TODO Combine all these simple templates into a single function.
# TODO had to comment this out. Does this target run every time?
Makefile: common/templates/template-disclaimer.j2
Makefile: common/makefiles/templates/main-repo-makefile.j2
	$(eval TEMPLATE_FILES = \
		$(call space-separated-list-to-json-string-array,$^))
	export TARGETS=$$(echo $(call scenario-targets) | \
		fmt -w 79 | \
		sed 's/\(.\)\?$$/\1:/g' ) && \
	ansible $(ANSIBLE_CONST_ARGS) \
		-e '{template_files: $(TEMPLATE_FILES)}' \
		-e "template_disclaimer={{ \
			lookup('ansible.builtin.template', \
			'common/templates/template-disclaimer.j2') }}" \
		-e 'generated_make_targets="'"$$TARGETS"'"' \
		-a "src=$< dest=$(PWD)/$@"
	cat $@

ansible.cfg: common/templates/template-disclaimer.j2
ansible.cfg: common/templates/default-ansible.cfg.j2
	$(eval TEMPLATE_FILES = \
		$(call space-separated-list-to-json-string-array,$^))
	ansible $(ANSIBLE_CONST_ARGS) \
		-e '{template_files: $(TEMPLATE_FILES)}' \
		-e "template_disclaimer={{ \
			lookup('ansible.builtin.template', \
			'common/templates/template-disclaimer.j2') }}" \
		-a "src=$< dest=$(PWD)/$@"
	cat $@

.ansible-lint: common/templates/template-disclaimer.j2
.ansible-lint: common/templates/default-ansible-lint.j2
	$(eval TEMPLATE_FILES = \
		$(call space-separated-list-to-json-string-array,$^))
	ansible $(ANSIBLE_CONST_ARGS) \
		-e '{template_files: $(TEMPLATE_FILES)}' \
		-e "template_disclaimer={{ \
			lookup('ansible.builtin.template', \
			'common/templates/template-disclaimer.j2') }}" \
		-a "src=$< dest=$(PWD)/$@"
	cat $@

.yamllint: common/templates/template-disclaimer.j2
.yamllint: common/templates/default-yamllint.j2
	$(eval TEMPLATE_FILES = \
		$(call space-separated-list-to-json-string-array,$^))
	ansible $(ANSIBLE_CONST_ARGS) \
		-e '{template_files: $(TEMPLATE_FILES)}' \
		-e "template_disclaimer={{ \
			lookup('ansible.builtin.template', \
			'common/templates/template-disclaimer.j2') }}" \
		-a "src=$< dest=$(PWD)/$@"
	cat $@

README.md: common/templates/template-disclaimer.j2
README.md: common/templates/role-testing.txt
README.md: docs/templates/README.md.j2
	$(eval TEMPLATE_FILES = \
		$(call space-separated-list-to-json-string-array,$^))
	export COMMON_TEST_DOCS="$$(cat common/templates/role-testing.txt)" && \
	ansible $(ANSIBLE_CONST_ARGS) \
		-e "common_test_docs=\"$$COMMON_TEST_DOCS\"" \
		-e '{template_files: $(TEMPLATE_FILES)}' \
		-e "template_disclaimer={{ \
			lookup('ansible.builtin.template', \
			'common/templates/template-disclaimer.j2') }}" \
		-a "src=$< dest=$(PWD)/$@"
	cat $@

.PHONY: help
help:
	@echo "TODO This help is still a WIP; it's missing targets."
	@echo ""
	@echo "test             - Runs all scenarios (tests)"
	@echo "test-all-docker  - Run all the scenarios that use the docker driver."
	@echo "test-all-ec2     - Run all the scenarios that use the EC2 driver."
	@echo "test-all-vagrant - Run all the scenarios that use the vagrant driver. These"
	@echo "                   scenarios require that the user has a KVM/qemu hypervisor"
	@echo "                   installed."

# TODO I don't remember if this is still needed.
.SUFFIXES:

@@ -0,0 +1,30 @@
{{ template_disclaimer }}

# Each test will be run on each of the docker images defined in this space
# separated list.
#
# images. They currently can't be targeted by ansible because they don't have
# python.
MOLECULE_DOCKER_IMAGES ?= $(UBI7_DOCKER_IMAGE) \
                          $(UBI8_DOCKER_IMAGE)

CONCURRENT_MOLECULE_PLATFORMS ?= 1

# REPO_ENV_VARS ?= \
#     ANSIBLE_BECOME_PASSWORD_FILE=molecule/shared/locked-down-sudo-become.pw
REPO_ENV_VARS ?=

shared/.git:
	git submodule update --init

-include common/makefiles/common.mk

# TODO add a comment about how these targets will override the ones in the
# common makefile. -include repo specific targets.
# TODO add comments.
%: shared/.git
	@$(MAKE) -f common/makefiles/common.mk $@

# These will never get hit because the common makefiles targets for these will
# get hit first. We add these here to allow auto-complete of these targets.
{{ generated_make_targets }}

@@ -0,0 +1,67 @@
---

role_name_check: 1

dependency:
  name: galaxy
  options:
    role-file: requirements.yml

driver:
  name: vagrant
  provider:
    name: libvirt

platforms:
  - name: example
    box: generic/rhel7
    memory: 1024
    cpus: 1
    # `interfaces`, `provider_options`, and `instance_raw_config_args` must be
    # commented out if using a local libvirt hypervisor.
    interfaces:
      - network_name: public_network
        ip: "${VM_IP:-192.168.1.103}"
        dev: br0
    provider_options:
      qemu_use_session: false
      uri: "${LIBVIRT_DEFAULT_URI:-qemu:///system}"
      connect_via_ssh: true
    # TODO this might have only been necessary when we were STIGing the machine.
    instance_raw_config_args:
      - "vm.provision :shell, inline: \
         \"sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' \
         /etc/ssh/sshd_config && systemctl restart sshd.service\""

provisioner:
  name: ansible
  playbooks:
    converge: ../shared/converge.yml
    verify: ../shared/verify.yml
    prepare: ../shared/prepare.yml
  # ansible_args:
  #   - --extra-vars
  #   - domain=test
  options:
    diff: true
  config_options:
    defaults:
      stdout_callback: yaml
      # This can give us some timing info on a task.
      # callbacks_enabled: profile_tasks
  # Defining this inventory is only needed to pull vars from here.
  inventory:
    hosts:
      all:
        hosts:  # TODO The way we're defining these hosts does not scale. Only
                # one scenario can be run per host. Maybe we can find a way to
                # grab the PID and attach it to the name?
          example:
            # `ansible_host` must be commented out when using a local
            # libvirt hypervisor.
            ansible_host: "${VM_IP:-192.168.1.103}"
        vars:
          artif_url: "${ARTIF_URL}"

verifier:
  name: ansible

@@ -0,0 +1,29 @@
#!/bin/sh

# This script will do an offline install of ansible.
# It requires that the variable ANSIBLE_RPMS_TAR_GZ be set to the path of a
# tar.gz file containing RPMs for Ansible and its dependencies.
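#
# A purely illustrative invocation (the archive path and script name below are
# placeholders, not files that necessarily exist in this repo):
#   ANSIBLE_RPMS_TAR_GZ=./ansible-rpms.tar.gz sh ./install-ansible-offline.sh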

# TODO Combine this and other rpm tar.gz archives.

set -u

# Make sure our environment variable is set before using it and creating a
# temporary directory.
stat "$ANSIBLE_RPMS_TAR_GZ"

# Create a temporary location to extract the RPMs to.
mkdir tmp

# Extract the RPM tar.gz file to a temporary directory.
tar xvzf "$ANSIBLE_RPMS_TAR_GZ" -C ./tmp

# Perform a localinstall of all the RPMs extracted from ANSIBLE_RPMS_TAR_GZ.
#
# Ignore the shellcheck warning about quoting the find, because in this case we
# want the string to split.
# shellcheck disable=SC2046
yum localinstall --disablerepo=* --nogpgcheck -y $(find ./tmp -name "*.rpm")

# Delete the temporary RPM packages.
rm -rf tmp

@@ -0,0 +1,28 @@
#!/bin/sh

# This script will do an offline install of packages that are included in
# RPM_TAR_GZS. Every RPM file in the archives will be installed. RPM_TAR_GZS
# must be defined and it needs to be a space delimited list of RPM archives.

set -u

echo "Installing RPMs from archives: $RPM_TAR_GZS"

# Create a temporary directory where all archives in $RPM_TAR_GZS will be
# extracted to.
mkdir tmp

# For each archive in RPM_TAR_GZS extract the contents to ./tmp
for ARCHIVE in $RPM_TAR_GZS; do
    tar xvf "$ARCHIVE" -C tmp/
done

# Perform a localinstall of all the RPMs extracted from RPM_TAR_GZS.
#
# Ignore the shellcheck warning about quoting the find, because in this case we
# want the string to split.
# shellcheck disable=SC2046
yum localinstall --disablerepo=* --nogpgcheck -y $(find ./tmp -name "*.rpm")

# Delete the temporary RPM packages.
rm -rf tmp

@@ -0,0 +1,35 @@
#!/bin/sh

# This script will do an offline install of the packages defined by PIP_PKGS and
# will install them from the tar.gzs defined by PIP_TARS_GZS. These variables
# must be defined and each of them should be a space delimited list.
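#
# A purely illustrative invocation (the archive path and script name below are
# placeholders, not files that necessarily exist in this repo):
#   PIP_PKGS="ansible ansible-lint" PIP_TARS_GZS="./pip-wheels.tar.gz" \
#       sh ./install-pip-pkgs-offline.sh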

set -u

# TODO REM add a way to configure python.
echo "Installing packages \"$PIP_PKGS\" from archives: $PIP_TARS_GZS"

# Create temporary directories.
#
# Archives defined in PIP_TARS_GZS are extracted to `tmp`. The wheel files are
# pulled out of `tmp` and put into `wheels`.
mkdir tmp wheels

# For each archive in PIP_TARS_GZS, extract the contents into tmp.
for ARCHIVE in $PIP_TARS_GZS; do
    tar xvzf "$ARCHIVE" -C tmp/
done

# Find all the `whl` files and copy them into `wheels`.
find tmp -name "*.whl" -exec cp {} wheels/ \;

# Install all packages defined in PIP_PKGS.
#
# shellcheck disable=SC2086
pip install \
    --no-index \
    --find-links=wheels/ \
    $PIP_PKGS

# Remove temporary directories.
rm -rf tmp wheels

@@ -0,0 +1,33 @@
#!/bin/sh

set -u

# This script will do an offline install of python.
# It requires that the variable PYTHON_RPMS_TAR_GZ be set to the path of a
# tar.gz file containing RPMs for python and its dependencies.

PROFILE_D_PYTHON=/etc/profile.d/python-scl.sh

echo "Installing python from RPMs in $PYTHON_RPMS_TAR_GZ"

# Create a temporary location to extract the RPMs to.
mkdir tmp

# Extract the RPM tar.gz file to a temporary directory.
tar xvzf "$PYTHON_RPMS_TAR_GZ" -C ./tmp

# Perform a localinstall of all the RPMs extracted from PYTHON_RPMS_TAR_GZ.
#
# Ignore the shellcheck warning about quoting the find, because in this case we
# want the string to split.
# shellcheck disable=SC2046
yum localinstall --disablerepo=* --nogpgcheck -y $(find ./tmp -name "*.rpm")

# Delete the temporary RPM packages.
rm -rf tmp

# Add this to /etc/profile.d so that this python version will get loaded
# automatically when a user logs in.
echo "source /opt/rh/rh-python38/enable" | tee "$PROFILE_D_PYTHON"
chown root:root "$PROFILE_D_PYTHON"
chmod 644 "$PROFILE_D_PYTHON"

@@ -0,0 +1,4 @@
{{ template_disclaimer }}
exclude_paths:
  - ./ansible_collections
  - ./roles

@@ -0,0 +1,13 @@
{{ template_disclaimer }}
[defaults]

# Installs collections into ./ansible_collections/namespace/collection_name
collections_paths = ./

# Installs roles into ./roles/namespace.rolename
roles_path = ./roles

# Use the YAML callback plugin.
stdout_callback = yaml
# Use the stdout_callback when running ad-hoc commands.
bin_ansible_callbacks = True

@@ -0,0 +1,35 @@
---
{{ template_disclaimer }}
# Based on ansible-lint config
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  colons:
    max-spaces-after: -1
    level: error
  commas:
    max-spaces-after: -1
    level: error
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    max: 3
    level: error
  hyphens:
    level: error
  indentation: disable
  key-duplicates: enable
  new-line-at-end-of-file: disable
  new-lines:
    type: unix
{# TODO Add a way to add ignore files. #}
{# Maybe we want to add roles all the time? #}
{# ignore: | #}
{#   roles/** #}

@@ -0,0 +1,33 @@
To run the tests simply run `make test`.

Some of the tests require certain environment variables to be set. `make` will
set the variables for you. In addition, you can set the variables in your local
environment by running:
```shell
eval $(make -s env-vars)
```

This role uses molecule for testing. Each molecule test is called a scenario. To
list the scenarios defined for this role run `make list-scenarios`. You can run
a single scenario by running `make test-<scenario name>`.

Some tests rely on an environment variable, `MOLECULE_DOCKER_IMAGES`, to define
the platforms for the test. `MOLECULE_DOCKER_IMAGES` will get set automatically
by `make`, but can also be overridden by the user. `MOLECULE_DOCKER_IMAGES`
should be defined as a space separated list of docker images. The test will be
run on all the images defined in `MOLECULE_DOCKER_IMAGES` in parallel. You can
control the parallelism of the test by setting `CONCURRENT_MOLECULE_PLATFORMS`
to the number of platforms you want to test at once. If
`CONCURRENT_MOLECULE_PLATFORMS` is less than the total number of
`MOLECULE_DOCKER_IMAGES` then the test will keep iterating over
`MOLECULE_DOCKER_IMAGES` `CONCURRENT_MOLECULE_PLATFORMS`-at-a-time until all
platforms have been tested.
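
For example, to run a single scenario (here a hypothetical scenario named
`default`) against two Ubuntu images one at a time, you could override both
variables on the command line:

```shell
MOLECULE_DOCKER_IMAGES="ubuntu:20.04 ubuntu:22.04" \
CONCURRENT_MOLECULE_PLATFORMS=1 \
make test-default
```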

### Vagrant Tests

All of the vagrant scenarios (tests) in this collection use libvirt (KVM/QEMU)
for the hypervisor. We are unable to set up a KVM hypervisor in AWS for a
reasonable price. Therefore, if you want to run the vagrant scenarios you must
have access to a KVM hypervisor. In addition, for some of the tests you will
also need access to GitLab and Artifactory, which may require that the
hypervisor be connected to the AWS VPN.

@@ -0,0 +1,9 @@
# This file was auto-generated by make. Do not edit this file directly!
#
# The following templates were used when creating this file:
{% for template_file in template_files %}
# - {{ template_file }}
{% endfor %}
#
# TODO REM add docs on what file to change for overriding make targets or just
# adding new repo specific ones.