Start automating demo infra provisioning.

2023-03-10 10:24:23 +13:00
parent ee142acc6e
commit 429ce73a1b
16 changed files with 485 additions and 1 deletions

.gitignore
@@ -0,0 +1,44 @@
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
# Keys
*.pem
# Inventories
inventory
# Controller config files
*.cfg
# Controller license files
*manifest.zip
# Environment variable files
.env
# Ignore any .tfvars files that are generated automatically for each Terraform run. Most
# .tfvars files are managed as part of configuration and so should be included in
# version control.
#
# example.tfvars
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

@@ -0,0 +1,36 @@
#+TITLE: Deploying demo infrastructure
#+AUTHOR: James Blair <jablair@redhat.com>
#+DATE: <2023-03-10 Fri 10:15>
This guide outlines the steps to deploy the infrastructure required to run the demo for this talk. Infrastructure provisioning is performed via [[https://www.ansible.com/][Ansible]] using the [[https://www.terraform.io/][Terraform]] collection.

To run the demo we need one RHEL virtual machine; this machine will run our ~microshift~ Kubernetes cluster, which will host our Ansible Automation Platform and Jira pods.

To get started we need to define some credentials in an ~.env~ file. Note that this file is ignored via the repo ~.gitignore~ for security reasons.
#+NAME: Create secret env file
#+begin_src tmate
cat << EOF > .env
export TF_VAR_subscription_pw=placeholder
export TF_VAR_aws_region=ap-southeast-2
export TF_VAR_aws_access_key=placeholder
export TF_VAR_aws_secret_key=placeholder
EOF
#+end_src
Once secrets have been defined, run the code block below to install the required dependencies and then run the Ansible playbook that deploys our infrastructure.
#+NAME: Install dependencies and run
#+begin_src tmate
# Source secrets
source ../.env
# Install certified terraform collection
ansible-galaxy collection install cloud.terraform
ansible-galaxy collection install awx.awx
# Run the deploy playbook
ansible-playbook -i localhost, demo-infra-deploy.yaml
#+end_src
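
Once the playbook completes you can optionally verify the result. The sketch below assumes the Terraform state was written alongside the playbook (the default given ~project_path: "{{ playbook_dir }}"~) and uses the key file Terraform generates; with the defaults in ~terraform.tfvars~ that key is ~microshift-dev-linux-ap-southeast-2.pem~.

#+NAME: Verify deployed infrastructure
#+begin_src tmate
# Show the public ip(s) terraform allocated
terraform output vm_linux_server_instance_public_ip

# Connect to the instance, substituting the ip printed above
ssh -i microshift-dev-linux-ap-southeast-2.pem ec2-user@<public-ip>
#+end_src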

@@ -0,0 +1,10 @@
# Application definition
variable "app_name" {
  type        = string
  description = "Application name"
}

variable "app_environment" {
  type        = string
  description = "Application environment"
}

@@ -0,0 +1,23 @@
- name: "Configure demo infra"
hosts: all
gather_facts: false
vars:
ansible_host_key_checking: false
tasks:
- name: "Wait for ssh connection"
ansible.builtin.wait_for_connection:
delay: 30
timeout: 600
- name: "Register machine via subscription manager"
community.general.redhat_subscription:
state: present
username: rh-ee-jablair
password: "{{ lookup('env', 'TF_VAR_subscription_pw') }}"
auto_attach: false
- name: "Ensure manage repos set correctly"
ansible.builtin.shell: |
sudo sed -i 's/manage_repos = 0/manage_repos = 1/g' /etc/rhsm/rhsm.conf
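
# Usage sketch. The playbook file name and inventory path below are assumptions
# for illustration, not confirmed by this repo; the pem key is the one terraform
# generates with the repo's default tfvars:
#   ansible-playbook -i inventory -u ec2-user \
#     --private-key microshift-dev-linux-ap-southeast-2.pem demo-infra-configure.yaml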

@@ -0,0 +1,21 @@
- name: "Deploy demo infrastructure"
hosts: localhost
connection: local
gather_facts: true
tasks:
- name: "Ensure required variables exist"
assert:
that:
- "'TF_VAR_aws_region' in ansible_env"
- "'TF_VAR_aws_secret_key' in ansible_env"
- "'TF_VAR_aws_access_key' in ansible_env"
- "'TF_VAR_subscription_pw' in ansible_env"
fail_msg: "Required input variables are not set, refer README.org."
- name: "Apply terraform plan"
cloud.terraform.terraform:
project_path: "{{ playbook_dir }}"
state: present
force_init: true
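
# Note: terraform itself reads the TF_VAR_* environment variables asserted above,
# so no extra variable wiring is needed when this play invokes terraform locally.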

@@ -0,0 +1,18 @@
# Generates a secure private key and encodes it as pem
resource "tls_private_key" "key_pair" {
algorithm = "RSA"
rsa_bits = 4096
}
# Create the Key Pair
resource "aws_key_pair" "key_pair" {
key_name = "${lower(var.app_name)}-${lower(var.app_environment)}-linux-${lower(var.aws_region)}"
public_key = tls_private_key.key_pair.public_key_openssh
}
# Save file
resource "local_file" "ssh_key" {
filename = "${aws_key_pair.key_pair.key_name}.pem"
content = tls_private_key.key_pair.private_key_pem
file_permission = 0600
}
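
# The resulting key file can also be used for manual access, e.g. (illustrative):
#   ssh -i microshift-dev-linux-ap-southeast-2.pem ec2-user@<public-ip>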

@@ -0,0 +1,105 @@
# Create elastic ip for the ec2 instance
resource "aws_eip" "linux-eip" {
  count = 1 # one eip per ec2 instance provisioned below
  vpc   = true

  tags = {
    Name        = "${lower(var.app_name)}-${var.app_environment}-linux-eip"
    Environment = var.app_environment
  }
}
# Define the security group for the ec2 instance
resource "aws_security_group" "aws-linux-sg" {
  name        = "${lower(var.app_name)}-${var.app_environment}-linux-sg"
  description = "Allow incoming HTTP connections"
  vpc_id      = aws_vpc.vpc.id

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "Allow incoming HTTP connections"
  }

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "Allow incoming HTTPS connections"
  }

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "Allow incoming SSH connections"
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name        = "${lower(var.app_name)}-${var.app_environment}-linux-sg"
    Environment = var.app_environment
  }
}
# Create EC2 Instance
resource "aws_instance" "linux-server" {
  count                       = 1
  ami                         = data.aws_ami.rhel_8_7.id
  instance_type               = var.linux_instance_type
  subnet_id                   = aws_subnet.public-subnet.id
  vpc_security_group_ids      = [aws_security_group.aws-linux-sg.id]
  associate_public_ip_address = var.linux_associate_public_ip_address
  source_dest_check           = false
  key_name                    = aws_key_pair.key_pair.key_name

  # root disk
  root_block_device {
    volume_size           = var.linux_root_volume_size
    volume_type           = var.linux_root_volume_type
    delete_on_termination = true
    encrypted             = true
  }

  # extra disk
  ebs_block_device {
    device_name           = "/dev/xvda"
    volume_size           = var.linux_data_volume_size
    volume_type           = var.linux_data_volume_type
    encrypted             = true
    delete_on_termination = true
  }

  tags = {
    Name        = "${lower(var.app_name)}-${var.app_environment}-linux-server"
    Environment = var.app_environment
  }

  # Ensure the machine has started with a remote exec
  provisioner "remote-exec" {
    inline = ["echo hello world"]

    connection {
      host        = self.public_ip
      type        = "ssh"
      user        = "ec2-user"
      private_key = file(format("%s.%s", self.key_name, "pem"))
    }
  }
}
# Associate Elastic IP to Linux Server
resource "aws_eip_association" "linux-eip-association" {
  count         = 1
  instance_id   = aws_instance.linux-server[count.index].id
  allocation_id = aws_eip.linux-eip[count.index].id
}

@@ -0,0 +1,15 @@
output "vm_linux_server_instance_id" {
value = aws_instance.linux-server[*].id
}
output "vm_linux_server_instance_public_dns" {
value = aws_instance.linux-server[*].public_dns
}
output "vm_linux_server_instance_public_ip" {
value = aws_eip.linux-eip[*].public_ip
}
output "vm_linux_server_instance_private_ip" {
value = aws_instance.linux-server[*].private_ip
}
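
# Query these after an apply, e.g.:
#   terraform output vm_linux_server_instance_public_ip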

@@ -0,0 +1,33 @@
variable "linux_instance_type" {
type = string
description = "EC2 instance type for server"
default = "t2.micro"
}
variable "linux_associate_public_ip_address" {
type = bool
description = "Associate a public ip address to the ec2 instance"
default = true
}
variable "linux_root_volume_size" {
type = number
description = "Volume size of root volume of server"
}
variable "linux_data_volume_size" {
type = number
description = "Volume size of data volume of server"
}
variable "linux_root_volume_type" {
type = string
description = "Volume type of root volume of server. Can be standard, gp3, gp2, io1, sc1 or st1"
default = "gp2"
}
variable "linux_data_volume_type" {
type = string
description = "Volume type of data volume of server. Can be standard, gp3, gp2, io1, sc1 or st1"
default = "gp2"
}

@@ -0,0 +1,48 @@
# Create the vpc
resource "aws_vpc" "vpc" {
  cidr_block           = var.vpc_cidr
  enable_dns_hostnames = true

  tags = {
    Name        = "${lower(var.app_name)}-${lower(var.app_environment)}-vpc"
    Environment = var.app_environment
  }
}

# Define the public subnet
resource "aws_subnet" "public-subnet" {
  vpc_id            = aws_vpc.vpc.id
  cidr_block        = var.public_subnet_cidr
  availability_zone = var.aws_az

  tags = {
    Name        = "${lower(var.app_name)}-${lower(var.app_environment)}-public-subnet"
    Environment = var.app_environment
  }
}

# Define the internet gateway
resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.vpc.id

  tags = {
    Name        = "${lower(var.app_name)}-${lower(var.app_environment)}-igw"
    Environment = var.app_environment
  }
}

# Define the public route table
resource "aws_route_table" "public-rt" {
  vpc_id = aws_vpc.vpc.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }

  tags = {
    Name        = "${lower(var.app_name)}-${lower(var.app_environment)}-public-subnet-rt"
    Environment = var.app_environment
  }
}

# Assign the public route table to the public subnet
resource "aws_route_table_association" "public-rt-association" {
  subnet_id      = aws_subnet.public-subnet.id
  route_table_id = aws_route_table.public-rt.id
}

@@ -0,0 +1,20 @@
# Availability zone
variable "aws_az" {
  type        = string
  description = "AWS AZ"
  default     = "ap-southeast-2a"
}

# Address range for vpc
variable "vpc_cidr" {
  type        = string
  description = "CIDR for the VPC"
  default     = "10.1.64.0/18"
}

# Subnet variables
variable "public_subnet_cidr" {
  type        = string
  description = "CIDR for the public subnet"
  default     = "10.1.64.0/24"
}

@@ -0,0 +1,15 @@
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.0"
    }
  }
}

provider "aws" {
  access_key = var.aws_access_key
  secret_key = var.aws_secret_key
  region     = var.aws_region
}

@@ -0,0 +1,15 @@
# AWS connection & authentication
variable "aws_access_key" {
  type        = string
  description = "AWS access key"
}

variable "aws_secret_key" {
  type        = string
  description = "AWS secret key"
}

variable "aws_region" {
  type        = string
  description = "AWS region"
}

@@ -0,0 +1,53 @@
# RHEL 9.0
data "aws_ami" "rhel_9_0" {
  most_recent = true
  owners      = ["309956199498"] // Red Hat's Account ID

  filter {
    name   = "name"
    values = ["RHEL-9.0*"]
  }

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }

  filter {
    name   = "root-device-type"
    values = ["ebs"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

# RHEL 8.7
data "aws_ami" "rhel_8_7" {
  most_recent = true
  owners      = ["309956199498"] // Red Hat's Account ID

  filter {
    name   = "name"
    values = ["RHEL-8.7*"]
  }

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }

  filter {
    name   = "root-device-type"
    values = ["ebs"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

@@ -0,0 +1,18 @@
# Application definition
app_name        = "microshift" # Don't enter any spaces
app_environment = "dev"        # Dev, Test, Staging, Prod, etc.

# Network ranges
vpc_cidr           = "10.11.0.0/16"
public_subnet_cidr = "10.11.1.0/24"

# AWS region to deploy into
aws_region = "ap-southeast-2"

# Linux server variables
linux_instance_type               = "t2.large"
linux_associate_public_ip_address = true
linux_root_volume_size            = 20
linux_root_volume_type            = "gp2"
linux_data_volume_size            = 10
linux_data_volume_type            = "gp2"

@@ -4,4 +4,14 @@
ChatOps is a collaboration model that connects people, tools, process, and automation into a transparent workflow. This flow connects the work needed, the work happening, and the work done in a persistent location staffed by the people, bots, and related tools.
This is a short 15 minute lightning talk showcasing how we can leverage the [[https://www.ansible.com/products/automation-platform][Ansible Automation Platform]] Automation [[https://www.ansible.com/products/controller][Controller]] as the engine to power ChatOps style end-to-end self-service workflows in Atlassian Jira.
* Pre-requisites
This talk requires an existing OpenShift 4 cluster to be running and assumes you have permission to deploy workloads to the cluster.
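A quick way to sanity check both requirements with the standard ~oc~ client, assuming you are already logged in to the cluster:

#+NAME: Verify cluster access
#+begin_src tmate
# Confirm the cluster responds and who we are logged in as
oc whoami
oc version

# Confirm we have permission to create workloads in the current project
oc auth can-i create deployments
#+end_src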
* Deploy Jira instance
Our first step is to deploy a containerised instance of Jira server.
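A minimal sketch of one way to do this with ~oc new-app~, assuming the public ~atlassian/jira-software~ image from Docker Hub; the exact image, tag, and sizing used in the demo may differ:

#+NAME: Deploy jira
#+begin_src tmate
# Create a project for jira and deploy the container image
oc new-project jira
oc new-app docker.io/atlassian/jira-software --name=jira

# Expose the jira service outside the cluster
oc expose service/jira --port=8080
#+end_src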