Compare commits
251 Commits: 69f30da89a...main
| SHA1 | Author | Date | |
|---|---|---|---|
| 7480632312 | |||
| fe4338721a | |||
| 6e96618617 | |||
| ed46e3b3df | |||
| ec0ad02d70 | |||
| a70d5aed9e | |||
| 8c44988865 | |||
| be702fe47d | |||
| bec9d39ba1 | |||
| 52536b2e3e | |||
| 96af899045 | |||
| 1877d9c0ff | |||
| 730a3f5a5b | |||
| 73d7324927 | |||
| a04af136a7 | |||
| 1224f23c88 | |||
| 7947702050 | |||
| ef78bbdfe9 | |||
| 879aaecdcf | |||
| 638c51d539 | |||
| 04d263374a | |||
| a99499e26b | |||
| 4515f9b096 | |||
| ca73036cd3 | |||
| 4a8d8b409b | |||
| 565330ab50 | |||
| 617fc7bdcc | |||
| 0b061fa8b7 | |||
| f30a8af73f | |||
| e9c4fbd5fc | |||
| 3f0c29fd65 | |||
| 381ebf0da9 | |||
| 94ba768ae1 | |||
| 23b5ea24d8 | |||
| f8dcb947fd | |||
| e8b416180e | |||
| 0640f60ae4 | |||
| 0558a0a947 | |||
| ed36707987 | |||
| 4e48cc4f48 | |||
| 318769929a | |||
| 2368711f07 | |||
| 3512aebbb0 | |||
| 3f6495041c | |||
| 11b8154424 | |||
| 0c75128408 | |||
| 0885136ca9 | |||
| 7ee2a55cdc | |||
| d87cb4a04e | |||
| 47aa8c9e4c | |||
| a09f46b7f7 | |||
| e419983e4d | |||
| ef0c2b0845 | |||
| e539ac1e0b | |||
| 867a404dae | |||
| 753707ea26 | |||
| 63089b7a65 | |||
| a96fdab1aa | |||
| b48a372aca | |||
| 8a9e27786c | |||
| 0cf8a70488 | |||
| 8d871fca05 | |||
| 1b4eb57f71 | |||
| 050af1207a | |||
| 5f359384b6 | |||
| a6bd11e25e | |||
| 7871b1ce08 | |||
| d2b26d41c9 | |||
| 26e4a00431 | |||
| 6a38706456 | |||
| 38398e19be | |||
| 79b80c89db | |||
| 1bb9bfe26a | |||
| e85283e927 | |||
| 3b4e38752f | |||
| 319d56642b | |||
| f2b66c2e9a | |||
| 63137af30c | |||
| a0a7dc9dea | |||
| be402a72b2 | |||
| dda02b356a | |||
| b67cf47b46 | |||
| 874177ceba | |||
| 3bdad8e1b6 | |||
| 8cbae32461 | |||
| 82c68c6088 | |||
| 8927163bb0 | |||
| 6d09334b53 | |||
| 43d8a43db6 | |||
| c5f697f48c | |||
| 350abf8e74 | |||
| b5cf0bf9a5 | |||
| 1b8aca79ce | |||
| 71fd6b7e7e | |||
| 5b0671f44a | |||
| d3a93b8d51 | |||
| 12600dab12 | |||
| b92f0cd473 | |||
| 96ebf493f9 | |||
| 0fe8ba04b9 | |||
| 83c84851f4 | |||
| 8bbcc623dc | |||
| fd45c146b6 | |||
| 0ba437ec88 | |||
| 5169cad7ee | |||
| 82c6323d0a | |||
| 3b35f51759 | |||
| 8b5040d6d5 | |||
| b5a1dad1e2 | |||
| fa43603ab0 | |||
| 9c0881ff5f | |||
| e02211f781 | |||
| ca517645fe | |||
| c65aa7803a | |||
| a8d12e1371 | |||
| 66f2bc4259 | |||
| 147f547bda | |||
| 5d6d8a5412 | |||
| c74a394845 | |||
| 21d1ad0cfa | |||
| 2de0461379 | |||
| a407ffcc8e | |||
| ca9a65adf8 | |||
| 76e71a9b1c | |||
| 89a9ba3e66 | |||
| f2c3d64381 | |||
| 466b75a4bc | |||
| e5ed8820f6 | |||
| eb73dd17d5 | |||
| ce5c3b1cff | |||
| 3171641aa8 | |||
| 9e0f699449 | |||
| 2c0df4bf64 | |||
| 5d545d3dcd | |||
| a8697c2fc2 | |||
| dde817962b | |||
| 4951289a4e | |||
| f19fb8cbf8 | |||
| 671b3adc99 | |||
| bc5dc9e835 | |||
| 7004cc535c | |||
| 4f6ca41ee0 | |||
| 24968e0ec2 | |||
| a1f6624930 | |||
| 9769fefa79 | |||
| 4f225728ba | |||
| 4427dc635e | |||
| af6471c905 | |||
| ac3e2c4f24 | |||
| a63205d940 | |||
| 938513607f | |||
| 0943e8182b | |||
| 36840753c0 | |||
| 679efe5bda | |||
| f9c7fbcf26 | |||
| bbf57b4ff3 | |||
| 522b9dd5ff | |||
| 0248e74d55 | |||
| 7dc75b5106 | |||
| 8611d0ae84 | |||
| 0f82c9f76e | |||
| 73615265dd | |||
| e6eda458c0 | |||
| 4f33509757 | |||
| 872fe4cb1e | |||
| 87ea4f6d10 | |||
| 1beb008be4 | |||
| eef0531232 | |||
| 8ce152bb6b | |||
| b960cfed77 | |||
| a30d8b5951 | |||
| 53c78e39fc | |||
| af8990759a | |||
| e8eeb281cf | |||
| de5c88b458 | |||
| 19fb31c917 | |||
| dd6556fdef | |||
| ba30dd5ea3 | |||
| 5fe2185b02 | |||
| 2e1a057483 | |||
| a238cf3a2f | |||
| eedb480174 | |||
| 4ee286e015 | |||
| 7d6e58db36 | |||
| d99413ca65 | |||
| e256f1863d | |||
| 5e0c52395a | |||
| 530df69d94 | |||
| f26d064d40 | |||
| 740d9b2a34 | |||
| 8a4191ebeb | |||
| e414959f54 | |||
| 03a44eb4ee | |||
| 40c54867c1 | |||
| 1181559043 | |||
| 316746d968 | |||
| ac89205adc | |||
| f6465348f7 | |||
| 5e0d4340c0 | |||
| 046d487abc | |||
| de975f102f | |||
| 58697eb7ea | |||
| 5ecf6cc64d | |||
| 95a250701c | |||
| 17552ceda7 | |||
| 40d4135e72 | |||
| 969d5c4e84 | |||
| ecb8c2d04f | |||
| 1136c8912a | |||
| c392a31a76 | |||
| 64f6520d65 | |||
| 5367a710f0 | |||
| fbc0e1f20e | |||
| 594a18b54c | |||
| a42bea93ae | |||
| 21017c7d12 | |||
| 73ebf57e43 | |||
| ae538c4ea9 | |||
| 814b308637 | |||
| 67a5514cd4 | |||
| 9755604d51 | |||
| 0753676c9a | |||
| e2514583c4 | |||
| 12c5ea4c9a | |||
| 2d74d995c5 | |||
| b26e07fa59 | |||
| e0765a831f | |||
| 372524a6de | |||
| e17f1f2367 | |||
| 666aa64a1b | |||
| 1ab50f8f87 | |||
| d2876f2e17 | |||
| 8aa8f8c551 | |||
| 0ac91e748d | |||
| 3e2fc9b371 | |||
| ed62fc36e3 | |||
| 6b85837712 | |||
| 62fbf03aad | |||
| 94d3c5ea5c | |||
| 9b5a92f77a | |||
| c0711870af | |||
| a43b6174e8 | |||
| f9ceb402da | |||
| f93b5b19f8 | |||
| a7c0413f69 | |||
| 96b833f57a | |||
| 3183aff512 | |||
| 26c51a5dbe | |||
| 4cf16bcf8f | |||
| 8fecbe71f5 | |||
| b6cbaebf02 |
.eslintrc.json (new file, 3 lines)
@@ -0,0 +1,3 @@
{
  "extends": "next/core-web-vitals"
}

.github/dependabot.yml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
---
version: 2
updates:

  - package-ecosystem: npm
    directory: /
    schedule:
      interval: monthly

  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: monthly

.github/workflows/build.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
---
name: Build static site

on: [push, pull_request]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup node
        uses: actions/setup-node@v4
        with:
          node-version: "20"
          cache: "npm"

      - name: Install dependencies
        run: npm install

      - name: Build with next.js
        run: npm run build

.github/workflows/nextjs.yml (vendored, 72 lines changed)
@@ -1,18 +1,11 @@
# Sample workflow for building and deploying a Next.js site to GitHub Pages
#
# To get started with Next.js see: https://nextjs.org/docs/getting-started
#
---
name: Deploy Next.js site to Pages

on:
  # Runs on pushes targeting the default branch
  push:
    branches: ["main"]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
# Sets permissions of the GITHUB_TOKEN to allow deployment to gitHub pages
permissions:
  contents: read
  pages: write
@@ -25,44 +18,22 @@ concurrency:
  cancel-in-progress: false

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Detect package manager
        id: detect-package-manager
        run: |
          if [ -f "${{ github.workspace }}/yarn.lock" ]; then
            echo "manager=yarn" >> $GITHUB_OUTPUT
            echo "command=install" >> $GITHUB_OUTPUT
            echo "runner=yarn" >> $GITHUB_OUTPUT
            exit 0
          elif [ -f "${{ github.workspace }}/package.json" ]; then
            echo "manager=npm" >> $GITHUB_OUTPUT
            echo "command=ci" >> $GITHUB_OUTPUT
            echo "runner=npx --no-install" >> $GITHUB_OUTPUT
            exit 0
          else
            echo "Unable to determine package manager"
            exit 1
          fi
      - name: Setup Node
        uses: actions/setup-node@v3
        uses: actions/checkout@v4

      - name: Setup node
        uses: actions/setup-node@v4
        with:
          node-version: "16"
          cache: ${{ steps.detect-package-manager.outputs.manager }}
      - name: Setup Pages
        uses: actions/configure-pages@v3
        with:
          # Automatically inject basePath in your Next.js configuration file and disable
          # server side image optimization (https://nextjs.org/docs/api-reference/next/image#unoptimized).
          #
          # You may remove this line if you want to manage the configuration yourself.
          static_site_generator: next
          node-version: "20"
          cache: "npm"

      - name: Setup pages
        uses: actions/configure-pages@v5
      - name: Restore cache
        uses: actions/cache@v3
        uses: actions/cache@v4
        with:
          path: |
            .next/cache
@@ -71,20 +42,21 @@ jobs:
          # If source files changed but packages didn't, rebuild from a prior cache.
          restore-keys: |
            ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json', '**/yarn.lock') }}-

      - name: Install dependencies
        run: ${{ steps.detect-package-manager.outputs.manager }} ${{ steps.detect-package-manager.outputs.command }}
      - name: Build with Next.js
        run: ${{ steps.detect-package-manager.outputs.runner }} next build
      - name: Static HTML export with Next.js
        run: ${{ steps.detect-package-manager.outputs.runner }} next export
        run: npm install

      - name: Build with next.js
        run: npm run build

      - name: nojekyll
        run: touch ./out/.nojekyll

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v2
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./out

  # Deployment job
  deploy:
    environment:
      name: github-pages
@@ -92,6 +64,6 @@ jobs:
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
      - name: Deploy to gitHub pages
        id: deployment
        uses: actions/deploy-pages@v2
        uses: actions/deploy-pages@v4

LICENSE (4 lines changed)
@@ -208,7 +208,7 @@ If you develop a new program, and you want it to be of the greatest possible use

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.

ocp-app-delivery-workshop
workshops
Copyright (C) 2023 jmhbnz

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
@@ -221,7 +221,7 @@ Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

ocp-app-delivery-workshop Copyright (C) 2023 jmhbnz
workshops Copyright (C) 2023 jmhbnz
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

README.org (11 lines changed)
@@ -1,11 +1,10 @@
#+TITLE: OpenShift Application Delivery Workshop
#+TITLE: Workshops
#+AUTHOR: James Blair
#+DATE: <2023-12-04 Mon>

This repository contains a basic [[https://nextjs.org/][nextjs]] frontend designed to be exported as a static site and served via [[https://pages.github.com/][github pages]].

The frontend contains a walkthrough for workshop designed to be run over three hours covering basic application delivery, monitoring and security on OpenShift. The workshop content is tailored towards participants that are new to containers and kubernetes and will not be developing applications but will instead be expected to deliver existing applications onto OpenShift.
This mono repo contains a basic [[https://nextjs.org/][nextjs]] frontend designed to be exported as a static site and served via [[https://pages.github.com/][github pages]].

The frontend is used to serve workshop instructions for custom hands on workshops I have created.

** Local development

@@ -16,7 +15,7 @@ To set up a local development environment run the following:
npm install

# Build and serve the site
npm run build && npm run serve
npm run build && npm run dev
#+end_src

@@ -29,5 +28,5 @@ To export the site to static html to serve for example via github pages, run:
npm install

# Build and export the site
npm run build && npm export
npm run build
#+end_src

@@ -10,7 +10,7 @@ const GAScript = () => {
        src={`https://www.googletagmanager.com/gtag/js?id=${siteMetadata.analytics.googleAnalyticsId}`}
      />

      <Script strategy="lazyOnload">
      <Script id="GoogleAnalytics" strategy="lazyOnload">
        {`
            window.dataLayer = window.dataLayer || [];
            function gtag(){dataLayer.push(arguments);}

@@ -10,7 +10,7 @@ const PlausibleScript = () => {
        data-domain={siteMetadata.analytics.plausibleDataDomain}
        src="https://plausible.io/js/plausible.js"
      />
      <Script strategy="lazyOnload">
      <Script id="PlausibleAnalytics" strategy="lazyOnload">
        {`
            window.plausible = window.plausible || function() { (window.plausible.q = window.plausible.q || []).push(arguments) }
        `}

@@ -3,7 +3,7 @@ import Script from 'next/script'
const SimpleAnalyticsScript = () => {
  return (
    <>
      <Script strategy="lazyOnload">
      <Script id="SimpleAnalytics" strategy="lazyOnload">
        {`
            window.sa_event=window.sa_event||function(){var a=[].slice.call(arguments);window.sa_event.q?window.sa_event.q.push(a):window.sa_event.q=[a]};
        `}

data/app-delivery/README.org (new file, 106 lines)
@@ -0,0 +1,106 @@
#+TITLE: OpenShift Workshops
#+AUTHOR: James Blair
#+DATE: <2024-07-24 Wed>

This repository contains a basic [[https://nextjs.org/][nextjs]] frontend designed to be exported as a static site and served via [[https://pages.github.com/][github pages]].

The frontend is used to serve workshop instructions for several workshops.


* Setting up a cluster for the workshop

The workshop expects an OpenShift 4.14 cluster with a few pre-requisites.


** Add redhat-cop helm chart repository

Required so the Gitea helm chart will be available for all users.

#+begin_src bash
cat << EOF | oc apply --filename -
apiVersion: helm.openshift.io/v1beta1
kind: HelmChartRepository
metadata:
  name: redhat-cop
spec:
  connectionConfig:
    url: 'https://redhat-cop.github.io/helm-charts'
  name: Red Hat Community
EOF
#+end_src


** Install web terminal operator

So our workshop participants don't need to install ~oc~ locally.

#+begin_src bash
cat << EOF | oc apply --filename -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: web-terminal
  namespace: openshift-operators
spec:
  channel: fast
  installPlanApproval: Automatic
  name: web-terminal
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOF
#+end_src


** Create an operatorgroup for each user

We want each user to be able to install the same operator so we can pre-create namespaces and seed them with OperatorGroups to reduce complexity.

#+begin_src bash
cat << EOF > operatorgroup.yaml
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: grafana-og
spec:
  targetNamespaces:
    - user$user
  upgradeStrategy: Default
EOF

for user in $(seq 1 30); do
    export user=${user}
    oc new-project user"${user}"
    envsubst < operatorgroup.yaml | oc create --filename - --namespace user"${user}"
    oc adm policy add-role-to-user --namespace user"${user}" admin user"${user}"
done
#+end_src


** Install openshift gitops operator

Each user will deploy their own argocd instance so we need to install the openshift gitops operator for all namespaces.

#+begin_src bash
cat << EOF | oc apply --filename -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: openshift-gitops-operator
  namespace: openshift-gitops-operator
spec:
  channel: latest
  installPlanApproval: Automatic
  name: openshift-gitops-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
  startingCSV: openshift-gitops-operator.v1.13.0
EOF
#+end_src


** Scale cluster worker nodes

We are going to have ~25 workshop attendees all deploying applications, let's ensure the cluster has enough capacity to handle it!

#+begin_src bash
oc scale machineset cluster-xxz98-mk8x7-worker-ap-southeast-1b -n openshift-machine-api --replicas 10
#+end_src
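

** Verify the cluster setup

A quick optional check that the steps above took effect. This is a rough sketch only; the resource names are the ones used in the snippets above and may differ for your cluster.

#+begin_src bash
# Helm chart repository and operator subscription
oc get helmchartrepository redhat-cop
oc get subscription web-terminal --namespace openshift-operators

# A sample user namespace and its operator group
oc get operatorgroup --namespace user1

# Worker node scale up
oc get machineset --namespace openshift-machine-api
oc get nodes
#+end_src
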

data/app-delivery/exercise1.mdx (new file, 182 lines)
@@ -0,0 +1,182 @@
---
title: Getting familiar with OpenShift
exercise: 1
date: '2023-12-04'
tags: ['openshift','containers','kubernetes']
draft: false
authors: ['default']
summary: "In this first exercise we'll get familiar with OpenShift."
---

Red Hat [OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. In this first exercise we'll get logged into our cluster and familiarise ourselves with the OpenShift web console and web terminal.

The OpenShift Container Platform web console is a feature-rich user interface with both an **Administrator** perspective and a **Developer** perspective accessible through any modern web browser. You can use the web console to visualize, browse, and manage your OpenShift cluster and the applications running on it.

In addition to the web console, OpenShift includes command line tools to provide users with a nice interface to work with applications deployed to the platform. The `oc` command line tool is available for Linux, macOS or Windows.

**Let's get started!**

## 1.1 - Login to lab environment

An OpenShift `4.16` cluster has already been provisioned for you to complete these exercises. Open your web browser and navigate to the workshop login page https://demo.redhat.com/workshop/qrz23h.

Once the page loads you can login with the details provided by your workshop facilitator.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Workshop login page* |
</Zoom>

## 1.2 - Login to the cluster web console

Once you're logged into the lab environment we can open up the OpenShift web console and login with the credentials provided.

When first logging in you will be prompted to take a tour of the **Developer** console view, let's do that now.

<Zoom>
|  |
|:-----------------------------------------------------------------------------:|
| *Developer perspective web console tour* |
</Zoom>

## 1.3 - Understanding projects

[Projects](https://docs.openshift.com/container-platform/4.16/applications/projects/working-with-projects.html) are a logical boundary to help you organize your applications. An OpenShift project allows a community of users (or a single user) to organize and manage their work in isolation from other projects.

Each project has its own resources, role based access control (who can or cannot perform actions), and constraints (quotas and limits on resources, etc).

Projects act as a "wrapper" around all the application services you (or your teams) are using for your work.

In this lab environment, you already have access to a single project: `userX` (where X is the number of the user allocated to you for the workshop in the previous step).

Let's click into our `Project` from the left hand panel of the **Developer** web console perspective. We should be able to see that our project has no `Deployments` and there are no compute cpu or memory resources currently being consumed.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Developer perspective project view* |
</Zoom>

## 1.4 - Switching between perspectives

Different roles have different needs when it comes to viewing details within the OpenShift web console. At the top of the left navigation menu, you can toggle between the Administrator perspective and the Developer perspective.

Select **Administrator** to switch to the Administrator perspective.

Once the Administrator perspective loads, you should be in the "Home" view and see a wider array of menu sections in the left hand navigation panel.

Switch back to the **Developer** perspective. Once the Developer perspective loads, select the **Topology** view.

Right now, there are no applications or components to view in your `userX` project, but once you begin working on the lab, you’ll be able to visualize and interact with the components in your application here.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Switching web console perspectives* |
</Zoom>

## 1.5 - Launching a web terminal

While web interfaces are comfortable and easy to use, sometimes we want to quickly run more advanced commands to get things done. That is where the `oc` command line utility comes in.

One handy feature of the OpenShift web console is that we can launch a web terminal, a browser based terminal that already has the `oc` command logged in and ready to use.

Let's launch a web terminal now by clicking the terminal button in the top right hand corner and then clicking **Start** with our `userX` project selected.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Launching your web terminal* |
</Zoom>

## 1.6 - Running oc commands

The [`oc` command line utility](https://docs.openshift.com/container-platform/4.16/cli_reference/openshift_cli/getting-started-cli.html) is a superset of the upstream kubernetes `kubectl` command line utility. This means it can do everything that `kubectl` can do, plus some additional OpenShift specific commands.

Let's try a few commands now:

### Checking our current project

Most actions we take in OpenShift will be in relation to a particular project. We can check which project we are currently actively using by running the `oc project` command.

We should see output similar to below showing we are currently using our `userX` project:

```bash
bash-4.4 ~ $ oc project
Using project "user1" from context named "user1-context" on server "https://172.31.0.1:443".
```

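If you'd like to poke around a little more, the following read-only commands are safe to try at this point. This is just an optional sketch; the exact output will differ for your user and cluster:

```bash
# Confirm who you are logged in as and which API server you are talking to
oc whoami
oc whoami --show-server

# List the resources currently in your project
oc get pods
oc status
```
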
### Getting help and explaining concepts

As with any command line utility, there can be complexity that quickly surfaces. Thankfully the `oc` command line utility has excellent built in help.

Let's take a look at that now.

To get an understanding of all the options available, try running `oc help`. You should see options similar to the below sample:

```text
bash-4.4 ~ $ oc help
OpenShift Client

This client helps you develop, build, deploy, and run your applications on any
OpenShift or Kubernetes cluster. It also includes the administrative
commands for managing a cluster under the 'adm' subcommand.

Basic Commands:
  login           Log in to a server
  new-project     Request a new project
  new-app         Create a new application
  status          Show an overview of the current project
  project         Switch to another project
  projects        Display existing projects
  explain         Get documentation for a resource

Build and Deploy Commands:
  rollout         Manage a Kubernetes deployment or OpenShift deployment config
  rollback        Revert part of an application back to a previous deployment
  new-build       Create a new build configuration
  start-build     Start a new build
  cancel-build    Cancel running, pending, or new builds
  import-image    Import images from a container image registry
  tag             Tag existing images into image streams

```

To get a more detailed explanation about a specific concept we can use the `oc explain` command.

Let's run `oc explain project` now to learn more about the concept of a project we introduced earlier:

```text
bash-4.4 ~ $ oc explain project
KIND:     Project
VERSION:  project.openshift.io/v1

DESCRIPTION:
     Projects are the unit of isolation and collaboration in OpenShift. A
     project has one or more members, a quota on the resources that the project
     may consume, and the security controls on the resources in the project.
     Within a project, members may have different roles - project administrators
     can set membership, editors can create and manage the resources, and
     viewers can see but not access running containers. In a normal cluster
     project administrators are not able to alter their quotas - that is
     restricted to cluster administrators.

     Listing or watching projects will return only projects the user has the
     reader role on.

     An OpenShift project is an alternative representation of a Kubernetes
     namespace. Projects are exposed as editable to end users while namespaces
     are not. Direct creation of a project is typically restricted to
     administrators, while end users should use the requestproject resource.
```

That's a quick introduction to the `oc` command line utility. Let's close our web terminal now so we can move on to the next exercise.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Closing your web terminal* |
</Zoom>

Well done, you're now ready to move on to Exercise 2 and deploy an application! 🎉

data/app-delivery/exercise2.mdx (new file, 123 lines)
@@ -0,0 +1,123 @@
---
title: Deploying your first application
exercise: 2
date: '2023-12-05'
tags: ['openshift','containers','kubernetes','deployments','images']
draft: false
authors: ['default']
summary: "Time to deploy your first app!"
---

Now that we have had a tour of the OpenShift web console, let's use it to deploy our first application.

Let’s start by doing the simplest thing possible - get a plain old [OCI](https://opencontainers.org) format container image to run on OpenShift. This is incredibly simple to do. With OpenShift it can be done directly from the web console.

Before we begin, if you would like a bit more background on what a container is or why containers are important, click the following link to learn more: https://www.redhat.com/en/topics/containers#overview

## 2.1 - Deploying the container image

In this exercise, we’re going to deploy the **web** component of the ParksMap application, which uses OpenShift's service discovery mechanism to discover any accompanying backend services deployed and show their data on the map. Below is a visual overview of the complete ParksMap application.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *ParksMap application architecture* |
</Zoom>

Within the **Developer** perspective, click the **+Add** entry on the left hand menu.

Once on the **+Add** page, click **Container images** to open a dialog that will allow you to quickly deploy an image.

In the **Image name** field enter the following:

```text
quay.io/openshiftroadshow/parksmap:latest
```

Leave all other fields at their defaults (but take your time to scroll down and review each one to familiarise yourself! 🎓)

Click **Create** to deploy the application.

OpenShift will create a [`Deployment`](https://docs.openshift.com/container-platform/4.16/applications/deployments/what-deployments-are.html) that will pull this container image if it does not exist already on the cluster, and create a [`Pod`](https://docs.openshift.com/container-platform/4.16/nodes/pods/nodes-pods-using.html) that our container will run inside. You will be taken back to the **Topology** view in the **Developer** perspective which will show the new "Parksmap" application.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Deploying the container image* |
</Zoom>

## 2.2 - Reviewing our deployed application

If you click on the **parksmap** entry in the **Topology** view, you will see some information about that deployed application.

The **Resources** tab may be displayed by default. If so, click on the **Details** tab. On that tab, you will see that there is a single `Pod` that was created by your actions.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Deploying the container image* |
</Zoom>

> Note: A pod is the smallest deployable unit in Kubernetes and is effectively a grouping of one or more individual containers. Any containers deployed within a pod are guaranteed to run on the same machine. It is very common for pods in kubernetes to only hold a single container, although sometimes auxiliary services can be included as additional containers in a pod when we want them to run alongside our application container.
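
> Tip: The same deployment can also be created from the web terminal if you prefer the command line. The snippet below is only a rough sketch of an equivalent to the console steps above (the name is illustrative, and the console additionally sets some topology labels):

```bash
# Deploy the image and expose it outside the cluster
oc new-app quay.io/openshiftroadshow/parksmap:latest --name parksmap
oc expose service/parksmap
```
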
## 2.3 - Accessing the application

Now that we have the ParksMap application deployed, how do we access it?

This is where OpenShift [`Routes`](https://docs.openshift.com/container-platform/4.16/networking/routes/route-configuration.html) and [`Services`](https://docs.openshift.com/container-platform/4.16/rest_api/network_apis/service-v1.html) come in.

While **Services** provide internal abstraction and load balancing within an OpenShift cluster, sometimes clients outside of the OpenShift cluster need to access an application. The way that external clients are able to access applications running in OpenShift is through an OpenShift **Route**.

You may remember that when we deployed the ParksMap application, there was a checkbox ticked to automatically create a **Route**. Thanks to this, all we need to do to access the application is go to the **Resources** tab of the application details pane and click the url shown under the **Routes** header.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Opening ParksMap application Route* |
</Zoom>

Clicking the link you should now see the ParksMap application frontend 🎉

> Note: If this is the first time opening this page, the browser will ask permission to get your position. This is needed by the frontend app to center the world map to your location; if you don’t allow it, it will just use a default location.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *ParksMap application frontend* |
</Zoom>

## 2.4 - Checking application logs

If we deploy an application and something isn't working the way we expect, reviewing the application logs can often be helpful. OpenShift includes built in support for reviewing application logs.

Let's try it now for our ParksMap frontend.

In the **Developer** perspective, open the **Topology** view.

Click your "Parksmap" application icon then click on the **Resources** tab.

From the **Resources** tab click **View logs**.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Accessing the ParksMap application logs* |
</Zoom>
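
> Tip: Logs are also available from the web terminal. A minimal sketch, assuming the `parksmap` deployment created earlier:

```bash
# Stream the logs of the parksmap deployment
oc logs deployment/parksmap --follow
```
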
## 2.5 - Checking application resource usage

Another essential element of supporting applications on OpenShift is understanding what resources the application is consuming, for example cpu, memory, network bandwidth and storage io.

OpenShift includes built in support for reviewing application resource usage. Let's take a look at that now.

In the **Developer** perspective, open the **Observe** view. You should see the **Dashboard** tab.

How much cpu and memory is your ParksMap application currently using?

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Checking the ParksMap application resource usage* |
</Zoom>

You've finished exercise 2, awesome! 🎉

data/app-delivery/exercise3.mdx (new file, 117 lines)
@@ -0,0 +1,117 @@
---
title: Scaling and self-healing applications
exercise: 3
date: '2023-12-06'
tags: ['openshift','containers','kubernetes','deployments','autoscaling']
draft: false
authors: ['default']
summary: "Let's scale our application up 📈"
---

We have our application deployed, let's scale it up to make sure it will be resilient to failures.

While **Services** provide discovery and load balancing for **Pods**, the higher level **Deployment** resource specifies how many replicas (pods) of our application will be created and is a simplistic way to configure scaling for the application.

> Note: To learn more about **Deployments** refer to this [documentation](https://docs.openshift.com/container-platform/4.16/applications/deployments/what-deployments-are.html).

## 3.1 - Reviewing the parksmap deployment

Let's start by confirming how many `replicas` we currently specify for our ParksMap application. We'll also use this exercise step to take a look at how all resources within OpenShift can be viewed and managed as [YAML](https://www.redhat.com/en/topics/automation/what-is-yaml) formatted text files, which is extremely useful for more advanced automation and GitOps concepts.

Start in the **Topology** view of the **Developer** perspective.

Click on your "Parksmap" application icon and click on the **D parksmap** deployment name at the top of the right hand panel.

From the **Deployment details** view we can click on the **YAML** tab and scroll down to confirm that we only specify `1` replica for the ParksMap application currently.

```yaml
spec:
  replicas: 1
```

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *ParksMap application deployment replicas* |
</Zoom>

## 3.2 - Intentionally crashing the application

With our ParksMap application currently having only one pod replica, it will not be tolerant to failures. OpenShift will automatically restart the single pod if it encounters a failure, however during the time the application pod takes to start back up our users will not be able to access the application.

Let's see that in practice by intentionally causing an error in our application.

Start in the **Topology** view of the **Developer** perspective and click your Parksmap application icon.

In the **Resources** tab of the information pane open a second browser tab showing the ParksMap application **Route** that we explored in the previous exercise. The application should be running as normal.

Click on the pod name under the **Pods** header of the **Resources** tab and then click on the **Terminal** tab. This will open a terminal within our running ParksMap application container.

Inside the terminal run the following to intentionally crash the application:

```bash
kill 1
```

The pod will automatically be restarted by OpenShift, however if you refresh your second browser tab with the application **Route** you should be able to see that the application is momentarily unavailable.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Intentionally crashing the ParksMap application* |
</Zoom>

## 3.3 - Scaling up the application

As a best practice, wherever possible we should try to run multiple replicas of our pods so that if one pod is unavailable our application will continue to be available to users.

Let's scale up our application and confirm it is now fault tolerant.

In the **Topology** view of the **Developer** perspective click your Parksmap application icon.

In the **Details** tab of the information pane click the **^ Increase the pod count** arrow to increase our replicas to `2`. You will see the second pod starting up and becoming ready.

> Note: You can also scale the replicas of a deployment in automated and event driven fashions in response to factors like incoming traffic or resource consumption, or by using the `oc` cli, for example `oc scale --replicas=2 deployment/parksmap`.

Once the new pod is ready, repeat the steps from task `3.2` to crash one of the pods. You should see that the application continues to serve traffic thanks to our OpenShift **Service** load balancing traffic to the second **Pod**.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Scaling up the ParksMap application* |
</Zoom>
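
> Tip: As mentioned in the note above, the same scale up can be done from the web terminal. A minimal sketch, assuming the `parksmap` deployment from exercise 2:

```bash
# Scale to two replicas and watch the new pod come up
oc scale deployment/parksmap --replicas=2
oc get pods --watch
```
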
## 3.4 - Self healing to desired state

In the previous example we saw what happened when we intentionally crashed our application. Let's see what happens if we just outright delete one of our ParksMap application's two **Pods**.

For this step we'll use the `oc` command line utility to build some more familiarity.

Let's start by launching back into our web terminal now by clicking the terminal button in the top right hand corner and then clicking **Start** with our `userX` project selected.

Once our terminal opens let's check our list of **Pods** with `oc get pods`. You should see something similar to the output below:

```bash
bash-4.4 ~ $ oc get pods
NAME                                         READY   STATUS    RESTARTS   AGE
parksmap-ff7477dc4-2nxd2                     1/1     Running   0          79s
parksmap-ff7477dc4-n26jl                     1/1     Running   0          31m
workspace45c88f4d4f2b4885-74b6d4898f-57dgh   2/2     Running   0          108s
```

Copy one of the pod names and delete it via `oc delete pod <podname>`, e.g. `oc delete pod parksmap-ff7477dc4-2nxd2`.

```bash
bash-4.4 ~ $ oc delete pod parksmap-ff7477dc4-2nxd2
pod "parksmap-ff7477dc4-2nxd2" deleted
```

If we now run `oc get pods` again we will see a new **Pod** has automatically been created by OpenShift to replace the one we fully deleted. This is because OpenShift is a container orchestration engine that will always try and enforce the desired state that we declare.

In our ParksMap **Deployment** we have declared that we want two replicas of our application running at all times. Even if we (possibly accidentally) delete one, OpenShift will always attempt to self heal to return to our desired state.

## 3.5 - Bonus objective: Autoscaling

Before moving on feel free to take a moment to review the concepts of [HorizontalPodAutoscaling](https://docs.openshift.com/container-platform/4.16/nodes/pods/nodes-pods-autoscaling.html), [VerticalPodAutoscaling](https://docs.openshift.com/container-platform/4.16/nodes/pods/nodes-pods-vertical-autoscaler.html) and [Cluster autoscaling](https://docs.openshift.com/container-platform/4.16/machine_management/applying-autoscaling.html).
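
If you want to try autoscaling hands on, one possible starting point is the `oc autoscale` command. This is only a sketch: CPU based autoscaling also requires the deployment to have CPU resource requests set, which the parksmap deployment may not have by default.

```bash
# Create a HorizontalPodAutoscaler that keeps between 2 and 5 replicas,
# targeting 80% average CPU utilisation
oc autoscale deployment/parksmap --min=2 --max=5 --cpu-percent=80

# Review the autoscaler that was created
oc get horizontalpodautoscaler
```
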
Well done, you've finished exercise 3! 🎉

data/app-delivery/exercise4.mdx (new file, 133 lines)
@@ -0,0 +1,133 @@
---
title: Deploying an application via helm chart
exercise: 4
date: '2023-12-06'
tags: ['openshift','containers','kubernetes','deployments','helm']
draft: false
authors: ['default']
summary: "Exploring alternative deployment approaches."
---

In **Exercise 2** we deployed our ParksMap application in the most simplistic way: just throwing an individual container image at the cluster via the web console and letting OpenShift automate everything else for us.

With more complex applications comes the need to more finely customise the details of our application **Deployments** along with any other associated resources the application requires.

Enter the [**Helm**](https://www.redhat.com/en/topics/devops/what-is-helm) project, which can package up our application resources and distribute them as something called a **Helm chart**.

In simple terms, a **Helm chart** is basically a directory containing a collection of YAML template files, which is zipped into an archive. However the `helm` command line utility has a lot of additional features and is good for customising and overriding specific values in our application templates when we deploy them onto our cluster, as well as easily deploying, upgrading or rolling back our application.

## 4.1 - Deploying a helm chart via the web console

It is common for organisations that produce and ship applications to provide them to other organisations as a **Helm chart**.

Let's get started by deploying a **Helm chart** for the [Gitea](https://about.gitea.com) application, which is a git oriented devops platform similar to GitHub or GitLab.

Start in the **+Add** view of the **Developer** perspective.

Scroll down and click the **Helm chart** tile. OpenShift includes a visual catalog for any helm chart repositories your cluster has available; for this exercise we will search for **Gitea**.

Click on the search result and click **Create**.

In the YAML configuration window enter the following, substituting `userX` with your assigned user, and then click **Create** once more.

```yaml
db:
  password: userX
hostname: userX-gitea.apps.cluster-xxz98.xxz98.sandbox619.opentlc.com
tlsRoute: true
```

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Gitea application deployment via helm chart* |
</Zoom>
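
> Tip: The same release can be installed from the web terminal with the `helm` command line utility. The sketch below assumes the Gitea chart comes from the redhat-cop chart repository that was added to the cluster for this workshop, and mirrors the values entered above:

```bash
# Add the chart repository locally and install the release into your project
helm repo add redhat-cop https://redhat-cop.github.io/helm-charts
helm repo update
helm install gitea redhat-cop/gitea \
  --namespace userX \
  --set db.password=userX \
  --set hostname=userX-gitea.apps.cluster-xxz98.xxz98.sandbox619.opentlc.com \
  --set tlsRoute=true
```
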
## 4.2 - Examine deployed application

Returning to the **Topology** view of the **Developer** perspective you will now see the Gitea application being deployed in your `userX` project (this can take a few minutes to complete). Notice how the application is made up of two separate pods, the `gitea-db` database and the `gitea` frontend web server.

Once your gitea pods are both running open the **Route** for the `gitea` web frontend and confirm you can see the application web interface.

Next, if we click on the overall gitea **Helm release** by clicking on the shaded box surrounding our two Gitea pods we can see the full list of resources deployed by this helm chart, which in addition to the two running pods includes the following:

- 1 **ConfigMap**
- 1 **ImageStream**
- 2 **PersistentVolumeClaims**
- 1 **Route**
- 1 **Secret**
- 2 **Services**

> Note: Feel free to try out an `oc explain <resource>` command in your web terminal to learn more about each of the resource types mentioned above, for example `oc explain service`.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Gitea helm release resources created* |
</Zoom>

## 4.3 - Upgrade helm chart

If we want to make a change to the configuration of our Gitea application we can perform a `helm upgrade`. OpenShift has built in support to perform helm upgrades through the web console.

Start in the **Helm** view of the **Developer** perspective.

In the **Helm Releases** tab you should see one release called `gitea`.

Click the three dot menu to the right hand side of that helm release and click **Upgrade**.

Now let's intentionally modify the `hostname:` field in the yaml configuration to `hostname: bogushostname.example.com` and click **Upgrade**.

We will be returned to the **Helm releases** view. Notice how the release status is now Failed (due to our bogus configuration), however the previous release of the application is still running. OpenShift has validated the helm release, determined the updates will not work, and prevented the release from proceeding.

From here it is trivial to perform a **Rollback** to remove our misconfigured update. We'll do that in the next step.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Attempting a gitea helm upgrade* |
</Zoom>

## 4.4 - Rollback to a previous helm release

Our previous helm upgrade for the Gitea application didn't succeed due to the misconfiguration we supplied. **Helm** has features for rolling back to a previous release through the `helm rollback` command line interface. OpenShift has made this even easier by adding native support for interactive rollbacks in the OpenShift web console, so let's give that a go now.

Start in the **Helm** view of the **Developer** perspective.

In the **Helm Releases** tab you should see one release called `gitea`.

Click the three dot menu to the right hand side of that helm release and click **Rollback**.

Select the radio button for revision `1` which should be showing a status of `Deployed`, then click **Rollback**.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Rolling back to a previous gitea helm release* |
</Zoom>
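
> Tip: The equivalent rollback from the web terminal is a couple of `helm` commands. A minimal sketch, assuming the `gitea` release in your `userX` project:

```bash
# Review the release history, roll back to revision 1, then confirm the status
helm history gitea --namespace userX
helm rollback gitea 1 --namespace userX
helm status gitea --namespace userX
```
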
## 4.5 - Deleting an application deployed via helm

Along with upgrades and rollbacks **Helm** also makes deleting deployed applications (along with all of their associated resources) straightforward.

Before we move on to exercise 5 let's delete the gitea application.

Start in the **Helm** view of the **Developer** perspective.

In the **Helm Releases** tab you should see one release called `gitea`.

Click the three dot menu to the right hand side of that helm release and click **Delete Helm Release**.

Enter `gitea` at the confirmation prompt and click **Delete**. If you now return to the **Topology** view you will see the gitea application deleting.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Deleting the gitea application helm release* |
</Zoom>

## 4.6 - Bonus objective: Artifact Hub

If you have time, take a while to explore https://artifacthub.io/packages/search to see the kinds of applications available in Artifact Hub, the most popular publicly available Helm chart repository.

You've finished exercise 4, time to deploy an application with an Operator! 🎉

data/app-delivery/exercise5.mdx (new file, 136 lines)
@@ -0,0 +1,136 @@
---
title: Deploying an application via operator
exercise: 5
date: '2023-12-06'
tags: ['openshift','containers','kubernetes','operator-framework']
draft: false
authors: ['default']
summary: "Exploring alternative deployment approaches."
---

Another alternative approach for deploying and managing the lifecycle of more complex applications is via the [Operator Framework](https://operatorframework.io).

The goal of an **Operator** is to put operational knowledge into software. Previously this knowledge only resided in the minds of administrators, various combinations of shell scripts or automation software like Ansible. It was outside of your Kubernetes cluster and hard to integrate. **Operators** change that.

**Operators** are the missing piece of the puzzle in Kubernetes to implement and automate common Day-1 (installation, configuration, etc.) and Day-2 (re-configuration, update, backup, failover, restore, etc.) activities in a piece of software running inside your Kubernetes cluster, by integrating natively with Kubernetes concepts and APIs.

With Operators you can stop treating an application as a collection of primitives like **Pods**, **Deployments**, **Services** or **ConfigMaps**, and instead treat it as a singular, simplified custom object that only exposes the configuration values that make sense for that specific application.

## 5.1 - Deploying an operator

Deploying an application via an **Operator** is generally a two step process. The first step is to deploy the **Operator** itself.

Once the **Operator** is installed we can deploy the application.

For this exercise we will install the **Operator** for the [Grafana](https://grafana.com) observability platform.

Let's start in the **Topology** view of the **Developer** perspective.

Copy the following YAML snippet to your clipboard:

```yaml
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: grafana-operator
  namespace: userX
spec:
  channel: v5
  installPlanApproval: Automatic
  name: grafana-operator
  source: community-operators
  sourceNamespace: openshift-marketplace
```

Click the **+** button in the top right corner menu bar of the OpenShift web console. This is a fast way to quickly import snippets of YAML for testing or exploration purposes.

Paste the above snippet of YAML into the editor and replace the instance of `userX` with your assigned user.

Click **Create**. In a minute or so you should see the Grafana operator installed and running in your project.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Deploying grafana operator via static yaml* |
</Zoom>
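
> Tip: You can confirm the operator installation from the web terminal too. A minimal sketch, assuming the subscription above was created in your `userX` project:

```bash
# Check the subscription, install plan and resulting cluster service version
oc get subscription,installplan,csv --namespace userX
```
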
## 5.2 - Deploying an operator driven application

With our Grafana operator now running it will be listening for the creation of a `grafana` custom resource. When one is detected the operator will deploy the Grafana application according to the specification we supplied.

Let's switch over to the **Administrator** perspective for this next task to deploy our Grafana instance.

Under the **Operators** category in the left hand menu click on **Installed Operators**.

In the **Installed Operators** list you should see a **Grafana Operator** entry, click into that.

On the **Operator details** screen you will see a list of "Provided APIs". These are custom resource types that we can now deploy with the help of the operator.

Click on **Create instance** under the provided API titled `Grafana`.

On the next **Create Grafana** screen click on the **YAML View** radio button and enter the following, replacing the two instances of `userX` with your assigned user, then click **Create**.

```yaml
apiVersion: grafana.integreatly.org/v1beta1
kind: Grafana
metadata:
  labels:
    dashboards: grafana
    folders: grafana
  name: grafana
  namespace: userX
spec:
  config:
    auth:
      disable_login_form: 'false'
    log:
      mode: console
    security:
      admin_password: example
      admin_user: example
  route:
    spec:
      tls:
        termination: edge
      host: grafana-userX.apps.cluster-xxz98.xxz98.sandbox619.opentlc.com
```

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Deploying grafana application via the grafana operator* |
</Zoom>

## 5.3 - Logging into the application

While we are in the **Administrator** perspective of the web console let's take a look at a couple of sections to confirm our newly deployed Grafana application is running as expected.

For our first step click on the **Workloads** category on the left hand side menu and then click **Pods**.

We should see a `grafana-deployment-<id>` pod with a **Status** of `Running`.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Confirming the grafana pod is running* |
</Zoom>

Now that we know the Grafana application **Pod** is running let's open the application and confirm we can log in.

Click the **Networking** category on the left hand side menu and then click **Routes**.

Click the **Route** named `grafana-route` and open the url on the right hand side under the **Location** header.

Once the new tab opens we should be able to login to Grafana using the credentials we supplied in the previous step in the YAML configuration.

<Zoom>
| |
|:-------------------------------------------------------------------:|
| *Confirming the grafana route is working* |
</Zoom>
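
> Tip: The same checks can be run from the web terminal. A minimal sketch, assuming the `grafana` instance and `grafana-route` created above in your `userX` project:

```bash
# Check the custom resource, the pod it created, and the route hostname
oc get grafana grafana --namespace userX
oc get pods --namespace userX
oc get route grafana-route --namespace userX
```
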
## 5.4 - Bonus objective: Grafana dashboards

If you have time, take a while to learn about https://grafana.com/grafana/dashboards and how Grafana can be used to visualise just about anything.

Well done, you've finished exercise 5! 🎉
94
data/app-delivery/exercise6.mdx
Normal file
@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Deploying an application from source
|
||||
exercise: 6
|
||||
date: '2023-12-07'
|
||||
tags: ['openshift','containers','kubernetes','s2i','shipwright']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Exploring alternative deployment approaches."
|
||||
---
|
||||
|
||||
Often as a team supporting applications on OpenShift the decision of which deployment method to use will be out of your hands instead be determined by the vendor, organisation or team producing the application in question.
|
||||
|
||||
However, for an interesting scenario let's explore the possibility of what we could do if there is no existing deployment tooling in place and all we are given is a codebase in a git repository.
|
||||
|
||||
This is where the concept of **Source to Image** or "s2i" comes in. OpenShift has built in support for building container images using source code from an existing repository. This is accomplished using the [source-to-image](https://github.com/openshift/source-to-image) project.
|
||||
|
||||
OpenShift runs the S2I process inside a special **Pod**, called a **Build Pod**, and thus builds are subject to quotas, limits, resource scheduling, and other aspects of OpenShift. A full discussion of S2I is beyond the scope of this class, but you can find more information about it in the [OpenShift S2I documentation](https://docs.openshift.com/container-platform/4.16/openshift_images/create-images.html).
|
||||
|
||||
## 6.1 - Starting a source to image build
|
||||
|
||||
Deploying an application via a **Source to Image** build is straightforward. Let's try it out.
|
||||
|
||||
Start in the **+Add** view of the **Developer** perspective.
|
||||
|
||||
Click **Import from Git** under the **Git Repository** tile.
|
||||
|
||||
**Source to Image** supports a number of popular programming languages as the source. For this example we will use **Python**.
|
||||
|
||||
Enter `https://github.com/openshift-roadshow/nationalparks-py.git` for the **Git Repo URL**.
|
||||
|
||||
OpenShift will automatically guess the git server type and the programming language used by the source code. You will now be asked to select an **Import Strategy**. You have three options:
|
||||
|
||||
- Devfile: this will use Devfile v2 spec to create an application stack. The repo has to contain a file named `devfile.yaml` in the Devfile v2 format.
|
||||
|
||||
- Dockerfile: this will create a Container image from an existing Dockerfile.
|
||||
|
||||
- Builder Image: this will use a mechanism called Source-to-Image to automatically create a container image directly from the source code.
|
||||
|
||||
Select **Builder Image** strategy as we are going to create the container image directly from the source code.
|
||||
|
||||
Select **Python** as the **Builder Image** type and **Python 3.8-ubi8** as the **Builder Image Version**.
|
||||
|
||||
Scroll down and under the **General** header click the **Application** drop down and select **Create application** entering **workshop** as the name.
|
||||
|
||||
Scroll down reviewing the other options then click **Create**.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *Creating a source to image build in OpenShift* |
|
||||
</Zoom>
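As an aside, a roughly equivalent build can also be kicked off from the `oc` CLI with `new-app`. This is only a sketch; the builder image stream tag name (`python:3.8-ubi8`) is an assumption and may differ on your cluster:

```bash
# Hedged sketch: trigger a Source to Image build of the same repository from the CLI
oc new-app python:3.8-ubi8~https://github.com/openshift-roadshow/nationalparks-py.git \
  --name nationalparks-py

# Follow the resulting build logs
oc logs -f buildconfig/nationalparks-py
```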
|
||||
|
||||
## 6.2 - Monitoring the build
|
||||
|
||||
To see the build logs, in **Topology** view of the **Developer** perspective, click the nationalparks python icon, then click on **View Logs** in the **Builds** section of the **Resources** tab.
|
||||
|
||||
Based on the application’s language, the build process will be different. However, the initial build will take a few minutes as the dependencies are downloaded. You can see all of this happening in real time!
|
||||
|
||||
From the `oc` command line utility you can also see **Builds**. Let's open our **Web Terminal** back up and take a look:
|
||||
|
||||
```bash
|
||||
oc get builds
|
||||
```
|
||||
|
||||
You will see output similar to the example below:
|
||||
|
||||
```bash
|
||||
NAME TYPE FROM STATUS STARTED DURATION
|
||||
nationalparks-py-git-1 Source Git@f87895b Complete 7 minutes ago 48s
|
||||
```
|
||||
|
||||
Let's also take a look at the logs from the `oc` command line with:
|
||||
|
||||
```bash
|
||||
oc logs -f builds/nationalparks-py-git-1
|
||||
```
|
||||
|
||||
After the build has completed successfully:
|
||||
|
||||
- The S2I process will push the resulting image to the internal OpenShift image registry.
|
||||
|
||||
- The Deployment (D) will detect that the image has changed, and this will cause a new deployment to happen.
|
||||
|
||||
- A ReplicaSet (RS) will be spawned for this new deployment.
|
||||
|
||||
- The ReplicaSet will detect no Pods are running and will cause one to be deployed, as our default replica count is just 1.
|
||||
|
||||
To conclude, when issuing the `oc get pods` command, you will see that the build **Pod** has finished (exited) and that an application **Pod** is in a ready and running state.
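One way to watch that chain of events from the web terminal is sketched below; the deployment name `nationalparks-py-git` is an assumption based on the generated build name and may differ slightly in your project:

```bash
# Wait for the new deployment rollout to finish
oc rollout status deployment/nationalparks-py-git

# The build pod should show Completed while the application pod shows Running
oc get pods
```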
|
||||
|
||||
## 6.3 - Bonus objective: Podman
|
||||
|
||||
If you have time, take a while to understand how [Podman](https://developers.redhat.com/articles/2022/05/02/podman-basics-resources-beginners-and-experts) can be used to build container images on your device outside of an OpenShift cluster.
|
||||
|
||||
Awesome you've finished exercise 6! 🎉
|
||||
90
data/app-delivery/exercise7.mdx
Normal file
@ -0,0 +1,90 @@
|
||||
---
|
||||
title: Optional - Deploying an application via gitops
|
||||
exercise: 7
|
||||
date: '2024-07-25'
|
||||
tags: ['openshift','containers','kubernetes','argocd','gitops']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Keen to explore a more advanced deployment pattern?"
|
||||
---
|
||||
|
||||
|
||||
Now that you've had a taste of most of the more basic deployment methods let's introduce the concept of [GitOps](https://www.redhat.com/en/topics/devops/what-is-gitops) and deploy an application using this more advanced approach.
|
||||
|
||||
In simple terms GitOps uses Git repositories as a single source of truth to deliver applications or infrastructure as code. Whenever you merge or push code into a specific Git branch in a repository, a GitOps continuous delivery tool such as [ArgoCD](https://argo-cd.readthedocs.io/en/stable) can then automatically sync that change to one or more Kubernetes clusters.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *ArgoCD user interface* |
|
||||
</Zoom>
|
||||
|
||||
For many organisations GitOps is the goal deployment methodology, as application definitions, configurations, and environments should ideally be declarative and version controlled. Application deployment and lifecycle management should be automated, auditable, and easy to understand.
|
||||
|
||||
Since 2021 OpenShift has included a fully supported [OpenShift GitOps](https://www.redhat.com/en/blog/announcing-openshift-gitops) operator, based on the upstream ArgoCD project.
|
||||
|
||||
This operator has already been installed on your cluster so let's take it for a spin now! 🚀
|
||||
|
||||
## 7.1 - Deploy openshift gitops
|
||||
|
||||
To get started with OpenShift GitOps we will need an instance of ArgoCD deployed.
|
||||
|
||||
Click the **+** button in the top right corner menu bar of the OpenShift web console. This is a quick way to import snippets of YAML for testing or exploration purposes.
|
||||
|
||||
Paste the below snippet of YAML into the editor and replace the instance of `userX` with your assigned user.
|
||||
|
||||
Click **Create**. In a minute or so you should see the ArgoCD instance running successfully in your project.
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1beta1
|
||||
kind: ArgoCD
|
||||
metadata:
|
||||
finalizers:
|
||||
- argoproj.io/finalizer
|
||||
name: argocd
|
||||
namespace: userX
|
||||
spec:
|
||||
rbac:
|
||||
defaultPolicy: role:admin
|
||||
scopes: '[groups]'
|
||||
server:
|
||||
route:
|
||||
enabled: true
|
||||
sso:
|
||||
dex:
|
||||
openShiftOAuth: true
|
||||
provider: dex
|
||||
```
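If you'd like to verify from your web terminal as well, something like the command below should show the ArgoCD pods starting up (replace `userX` with your assigned user, and press Ctrl+C to stop watching):

```bash
# Watch the ArgoCD pods come up in your project
oc get pods --namespace userX --watch
```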
|
||||
|
||||
## 7.2 - Login to argocd
|
||||
|
||||
With ArgoCD running let's open the route in a new tab in our browser and click **Log in with OpenShift**. You can retrieve the `Route` by running the following command in your web terminal:
|
||||
|
||||
```bash
|
||||
oc get route argocd-server
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *ArgoCD login* |
|
||||
</Zoom>
|
||||
|
||||
## 7.3 - Deploy an application
|
||||
|
||||
Now that you're logged into ArgoCD, have a go at creating a new `Application` using the ArgoCD web interface by clicking **+ New App**. The workload we'll deploy is a new mission critical training simulator called "Quake 3 Arena".
|
||||
|
||||
Use the following values for your Application:
|
||||
|
||||
|Field|Value|
|
||||
|-----|-----|
|
||||
|Name |`quake`|
|
||||
|Project|`default`|
|
||||
|Repository URL|`https://github.com/jmhbnz/workshops`|
|
||||
|Path|`data/app-delivery`|
|
||||
|Cluster URL| `https://kubernetes.default.svc`|
|
||||
|Namespace|`userX`|
|
||||
|
||||
## 7.4 Access the mission critical simulator - challenge
|
||||
|
||||
Your final challenge for this exercise is to access the mission critical training simulator by creating a `Route`.
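As a hint, one possible approach (a sketch only; the port number comes from the `quake` Service in `quake3.yaml`) is to expose the HTTP client port of the Service and then open the generated host in your browser:

```bash
# Create a Route pointing at the quake Service client port
oc expose service quake --port=8080 --namespace userX

# Print the generated hostname
oc get route quake --namespace userX --output jsonpath='{.spec.host}'
```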
|
||||
117
data/app-delivery/quake3.yaml
Normal file
@ -0,0 +1,117 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: quake
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
run: quake
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
run: quake
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '8080'
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- q3
|
||||
- server
|
||||
- --config=/config/config.yaml
|
||||
- --content-server=http://127.0.0.1:9090
|
||||
- --agree-eula
|
||||
image: docker.io/criticalstack/quake:latest
|
||||
name: server
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 5
|
||||
volumeMounts:
|
||||
- name: quake3-server-config
|
||||
mountPath: /config
|
||||
- name: quake3-content
|
||||
mountPath: /assets
|
||||
- command:
|
||||
- q3
|
||||
- content
|
||||
- --seed-content-url=http://content.quakejs.com
|
||||
image: docker.io/criticalstack/quake:latest
|
||||
name: content-server
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
volumeMounts:
|
||||
- name: quake3-content
|
||||
mountPath: /assets
|
||||
volumes:
|
||||
- name: quake3-server-config
|
||||
configMap:
|
||||
name: quake3-server-config
|
||||
- name: quake3-content
|
||||
emptyDir: {}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: quake
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
run: quake
|
||||
ports:
|
||||
- port: 8080
|
||||
targetPort: 8080
|
||||
name: client
|
||||
- port: 27960
|
||||
targetPort: 27960
|
||||
name: server
|
||||
- port: 9090
|
||||
targetPort: 9090
|
||||
name: content
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: quake3-server-config
|
||||
data:
|
||||
config.yaml: |
|
||||
fragLimit: 25
|
||||
timeLimit: 15m
|
||||
bot:
|
||||
minPlayers: 3
|
||||
game:
|
||||
motd: "Welcome to Critical Stack"
|
||||
type: FreeForAll
|
||||
forceRespawn: false
|
||||
inactivity: 10m
|
||||
quadFactor: 3
|
||||
weaponRespawn: 3
|
||||
server:
|
||||
hostname: "quakekube"
|
||||
maxClients: 12
|
||||
password: "changeme"
|
||||
commands:
|
||||
- addbot sarge 2
|
||||
maps:
|
||||
- name: q3dm7
|
||||
type: FreeForAll
|
||||
timeLimit: 10m
|
||||
- name: q3dm17
|
||||
type: FreeForAll
|
||||
- name: q3wctf1
|
||||
type: CaptureTheFlag
|
||||
captureLimit: 8
|
||||
- name: q3tourney2
|
||||
type: Tournament
|
||||
- name: q3wctf3
|
||||
type: CaptureTheFlag
|
||||
captureLimit: 8
|
||||
- name: ztn3tourney1
|
||||
type: Tournament
|
||||
@ -1,8 +1,8 @@
|
||||
name: Red Hat
|
||||
name: James Blair
|
||||
avatar: /static/images/redhat.png
|
||||
occupation: TSSC Workshop
|
||||
company: Open Source
|
||||
email: redhat@redhat.com
|
||||
occupation: Specialist Architect
|
||||
company: Red Hat
|
||||
email: jablair@redhat.com
|
||||
twitter: https://twitter.com/RedHat
|
||||
github: https://github.com/RedHat
|
||||
linkedin: https://www.linkedin.com/in/RedHat
|
||||
github: https://github.com/jmhbnz
|
||||
linkedin: https://www.linkedin.com/in/RedHat
|
||||
|
||||
@ -1,12 +0,0 @@
|
||||
---
|
||||
name: Sparrow Hawk
|
||||
avatar: /static/images/sparrowhawk-avatar.jpg
|
||||
occupation: Wizard of Earthsea
|
||||
company: Earthsea
|
||||
twitter: https://twitter.com/sparrowhawk
|
||||
linkedin: https://www.linkedin.com/sparrowhawk
|
||||
---
|
||||
|
||||
At birth Ged was given the child-name Duny by his mother. He was born on the island of Gont, son of a bronzesmith. His mother died before he reached the age of one. As a small boy, Ged had overheard the village witch, his maternal aunt, using various words of power to call goats. Ged later used the words without understanding of their meanings, to surprising effect.
|
||||
|
||||
The witch knew that using words of power effectively without understanding them required innate power, so she endeavored to teach him what little she knew. After learning more from her, he was able to call animals to him. Particularly, he was seen in the company of wild sparrowhawks so often that his "use name" became Sparrowhawk.
|
||||
0
data/blog/.gitignore
vendored
168
data/compliance/README.org
Normal file
@ -0,0 +1,168 @@
|
||||
#+TITLE: Openshift disconnected security & compliance workshop
|
||||
#+DATE: <2024-08-26 Mon>
|
||||
#+AUTHOR: James Blair
|
||||
|
||||
|
||||
This document captures the steps required to set up an instance of the workshop.
|
||||
|
||||
* Connect to the low side instance
|
||||
|
||||
#+begin_src tmux
|
||||
ssh lab-user@3.143.149.146
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install required tools low side
|
||||
|
||||
#+begin_src tmux
|
||||
cd /mnt/low-side-data/
|
||||
curl -L -o oc-mirror.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/oc-mirror.tar.gz
|
||||
tar -xzf oc-mirror.tar.gz
|
||||
rm -f oc-mirror.tar.gz
|
||||
chmod +x oc-mirror
|
||||
sudo cp -v oc-mirror /bin
|
||||
curl -L -o mirror-registry.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/mirror-registry/latest/mirror-registry.tar.gz
|
||||
curl -L -o openshift-install.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/openshift-install-linux.tar.gz
|
||||
tar -xzf openshift-install.tar.gz openshift-install
|
||||
rm -f openshift-install.tar.gz
|
||||
curl -L -o oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.19/openshift-client-linux.tar.gz
|
||||
tar -xzf oc.tar.gz oc
|
||||
rm -f oc.tar.gz
|
||||
sudo cp -v oc /bin
|
||||
ls -1 /mnt/low-side-data/
|
||||
#+end_src
|
||||
|
||||
|
||||
* Mirror installation content low side
|
||||
|
||||
#+begin_src tmux
|
||||
mkdir -v $HOME/.docker
|
||||
cp -v $HOME/pull-secret-example.json $HOME/.docker/config.json
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
---
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
EOF
|
||||
cd /mnt/low-side-data
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install mirror registry high side
|
||||
|
||||
#+begin_src tmux
|
||||
rsync -avP /mnt/low-side-data/mirror-registry.tar.gz highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
cd /mnt/high-side-data
|
||||
tar -xzvf mirror-registry.tar.gz
|
||||
./mirror-registry install --initPassword discopass
|
||||
#+end_src
|
||||
|
||||
|
||||
* Trust mirror registry high side
|
||||
|
||||
#+begin_src tmux
|
||||
sudo cp -v $HOME/quay-install/quay-rootCA/rootCA.pem /etc/pki/ca-trust/source/anchors/
|
||||
sudo update-ca-trust
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Transfer mirror content from low to high
|
||||
|
||||
#+begin_src tmux
|
||||
exit
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
sudo mv -v /mnt/high-side-data/oc /bin/
|
||||
sudo mv -v /mnt/high-side-data/oc-mirror /bin/
|
||||
sudo mv -v /mnt/high-side-data/openshift-install /bin/
|
||||
cd /mnt/high-side-data
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq1_000000.tar docker://$(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install openshift high side
|
||||
|
||||
#+begin_src tmux
|
||||
cat << EOF > /mnt/high-side-data/install-config.yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: disco
|
||||
baseDomain: lab
|
||||
compute:
|
||||
- architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: worker
|
||||
replicas: 0
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
replicas: 1
|
||||
platform:
|
||||
aws:
|
||||
type: m5.8xlarge
|
||||
networking:
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.0.0.0/16
|
||||
networkType: OVNKubernetes
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
platform:
|
||||
aws:
|
||||
region: us-east-2
|
||||
subnets:
|
||||
- $(aws ec2 describe-subnets --output json | jq '.Subnets[0].SubnetId' -r)
|
||||
publish: Internal
|
||||
additionalTrustBundlePolicy: Always
|
||||
EOF
|
||||
if ! test -f "/mnt/high-side-data/id_rsa"; then
|
||||
ssh-keygen -C "OpenShift Debug" -N "" -f /mnt/high-side-data/id_rsa
|
||||
fi
|
||||
echo "sshKey: $(cat /mnt/high-side-data/id_rsa.pub)" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
echo "pullSecret: '$(jq -c . $XDG_RUNTIME_DIR/containers/auth.json)'" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
if (test -e /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
then
|
||||
echo -e "\n\n Looks good, go ahead! \n\n"
|
||||
else
|
||||
echo -e "\n\n Uh oh, something is wrong... \n\n"
|
||||
fi
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
imageContentSources:
|
||||
$(grep "mirrors:" -A 2 --no-group-separator /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
EOF
|
||||
tail -22 /mnt/high-side-data/install-config.yaml
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
additionalTrustBundle: |
|
||||
$(sed 's/^/ /' /home/lab-user/quay-install/quay-rootCA/rootCA.pem)
|
||||
EOF
|
||||
cat /mnt/high-side-data/install-config.yaml
|
||||
cp -v /mnt/high-side-data/install-config.yaml /mnt/high-side-data/install-config.yaml.backup
|
||||
openshift-install create cluster --dir /mnt/high-side-data
|
||||
#+end_src
|
||||
|
||||
|
||||
* Disable default catalog sources high side
|
||||
|
||||
#+begin_src tmux
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
oc patch OperatorHub cluster --type merge -p '{"spec": {"disableAllDefaultSources": true}}'
|
||||
oc create -f /mnt/high-side-data/oc-mirror-workspace/results-*/catalogSource-cs-redhat-operator-index.yaml
|
||||
#+end_src
|
||||
40
data/compliance/exercise1.mdx
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
title: Understanding our lab environment
|
||||
exercise: 1
|
||||
date: '2024-08-22'
|
||||
tags: ['ssh','novnc','workshop','setup']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get familiar with our lab setup."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift 4 Disconnected security & compliance workshop! Here you'll learn about operating a secure and compliant OpenShift 4 cluster in a disconnected network using the following key OpenShift features:
|
||||
|
||||
- [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes)
|
||||
- [Red Hat OpenShift Compliance Operator](https://www.redhat.com/en/blog/a-guide-to-openshift-compliance-operator-best-practices)
|
||||
|
||||
To level set, [Red Hat OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. OpenShift supports running in disconnected networks, though this does change the way the cluster operates because key ingredients like container images, operator bundles, and helm charts must be brought into the environment from the outside world via mirroring.
|
||||
|
||||
There are of course many different options for installing OpenShift in a restricted network; this workshop will not cover the deployment of a cluster. Instead you will be allocated an existing cluster which has been created in advance. Your tasks during this workshop will be to improve the security and compliance of the cluster and workloads running on it.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - Reserve a lab environment
|
||||
|
||||
An OpenShift `4.14` cluster has already been provisioned for you to complete these exercises. To reserve an environment go to [this Google Sheets spreadsheet](https://docs.google.com/spreadsheets/d/1tddgRA6suefTaITyRx87IoRCfCJ7El9Hdr6HB8K7Mvo/edit?usp=sharing). Update your name next to an `Available` environment and change the status to `Allocated`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment worksheet* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Login via ssh and vnc
|
||||
|
||||
To complete the lab exercises you'll use a mix of an `ssh` terminal session for running OpenShift client `oc` commands, and then a browser based vnc session in order to access the OpenShift cluster web console.
|
||||
|
||||
Links to a browser based terminal and vnc session are available in the spreadsheet, along with any credentials required. You are welcome to use your own terminal or vnc software if you prefer.
|
||||
|
||||
Once you have both a terminal and vnc session working you're ready to get underway with the workshop, please move on to exercise 2 🚀
|
||||
228
data/compliance/exercise2.mdx
Normal file
@ -0,0 +1,228 @@
|
||||
---
|
||||
title: Mirror required content
|
||||
exercise: 2
|
||||
date: '2024-08-23'
|
||||
tags: ['oc-mirror','mirror-registry','openshift','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "You want features? Mirror them in!🪞"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation for a private cluster on AWS using the [IPI install method](https://docs.openshift.com/container-platform/4.14/installing/installing_aws/installing-aws-private.html#installing-aws-private), and does not have any post installation features added.
|
||||
|
||||
During this workshop we want to secure the cluster with Red Hat Advanced Cluster Security, understand our compliance posture against [NIST 800-53](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final) with the OpenShift Compliance Operator and then explore some bonus activities like deploying Red Hat Developer Hub.
|
||||
|
||||
To install and configure these features we first need to mirror some additional content into our disconnected environment. Let's get started.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment summary* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.1 - Open a terminal on your low side
|
||||
|
||||
Our first step to prepare to mirror content is to get connected to our low side jump host via `ssh`. You can use the web terminal link in your browser or alternatively your own local terminal with the command below (replacing the placeholder ip with the one you have been allocated).
|
||||
|
||||
```bash
|
||||
ssh lab-user@<ip address>
|
||||
```
|
||||
|
||||
You'll be prompted to enter a password which you can find in your allocated environment details.
|
||||
|
||||
After connecting, change directory to the low side workspace where the initial cluster installation was already completed for you and review the folder contents:
|
||||
|
||||
```bash
|
||||
cd /mnt/low-side-data
|
||||
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Your workspace will look similar to the one below:
|
||||
|
||||
```bash
|
||||
[lab-user@jump low-side-data]$ ls -lah
|
||||
total 21G
|
||||
drwxr-xr-x. 4 lab-user lab-user 4.0K Sep 2 12:46 .
|
||||
drwxr-xr-x. 3 root root 27 Aug 31 22:00 ..
|
||||
-rw-r--r--. 1 lab-user lab-user 305 Sep 2 12:38 imageset-config.yaml
|
||||
-rw-r--r--. 1 lab-user lab-user 696M Sep 2 12:37 mirror-registry.tar.gz
|
||||
-rw-r--r--. 1 lab-user lab-user 20G Sep 2 12:46 mirror_seq1_000000.tar
|
||||
-rwxr-xr-x. 1 lab-user lab-user 146M Mar 26 22:17 oc
|
||||
-rwxr-x--x. 1 lab-user lab-user 144M Aug 7 06:30 oc-mirror
|
||||
-rw-------. 1 lab-user lab-user 160K Sep 2 12:41 .oc-mirror.log
|
||||
drwxr-xr-x. 3 lab-user lab-user 17 Sep 2 12:38 oc-mirror-workspace
|
||||
-rwxr-xr-x. 1 lab-user lab-user 631M Aug 7 07:40 openshift-install
|
||||
drwxr-x---. 2 lab-user lab-user 28 Sep 2 12:46 publish
|
||||
```
|
||||
|
||||
|
||||
## 2.2 - Get familiar with oc-mirror
|
||||
|
||||
To mirror content into our disconnected environment we will be using the [`oc-mirror`](https://github.com/openshift/oc-mirror) openshift client utility.
|
||||
|
||||
To configure what content `oc-mirror` will download and mirror for us we use a YAML formatted file called an `ImageSetConfiguration`. This file declares:
|
||||
|
||||
1. **What to download**, which can include OpenShift itself, operator bundles, helm charts, or specific container images
|
||||
2. **What versions of each item to download**
|
||||
3. **Where to store the downloaded content**
|
||||
|
||||
The `oc-mirror` utility also has some features for listing available content for mirroring, let's try that now! Run the following commands in your ssh terminal:
|
||||
|
||||
```bash
|
||||
# List available openshift release versions
|
||||
oc-mirror list releases
|
||||
|
||||
# List operator catalogs for a specific openshift release
|
||||
oc-mirror list operators --catalogs --version=4.14
|
||||
|
||||
# List all operators in a specific catalog
|
||||
oc-mirror list operators --catalog registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
```
|
||||
|
||||
Using the built in help have a go at using `oc-mirror` to identify details of a specific operator.
|
||||
|
||||
We can also use the `oc-mirror` utility to understand the state of any existing mirror content bundles. We have a content bundle called `mirror_seq1_000000.tar` available from the initial installation of your OpenShift cluster, let's inspect that now.
|
||||
|
||||
```bash
|
||||
oc-mirror describe mirror_seq1_000000.tar | more
|
||||
```
|
||||
|
||||
This bundle archive was created by the `oc-mirror` utility using the configuration file called `imageset-config.yaml` which is also in the same directory. Let's review that file:
|
||||
|
||||
```bash
|
||||
cat imageset-config.yaml
|
||||
```
|
||||
|
||||
Your file should look something like the example below. We can see that the `4.14.35` version of OpenShift is specified to be downloaded, along with the `registry.redhat.io/rhel8/support-tools` additional standalone container image.
|
||||
|
||||
```yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
```
|
||||
|
||||
|
||||
## 2.3 - Confirm local cache is up to date
|
||||
|
||||
A local cache of content already exists from when the cluster installation was initially performed in advance of this workshop. Let's confirm everything is still up to date by re-running the `oc-mirror` command specifying our configuration file and the location on our disk.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take several minutes to run but should finish with `No new images detected, process stopping`, confirming the existing cache is up to date.
|
||||
|
||||
|
||||
## 2.4 - Add new mirror content
|
||||
|
||||
For our workshop exercises today we need to mirror some additional operators, namely the **OpenShift Compliance Operator**, **Red Hat Advanced Cluster Security**, and **Red Hat Developer Hub**. Run the command below to update your `imageset-config.yaml` file to match the example below:
|
||||
|
||||
```bash
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
operators:
|
||||
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
packages:
|
||||
- name: rhdh
|
||||
channels:
|
||||
- name: fast
|
||||
minVersion: '1.1.1'
|
||||
maxVersion: '1.1.1'
|
||||
- name: compliance-operator
|
||||
channels:
|
||||
- name: stable
|
||||
- name: rhacs-operator
|
||||
channels:
|
||||
- name: stable
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
helm: {}
|
||||
EOF
|
||||
```
|
||||
|
||||
After updating the configuration file we can re-run our `oc-mirror` command to bring the new content into our local collection on disk in `/mnt/low-side-data`.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take up to 10 minutes to complete depending on connection speeds.
|
||||
|
||||
|
||||
## 2.5 - Mirror updated content to high side registry
|
||||
|
||||
Once the local mirror update has completed we now need to transfer this content to our high side and mirror it from disk into the OpenShift Mirror Registry running in our disconnected high side.
|
||||
|
||||
In this workshop we will use `rsync` to copy our content to our high side system, let's do that now:
|
||||
|
||||
```bash
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
```
|
||||
|
||||
> Note: `oc-mirror` creates incremental mirror content files in order to prevent duplicating content. You will notice your low side mirror workspace includes a new file `mirror_seq2_000000.tar` which is significantly smaller than the original mirror archive.
|
||||
|
||||
Once the transfer has completed we need to log into our high side disconnected system and run `oc-mirror` from that side to upload the content from the new archive into our disconnected container registry:
|
||||
|
||||
```bash
|
||||
ssh highside
|
||||
```
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq2_000000.tar docker://$(hostname):8443
|
||||
```
|
||||
|
||||
## 2.6 - Verify new operators are available
|
||||
|
||||
After a couple of minutes the mirror process will complete. We then need to tell OpenShift about the new content that is available by running the commands below.
|
||||
|
||||
```bash
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
for file in $(find ./oc-mirror-workspace -type f -name '*.yaml'); do oc apply -f $file; done
|
||||
```
|
||||
|
||||
> Note: In our `oc-mirror-workspace` directory each time we mirror new content a new `results-<id>` directory will be created which may contain `imageContentSourcePolicy.yaml` or `catalogSource-cs-<index>.yaml` files which we need to apply to our cluster to tell it about the new content that is available.
|
||||
|
||||
Once the updates are applied we can then check that our new operators are available in the OpenShift Web Console using our browser based vnc session:
|
||||
|
||||
1. Open your vnc browser tab
|
||||
2. Use the left menu panel, click **Settings** and then select **Remote Resizing** as the scaling mode to improve viewing experience.
|
||||
3. Click **Connect** and when prompted enter the password in your environment spreadsheet row, then click **Send credentials**.
|
||||
4. A Firefox browser window should already be open; if not, you can start it manually using the top left applications menu.
|
||||
5. Click the bookmark toolbar option for **DISCO - OpenShift**.
|
||||
6. Log in when prompted with the username **kubeadmin** and the kubeadmin password listed in your environment spreadsheet (you can also find this password in your highside bastion ssh session by running `cat /mnt/high-side-data/auth/kubeadmin-password`). Note that to paste in the web based vnc session you need to use the left hand panel to pass the clipboard content through to the session.
|
||||
7. Navigate to **Operators** on the left menu and then click **OperatorHub**. You should see the newly mirrored operators are now available in your disconnected cluster!
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Check disconnected operator hub* |
|
||||
</Zoom>
|
||||
|
||||
If your mirroring has completed successfully you are ready to move on to exercise 3 and install the three new operators 🎉
|
||||
150
data/compliance/exercise3.mdx
Normal file
@ -0,0 +1,150 @@
|
||||
---
|
||||
title: Install operators on a disconnected cluster
|
||||
exercise: 3
|
||||
date: '2024-08-27'
|
||||
tags: ['openshift','operators','operator-hub','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Operators?!? 🤔 - Think app store for Kubernetes 🌟"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation using the IPI install method, and does not have any post installation features added.
|
||||
|
||||
In a broad sense many OpenShift features are added via [Operators](https://www.redhat.com/en/technologies/cloud-computing/openshift/what-are-openshift-operators). Operators automate the creation, configuration, and management of instances of Kubernetes-native applications. Operators can provide automation at every level of the stack—from managing the parts that make up the platform all the way to applications that are provided as a managed service.
|
||||
|
||||
In the previous exercise we mirrored some new operator bundles into our disconnected network. In this exercise we'll install those operators and explore the features they provide via [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources).
|
||||
|
||||
> Note: For some trivia, Red Hat created and open sourced the [Operator Framework](https://github.com/operator-framework), then later contributed the project to the Cloud Native Computing Foundation in 2021, ensuring all organisations can benefit from our experience building and supporting operator driven clusters since ~2016.
|
||||
>
|
||||
> 
|
||||
|
||||
|
||||
## 3.1 - Installing compliance operator
|
||||
|
||||
First up let's install the [Red Hat OpenShift Compliance Operator](https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-overview.html).
|
||||
|
||||
For some brief context the Compliance Operator assists platform teams by automating the inspection of numerous technical implementations and compares those against certain aspects of industry standards. For our purposes today that industry standard will be **NIST 800-53**.
|
||||
|
||||
The Compliance Operator assesses compliance of both the Kubernetes API resources of OpenShift Container Platform, as well as the nodes running the cluster. The Compliance Operator uses [OpenSCAP](https://www.open-scap.org), a NIST-certified tool, to scan and enforce security policies provided by the content.
|
||||
|
||||
To install the operator we can use either the OpenShift Web Console, or the terminal with `oc` cli. In this workshop we will install the operator with the Web Console using our vnc browser tab. Thanks to our previous exercise mirroring content and making it available via the cluster disconnected OperatorHub catalogs we can enjoy the same user experience to install the operator as if our cluster was fully connected.
|
||||
|
||||
1. Open your vnc browser tab and return to the OpenShift Web Console browser tab you opened in the previous exercise.
|
||||
2. Click on the **Compliance Operator** in **OperatorHub** to open the right hand panel, then click the blue **Install** button at the top of the panel.
|
||||
3. On the install details screen stick with all the default values and simply click **Install**
|
||||
4. After a short wait the Compliance Operator will be installed and ready for use 🎉
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Install OpenShift Compliance Operator* |
|
||||
</Zoom>
|
||||
|
||||
With the Compliance Operator installed feel free to explore which new Custom Resources the Operator makes available. We'll return to these in future exercises to begin using them.
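For example, one quick way to list the new resource types from your highside terminal is to query the API group the operator registers (`compliance.openshift.io`, as seen in its custom resources); profile objects live in the `openshift-compliance` namespace by default:

```bash
# List the custom resource types added by the Compliance Operator
oc api-resources --api-group=compliance.openshift.io

# List the compliance profiles shipped with the operator
oc get profiles.compliance.openshift.io --namespace openshift-compliance
```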
|
||||
|
||||
|
||||
## 3.2 - Installing the rhacs operator
|
||||
|
||||
Next up we'll install the [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes) Operator.
|
||||
|
||||
Red Hat Advanced Cluster Security (RHACS) has direct integration with the Compliance Operator to provide a frontend user experience for running compliance scans along with viewing results.
|
||||
|
||||
To try the alternative operator installation method this time we will install the operator via the `oc` cli in our terminal.
|
||||
|
||||
Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: rhacs-operator
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhacs-operator.v4.5.1
|
||||
EOF
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Advanced Cluster Security for Kubernetes** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.3 - Installing the developer hub operator
|
||||
|
||||
The final operator we will install for this workshop relates to [Red Hat Developer Hub](https://developers.redhat.com/rhdh/overview).
|
||||
|
||||
Red Hat Developer Hub is an Internal Developer Portal (IDP) based on the upstream [Backstage](https://backstage.io) project initially created at Spotify. With Red Hat Developer Hub combined with Red Hat OpenShift we can enable platform engineering teams to offer software templates and pre-architected and supported approaches to make life easier for development teams, ease onboarding and reduce friction and frustration.
|
||||
|
||||
We'll also install the Red Hat Developer Hub using the `oc` cli in our terminal. Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
namespace: rhdh-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhdh
|
||||
namespace: rhdh-operator
|
||||
spec:
|
||||
channel: fast
|
||||
installPlanApproval: Automatic
|
||||
name: rhdh
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhdh-operator.v1.1.1
|
||||
EOF
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Red Hat Developer Hub** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
</Zoom>
|
||||
|
||||
If all three operators are now installed congratulations you are ready to move on to Exercise 4 🎉
|
||||
|
||||
191
data/compliance/exercise4.mdx
Normal file
@ -0,0 +1,191 @@
|
||||
---
|
||||
title: Deploy advanced cluster security
|
||||
exercise: 4
|
||||
date: '2024-08-31'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to up our security & compliance game! 🔒"
|
||||
---
|
||||
|
||||
With our Red Hat Advanced Cluster Security Operator installed and standing by to do some work for us, let's give it some work to do by telling it to deploy Red Hat Advanced Cluster Security onto our cluster.
|
||||
|
||||
|
||||
## 4.1 - Getting familiar with rhacs
|
||||
|
||||
Before we get into the technical implementation let's take a moment to get up to speed with how Red Hat Advanced Cluster Security works.
|
||||
|
||||
Fundamentally you install RHACS as a set of containers in your OpenShift Container Platform or Kubernetes cluster. RHACS includes the following services:
|
||||
|
||||
1. **Central** services you install on a designated "hub" cluster. Central installs the Central, Scanner, and Scanner DB services. The Central service provides access to a user interface through a web UI or the RHACS portal. It also handles API interactions and provides persistent storage. Scanner analyzes images for known vulnerabilities. It uses Scanner DB as a cache for vulnerability definitions.
|
||||
2. **Secured cluster** services you install on each cluster you want to secure by RHACS. This installs the Collector, Sensor, and Admission Controller services. Collector collects runtime information on container security and network activity. It then sends data to Sensor, which monitors your Kubernetes cluster for policy detection and enforcement. Admission Controller monitors workloads and prevents users from creating them in RHACS when they violate security policies.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security high level architecture* |
|
||||
</Zoom>
|
||||
|
||||
> Note: For an overview of which sources Red Hat Advanced Cluster Security uses for vulnerability information and a more detailed walkthrough of each component, take a moment to review https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/architecture/index.
|
||||
|
||||
|
||||
## 4.2 - Deploying central services
|
||||
|
||||
Let's now create our **Central** services on our cluster by creating a new `Central` custom resource which our newly installed operator will then manage and deploy on our behalf. We'll deploy these services into a new namespace called `acs-central`.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: acs-central
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: Central
|
||||
metadata:
|
||||
name: stackrox-central-services
|
||||
namespace: acs-central
|
||||
spec:
|
||||
central:
|
||||
exposure:
|
||||
route:
|
||||
enabled: true
|
||||
egress:
|
||||
connectivityPolicy: Offline
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: The values we used for the `Central` instance are all defaults, aside from `connectivityPolicy: Offline`, which tells Red Hat Advanced Cluster Security it will be operating in a disconnected environment. For more details on how RHACS works in a disconnected environment refer to https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode.
|
||||
|
||||
Once the `Central` resource has been created you can check the state of the RHACS pods by running `oc get pods -n acs-central` in your highside terminal, or by navigating to **Workloads** > **Pods** for the `acs-central` project in the OpenShift Web Console.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security central pods* |
|
||||
</Zoom>
|
||||
|
||||
Once all pods are `Running` and `Ready` you can move on to the next step.
|
||||
|
||||
|
||||
## 4.3 - Logging into rhacs dashboard
|
||||
|
||||
Time to bring up our RHACS dashboard. We'll first retrieve the `admin` user password which was auto generated by the operator and stored in a **Secret**. Then we can open the **Route** for RHACS in a new browser tab and log in.
|
||||
|
||||
1. Return to your vnc session and the open tab with our OpenShift Web Console.
|
||||
2. Click **Workloads** > **Secrets**, ensuring you are looking at the `acs-central` **Project**.
|
||||
3. Click into the `central-htpasswd` **Secret**
|
||||
4. Scroll down and click **Reveal values** on the right hand side.
|
||||
5. Copy the `password` field, we'll need this shortly.
|
||||
6. Navigate to **Networking** > **Routes** in the left hand menu.
|
||||
7. Click on the **Location** URL for the route named `central`.
|
||||
8. Login with the username `admin` and the password you copied earlier.
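If you'd rather grab these details from your highside terminal, the commands below use the same Secret and Route names referenced above:

```bash
# Retrieve the auto generated admin password from the central-htpasswd Secret
oc --namespace acs-central get secret central-htpasswd \
  --output jsonpath='{.data.password}' | base64 --decode && echo

# Retrieve the hostname of the central Route
oc --namespace acs-central get route central --output jsonpath='{.spec.host}'
```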
|
||||
|
||||
> Note: Ironically (given the subject matter), you may receive a tls verification warning when opening the rhacs dashboard. This is expected in this short lived workshop environment (because James is lazy) and should be accepted (Kids please don't do this at home 😂).
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Logging into Red Hat Advanced Cluster Security dashboard* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.4 - Securing our hub cluster
|
||||
|
||||
To begin securing our OpenShift "hub" cluster with RHACS we need to:
|
||||
|
||||
1. Generate an init bundle to download and apply to the cluster.
|
||||
2. Create and apply a `SecuredCluster` custom resource.
|
||||
|
||||
We'll start with generating the init bundle. For future familiarity, for this step we'll follow the official RHACS documentation: https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/installing/installing-rhacs-on-red-hat-openshift#portal-generate-init-bundle_init-bundle-ocp
|
||||
|
||||
Follow the steps in `4.3.1.1` to generate an init bundle named `hub` using the RHACS dashboard, selecting the **Operator** based installation method.
|
||||
|
||||
Once the `hub-Operator-secrets-cluster-init-bundle.yaml` file has been downloaded we'll apply it to the cluster using the OpenShift Web Console **Import YAML** feature.
|
||||
|
||||
1. Create a new project in the Web Console named `acs-securedcluster`.
|
||||
2. Click **Import YAML** in the top right of the OpenShift Web Console.
|
||||
3. Open your **Downloads** directory in the file browser using the **Places** top left menu.
|
||||
4. Open the `hub-Operator-secrets-cluster-init-bundle.yaml` file in a text editor and copy the contents.
|
||||
5. Paste the contents into the **Import YAML** text field and click the blue **Create** button.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Importing an init bundle into our hub cluster* |
|
||||
</Zoom>
|
||||
|
||||
> Note: These init bundles contain secrets enabling a secured cluster to communicate with RHACS Central so it's important to store these securely. For automation purposes you can also generate init bundles with the RHACS API or the `roxctl` CLI, for example `roxctl -e <ACS CONSOLE URL>:443 central init-bundles generate demo-cluster --output-secrets /tmp/demo-cluster.yaml --password <ACS ADMIN PASSWORD>`.
|
||||
|
||||
Once our init bundle has been created we can create our `SecuredCluster` custom resource to complete the cluster onboarding process. We'll do that with our `oc` terminal session.
|
||||
|
||||
Copy the command below and run it in your highside web terminal:
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace acs-securedcluster apply --filename -
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: SecuredCluster
|
||||
metadata:
|
||||
name: stackrox-secured-cluster-services
|
||||
spec:
|
||||
monitoring:
|
||||
openshift:
|
||||
enabled: true
|
||||
auditLogs:
|
||||
collection: Auto
|
||||
network:
|
||||
policies: Enabled
|
||||
admissionControl:
|
||||
listenOnUpdates: true
|
||||
bypass: BreakGlassAnnotation
|
||||
contactImageScanners: ScanIfMissing
|
||||
listenOnCreates: true
|
||||
replicas: 3
|
||||
timeoutSeconds: 10
|
||||
listenOnEvents: true
|
||||
scannerV4:
|
||||
db:
|
||||
persistence:
|
||||
persistentVolumeClaim:
|
||||
claimName: scanner-v4-db
|
||||
indexer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: Default
|
||||
scanner:
|
||||
analyzer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: AutoSense
|
||||
perNode:
|
||||
collector:
|
||||
collection: CORE_BPF
|
||||
forceCollection: false
|
||||
imageFlavor: Regular
|
||||
taintToleration: TolerateTaints
|
||||
clusterName: hub
|
||||
centralEndpoint: 'https://central-acs-central.apps.disco.lab:443'
|
||||
EOF
|
||||
```
|
||||
|
||||
After a short wait for pods to initialise in the `acs-securedcluster` namespace you should be able to see the cluster is now secured in RHACS by checking the **Platform Configuration** > **Clusters** overview which should show the `hub` cluster as `✅ Healthy`.
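While you wait, a quick way to watch the secured cluster pods start from your highside terminal (press Ctrl+C to stop watching) is:

```bash
# Watch the collector, sensor and admission-control pods initialise
oc --namespace acs-securedcluster get pods --watch
```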
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Hub cluster is now secured by Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
If you now have Red Hat Advanced Cluster Security **Central** and **SecuredCluster** components deployed then congratulations your RHACS instance is fully deployed and you're ready to start improving your cluster security and compliance posture in Exercise 5! 🎉
|
||||
|
||||
216
data/compliance/exercise5.mdx
Normal file
@ -0,0 +1,216 @@
|
||||
---
|
||||
title: Running a cluster compliance scan
|
||||
exercise: 5
|
||||
date: '2024-09-01'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's check our cluster compliance against NIST 800-53 👀"
|
||||
---
|
||||
|
||||
We've done the work to set the OpenShift Compliance Operator and Red Hat Advanced Cluster Security up on our cluster, now let's make the most of it by using them to schedule and run a compliance scan on our cluster.
|
||||
|
||||
For the scan we'll be using the included `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift` and `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Node level` scan profiles that are included with the OpenShift Compliance Operator.
|
||||
|
||||
Two scan profiles are required as we need to scan both the OpenShift cluster, as well as each individual node running [RHEL CoreOS](https://docs.openshift.com/container-platform/4.14/architecture/architecture-rhcos.html).
|
||||
|
||||
For more details on these compliance profiles please take some time to review:
|
||||
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate.html
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate-node.html
|
||||
- https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-supported-profiles.html
|
||||
|
||||
|
||||
## 5.1 - Scheduling a scan
|
||||
|
||||
There are two methods you can use to schedule Compliance Operator scans:
|
||||
|
||||
1. Creating a `ScanSetting` and `ScanSettingBinding` custom resource. This does not require Red Hat Advanced Cluster Security and can be easily managed by GitOps, however it is not beginner friendly and lacks a graphical frontend to easily explore cluster compliance status. For an overview of this approach please take a few minutes to review https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-scans.html#compliance-operator-scans
|
||||
2. Creating a **Scan Schedule** in Red Hat Advanced Cluster Security. This is the approach we will be using in this workshop as it is the most intuitive option.
|
||||
|
||||
Complete the steps below to create your scan schedule:
|
||||
|
||||
1. Return to your browser tab in the vnc session with the Red Hat Advanced Cluster Security dashboard open.
|
||||
2. Navigate to **Compliance** > **Schedules** in the left hand menu.
|
||||
3. Click the blue **Create Scan Schedule** button in the middle of the screen.
|
||||
4. Enter the name `daily-nist-800-53-moderate` and set the **Time** field to `00:00` then click **Next**.
|
||||
5. On the next screen select your `hub` cluster, then click **Next**.
|
||||
6. On the profile screen tick `ocp4-moderate` and `ocp4-moderate-node`, then click **Next**.
|
||||
7. Click **Next** once more on the **Reports** screen and then click **Save**.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Creating a compliance scan schedule in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
After creating the scan schedule, results will shortly be available in the RHACS console. While we wait for the automatically triggered initial scan to complete, let's use the `oc` cli to review the `ScanSetting` that was created behind the scenes when we created the **Scan Schedule** in the RHACS dashboard.
|
||||
|
||||
Run the commands below to review your `ScanSetting` resource:
|
||||
|
||||
```bash
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate
|
||||
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate --output yaml
|
||||
```
|
||||
|
||||
You should see details output similar to the example below. Notice the more advanced settings available in the custom resource including `rawResultsStorage.rotation` and `roles[]` which you may want to customize in your environment.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ScanSetting
|
||||
maxRetryOnTimeout: 3
|
||||
metadata:
|
||||
annotations:
|
||||
owner: stackrox
|
||||
labels:
|
||||
app.kubernetes.io/created-by: sensor
|
||||
app.kubernetes.io/managed-by: sensor
|
||||
app.kubernetes.io/name: stackrox
|
||||
name: daily-nist-800-53-moderate
|
||||
namespace: openshift-compliance
|
||||
rawResultStorage:
|
||||
pvAccessModes:
|
||||
- ReadWriteOnce
|
||||
rotation: 3
|
||||
size: 1Gi
|
||||
roles:
|
||||
- master
|
||||
- worker
|
||||
scanTolerations:
|
||||
- operator: Exists
|
||||
schedule: 0 0 * * *
|
||||
showNotApplicable: false
|
||||
strictNodeScan: false
|
||||
suspend: false
|
||||
timeout: 30m0s
|
||||
```
|
||||
|
||||
|
||||
## 5.2 - Review cluster compliance
|
||||
|
||||
Once your cluster scan completes return to your vnc browser tab with the Red Hat Advanced Cluster Security Dashboard open. We'll take a look at our overall cluster compliance now against the compliance profile.
|
||||
|
||||
> Note: Please be aware of the usage disclaimer shown at the top of the screen *"Red Hat Advanced Cluster Security, and its compliance scanning implementations, assists users by automating the inspection of numerous technical implementations that align with certain aspects of industry standards, benchmarks, and baselines. It does not replace the need for auditors, Qualified Security Assessors, Joint Authorization Boards, or other industry regulatory bodies."*.
|
||||
|
||||
Navigate to **Compliance** > **Coverage** and review the overall result for the `ocp4-moderate` and `ocp4-moderate-node` profiles. The results should look something similar to the examples below:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Your cluster should come out compliant with ~65% of the `ocp4-moderate` profile and ~93% of the `ocp4-moderate-node` profile. Not a bad start. Let's review an example of an individual result now.
|
||||
|
||||
|
||||
## 5.3 - Review individual `Manual` compliance results
|
||||
|
||||
Reviewing the detailed results, any checks that are not passing will be categorised as either `Failed` or `Manual`. While we do everything we can to automate the compliance process, there are still a small number of controls you need to manage outside the direct automation of the Compliance Operator.
|
||||
|
||||
Looking at the `ocp4-moderate` results for our `hub` cluster, a good example of a `Manual` check is `ocp4-moderate-accounts-restrict-service-account-tokens`. Let's get an overview of the check, the rationale, and the instructions to address it manually by clicking into that check in the list and opening the **Details** tab. You can jump directly to it with this url: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-accounts-restrict-service-account-tokens?detailsTab=Details
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan result details in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
We can see in this example it's essentially a judgement call. Our instructions are:
|
||||
|
||||
> For each pod in the cluster, review the pod specification and ensure that pods that do not need to explicitly communicate with the API server have `automountServiceAccountToken` configured to `false`.
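
Although the check itself is manual, you can get a quick read on your own workloads from the terminal. The snippet below is just a sketch (it only inspects the pod spec field and ignores the equivalent ServiceAccount-level setting), but it is read-only and safe to run:

```bash
# List pods that do not explicitly set automountServiceAccountToken: false
# (absence of the field means the token is mounted by default)
oc get pods --all-namespaces --output json \
  | jq -r '.items[] | select(.spec.automountServiceAccountToken != false) | .metadata.namespace + "/" + .metadata.name'
```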
|
||||
|
||||
Just because this check is classified as `Manual` does not mean that we are on our own. There are extremely powerful policy engine and policy violation tracking features in RHACS that we can use to investigate the status of this check further.
|
||||
|
||||
A default policy called **Pod Service Account Token Automatically Mounted** is available out of the box. By default this policy is in **Inform only** mode, which means deployments that violate it will not be blocked by the RHACS admission controller, or scaled down by RHACS runtime protection if already running. However, we can still use this policy as-is to report on the current state of any cluster in our fleet that is secured by RHACS.
|
||||
|
||||
1. First let's navigate to **Platform Configuration** > **Policy Management** in the left hand menu.
|
||||
2. In the Policy list scroll down to find **Pod Service Account Token Automatically Mounted** and click the policy title.
|
||||
3. Have a read of the policy details, then scroll down to review the **Scope exclusions**. You will see Red Hat has already done some work for you to define some standard OpenShift cluster control plane deployments which do need the token mounted and are safely & intentionally excluded from the policy to save you time.
|
||||
4. The policy should already be enabled, so let's click on **Violations** in the left hand menu to review any current instances where this policy is being violated. You should have one entry in the list for `kube-rbac-proxy`. This is a standard OpenShift pod in the `openshift-machine-config-operator` namespace and does genuinely require the API token mounted, so we could safely add this deployment to our policy exclusions.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Reviewing a policy & policy violations in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
At this point, as platform engineers we have some flexibility in how we handle this particular compliance check. One option would be to switch the **Pod Service Account Token Automatically Mounted** policy to `Inform & enforce` mode, preventing any future deployments to any cluster in your fleet secured by RHACS from carrying this common misconfiguration. Having implemented that mitigation, you could then consider tailoring the compliance profile to remove or change the priority of this `Manual` check as desired. Refer to https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-tailor.html
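
As a rough illustration of that tailoring step, a `TailoredProfile` resource similar to the sketch below could disable the rule once you are satisfied the RHACS policy covers it. Treat this as an assumption-laden example: the resource name, title, description and rationale are placeholders of our own, and you should confirm the exact rule name and required fields against the tailoring documentation linked above.

```bash
cat << EOF | oc apply --filename -
apiVersion: compliance.openshift.io/v1alpha1
kind: TailoredProfile
metadata:
  name: ocp4-moderate-tailored            # hypothetical name
  namespace: openshift-compliance
spec:
  extends: ocp4-moderate
  title: NIST 800-53 moderate with service account token check handled by RHACS
  description: Disables a manual check that is mitigated by an enforced RHACS policy
  disableRules:
    - name: ocp4-accounts-restrict-service-account-tokens
      rationale: Enforced fleet-wide via the RHACS admission controller policy
EOF
```

Remember that a tailored profile only takes effect once your scan actually references it, for example through an updated scan setting binding.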
|
||||
|
||||
## 5.4 - Review individual `Failed` compliance results
|
||||
|
||||
For our last task on this exercise let's review a `Failed` check, and apply the corresponding remediation automatically to improve our compliance posture.
|
||||
|
||||
This time, rather than using the RHACS Dashboard we'll review the check result and apply the remediation using our terminal and `oc` cli.
|
||||
|
||||
Let's start by retrieving one of our failed checks:
|
||||
|
||||
```bash
|
||||
oc get ComplianceCheckResult --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
Each `ComplianceCheckResult` represents a result of one compliance rule check. If the rule can be remediated automatically, a `ComplianceRemediation` object with the same name, owned by the `ComplianceCheckResult` is created. Unless requested, the remediations are not applied automatically, which gives an OpenShift Container Platform administrator the opportunity to review what the remediation does and only apply a remediation once it has been verified.
|
||||
|
||||
> Note: Not all `ComplianceCheckResult` objects create `ComplianceRemediation` objects. Only `ComplianceCheckResult` objects that can be remediated automatically do. A `ComplianceCheckResult` object has a related remediation if it is labeled with the `compliance.openshift.io/automated-remediation` label.
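
That label also gives us a handy way to see, at a glance, which of our non-passing checks can be fixed automatically:

```bash
# List only the check results that carry an automatic remediation
oc get compliancecheckresult --namespace openshift-compliance \
  --selector 'compliance.openshift.io/automated-remediation'
```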
|
||||
|
||||
Let's inspect the corresponding `ComplianceRemediation` for this check:
|
||||
|
||||
```bash
|
||||
oc get ComplianceRemediation --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
You should see output similar to the example below. We can see that the `spec:` essentially contains a yaml patch for our `APIServer` resource named `cluster`, setting `spec.encryption.type` to `aescbc`.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ComplianceRemediation
|
||||
metadata:
|
||||
annotations:
|
||||
compliance.openshift.io/xccdf-value-used: var-apiserver-encryption-type
|
||||
labels:
|
||||
compliance.openshift.io/scan-name: ocp4-moderate
|
||||
compliance.openshift.io/suite: daily-nist-800-53-moderate
|
||||
name: ocp4-moderate-api-server-encryption-provider-cipher
|
||||
namespace: openshift-compliance
|
||||
spec:
|
||||
apply: false
|
||||
current:
|
||||
object:
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
encryption:
|
||||
type: aescbc
|
||||
outdated: {}
|
||||
type: Configuration
|
||||
status:
|
||||
applicationState: NotApplied
|
||||
```
|
||||
|
||||
Let's apply this automatic remediation now:
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance patch complianceremediation/ocp4-moderate-api-server-encryption-provider-cipher --patch '{"spec":{"apply":true}}' --type=merge
|
||||
```
|
||||
|
||||
> Note: This remediation impacts pods in the `openshift-apiserver` namespace. If you check those pods quickly with `oc get pods --namespace openshift-apiserver` you will notice a rolling restart underway.
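
You can also confirm the patch landed on the `APIServer` resource itself:

```bash
# Should print "aescbc" once the remediation has been applied
oc get apiserver cluster --output jsonpath='{.spec.encryption.type}{"\n"}'
```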
|
||||
|
||||
Now it's time for some instant gratification. Let's bring up this compliance check in our vnc browser tab with the RHACS dashboard open by going to: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-api-server-encryption-provider-cipher?detailsTab=Results
|
||||
|
||||
You will see it currently shows as `Failed`. We can trigger a re-scan with the `oc` command below in our terminal:
|
||||
|
||||
> Note: Due to the api server rolling restart when this remediation was applied you may need to perform a fresh terminal login with `oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true`
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance annotate compliancescans/ocp4-moderate compliance.openshift.io/rescan=
|
||||
```
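
The re-scan takes a few minutes to run. If you prefer watching from the terminal rather than the dashboard, the same result is visible on the `ComplianceCheckResult` object:

```bash
# Watch for the status to flip from FAIL to PASS once the re-scan completes
oc get compliancecheckresult --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --watch
```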
|
||||
|
||||
After hitting refresh, the check should now report `Pass`, and our overall percentage compliance against the baseline should also have increased. Congratulations, time to move on to exercise 6 🚀
|
||||
174
data/compliance/exercise6.mdx
Normal file
@ -0,0 +1,174 @@
|
||||
---
|
||||
title: Retrieving raw compliance results
|
||||
exercise: 6
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Need to integrate results with another platform? No problem!"
|
||||
---
|
||||
|
||||
Organisations often have dedicated software for managing governance, risk and compliance, or need to provide results to external auditors. In these situations the dashboards within Red Hat Advanced Cluster Security and the `ComplianceCheckResult` objects in the OpenShift API server are helpful, but what we really need is to integrate these results into our third party compliance management platform, or pass results in a standardised format to third parties.
|
||||
|
||||
In this exercise, we'll briefly step through retrieving raw compliance results in the well known **Asset Reporting Format** (ARF).
|
||||
|
||||
The Asset Reporting Format is a data model to express the transport format of information about assets, and the relationships between assets and reports. The standardized data model facilitates the reporting, correlating, and fusing of asset information throughout and between organizations. ARF is vendor and technology neutral, flexible, and suited for a wide variety of reporting applications.
|
||||
|
||||
For more details on the format specification refer to https://www.nist.gov/publications/specification-asset-reporting-format-11
|
||||
|
||||
|
||||
## 6.1 - Understanding raw result storage
|
||||
|
||||
When the Compliance Operator runs a scan, raw results are stored in a `PersistentVolume`. The following `oc` command shows the `PersistentVolume` name that maps to a given scan name.
|
||||
|
||||
Let's use our scan name that we set up previously, `daily-nist-800-53-moderate`:
|
||||
|
||||
```bash
|
||||
oc get --namespace openshift-compliance compliancesuites daily-nist-800-53-moderate --output json | jq '.status.scanStatuses[].resultsStorage'
|
||||
```
|
||||
|
||||
We should see results showing the name of each `PersistentVolume` for each profile that was scanned. Below is an example:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "ocp4-moderate",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-master",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-worker",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
```
|
||||
|
||||
We can view the details of these `PersistentVolumes` as follows:
|
||||
|
||||
|
||||
```bash
|
||||
oc get pvc --namespace openshift-compliance ocp4-moderate
|
||||
```
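
Each scanned profile gets its own claim, so you can also list them all in one go:

```bash
oc get pvc --namespace openshift-compliance
```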
|
||||
|
||||
|
||||
## 6.2 - Retrieving results from a volume
|
||||
|
||||
Let's retrieve some specific results files from a volume by mounting the volume into a pod, and then using `oc` to copy the volume contents to our highside ssh host.
|
||||
|
||||
We can create a pod using the `rhel8/support-tools` additional image that was mirrored into our disconnected environment.
|
||||
|
||||
> Note: We use the pinned sha256 image digest below rather than a standard image tag; this is a requirement of the mirroring process.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace openshift-compliance apply --filename -
|
||||
apiVersion: "v1"
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pv-extract
|
||||
spec:
|
||||
containers:
|
||||
- name: pv-extract-pod
|
||||
image: registry.redhat.io/rhel8/support-tools@sha256:ab42416e9e3460f6c6adac4cf09013be6f402810fba452ea95bd717c3ab4076b
|
||||
command: ["sleep", "3000"]
|
||||
volumeMounts:
|
||||
- mountPath: "/ocp4-moderate-scan-results"
|
||||
name: ocp4-moderate-scan-vol
|
||||
volumes:
|
||||
- name: ocp4-moderate-scan-vol
|
||||
persistentVolumeClaim:
|
||||
claimName: ocp4-moderate
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: Spawning a pod that mounts the `PersistentVolume` will keep the claim as `Bound`. If the volume’s storage class in use has permissions set to `ReadWriteOnce`, the volume is only mountable by one pod at a time. You must delete the pod upon completion, or it will not be possible for the Operator to schedule a pod and continue storing results in this location.
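
Before copying anything out, it's worth confirming the helper pod has started and mounted the volume. One way to do that (assuming the image pull completes within the timeout) is:

```bash
oc wait --namespace openshift-compliance --for=condition=Ready pod/pv-extract --timeout=180s
```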
|
||||
|
||||
With the volume mounted we can copy the results out to our machine:
|
||||
|
||||
```bash
|
||||
mkdir /mnt/high-side-data/compliance-results
|
||||
oc cp pv-extract:/ocp4-moderate-scan-results --namespace openshift-compliance /mnt/high-side-data/compliance-results
|
||||
```
|
||||
|
||||
After the copy has completed we should delete our helper pod to unbind the volume:
|
||||
|
||||
```bash
|
||||
oc delete pod pv-extract --namespace openshift-compliance
|
||||
```
|
||||
|
||||
|
||||
## 6.3 - Reviewing raw result files
|
||||
|
||||
Now that we have a copy of the raw result files, let's see what they look like.
|
||||
|
||||
Starting with an `ls -lah compliance-results/` in our highside terminal, we can see each scan result is stored in a numbered directory. Yours should look similar to the example below:
|
||||
|
||||
```bash
|
||||
drwxr-xr-x. 5 lab-user lab-user 42 Sep 1 20:35 .
|
||||
drwxr-xr-x. 7 lab-user lab-user 4.0K Sep 1 20:28 ..
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 0
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 1
|
||||
drwxr-xr-x. 2 lab-user lab-user 6 Sep 1 20:35 lost+found
|
||||
```
|
||||
|
||||
If we take a look at one of the specific directories with `ls -lah compliance-results/1/` we'll see an archive file:
|
||||
|
||||
```bash
|
||||
-rw-r--r--. 1 lab-user lab-user 251K Sep 1 20:35 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
```
|
||||
|
||||
Let's drop into that directory and extract it now to take a look at the contents, run the commands below in your highside ssh terminal:
|
||||
|
||||
> Note: If you get an error from the `bunzip2` command below you may need to first install it with `sudo yum install --yes bzip2`.
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data/compliance-results/1
|
||||
bunzip2 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
mv ocp4-moderate-api-checks-pod.xml.bzip2.out ocp4-moderate-api-checks-pod.xml
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Now we're getting somewhere: we can see we have an `.xml` file. Let's take a quick peek at the contents:
|
||||
|
||||
```bash
|
||||
head ocp4-moderate-api-checks-pod.xml
|
||||
```
|
||||
|
||||
You should see an xml document snippet similar to the example below:
|
||||
|
||||
```xml
|
||||
<core:relationships xmlns:arfvocab="http://scap.nist.gov/specifications/arf/vocabulary/relationships/1.0#">
|
||||
<core:relationship type="arfvocab:createdFor" subject="xccdf1">
|
||||
<core:ref>collection1</core:ref>
|
||||
</core:relationship>
|
||||
<core:relationship type="arfvocab:isAbout" subject="xccdf1">
|
||||
<core:ref>asset0</core:ref>
|
||||
</core:relationship>
|
||||
</core:relationships>
|
||||
```
|
||||
|
||||
|
||||
## 6.4 - Generating reports with openscap tooling
|
||||
|
||||
To finish off this exercise let's go one step further and use OpenSCAP tooling to generate an html based report we can open in our vnc Firefox browser.
|
||||
|
||||
Run the commands below in your high side terminal. We'll start by installing the `openscap-scanner` package.
|
||||
|
||||
```bash
|
||||
sudo yum install -y openscap-scanner
|
||||
```
|
||||
|
||||
Once the tooling is installed let's generate the report:
|
||||
|
||||
```bash
|
||||
oscap xccdf generate report ocp4-moderate-api-checks-pod.xml > report.html
|
||||
```
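
If you'd like to sanity check the ARF document before opening the HTML report, `oscap info` will print its document type and profile metadata:

```bash
oscap info ocp4-moderate-api-checks-pod.xml
```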
|
||||
|
||||
So far we've done all of this in our high side terminal. We need to get this report artifact to our low side server where our Firefox vnc session is running, so let's copy it out now:
|
||||
|
||||
```bash
|
||||
exit # Return to low side server
|
||||
rsync highside:/mnt/high-side-data/compliance-results/1/report.html /home/lab-user/Downloads/report.html
|
||||
```
|
||||
|
||||
Finally - we can open up our report in our web based Firefox vnc session! Once you've reviewed the report you can move on to exercise 7 🚀
|
||||
76
data/compliance/exercise7.mdx
Normal file
@ -0,0 +1,76 @@
|
||||
---
|
||||
title: Bonus - Making the most of rhacs
|
||||
exercise: 7
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Optional challenge - if you have time"
|
||||
---
|
||||
|
||||
So you've deployed Red Hat Advanced Cluster Security and completed some day one configuration. Now what? One of the key day two activities for RHACS in a disconnected environment is ensuring you can keep the vulnerability database up to date.
|
||||
|
||||
At a high level, the RHACS **Scanner** component maintains a database of vulnerabilities. When Red Hat Advanced Cluster Security for Kubernetes (RHACS) runs in normal mode, **Central** retrieves the latest vulnerability data from the internet, and Scanner retrieves vulnerability data from Central.
|
||||
|
||||
However, if you are using RHACS in offline mode, **you must manually update the vulnerability data**. To manually update the vulnerability data, you must upload a definitions file to Central, and Scanner then retrieves the vulnerability data from Central.
|
||||
|
||||
In both online and offline mode, Scanner checks for new data from Central every `5` minutes by default. In online mode, Central also checks for new data from the internet approximately every `5-20` minutes.
|
||||
|
||||
The offline data source is updated approximately every 3 hours. After the data has been uploaded to Central, Scanner downloads the data and updates its local vulnerability database.
|
||||
|
||||
|
||||
## 7.1 - Update rhacs definitions with roxctl
|
||||
|
||||
To update the definitions in offline mode, perform the following steps:
|
||||
|
||||
1. Download the definitions.
|
||||
2. Upload the definitions to Central.
|
||||
|
||||
As a challenge, try following the documentation https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode#download-scanner-definitions_enable-offline-mode to perform the update.
|
||||
|
||||
> Note: I suggest exploring the `roxctl` CLI as the method for downloading updates in your low side environment. You could then copy both `roxctl` and the definitions update to your high side environment and use `roxctl` once more (this time with an API token) in order to update the definitions.
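
As a rough outline of that flow, the sketch below follows the offline-mode documentation. The download URL and the `roxctl scanner upload-db` sub-command come from those docs, while the Central endpoint, token value, and file locations are placeholders you will need to substitute for your environment:

```bash
# On the low side, where internet access is available:
curl -L -o scanner-vuln-updates.zip https://install.stackrox.io/scanner/scanner-vuln-updates.zip

# ...transfer roxctl and scanner-vuln-updates.zip across the air gap...

# On the high side, authenticate with an API token and upload to Central:
export ROX_API_TOKEN="<your-api-token>"                               # placeholder
export ROX_CENTRAL_ADDRESS="central-acs-central.apps.disco.lab:443"   # placeholder
roxctl scanner upload-db --endpoint "$ROX_CENTRAL_ADDRESS" --scanner-db-file scanner-vuln-updates.zip
```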
|
||||
|
||||
|
||||
## 7.2 - Prioritise security remediation by risk
|
||||
|
||||
Completed your vulnerability definitions update? Awesome! Feel free to explore some of the other features of Red Hat Advanced Cluster Security using your web based vnc session and the RHACS dashboard.
|
||||
|
||||
Let’s take a look at the **Risk** view, where we go beyond the basics of vulnerabilities to understand how deployment configuration and runtime activity impact the likelihood of an exploit occurring and how successful those exploits will be.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Understanding risk exposure in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Risk is also influenced by runtime activity - and Deployments that have activity that could indicate a breach in progress have a red dot on the left. Obviously - the first one in the list should be our first focus.
|
||||
|
||||
The reality of security is that it’s just not possible to tackle all sources of Risk, so organizations end up prioritizing their efforts. We want RHACS to help inform that prioritization.
|
||||
|
||||
As a challenge, have a go at mirroring and deploying a new additional container image into your disconnected environment, repeating the steps we completed earlier. Try creating a deployment for that image to bring it up on your cluster; the **Developer** perspective in the OpenShift Web Console can save you some time here.
|
||||
|
||||
Once the container is running, use the RHACS dashboard to check what the deployment's risk level is. What factors are contributing to that?
|
||||
|
||||
|
||||
## 7.3 - Exploring the rhacs policy engine
|
||||
|
||||
Red Hat Advanced Cluster Security for Kubernetes allows you to use out-of-the-box security policies and define custom multi-factor policies for your container environment.
|
||||
|
||||
Configuring these policies enables you to automatically prevent high-risk service deployments in your environment and respond to runtime security incidents.
|
||||
|
||||
All of the policies that ship with the product are designed with the goal of providing targeted remediation that improves security hardening.
|
||||
|
||||
Take some time to review the default policies by clicking **Platform Configuration** > **Policy Management**. You’ll see this list contains many **Build** and **Deploy** time policies to catch misconfigurations early in the pipeline, but also **Runtime** policies that point back to specific hardening recommendations.
|
||||
|
||||
These policies come from us at Red Hat - our expertise, our interpretation of industry best practice, and our interpretation of common compliance standards, but you can modify them or create your own.
|
||||
|
||||
If you have some time take a look at the options for editing default policies to change their enforcement behavior or scope.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Policy management in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
If you're ready for a different topic, head over to Exercise 8 for the final tasks today: deploying Red Hat Developer Hub 🙂
|
||||
78
data/compliance/exercise8.mdx
Normal file
@ -0,0 +1,78 @@
|
||||
---
|
||||
title: Bonus - Installing red hat developer hub
|
||||
exercise: 8
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','backstage','developer-hub','operator']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Upping our dx in a disconnected environment"
|
||||
---
|
||||
|
||||
We've had a good dig into cluster security and compliance. Let's change gears for this final exercise to get some quick practice deploying [Red Hat Developer Hub](https://developers.redhat.com/rhdh/overview) in a disconnected cluster.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 8.1 - Deploying red hat developer hub
|
||||
|
||||
Earlier in exercise 3 we deployed the Red Hat Developer Hub Operator. We'll now instruct that operator to deploy an instance of Developer Hub for us by creating a `Backstage` custom resource.
|
||||
|
||||
Run the following command in your highside terminal session:
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhdh
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: rhdh.redhat.com/v1alpha1
|
||||
kind: Backstage
|
||||
metadata:
|
||||
name: developer-hub
|
||||
namespace: rhdh
|
||||
spec:
|
||||
application:
|
||||
appConfig:
|
||||
mountPath: /opt/app-root/src
|
||||
extraFiles:
|
||||
mountPath: /opt/app-root/src
|
||||
replicas: 1
|
||||
route:
|
||||
enabled: true
|
||||
database:
|
||||
enableLocalDb: true
|
||||
EOF
|
||||
```
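
It can take a few minutes for the operator to reconcile the new `Backstage` resource. You can keep an eye on progress from the same terminal:

```bash
# Watch until the Developer Hub and local database pods report Ready (Ctrl+C to stop)
oc get pods --namespace rhdh --watch
```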
|
||||
|
||||
Once the pods in the `rhdh` namespace are `Ready` we can retrieve and open the `Route` for our new Developer Hub instance and complete our first time login.
|
||||
|
||||
```bash
|
||||
oc get route --namespace rhdh backstage-developer-hub --output jsonpath='{.spec.host}'
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *First login for Red Hat Developer Hub* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 8.2 - Understanding developer hub
|
||||
|
||||
With Developer Hub deployed, you will notice by default there isn't much going on in the dashboard. This is because Developer Hub is a platform that has to be specifically customised for your environment through the extraordinary plugin ecosystem.
|
||||
|
||||
Take a moment to explore what directions you could potentially take your deployment via the plugin marketplace https://backstage.io/plugins.
|
||||
|
||||
Red Hat supports a curated and opinionated set of plugins; you can take a look at those here: https://developers.redhat.com/rhdh/plugins
|
||||
|
||||
We don't have time in this workshop to fully dig into Red Hat Developer Hub. However, if you do finish the security and compliance focused tasks ahead of schedule, please feel free to review https://www.youtube.com/watch?v=tvVOC0mFR_4 to get a feel for how Developer Hub templates can be used.
|
||||
|
||||
89
data/disconnected/exercise1.mdx
Normal file
@ -0,0 +1,89 @@
|
||||
---
|
||||
title: Understanding our lab environment
|
||||
exercise: 1
|
||||
date: '2023-12-18'
|
||||
tags: ['openshift','containers','kubernetes','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get familiar with our lab setup."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift 4 Disconnected Workshop! Here you'll learn about operating an OpenShift 4 cluster in a disconnected network. For our purposes today, that will be a network without access to the internet (even through a proxy or firewall).
|
||||
|
||||
To level set, Red Hat [OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. OpenShift supports running in disconnected networks, though this does change the way the cluster operates because key ingredients like container images, operator bundles, and helm charts must be brought into the environment from the outside world via mirroring.
|
||||
|
||||
There are of course many different options for installing OpenShift in a restricted network; this workshop will primarily cover one opinionated approach. We'll do our best to point out where there's the potential for variability along the way.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - Obtaining your environment
|
||||
|
||||
To get underway open your web browser and navigate to this etherpad link to reserve yourself a user https://etherpad.wikimedia.org/p/OpenShiftDisco_2023_12_20. You can reserve a user by noting your name or initials next to a user that has not yet been claimed.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Etherpad collaborative editor* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Opening your web terminal
|
||||
|
||||
Throughout the remainder of the workshop you will be using a number of command line interface tools, for example `aws` to quickly interact with resources in Amazon Web Services, and `ssh` to log in to a remote server.
|
||||
|
||||
To save you from needing to install or configure these tools on your own device for the remainder of this workshop a web terminal will be available for you.
|
||||
|
||||
Simply copy the link next to the user you reserved in etherpad and paste it into your browser. If you are prompted to log in, select `htpass` and enter the credentials listed in etherpad.
|
||||
|
||||
|
||||
## 1.3 - Creating an air gap
|
||||
|
||||
According to the [Internet Security Glossary](https://www.rfc-editor.org/rfc/rfc4949), an Air Gap is:
|
||||
|
||||
> "an interface between two systems at which (a) they are not connected physically and (b) any logical connection is not automated (i.e., data is transferred through the interface only manually, under human control)."
|
||||
|
||||
In disconnected OpenShift installations, the air gap exists between the **Low Side** and the **High Side**, so it is between these systems where a manual data transfer, or **sneakernet** is required.
|
||||
|
||||
For the purposes of this workshop we will be operating within Amazon Web Services. You have been allocated a set of credentials for an environment that already has some basic preparation completed. This will be a single VPC with 3 public subnets, which will serve as our **Low Side**, and 3 private subnets, which will serve as our **High Side**.
|
||||
|
||||
The diagram below shows a simplified overview of the networking topology:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop network topology* |
|
||||
</Zoom>
|
||||
|
||||
Let's check the virtual private cloud network is created using the `aws` command line interface by copying the command below into our web terminal:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-vpcs | jq '.Vpcs[] | select(.Tags[].Value=="disco").VpcId' -r
|
||||
```
|
||||
|
||||
You should see output similar to the example below:
|
||||
|
||||
```text
|
||||
vpc-0e6d176c7d9c94412
|
||||
```
|
||||
|
||||
We can also check our three public **Low side** and three private **High side** subnets are ready to go by running the command below in our web terminal:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-subnets | jq '[.Subnets[].Tags[] | select(.Key=="Name").Value] | sort'
|
||||
```
|
||||
|
||||
We should see output matching this example:
|
||||
|
||||
```bash
|
||||
[
|
||||
"Private Subnet - disco",
|
||||
"Private Subnet 2 - disco",
|
||||
"Private Subnet 3 - disco",
|
||||
"Public Subnet - disco",
|
||||
"Public Subnet 2 - disco",
|
||||
"Public Subnet 3 - disco"
|
||||
]
|
||||
```
|
||||
|
||||
If your environment access and topology is all working you've finished exercise 1! 🎉
|
||||
214
data/disconnected/exercise2.mdx
Normal file
@ -0,0 +1,214 @@
|
||||
---
|
||||
title: Preparing our low side
|
||||
exercise: 2
|
||||
date: '2023-12-18'
|
||||
tags: ['openshift','containers','kubernetes','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Downloading content and tooling for sneaker ops 💾"
|
||||
---
|
||||
|
||||
A disconnected OpenShift installation begins with downloading content and tooling to a prep system that has outbound access to the Internet. This server resides in an environment commonly referred to as the **Low side** due to its low security profile.
|
||||
|
||||
In this exercise we will be creating a new [AWS ec2 instance](https://aws.amazon.com/ec2) in our **Low side** that we will carry out all our preparation activities on.
|
||||
|
||||
|
||||
## 2.1 - Creating a security group
|
||||
|
||||
We'll start by creating an [AWS security group](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html) and collecting its ID.
|
||||
|
||||
We're going to use this shortly for the **Low side** prep system, and later on in the workshop for the **High side** bastion server.
|
||||
|
||||
Copy the commands below into your web terminal:
|
||||
|
||||
```bash
|
||||
# Obtain vpc id
|
||||
VPC_ID=$(aws ec2 describe-vpcs | jq '.Vpcs[] | select(.Tags[].Value=="disco").VpcId' -r)
|
||||
echo "Virtual private cloud id is: ${VPC_ID}"
|
||||
|
||||
# Obtain first public subnet id
|
||||
PUBLIC_SUBNET=$(aws ec2 describe-subnets | jq '.Subnets[] | select(.Tags[].Value=="Public Subnet - disco").SubnetId' -r)
|
||||
|
||||
# Create security group
|
||||
aws ec2 create-security-group --group-name disco-sg --description disco-sg --vpc-id ${VPC_ID} --tag-specifications "ResourceType=security-group,Tags=[{Key=Name,Value=disco-sg}]"
|
||||
|
||||
# Store security group id
|
||||
SG_ID=$(aws ec2 describe-security-groups --filters "Name=tag:Name,Values=disco-sg" | jq -r '.SecurityGroups[0].GroupId')
|
||||
echo "Security group id is: ${SG_ID}"
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Creating aws ec2 security group* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.2 - Opening ssh port ingress
|
||||
|
||||
We will want to login to our soon to be created **Low side** aws ec2 instance remotely via `ssh` so let's enable ingress on port `22` for this security group now:
|
||||
|
||||
> Note: We're going to allow traffic from all sources for simplicity (`0.0.0.0/0`), but this is likely to be more restrictive in real world environments:
|
||||
|
||||
```bash
|
||||
aws ec2 authorize-security-group-ingress --group-id $SG_ID --protocol tcp --port 22 --cidr 0.0.0.0/0
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Opening ssh port ingress* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.3 - Create prep system instance
|
||||
|
||||
Ready to launch! 🚀 We'll use the `t3.micro` instance type, which offers `1GiB` of RAM and `2` vCPUs, along with a `50GiB` storage volume to ensure we have enough storage for mirrored content:
|
||||
|
||||
> Note: As mentioned in [OpenShift documentation](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html/installing/disconnected-installation-mirroring) about 12 GB of storage space is required for OpenShift Container Platform 4.14 release images, or additionally about 358 GB for OpenShift Container Platform 4.14 release images and all OpenShift Container Platform 4.14 Red Hat Operator images.
|
||||
|
||||
Run the command below in your web terminal to launch the instance. We will specify an Amazon Machine Image (AMI) to use for our prep system which for this lab will be the [Marketplace AMI for RHEL 8](https://access.redhat.com/solutions/15356#us_east_2) in `us-east-2`.
|
||||
|
||||
```bash
|
||||
aws ec2 run-instances --image-id "ami-092b43193629811af" \
|
||||
--count 1 --instance-type t3.micro \
|
||||
--key-name disco-key \
|
||||
--security-group-ids $SG_ID \
|
||||
--subnet-id $PUBLIC_SUBNET \
|
||||
--associate-public-ip-address \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=disco-prep-system}]" \
|
||||
--block-device-mappings "DeviceName=/dev/sdh,Ebs={VolumeSize=50}"
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Launching a prep rhel8 ec2 instance* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.4 - Connecting to the low side
|
||||
|
||||
Now that our prep system is up, let's `ssh` into it and download the content we'll need to support our install on the **High side**.
|
||||
|
||||
Copy the commands below into your web terminal. Let's start by retrieving the IP for the new ec2 instance and then connecting via `ssh`:
|
||||
|
||||
> Note: If your `ssh` command times out here, your prep system is likely still booting up. Give it a minute and try again.
|
||||
|
||||
```bash
|
||||
PREP_SYSTEM_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=disco-prep-system" | jq -r '.Reservations[0].Instances[0].PublicIpAddress')
|
||||
echo $PREP_SYSTEM_IP
|
||||
|
||||
ssh -i disco_key ec2-user@$PREP_SYSTEM_IP
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Connecting to the prep rhel8 ec2 instance* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.5 - Downloading required tools
|
||||
|
||||
For the purposes of this workshop, rather than downloading mirror content to a USB drive as we would likely do in a real SneakerOps situation, we will instead be saving content to an EBS volume which will be mounted to our prep system on the **Low side** and then subsequently synced to our bastion system on the **High side**.
|
||||
|
||||
Once your prep system has booted let's mount the EBS volume we attached so we can start downloading content. Copy the commands below into your web terminal:
|
||||
|
||||
```bash
|
||||
sudo mkfs -t xfs /dev/nvme1n1
|
||||
sudo mkdir /mnt/high-side
|
||||
sudo mount /dev/nvme1n1 /mnt/high-side
|
||||
sudo chown ec2-user:ec2-user /mnt/high-side
|
||||
cd /mnt/high-side
|
||||
```
|
||||
|
||||
With our mount in place let's grab the tools we'll need for the bastion server - we'll use some of them on the prep system too. Life's good on the low side; we can download these from the internet and tuck them into our **High side** gift basket at `/mnt/high-side`.
|
||||
|
||||
There are four tools we need, copy the commands into your web terminal to download each one:
|
||||
|
||||
1. `oc` OpenShift cli
|
||||
|
||||
```bash
|
||||
curl https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz -L -o oc.tar.gz
|
||||
tar -xzf oc.tar.gz oc && rm -f oc.tar.gz
|
||||
sudo cp oc /usr/local/bin/
|
||||
```
|
||||
|
||||
2. `oc-mirror` oc plugin for mirroring release, operator, and helm content
|
||||
|
||||
```bash
|
||||
curl https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/oc-mirror.tar.gz -L -o oc-mirror.tar.gz
|
||||
tar -xzf oc-mirror.tar.gz && rm -f oc-mirror.tar.gz
|
||||
chmod +x oc-mirror
|
||||
sudo cp oc-mirror /usr/local/bin/
|
||||
```
|
||||
|
||||
3. `mirror-registry` small-scale Quay registry designed for mirroring
|
||||
|
||||
```bash
|
||||
curl https://mirror.openshift.com/pub/openshift-v4/clients/mirror-registry/latest/mirror-registry.tar.gz -L -o mirror-registry.tar.gz
|
||||
tar -xzf mirror-registry.tar.gz
|
||||
rm -f mirror-registry.tar.gz
|
||||
```
|
||||
|
||||
4. `openshift-installer` The OpenShift installer cli
|
||||
|
||||
```bash
|
||||
curl https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-install-linux.tar.gz -L -o openshift-installer.tar.gz
|
||||
tar -xzf openshift-installer.tar.gz openshift-install
|
||||
rm -f openshift-installer.tar.gz
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Downloading required tools with curl* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.6 - Mirroring content to disk
|
||||
|
||||
The `oc-mirror` plugin supports mirroring content directly from upstream sources to a mirror registry, but since there is an air gap between our **Low side** and **High side**, that's not an option for this lab. Instead, we'll mirror content to a tarball on disk that we can then sneakernet into the bastion server on the **High side**. We'll then mirror from the tarball into the mirror registry from there.
|
||||
|
||||
> Note: A pre-requisite for this process is an OpenShift pull secret to authenticate to the Red Hat registries. This has already been created for you to avoid the delay of registering for individual Red Hat accounts during this workshop. You can copy this into your newly created prep system by running `scp -pr -i disco_key .docker ec2-user@$PREP_SYSTEM_IP:` in your web terminal. In a real world scenario this pull secret can be downloaded from https://console.redhat.com/openshift/install/pull-secret.
|
||||
|
||||
Let's get started by generating an `ImageSetConfiguration` that describes the parameters of our mirror. Run the command below to generate a boilerplate configuration file, it may take a minute:
|
||||
|
||||
```bash
|
||||
oc mirror init > imageset-config.yaml
|
||||
```
|
||||
|
||||
> Note: You can take a look at the default file by running `cat imageset-config.yaml` in your web terminal. Feel free to pause the workshop tasks for a few minutes and read through the [OpenShift documentation](https://docs.openshift.com/container-platform/4.14/updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.html#oc-mirror-creating-image-set-config_mirroring-ocp-image-repository) for the different options available within the image set configuration.
|
||||
|
||||
To save time and storage, we're going to remove the operator catalogs and mirror only the release images for this workshop. We'll still get a fully functional cluster, but OperatorHub will be empty.
|
||||
|
||||
To complete this, replace your `imageset-config.yaml` with a version that omits the `operators` section by running the command below in your web terminal:
|
||||
|
||||
```bash
|
||||
cat << EOF > imageset-config.yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/ubi8/ubi:latest
|
||||
helm: {}
|
||||
EOF
|
||||
```
|
||||
|
||||
Now we're ready to kick off the mirror! This can take 5-15 minutes so this is a good time to go grab a coffee or take a short break:
|
||||
|
||||
> Note: If you're keen to see a bit more verbose output to track the progress of the mirror to disk process you can add the `-v 5` flag to the command below.
|
||||
|
||||
```bash
|
||||
oc mirror --config imageset-config.yaml file:///mnt/high-side
|
||||
```
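
When the mirror completes you should find a sequenced archive in the target directory; this is the artifact we'll sneakernet across the air gap in the next exercise (the sequence number may differ if you re-run the mirror):

```bash
ls -lh /mnt/high-side/mirror_seq1_000000.tar
```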
|
||||
|
||||
Once your content has finished mirroring to disk you've finished exercise 2! 🎉
|
||||
119
data/disconnected/exercise3.mdx
Normal file
@ -0,0 +1,119 @@
|
||||
---
|
||||
title: Preparing our high side
|
||||
exercise: 3
|
||||
date: '2023-12-19'
|
||||
tags: ['openshift','containers','kubernetes','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Setting up a bastion server and transferring content"
|
||||
---
|
||||
|
||||
In this exercise, we'll prepare the **High side**. This involves creating a bastion server on the **High side** that will host our mirror registry.
|
||||
|
||||
> Note: We have an interesting dilemma for this exercise: the Amazon Machine Image we used for the prep system earlier does not have `podman` installed. We need `podman`, since it is a key dependency for `mirror-registry`.
|
||||
>
|
||||
> We could rectify this by running `sudo dnf install -y podman` on the bastion system, but the bastion server won't have Internet access, so we need another option for this lab. To solve this problem, we need to build our own RHEL image with podman pre-installed. Real customer environments will likely already have a solution for this, but one approach is to use the [Image Builder](https://console.redhat.com/insights/image-builder) in the Hybrid Cloud Console, and that's exactly what has been done for this lab.
|
||||
>
|
||||
> ![workshop](/static/images/disconnected/image-builder.png)
|
||||
>
|
||||
> In the home directory of your web terminal you will find an `ami.txt` file containing our custom image AMI which will be used by the command that creates our bastion ec2 instance.
|
||||
|
||||
|
||||
## 3.1 - Creating a bastion server
|
||||
|
||||
First up for this exercise we'll grab the ID of one of our **High side** private subnets as well as our ec2 security group.
|
||||
|
||||
Copy the commands below into your web terminal:
|
||||
|
||||
```bash
|
||||
PRIVATE_SUBNET=$(aws ec2 describe-subnets | jq '.Subnets[] | select(.Tags[].Value=="Private Subnet - disco").SubnetId' -r)
|
||||
echo $PRIVATE_SUBNET
|
||||
|
||||
SG_ID=$(aws ec2 describe-security-groups --filters "Name=tag:Name,Values=disco-sg" | jq -r '.SecurityGroups[0].GroupId')
|
||||
echo $SG_ID
|
||||
```
|
||||
|
||||
Once we know our subnet and security group IDs we can spin up our **High side** bastion server. Copy the commands below into your web terminal to complete this:
|
||||
|
||||
```bash
|
||||
aws ec2 run-instances --image-id $(cat ami.txt) \
|
||||
--count 1 \
|
||||
--instance-type t3.large \
|
||||
--key-name disco-key \
|
||||
--security-group-ids $SG_ID \
|
||||
--subnet-id $PRIVATE_SUBNET \
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=disco-bastion-server}]" \
|
||||
--block-device-mappings "DeviceName=/dev/sdh,Ebs={VolumeSize=50}"
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Launching bastion ec2 instance* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.2 - Accessing the high side
|
||||
|
||||
Now we need to access our bastion server on the high side. In real customer environments, this might entail use of a VPN, or physical access to a workstation in a secure facility such as a SCIF.
|
||||
|
||||
To make things a bit simpler for our lab, we're going to restrict access to our bastion to its private IP address. So we'll use the prep system as a sort of bastion-to-the-bastion.
|
||||
|
||||
Let's get access by grabbing the bastion's private IP.
|
||||
|
||||
```bash
|
||||
HIGHSIDE_BASTION_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=disco-bastion-server" | jq -r '.Reservations[0].Instances[0].PrivateIpAddress')
|
||||
echo $HIGHSIDE_BASTION_IP
|
||||
```
|
||||
|
||||
Our next step will be to `exit` back to our web terminal and copy our private key to the prep system so that we can `ssh` to the bastion from there. You may have to wait a minute for the VM to finish initializing:
|
||||
|
||||
```bash
|
||||
PREP_SYSTEM_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=disco-prep-system" | jq -r '.Reservations[0].Instances[0].PublicIpAddress')
|
||||
|
||||
scp -i disco_key disco_key ec2-user@$PREP_SYSTEM_IP:/home/ec2-user/disco_key
|
||||
```
|
||||
|
||||
To make life a bit easier down the track let's set an environment variable on the prep system so that we can preserve the bastion's IP:
|
||||
|
||||
```bash
|
||||
ssh -i disco_key ec2-user@$PREP_SYSTEM_IP "echo HIGHSIDE_BASTION_IP=$(echo $HIGHSIDE_BASTION_IP) > highside.env"
|
||||
```
|
||||
|
||||
Finally - Let's now connect all the way through to our **High side** bastion 🚀
|
||||
|
||||
```bash
|
||||
ssh -t -i disco_key ec2-user@$PREP_SYSTEM_IP "ssh -t -i disco_key ec2-user@$HIGHSIDE_BASTION_IP"
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Connecting to our bastion ec2 instance* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.3 - Sneakernetting content to the high side
|
||||
|
||||
We'll now deliver the **High side** gift basket to the bastion server. Start by mounting our EBS volume on the bastion server to ensure that we don't run out of space:
|
||||
|
||||
```bash
|
||||
sudo mkfs -t xfs /dev/nvme1n1
|
||||
sudo mkdir /mnt/high-side
|
||||
sudo mount /dev/nvme1n1 /mnt/high-side
|
||||
sudo chown ec2-user:ec2-user /mnt/high-side
|
||||
```
|
||||
|
||||
With the mount in place we can exit back to our base web terminal and send over our gift basket at `/mnt/high-side` using `rsync`. This can take 10-15 minutes depending on the size of the mirror tarball.
|
||||
|
||||
```bash
|
||||
ssh -t -i disco_key ec2-user@$PREP_SYSTEM_IP "rsync -avP -e 'ssh -i disco_key' /mnt/high-side ec2-user@$HIGHSIDE_BASTION_IP:/mnt"
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Initiating the sneakernet transfer via rsync* |
|
||||
</Zoom>
|
||||
|
||||
Once your transfer has finished pushing you are finished with exercise 3, well done! 🎉
|
||||
102
data/disconnected/exercise4.mdx
Normal file
@ -0,0 +1,102 @@
|
||||
---
|
||||
title: Deploying a mirror registry
|
||||
exercise: 4
|
||||
date: '2023-12-20'
|
||||
tags: ['openshift','containers','kubernetes','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's start mirroring some content on our high side!"
|
||||
---
|
||||
|
||||
Images used by operators and platform components must be mirrored from upstream sources into a container registry that is accessible by the **High side**. You can use any registry you like for this as long as it supports Docker `v2-2`, such as:
|
||||
- Red Hat Quay
|
||||
- JFrog Artifactory
|
||||
- Sonatype Nexus Repository
|
||||
- Harbor
|
||||
|
||||
An OpenShift subscription includes access to the [mirror registry](https://docs.openshift.com/container-platform/4.14/installing/disconnected_install/installing-mirroring-creating-registry.html#installing-mirroring-creating-registry) for Red Hat OpenShift, which is a small-scale container registry designed specifically for mirroring images in disconnected installations. We'll make use of this option in this lab.
|
||||
|
||||
Mirroring all release and operator images can take awhile depending on the network bandwidth. For this lab, recall that we're going to mirror just the release images to save time and resources.
|
||||
|
||||
We should have the `mirror-registry` binary along with the required container images available on the bastion in `/mnt/high-side`. The `50GB` volume we created should be enough to hold our mirror (without operators) and binaries.
|
||||
|
||||
|
||||
## 4.1 - Opening mirror registry port ingress
|
||||
|
||||
We are getting close to deploying a disconnected OpenShift cluster that will be spread across multiple machines which are in turn spread across our three private subnets.
|
||||
|
||||
Each of the machines in those private subnets will need to talk back to our mirror registry on port `8443` so let's quickly update our aws security group to ensure this will work.
|
||||
|
||||
> Note: We're going to allow traffic from all sources for simplicity (`0.0.0.0/0`), but this is likely to be more restrictive in real world environments:
|
||||
|
||||
```bash
|
||||
SG_ID=$(aws ec2 describe-security-groups --filters "Name=tag:Name,Values=disco-sg" | jq -r '.SecurityGroups[0].GroupId')
|
||||
|
||||
aws ec2 authorize-security-group-ingress --group-id $SG_ID --protocol tcp --port 8443 --cidr 0.0.0.0/0
|
||||
```
|
||||
|
||||
|
||||
## 4.2 - Running the registry install
|
||||
|
||||
First, let's `ssh` back into the bastion:
|
||||
|
||||
```bash
|
||||
ssh -t -i disco_key ec2-user@$PREP_SYSTEM_IP "ssh -t -i disco_key ec2-user@$HIGHSIDE_BASTION_IP"
|
||||
```
|
||||
|
||||
And then we can kick off our install:
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side
|
||||
./mirror-registry install --quayHostname $(hostname) --quayRoot /mnt/high-side/quay/quay-install --quayStorage /mnt/high-side/quay/quay-storage --pgStorage /mnt/high-side/quay/pg-data --initPassword discopass
|
||||
```
|
||||
|
||||
If all goes well, you should see something like:
|
||||
|
||||
```text
|
||||
INFO[2023-07-06 15:43:41] Quay installed successfully, config data is stored in /mnt/quay/quay-install
|
||||
INFO[2023-07-06 15:43:41] Quay is available at https://ip-10-0-51-47.ec2.internal:8443 with credentials (init, discopass)
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Running the mirror-registry installer* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.3 - Logging into the mirror registry
|
||||
|
||||
Now that our registry is running, let's log in with `podman`, which will generate an auth file at `/run/user/1000/containers/auth.json`.
|
||||
|
||||
```bash
|
||||
podman login -u init -p discopass --tls-verify=false $(hostname):8443
|
||||
```
|
||||
|
||||
We should be greeted with `Login Succeeded!`.
|
||||
|
||||
> Note: We pass `--tls-verify=false` here for simplicity during this workshop, but you can optionally add `/mnt/high-side/quay/quay-install/quay-rootCA/rootCA.pem` to the system trust store by following the guide in the Quay documentation [here](https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/using-ssl-to-protect-quay?extIdCarryOver=true&sc_cid=701f2000001OH74AAG#configuring_the_system_to_trust_the_certificate_authority).
|
||||
|
||||
|
||||
## 4.4 - Pushing content into mirror registry
|
||||
|
||||
Now we're ready to mirror images from disk into the registry. Let's add `oc` and `oc-mirror` to the path:
|
||||
|
||||
```bash
|
||||
sudo cp /mnt/high-side/oc /usr/local/bin/
|
||||
sudo cp /mnt/high-side/oc-mirror /usr/local/bin/
|
||||
```
|
||||
|
||||
And now we fire up the mirror process to push our content from disk into the registry ready to be pulled by the OpenShift installation. This can take a similar amount of time to the sneakernet procedure we completed in exercise 3.
|
||||
|
||||
```bash
|
||||
oc mirror --from=/mnt/high-side/mirror_seq1_000000.tar --dest-skip-tls docker://$(hostname):8443
|
||||
```
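
Alongside pushing the images, `oc-mirror` also writes out the manifests a cluster needs in order to consume the mirror, such as an `ImageContentSourcePolicy`. These land in a timestamped results directory inside the directory you ran the command from, for example:

```bash
ls oc-mirror-workspace/results-*/
```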
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Running the oc mirror process to push content to our registry* |
|
||||
</Zoom>
|
||||
|
||||
Once your content has finished pushing you are finished with exercise 4, well done! 🎉
|
||||
219
data/disconnected/exercise5.mdx
Normal file
@ -0,0 +1,219 @@
|
||||
---
|
||||
title: Installing a disconnected OpenShift cluster
|
||||
exercise: 5
|
||||
date: '2023-12-20'
|
||||
tags: ['openshift','containers','kubernetes','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to install a cluster 🚀"
|
||||
---
|
||||
|
||||
We're on the home straight now. In this exercise we'll configure and then execute our `openshift-installer`.
|
||||
|
||||
The OpenShift installation process is initiated from the bastion server on our **High side**. There are a handful of different ways to install OpenShift, but for this lab we're going to be using installer-provisioned infrastructure (IPI).
|
||||
|
||||
By default, the installation program acts as an installation wizard, prompting you for values that it cannot determine on its own and providing reasonable default values for the remaining parameters.
|
||||
|
||||
We'll then customize the `install-config.yaml` file that is produced to specify advanced configuration for our disconnected installation. The installation program then provisions the underlying infrastructure for the cluster. Here's a diagram describing the inputs and outputs of the installation configuration process:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Installation overview* |
|
||||
</Zoom>
|
||||
|
||||
> Note: You may notice that nodes are provisioned through a process called Ignition. This concept is out of scope for this workshop, but if you're interested to learn more about it, you can read up on it in the documentation [here](https://docs.openshift.com/container-platform/4.14/installing/index.html#about-rhcos).
|
||||
|
||||
IPI is the recommended installation method in most cases because it leverages full automation in installation and cluster management, but there are some key considerations to keep in mind when planning a production installation in a real world scenario.
|
||||
|
||||
You may not have access to the infrastructure APIs. Our lab is going to live in AWS, which requires connectivity to the `.amazonaws.com` domain. We accomplish this by using an allowed list on a Squid proxy running on the **High side**, but a similar approach may not be achievable or permissible for everyone.
|
||||
|
||||
You may not have sufficient permissions with your infrastructure provider. Our lab has full admin in our AWS enclave, so that's not a constraint we'll need to deal with. In real world environments, you'll need to ensure your account has the appropriate permissions which sometimes involves negotiating with security teams.
|
||||
|
||||
Once configuration has been completed, we can kick off the OpenShift Installer and it will do all the work for us to provision the infrastructure and install OpenShift.
|
||||
|
||||
|
||||
## 5.1 - Building install-config.yaml
|
||||
|
||||
Before we run the installer we need to create a configuration file. Let's set up a workspace for it first.
|
||||
|
||||
```bash
|
||||
mkdir /mnt/high-side/install
|
||||
cd /mnt/high-side/install
|
||||
```
|
||||
|
||||
Next we will generate the ssh key pair for access to cluster nodes:
|
||||
|
||||
```bash
|
||||
ssh-keygen -f ~/.ssh/disco-openshift-key -q -N ""
|
||||
```
|
||||
|
||||
Use the following Python code to minify your mirror container registry pull secret to a single line. Copy this output to your clipboard, since you'll need it in a moment:
|
||||
|
||||
```bash
|
||||
python3 -c $'import json\nimport sys\nwith open(sys.argv[1], "r") as f: print(json.dumps(json.load(f)))' /run/user/1000/containers/auth.json
|
||||
```
|
||||
|
||||
> Note: For connected installations, you'd use the secret from the Hybrid Cloud Console, but for our use case, the mirror registry is the only one OpenShift will need to authenticate to.
|
||||
|
||||
Then we can go ahead and generate our `install-config.yaml`:
|
||||
|
||||
> Note: We are setting `--log-level` to get more verbose output.
|
||||
|
||||
```bash
|
||||
/mnt/high-side/openshift-install create install-config --dir /mnt/high-side/install --log-level=DEBUG
|
||||
```
|
||||
|
||||
The OpenShift installer will prompt you for a number of fields; enter the values below:
|
||||
|
||||
- SSH Public Key: `/home/ec2-user/.ssh/disco-openshift-key.pub`
|
||||
> The SSH public key used to access all nodes within the cluster.
|
||||
|
||||
- Platform: aws
|
||||
> The platform on which the cluster will run.
|
||||
|
||||
- AWS Access Key ID and Secret Access Key: From `cat ~/.aws/credentials`
|
||||
|
||||
- Region: `us-east-2`
|
||||
|
||||
- Base Domain: `sandboxXXXX.opentlc.com`. This should populate automatically.
|
||||
> The base domain of the cluster. All DNS records will be sub-domains of this base and will also include the cluster name.
|
||||
|
||||
- Cluster Name: `disco`
|
||||
> The name of the cluster. This will be used when generating sub-domains.
|
||||
|
||||
- Pull Secret: Paste the minified pull secret output you copied to your clipboard earlier.
|
||||
|
||||
That's it! The installer will generate `install-config.yaml` and drop it in `/mnt/high-side/install` for you.
|
||||
|
||||
Once the config file is generated, take a look through it. We will be making the following changes:
|
||||
|
||||
- Change `publish` from `External` to `Internal`. We're using private subnets to house the cluster, so it won't be publicly accessible.
|
||||
|
||||
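If you'd prefer to script this change rather than edit the file by hand, a minimal sketch using `sed` is:

```bash
# Switch the publish strategy so cluster endpoints are only reachable privately
sed -i 's/^publish: External/publish: Internal/' install-config.yaml
```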
- Add the subnet IDs for your private subnets to `platform.aws.subnets`. Otherwise, the installer will create its own VPC and subnets. You can retrieve them by running this command from your workstation:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-subnets | jq '[.Subnets[] | select(.Tags[].Value | contains ("Private")).SubnetId] | unique' -r | yq read - -P
|
||||
```
|
||||
|
||||
Then add them to `platform.aws.subnets` in your `install-config.yaml` so that they look something like this:
|
||||
|
||||
```yaml
|
||||
platform:
|
||||
aws:
|
||||
region: us-east-2
|
||||
subnets:
|
||||
- subnet-00f28bbc11d25d523
|
||||
- subnet-07b4de5ea3a39c0fd
|
||||
```
|
||||
|
||||
- Next we need to modify the `machineNetwork` to match the IPv4 CIDR blocks from the private subnets. Otherwise your control plane and compute nodes will be assigned IP addresses that are out of range and break the install. You can retrieve them by running this command from your workstation:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-subnets | jq '[.Subnets[] | select(.Tags[].Value | contains ("Private")).CidrBlock] | unique | map("cidr: " + .)' | yq read -P - | sed "s/'//g"
|
||||
```
|
||||
|
||||
Then use them to **replace the existing** `networking.machineNetwork` entry in your `install-config.yaml` so that they look something like this:
|
||||
|
||||
```yaml
|
||||
networking:
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.0.48.0/20
|
||||
- cidr: 10.0.64.0/20
|
||||
- cidr: 10.0.80.0/20
|
||||
```
|
||||
|
||||
- Next we will add the `imageContentSources` to ensure image mappings happen correctly. You can append them to your `install-config.yaml` by running this command:
|
||||
|
||||
```bash
|
||||
cat << EOF >> install-config.yaml
|
||||
imageContentSources:
|
||||
- mirrors:
|
||||
- $(hostname):8443/ubi8/ubi
|
||||
source: registry.redhat.io/ubi8/ubi
|
||||
- mirrors:
|
||||
- $(hostname):8443/openshift/release-images
|
||||
source: quay.io/openshift-release-dev/ocp-release
|
||||
- mirrors:
|
||||
- $(hostname):8443/openshift/release
|
||||
source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
|
||||
EOF
|
||||
```
|
||||
|
||||
- Add the root CA of our mirror registry (`/mnt/high-side/quay/quay-install/quay-rootCA/rootCA.pem`) to the trust bundle using the `additionalTrustBundle` field by running this command:
|
||||
|
||||
```bash
|
||||
cat <<EOF >> install-config.yaml
|
||||
additionalTrustBundle: |
|
||||
$(cat /mnt/high-side/quay/quay-install/quay-rootCA/rootCA.pem | sed 's/^/ /')
|
||||
EOF
|
||||
```
|
||||
|
||||
It should look something like this:
|
||||
|
||||
```yaml
|
||||
additionalTrustBundle: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID2DCCAsCgAwIBAgIUbL/naWCJ48BEL28wJTvMhJEz/C8wDQYJKoZIhvcNAQEL
|
||||
BQAwdTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhOZXcgWW9y
|
||||
azENMAsGA1UECgwEUXVheTERMA8GA1UECwwIRGl2aXNpb24xJDAiBgNVBAMMG2lw
|
||||
LTEwLTAtNTEtMjA2LmVjMi5pbnRlcm5hbDAeFw0yMzA3MTExODIyMjNaFw0yNjA0
|
||||
MzAxODIyMjNaMHUxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwI
|
||||
TmV3IFlvcmsxDTALBgNVBAoMBFF1YXkxETAPBgNVBAsMCERpdmlzaW9uMSQwIgYD
|
||||
VQQDDBtpcC0xMC0wLTUxLTIwNi5lYzIuaW50ZXJuYWwwggEiMA0GCSqGSIb3DQEB
|
||||
AQUAA4IBDwAwggEKAoIBAQDEz/8Pi4UYf/zanB4GHMlo4nbJYIJsyDWx+dPITTMd
|
||||
J3pdOo5BMkkUQL8rSFkc3RjY/grdk2jejVPQ8sVnSabsTl+ku7hT0t1w7E0uPY8d
|
||||
RTeGoa5QvdFOxWz6JsLo+C+JwVOWI088tYX1XZ86TD5FflOEeOwWvs5cmQX6L5O9
|
||||
QGO4PHBc9FWpmaHvFBiRJN3AQkMK4C9XB82G6mCp3c1cmVwFOo3vX7h5738PKXWg
|
||||
KYUTGXHxd/41DBhhY7BpgiwRF1idfLv4OE4bzsb42qaU4rKi1TY+xXIYZ/9DPzTN
|
||||
nQ2AHPWbVxI+m8DZa1DAfPvlZVxAm00E1qPPM30WrU4nAgMBAAGjYDBeMAsGA1Ud
|
||||
DwQEAwIC5DATBgNVHSUEDDAKBggrBgEFBQcDATAmBgNVHREEHzAdghtpcC0xMC0w
|
||||
LTUxLTIwNi5lYzIuaW50ZXJuYWwwEgYDVR0TAQH/BAgwBgEB/wIBATANBgkqhkiG
|
||||
9w0BAQsFAAOCAQEAkkV7/+YhWf1vq//N0Ms0td0WDJnqAlbZUgGkUu/6XiUToFtn
|
||||
OE58KCudP0cAQtvl0ISfw0c7X/Ve11H5YSsVE9afoa0whEO1yntdYQagR0RLJnyo
|
||||
Dj9xhQTEKAk5zXlHS4meIgALi734N2KRu+GJDyb6J0XeYS2V1yQ2Ip7AfCFLdwoY
|
||||
cLtooQugLZ8t+Kkqeopy4pt8l0/FqHDidww1FDoZ+v7PteoYQfx4+R5e8ko/vKAI
|
||||
OCALo9gecCXc9U63l5QL+8z0Y/CU9XYNDfZGNLSKyFTsbQFAqDxnCcIngdnYFbFp
|
||||
mRa1akgfPl+BvAo17AtOiWbhAjipf5kSBpmyJA==
|
||||
-----END CERTIFICATE-----
|
||||
```
|
||||
|
||||
Lastly, now is a good time to make a backup of your `install-config.yaml` since the installer will consume (and delete) it:
|
||||
|
||||
```bash
|
||||
cp install-config.yaml install-config.yaml.bak
|
||||
```
|
||||
|
||||
|
||||
## 5.2 - Running the installation
|
||||
|
||||
We're ready to run the install! Let's kick off the cluster installation by copying the command below into our web terminal:
|
||||
|
||||
> Note: Once more we can use the `--log-level=DEBUG` flag to get more insight on how the install is progressing.
|
||||
|
||||
```bash
|
||||
/mnt/high-side/openshift-install create cluster --log-level=DEBUG
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Running the cluster installation* |
|
||||
</Zoom>
|
||||
|
||||
The installation process should take about 30 minutes. If you've done everything correctly, you should see something like the example below at the conclusion:
|
||||
|
||||
```text
|
||||
...
|
||||
INFO Install complete!
|
||||
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig'
|
||||
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com
|
||||
INFO Login to the console with user: "kubeadmin", and password: "password"
|
||||
INFO Time elapsed: 30m49s
|
||||
```
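Once the install completes you can optionally verify access to the new cluster from the bastion. The kubeconfig path below assumes the install directory we created earlier in this exercise:

```bash
# Point oc at the freshly installed cluster and sanity check it
export KUBECONFIG=/mnt/high-side/install/auth/kubeconfig
oc get nodes
oc get clusteroperators
```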
|
||||
|
||||
If you made it this far you have completed all the workshop exercises, well done! 🎉
|
||||
618
data/hackathon/README.org
Normal file
@ -0,0 +1,618 @@
|
||||
#+TITLE: OpenShift Workshops
|
||||
#+AUTHOR: James Blair
|
||||
#+DATE: <2024-04-14 Sun>
|
||||
|
||||
This directory contains a set of scenarios to be used for an [[https://www.redhat.com/en/technologies/cloud-computing/openshift/virtualization][OpenShift Virtualisation]] hackathon and was created for an internal enablement exercise at Red Hat.
|
||||
|
||||
* Pre-requisites
|
||||
|
||||
The hackathon is run by breaking attendees into small teams of 2-4 and assigning each team a bare metal OpenShift 4 cluster with OpenShift Virtualisation already installed.
|
||||
|
||||
For our purposes we have clusters running in Equinix Metal provisioned via the [[https://demo.redhat.com/catalog?item=babylon-catalog-prod/equinix-metal.roadshow-ocpvirt.prod&utm_source=webapp&utm_medium=share-link][Red Hat Demo System]].
|
||||
|
||||
|
||||
* Cluster setup
|
||||
|
||||
Follow the steps below to prepare each cluster in advance of the hackathon.
|
||||
|
||||
#+begin_src tmux
|
||||
# Create the exercise three namespace
|
||||
oc create namespace demotestwtf17
|
||||
|
||||
# Create the exercise three virtual machine
|
||||
cat << EOF | oc apply --namespace demotestwtf17 --filename -
|
||||
apiVersion: kubevirt.io/v1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
name: fedora
|
||||
namespace: demotestwtf17
|
||||
finalizers:
|
||||
- kubevirt.io/virtualMachineControllerFinalize
|
||||
labels:
|
||||
app: fedora
|
||||
vm.kubevirt.io/template: fedora-server-small
|
||||
vm.kubevirt.io/template.namespace: openshift
|
||||
vm.kubevirt.io/template.revision: '1'
|
||||
vm.kubevirt.io/template.version: v0.25.0
|
||||
spec:
|
||||
dataVolumeTemplates:
|
||||
- apiVersion: cdi.kubevirt.io/v1beta1
|
||||
kind: DataVolume
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: fedora
|
||||
spec:
|
||||
sourceRef:
|
||||
kind: DataSource
|
||||
name: fedora
|
||||
namespace: openshift-virtualization-os-images
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
running: true
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vm.kubevirt.io/flavor: small
|
||||
vm.kubevirt.io/os: fedora
|
||||
vm.kubevirt.io/workload: server
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubevirt.io/domain: fedora
|
||||
kubevirt.io/size: small
|
||||
spec:
|
||||
domain:
|
||||
cpu:
|
||||
model: Superlegitcpu9000
|
||||
cores: 1
|
||||
sockets: 1
|
||||
threads: 1
|
||||
devices:
|
||||
disks:
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: rootdisk
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: cloudinitdisk
|
||||
interfaces:
|
||||
- macAddress: '02:d5:73:00:00:07'
|
||||
masquerade: {}
|
||||
model: virtio
|
||||
name: default
|
||||
networkInterfaceMultiqueue: true
|
||||
rng: {}
|
||||
features:
|
||||
acpi: {}
|
||||
smm:
|
||||
enabled: true
|
||||
firmware:
|
||||
bootloader:
|
||||
efi: {}
|
||||
machine:
|
||||
type: pc-q35-rhel9.2.0
|
||||
resources:
|
||||
requests:
|
||||
memory: 2Gi
|
||||
evictionStrategy: LiveMigrate
|
||||
networks:
|
||||
- name: default
|
||||
pod: {}
|
||||
nodeSelector:
|
||||
cpumodel: totallylegitipromise
|
||||
terminationGracePeriodSeconds: 180
|
||||
volumes:
|
||||
- dataVolume:
|
||||
name: fedora
|
||||
name: rootdisk
|
||||
- cloudInitNoCloud:
|
||||
userData: |-
|
||||
#cloud-config
|
||||
user: fedora
|
||||
password: fedora
|
||||
chpasswd: { expire: False }
|
||||
name: cloudinitdisk
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
#+begin_src tmux
|
||||
# Create the exercise five namespace
|
||||
oc create namespace super-important-dont-deleteme
|
||||
|
||||
# Create the exercise five virtual machine
|
||||
cat << EOF | oc --namespace super-important-dont-deleteme apply --filename -
|
||||
apiVersion: kubevirt.io/v1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
name: cryto-carnivore-cpuminer3000
|
||||
finalizers:
|
||||
- kubevirt.io/virtualMachineControllerFinalize
|
||||
labels:
|
||||
app: cryto-carnivore-cpuminer3000
|
||||
vm.kubevirt.io/template: centos7-server-small
|
||||
vm.kubevirt.io/template.namespace: openshift
|
||||
vm.kubevirt.io/template.revision: '1'
|
||||
vm.kubevirt.io/template.version: v0.25.0
|
||||
spec:
|
||||
dataVolumeTemplates:
|
||||
- apiVersion: cdi.kubevirt.io/v1beta1
|
||||
kind: DataVolume
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: cryto-carnivore-cpuminer3000
|
||||
spec:
|
||||
sourceRef:
|
||||
kind: DataSource
|
||||
name: centos7
|
||||
namespace: openshift-virtualization-os-images
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
running: true
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vm.kubevirt.io/flavor: small
|
||||
vm.kubevirt.io/os: centos7
|
||||
vm.kubevirt.io/workload: server
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubevirt.io/domain: cryto-carnivore-cpuminer3000
|
||||
kubevirt.io/size: small
|
||||
spec:
|
||||
domain:
|
||||
cpu:
|
||||
cores: 1
|
||||
sockets: 1
|
||||
threads: 1
|
||||
devices:
|
||||
disks:
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: rootdisk
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: cloudinitdisk
|
||||
interfaces:
|
||||
- macAddress: '02:d5:73:00:00:0b'
|
||||
masquerade: {}
|
||||
model: virtio
|
||||
name: default
|
||||
networkInterfaceMultiqueue: true
|
||||
rng: {}
|
||||
machine:
|
||||
type: pc-q35-rhel9.2.0
|
||||
resources:
|
||||
requests:
|
||||
memory: 2Gi
|
||||
evictionStrategy: LiveMigrate
|
||||
networks:
|
||||
- name: default
|
||||
pod: {}
|
||||
terminationGracePeriodSeconds: 180
|
||||
volumes:
|
||||
- dataVolume:
|
||||
name: cryto-carnivore-cpuminer3000
|
||||
name: rootdisk
|
||||
- cloudInitNoCloud:
|
||||
userData: |-
|
||||
#cloud-config
|
||||
user: centos
|
||||
password: 123456
|
||||
chpasswd: { expire: False }
|
||||
name: cloudinitdisk
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
#+begin_src tmux
|
||||
# Break the storage class for exercise five
|
||||
oc patch storageclass ocs-storagecluster-ceph-rbd --type='merge' --patch-file /dev/stdin <<-EOF
|
||||
allowVolumeExpansion: false
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
|
||||
|
||||
* Automated scenario population
|
||||
|
||||
To quickly set up an example environment with all solutions populated, you can use the following source blocks.
|
||||
|
||||
** Exercise two - What about my legacy technical debt?
|
||||
|
||||
#+begin_src tmux
|
||||
# Create namespace
|
||||
oc create namespace crusty-corp
|
||||
|
||||
# Create the virtual machine template
|
||||
cat << 'EOF' | oc apply --namespace crusty-corp --filename -
|
||||
kind: Template
|
||||
apiVersion: template.openshift.io/v1
|
||||
metadata:
|
||||
name: centos5-server-small
|
||||
namespace: crusty-corp
|
||||
labels:
|
||||
app.kubernetes.io/part-of: hyperconverged-cluster
|
||||
os.template.kubevirt.io/centos5.0: 'true'
|
||||
flavor.template.kubevirt.io/small: 'true'
|
||||
template.kubevirt.io/version: v0.25.0
|
||||
app.kubernetes.io/version: 4.13.8
|
||||
template.kubevirt.io/type: base
|
||||
app.kubernetes.io/component: templating
|
||||
app.kubernetes.io/managed-by: ssp-operator
|
||||
template.kubevirt.io/default-os-variant: 'true'
|
||||
app.kubernetes.io/name: common-templates
|
||||
workload.template.kubevirt.io/server: 'true'
|
||||
annotations:
|
||||
template.kubevirt.io/provider: Red Hat
|
||||
name.os.template.kubevirt.io/centos5.0: CentOS 5 or higher
|
||||
template.kubevirt.io/provider-url: 'https://www.centos.org'
|
||||
template.kubevirt.io/containerdisks: |
|
||||
quay.io/containerdisks/centos:7-2009
|
||||
template.kubevirt.io/version: v1alpha1
|
||||
openshift.io/display-name: CentOS 5 VM
|
||||
openshift.io/documentation-url: 'https://github.com/kubevirt/common-templates'
|
||||
template.kubevirt.io/images: >
|
||||
https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
|
||||
operator-sdk/primary-resource-type: SSP.ssp.kubevirt.io
|
||||
defaults.template.kubevirt.io/disk: rootdisk
|
||||
template.kubevirt.io/editable: |
|
||||
/objects[0].spec.template.spec.domain.cpu.sockets
|
||||
/objects[0].spec.template.spec.domain.cpu.cores
|
||||
/objects[0].spec.template.spec.domain.cpu.threads
|
||||
/objects[0].spec.template.spec.domain.resources.requests.memory
|
||||
/objects[0].spec.template.spec.domain.devices.disks
|
||||
/objects[0].spec.template.spec.volumes
|
||||
/objects[0].spec.template.spec.networks
|
||||
template.openshift.io/bindable: 'false'
|
||||
openshift.kubevirt.io/pronounceable-suffix-for-name-expression: 'true'
|
||||
operator-sdk/primary-resource: openshift-cnv/ssp-kubevirt-hyperconverged
|
||||
tags: 'hidden,kubevirt,virtualmachine,linux,centos'
|
||||
template.kubevirt.io/provider-support-level: Community
|
||||
description: >-
|
||||
Template for CentOS 5 VM or newer. A PVC with the CentOS disk image must
|
||||
be available.
|
||||
openshift.io/support-url: 'https://github.com/kubevirt/common-templates/issues'
|
||||
iconClass: icon-centos
|
||||
openshift.io/provider-display-name: Red Hat
|
||||
objects:
|
||||
- apiVersion: kubevirt.io/v1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
annotations:
|
||||
vm.kubevirt.io/validations: |
|
||||
[
|
||||
{
|
||||
"name": "minimal-required-memory",
|
||||
"path": "jsonpath::.spec.domain.resources.requests.memory",
|
||||
"rule": "integer",
|
||||
"message": "This VM requires more memory.",
|
||||
"min": 1073741824
|
||||
}
|
||||
]
|
||||
labels:
|
||||
app: '${NAME}'
|
||||
vm.kubevirt.io/template: centos5-server-small
|
||||
vm.kubevirt.io/template.revision: '1'
|
||||
vm.kubevirt.io/template.version: v0.25.0
|
||||
name: '${NAME}'
|
||||
spec:
|
||||
dataVolumeTemplates:
|
||||
- apiVersion: cdi.kubevirt.io/v1beta1
|
||||
kind: DataVolume
|
||||
metadata:
|
||||
name: '${NAME}'
|
||||
spec:
|
||||
sourceRef:
|
||||
kind: DataSource
|
||||
name: '${DATA_SOURCE_NAME}'
|
||||
namespace: '${DATA_SOURCE_NAMESPACE}'
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
storage: 30Gi
|
||||
running: false
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vm.kubevirt.io/flavor: small
|
||||
vm.kubevirt.io/os: centos5
|
||||
vm.kubevirt.io/workload: server
|
||||
labels:
|
||||
kubevirt.io/domain: '${NAME}'
|
||||
kubevirt.io/size: small
|
||||
spec:
|
||||
domain:
|
||||
cpu:
|
||||
cores: 1
|
||||
sockets: 1
|
||||
threads: 1
|
||||
devices:
|
||||
disks:
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: rootdisk
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: cloudinitdisk
|
||||
interfaces:
|
||||
- masquerade: {}
|
||||
model: virtio
|
||||
name: default
|
||||
networkInterfaceMultiqueue: true
|
||||
rng: {}
|
||||
machine:
|
||||
type: pc-q35-rhel9.2.0
|
||||
resources:
|
||||
requests:
|
||||
memory: 2Gi
|
||||
evictionStrategy: LiveMigrate
|
||||
networks:
|
||||
- name: default
|
||||
pod: {}
|
||||
terminationGracePeriodSeconds: 180
|
||||
volumes:
|
||||
- dataVolume:
|
||||
name: '${NAME}'
|
||||
name: rootdisk
|
||||
- cloudInitNoCloud:
|
||||
userData: |-
|
||||
#cloud-config
|
||||
user: centos
|
||||
password: ${CLOUD_USER_PASSWORD}
|
||||
chpasswd: { expire: False }
|
||||
name: cloudinitdisk
|
||||
parameters:
|
||||
- name: NAME
|
||||
description: VM name
|
||||
generate: expression
|
||||
from: 'centos5-[a-z0-9]{16}'
|
||||
- name: DATA_SOURCE_NAME
|
||||
description: Name of the DataSource to clone
|
||||
value: centos5
|
||||
- name: DATA_SOURCE_NAMESPACE
|
||||
description: Namespace of the DataSource
|
||||
value: openshift-virtualization-os-images
|
||||
- name: CLOUD_USER_PASSWORD
|
||||
description: Randomized password for the cloud-init user centos
|
||||
generate: expression
|
||||
from: '[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}'
|
||||
EOF
|
||||
|
||||
# Create the virtual machine from template
|
||||
cat << 'EOF' | oc apply --namespace crusty-corp --filename -
|
||||
apiVersion: 'kubevirt.io/v1'
|
||||
kind: 'VirtualMachine'
|
||||
metadata:
|
||||
labels:
|
||||
app: 'crusty-corp-fun-financial-appliance'
|
||||
vm.kubevirt.io/template: 'centos5-server-small'
|
||||
vm.kubevirt.io/template.namespace: 'crusty-corp'
|
||||
vm.kubevirt.io/template.revision: '1'
|
||||
vm.kubevirt.io/template.version: 'v0.25.0'
|
||||
name: 'crusty-corp-fun-financial-appliance'
|
||||
namespace: 'crusty-corp'
|
||||
spec:
|
||||
dataVolumeTemplates:
|
||||
- apiVersion: 'cdi.kubevirt.io/v1beta1'
|
||||
kind: 'DataVolume'
|
||||
metadata:
|
||||
annotations:
|
||||
cdi.kubevirt.io/storage.bind.immediate.requested: 'true'
|
||||
creationTimestamp: null
|
||||
name: 'crusty-corp-fun-financial-appliance'
|
||||
spec:
|
||||
source:
|
||||
blank: {}
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
storage: '30Gi'
|
||||
- metadata:
|
||||
creationTimestamp: null
|
||||
name: 'crusty-corp-fun-financial-appliance-installation-cdrom'
|
||||
spec:
|
||||
source:
|
||||
http:
|
||||
url: 'https://vault.centos.org/5.11/isos/x86_64/CentOS-5.11-x86_64-netinstall.iso'
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
storage: '5Gi'
|
||||
running: false
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
vm.kubevirt.io/flavor: 'small'
|
||||
vm.kubevirt.io/os: 'centos5'
|
||||
vm.kubevirt.io/workload: 'server'
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubevirt.io/domain: 'crusty-corp-fun-financial-appliance'
|
||||
kubevirt.io/size: 'small'
|
||||
spec:
|
||||
domain:
|
||||
cpu:
|
||||
cores: 1
|
||||
sockets: 1
|
||||
threads: 1
|
||||
devices:
|
||||
disks:
|
||||
- bootOrder: 2
|
||||
disk:
|
||||
bus: 'virtio'
|
||||
name: 'rootdisk'
|
||||
- bootOrder: 3
|
||||
disk:
|
||||
bus: 'virtio'
|
||||
name: 'cloudinitdisk'
|
||||
- bootOrder: 1
|
||||
cdrom:
|
||||
bus: 'sata'
|
||||
name: 'installation-cdrom'
|
||||
interfaces:
|
||||
- macAddress: '02:d5:73:00:00:06'
|
||||
masquerade: {}
|
||||
model: 'virtio'
|
||||
name: 'default'
|
||||
networkInterfaceMultiqueue: true
|
||||
rng: {}
|
||||
machine:
|
||||
type: 'pc-q35-rhel9.2.0'
|
||||
resources:
|
||||
requests:
|
||||
memory: '2Gi'
|
||||
evictionStrategy: 'LiveMigrate'
|
||||
networks:
|
||||
- name: 'default'
|
||||
pod: {}
|
||||
terminationGracePeriodSeconds: 180
|
||||
volumes:
|
||||
- dataVolume:
|
||||
name: 'crusty-corp-fun-financial-appliance'
|
||||
name: 'rootdisk'
|
||||
- cloudInitNoCloud:
|
||||
userData: "#cloud-config\nuser: centos\npassword: cqud-lhel-rd0b\nchpasswd: { expire: False }"
|
||||
name: 'cloudinitdisk'
|
||||
- dataVolume:
|
||||
name: 'crusty-corp-fun-financial-appliance-installation-cdrom'
|
||||
name: 'installation-cdrom'
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
|
||||
** Exercise three - But can it do live migration?
|
||||
|
||||
#+begin_src tmux
|
||||
# Patch the bogus virtual machine nodeselector & cpumodel
|
||||
oc patch --namespace demotestwtf17 VirtualMachine fedora --type='merge' --patch-file /dev/stdin <<-EOF
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
domain:
|
||||
cpu:
|
||||
model:
|
||||
nodeSelector:
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
Then restart the virtual machine manually so the corrected spec takes effect.
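One way to do this, assuming virtctl is available in your environment, is shown below. The console virtual machine actions menu works just as well.

#+begin_src tmux
# Restart the vm so the cleared cpu model and node selector take effect
virtctl restart fedora --namespace demotestwtf17
#+end_src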
|
||||
|
||||
#+begin_src tmux
|
||||
# Initiate the live migration
|
||||
cat << EOF | oc create --namespace demotestwtf17 --filename -
|
||||
apiVersion: kubevirt.io/v1
|
||||
kind: VirtualMachineInstanceMigration
|
||||
metadata:
|
||||
name: fedora-migration-hackathon
|
||||
namespace: demotestwtf17
|
||||
finalizers:
|
||||
- kubevirt.io/migrationJobFinalize
|
||||
labels:
|
||||
kubevirt.io/vmi-name: fedora
|
||||
spec:
|
||||
vmiName: fedora
|
||||
EOF
|
||||
|
||||
# Check the node virtual machine migrated to
|
||||
oc --namespace demotestwtf17 get VirtualMachineInstance fedora
|
||||
#+end_src
|
||||
|
||||
|
||||
** Exercise four - What about balancing vm workloads?
|
||||
|
||||
#+begin_src tmux
|
||||
# Create required namespace for vm
|
||||
oc create namespace itsjustyaml
|
||||
|
||||
# Create the suggested namespace for descheduler operator
|
||||
oc create namespace openshift-kube-descheduler-operator
|
||||
|
||||
# Create the subscription for the kube descheduler operator
|
||||
cat << EOF | oc apply --namespace openshift-kube-descheduler-operator --filename -
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: cluster-kube-descheduler-operator
|
||||
namespace: openshift-kube-descheduler-operator
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: cluster-kube-descheduler-operator
|
||||
source: redhat-operators
|
||||
sourceNamespace: openshift-marketplace
|
||||
EOF
|
||||
|
||||
# Create the instance of descheduler
|
||||
cat << EOF | oc apply --namespace openshift-kube-descheduler-operator --filename -
|
||||
apiVersion: operator.openshift.io/v1
|
||||
kind: KubeDescheduler
|
||||
metadata:
|
||||
name: cluster
|
||||
namespace: openshift-kube-descheduler-operator
|
||||
spec:
|
||||
deschedulingIntervalSeconds: 3600
|
||||
logLevel: Normal
|
||||
managementState: Managed
|
||||
mode: Automatic
|
||||
operatorLogLevel: Normal
|
||||
profileCustomizations:
|
||||
devLowNodeUtilizationThresholds: Medium
|
||||
profiles:
|
||||
- AffinityAndTaints
|
||||
- DevPreviewLongLifecycle
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
TODO: Create a CentOS Stream 9 VM with descheduler evictions turned on.
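A sketch of one possible approach for the annotation side of this TODO, assuming a virtual machine named centos has already been created from the CentOS Stream 9 template in the itsjustyaml namespace:

#+begin_src tmux
# Sketch only: opt the centos vm into descheduler evictions
oc patch --namespace itsjustyaml virtualmachine centos --type='merge' --patch-file /dev/stdin <<-EOF
spec:
  template:
    metadata:
      annotations:
        descheduler.alpha.kubernetes.io/evict: "true"
EOF

# Restart the vm so the annotation lands on the running vmi
virtctl restart centos --namespace itsjustyaml
#+end_src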
|
||||
|
||||
** Exercise five - How do I resize virtual machine disks again?
|
||||
|
||||
#+begin_src tmux
|
||||
# Patch the storageclass to enable volume expansion
|
||||
oc patch storageclass ocs-storagecluster-ceph-rbd --type='merge' --patch-file /dev/stdin <<-EOF
|
||||
allowVolumeExpansion: true
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
#+begin_src tmux
|
||||
# Patch the claim to increase its size
|
||||
oc --namespace super-important-dont-deleteme patch persistentvolumeclaim cryto-carnivore-cpuminer3000 --type='merge' --patch-file /dev/stdin <<-EOF
|
||||
spec:
|
||||
resources:
|
||||
requests:
|
||||
storage: 60Gi
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
#+begin_src tmux
|
||||
# Create new claim for wannacry volume
|
||||
cat << EOF | oc --namespace super-important-dont-deleteme apply --filename -
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: wannacry
|
||||
namespace: super-important-dont-deleteme
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
volumeMode: Block
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
TODO: Patch the vm to include the new volume
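One possible way to finish that TODO, assuming virtctl is available, is to hot plug the claim created above and persist it in the vm spec:

#+begin_src tmux
# Sketch only: hot plug the wannacry volume into the vm and persist it
virtctl addvolume cryto-carnivore-cpuminer3000 \
  --volume-name=wannacry \
  --persist \
  --namespace super-important-dont-deleteme
#+end_src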
|
||||
|
||||
|
||||
|
||||
* Automated scenario cleanup
|
||||
|
||||
If you need to quickly reset an example environment to have no solutions populated you can use the following source blocks.
|
||||
|
||||
#+begin_src tmux
|
||||
oc delete namespace --ignore-not-found crusty-corp demotestwtf17 itsjustyaml super-important-dont-deleteme acme-bank
|
||||
#+end_src
|
||||
78
data/hackathon/scenario1.mdx
Normal file
@ -0,0 +1,78 @@
|
||||
---
|
||||
title: Understanding the hackathon environment
|
||||
exercise: 1
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get underway with the hackathon."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift Virtualisation Hackathon! Here you'll have a chance to build your container native virtualisation prowess. Exercises will award points for each correct solution.
|
||||
|
||||
You're in a race to reach the highest score before the session concludes! If multiple teams complete all exercises and finish on the same points total, a further ranking will be done by elapsed time, based on when Slack messages were sent.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - The hackathon scenario
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Acme Financial Services* |
|
||||
</Zoom>
|
||||
|
||||
Acme Financial Services is a large bank based in Australia. They have a number of virtual machine workloads that are running on a traditional hypervisor and are considering moving these to another platform.
|
||||
|
||||
One of the potential options for Acme Financial Services is to adopt Red Hat OpenShift Virtualisation as a container native virtualisation platform.
|
||||
|
||||
Your hackathon team are the pre-sales technical team engaging with Acme technical teams to secure buy-in for a large scale migration to proceed.
|
||||
|
||||
|
||||
## 1.2 - Understanding the environment
|
||||
|
||||
For this challenge you'll be given a fresh bare metal OpenShift 4 cluster **with the OpenShift Virtualisation operator already installed**.
|
||||
|
||||
All challenge tasks must be performed on this cluster so your solutions can be graded successfully.
|
||||
|
||||
You can and are encouraged to use any supporting documentation or other resources in order to tackle each of the challenge tasks.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *OpenShift bare metal cluster console* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.3 - Obtain your environment
|
||||
|
||||
Working in a small team, you will have one cluster shared by all team members. Your team will already have a name allocated.
|
||||
|
||||
To get underway, open your web browser and navigate to https://demo.redhat.com/workshop/s72ya3 to allocate an environment for your team.
|
||||
|
||||
Register for an environment using `[team name]@redhat.com` and the password provided by your hackathon organisers. Registering with a team email means all of your team members will be able to see the details for the same shared team cluster.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Hackathon team registration page* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.4 - Confirm environment access
|
||||
|
||||
If your team have secured an environment and are ready to start the challenge please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> [team name] have logged into an environment and are starting the challenge!
|
||||
|
||||
The event team will reply in slack to confirm your team has been recorded and start you with a base score of `10` points.
|
||||
|
||||
|
||||
## 1.5 - Hints!
|
||||
|
||||
If you get stuck on a question, fear not, perhaps try a different approach. If you have tried everything you can think of and are still stuck you can unlock a hint for `3` points by posting a message in the `#event-anz-ocp-virt-hackathon` channel with the message:
|
||||
|
||||
> [team name] are stuck on [exercise] and are unlocking a hint.
|
||||
|
||||
A hackathon organiser will join your breakout room to share the hint with you 🤫.
|
||||
45
data/hackathon/scenario2.mdx
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
title: What about my legacy technical debt?
|
||||
exercise: 2
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Will OpenShift Virtualisation run Acme virtual machines?"
|
||||
---
|
||||
|
||||
As a sales team, you've got an upcoming demo with the Acme Financial Services linux team. They are concerned that a modern cloud native virtualisation platform like OpenShift Virtualisation won't run their **Crusty Corp Fun Financial Appliance™** vm appliances, which run CentOS Linux `5.11` and cannot be updated or altered in any way 🤯.
|
||||
|
||||
The Acme team are well aware these virtual machines would not be "supported", but ultimately the deal rests on your team's ability to prove that such an old operating system will boot successfully within OpenShift Virtualisation.
|
||||
|
||||
|
||||
## 2.1 - Create the virtual machine
|
||||
|
||||
For this task, your team are required to use this [`CentOS-5.11-x86_64-netinstall.iso`](https://vault.centos.org/5.11/isos/x86_64/CentOS-5.11-x86_64-netinstall.iso) file.
|
||||
|
||||
No command line is required. Your challenge is to create and boot a virtual machine on your cluster using the name `crusty-corp-fun-financial-appliance`, within the namespace `crusty-corp`.
|
||||
|
||||
**Note: You don't need to complete the CentOS 5 install, you just need to get the machine to boot into the installer.**
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/creating_vms_custom/virt-creating-vms-from-custom-images-overview.html
|
||||
|
||||
|
||||
## 2.2 - Boot the virtual machine
|
||||
|
||||
For this challenge you will know you are successful, and will be awarded points, when your virtual machine boots the given ISO and shows the following logo in the VNC console:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Crusty corp financial appliance boot screen.* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.3 - Check your work
|
||||
|
||||
If your **Crusty Corp Financial Appliance™** has booted please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 2.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score.
|
||||
41
data/hackathon/scenario3.mdx
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
title: But can it do live migration?
|
||||
exercise: 3
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Will OpenShift Virtualisation live migrate a virtual machine?"
|
||||
---
|
||||
|
||||
During the demo with the Acme Financial Services team, one of their traditional hypervisor administrators asked if you can show them a live migration of a running virtual machine. They say they are used to performing migrations using a feature in their existing hypervisor called "vMotion" and want to know whether an equivalent feature works in OpenShift Virtualisation.
|
||||
|
||||
You know KVM & KubeVirt have supported a similar feature called "Live Migration" for ages. Thinking on your feet, you remember the Fedora virtual machine you were tinkering with last night in the `demotestwtf17` namespace on the cluster. Perhaps you can quickly use that to demo a live migration?
|
||||
|
||||
The Acme Financial Services team have put you on the spot, can you pull off a virtual machine live migration? 😅
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *He's dead Jim...* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.1 - Initiate the live migration
|
||||
|
||||
For this challenge, your team must live migrate the virtual machine named `fedora` in namespace `demotestwtf17` from one node to another and record the name of the node the vm has migrated to.
|
||||
|
||||
No command line is required. However before you can complete the migration you will first need to investigate and correct why the virtual machine is not running. You had one too many coke zeros last night and can't remember how you got the machine into the state it's in 🤦.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/live_migration/virt-configuring-live-migration.html
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/advanced_vm_management/virt-specifying-nodes-for-vms.html
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/advanced_vm_management/virt-schedule-vms.html
|
||||
|
||||
## 3.2 - Check your work
|
||||
|
||||
If the virtual machine in your `demotestwtf17` namespace has been live migrated to another node successfully please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 3. Our vm has migrated to node [node name].
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score.
|
||||
45
data/hackathon/scenario4.mdx
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
title: What about balancing vm workloads?
|
||||
exercise: 4
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "How do we optimally balance vm workloads across an OpenShift cluster?"
|
||||
---
|
||||
|
||||
After the slick demos and objection handling from your pre-sales team, Acme Financial Services have agreed to run a small scale proof of concept for OpenShift Virtualisation 🎉
|
||||
|
||||
They have spun up a bare metal OpenShift cluster, installed OpenShift Virtualisation and now have a small number of virtual machines running.
|
||||
|
||||
One of the goals of the pilot is to ensure their virtual machine workloads run efficiently on the new infrastructure and continually auto balance over time based on available capacity, so that some nodes don't end up much busier than others.
|
||||
|
||||
The Acme team are stuck on how they might implement this goal within their current sprint. They told you they found some documentation online on this thing called the *"descheduler"*, but have decided it would be easier to email Red Hat and ask for a walkthrough.
|
||||
|
||||
Your local pre-sales team has offered to set up an example environment for Acme and step through how to enable the feature. No worries, right? After all, how hard can it be?
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *"We've all said it 😂"* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.1 - Enabling virtual machine balancing profiles
|
||||
|
||||
Your challenge for this task is to enable the **Technology Preview** feature for Virtual Machine workload balancing in OpenShift at the cluster level.
|
||||
|
||||
Once enabled at the cluster level you then need to deploy a **CentOS Stream 9** virtual machine called `centos` running in a new namespace called `itsjustyaml`. No command line is required.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/advanced_vm_management/virt-enabling-descheduler-evictions.html
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/creating_vms_rh/virt-creating-vms-from-templates.html
|
||||
|
||||
|
||||
## 4.2 - Check your work
|
||||
|
||||
If you've successfully enabled descheduler evictions for the `centos` virtual machine please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> Please review team [name] solution for exercise 4.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score.
|
||||
69
data/hackathon/scenario5.mdx
Normal file
@ -0,0 +1,69 @@
|
||||
---
|
||||
title: How do I resize virtual machine disks again?
|
||||
exercise: 5
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Persistent volume what??"
|
||||
---
|
||||
|
||||
The proof of concept has been going well at Acme Financial Services. Their team are a few sprints in and have just pinged you an email out of the blue:
|
||||
|
||||
|
||||
> Subject: Quick Help Needed: Expanding Storage for VM!
|
||||
>
|
||||
> Hey Red Hat Pre-Sales Team,
|
||||
>
|
||||
> Hope you're doing awesome! 🚀
|
||||
>
|
||||
> I'm the storage guru over at Acme Financial Services, we met last sprint review. I need your brainpower to help with a little incident. One of our virtual machines in our Red Hat playground will run out of disk in 2 hours based on current rates.
|
||||
>
|
||||
> Are you free to jump on a call, like nowish so we can fix it???
|
||||
>
|
||||
> Would not be a good look if this thing blows up 😅
|
||||
>
|
||||
> Thanks a ton for your help!
|
||||
>
|
||||
> Cheers,
|
||||
>
|
||||
> Chad McStorageGuy<br />
|
||||
> Chief Principal Lead Strategic Storage Architect Engineer<br />
|
||||
> Acme Financial Services
|
||||
|
||||
Ruh roh. Normally this would just be handled via support but you know it might take longer for a response as this isn't technically a production environment. You agree to help Chad to try and prevent any bad news for the proof of concept.
|
||||
|
||||
Time to roll up your sleeves, join the Acme Skype for Business™ video call and get it sorted. Tick tock...
|
||||
|
||||
|
||||
## 5.1 - Expand the vm storage
|
||||
|
||||
For this task your team's challenge is to increase the storage of an existing virtual machine `cryto-carnivore-cpuminer3000` in the `super-important-dont-deleteme` namespace.
|
||||
|
||||
The virtual machine currently has a root disk of `30GiB` of storage. This needs to be increased to `60GiB`. No command line is required, though you will need to investigate potential storage misconfigurations and verify that your additional storage is now presented to the virtual machine operating system.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/virtual_disks/virt-expanding-vm-disks.html
|
||||
- https://docs.openshift.com/container-platform/4.15/storage/expanding-persistent-volumes.html#expanding-csi-volumes_expanding-persistent-volumes
|
||||
|
||||
|
||||
## 5.2 - But wait there's more!
|
||||
|
||||
While you're on the Skype for Business™ video call with Chad fixing the `cryto-carnivore-cpuminer3000` storage incident, he also puts you on the spot and asks for some guidance on how to add an additional disk to the vm for a new ~~cve-ridden-literal-malware~~ workload that Acme is developing.
|
||||
|
||||
This is a straightforward table stakes procedure for any virtualisation platform so you offer to step Chad through it then and there and get it out of the way.
|
||||
|
||||
Your task is to add an additional `20GiB` disk labelled `wannacry` to the virtual machine. No command line is required, however you may have to get creative and manually create the volume and update the virtual machine spec if your OpenShift console "Configure --> Add Disk" functionality isn't working.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/virtual_machines/virtual_disks/virt-hot-plugging-virtual-disks.html
|
||||
- https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.15/html/managing_and_allocating_storage_resources/managing-persistent-volume-claims_rhodf#configuring-application-pods-to-use-openshift-data-foundation_rhodf
|
||||
|
||||
|
||||
## 5.3 - Check your work
|
||||
|
||||
If you've successfully resolved Chad's storage concerns please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 5.
|
||||
|
||||
Both exercises 5.1 and 5.2 are worth `15` points each for a total of `30` points. The event team will reply in slack to confirm your updated team total score.
|
||||
61
data/hackathon/scenario6.mdx
Normal file
@ -0,0 +1,61 @@
|
||||
---
|
||||
title: Can I connect my pods to my virtual machine?
|
||||
exercise: 6
|
||||
date: '2024-04-14'
|
||||
tags: ['openshift','virtualisation','kubernetes','kubevirt']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "How do we make a virtual machine accessible over the OpenShift SDN"
|
||||
---
|
||||
|
||||
Time flies when you're having fun. After 17 months the "quick" proof of concept for OpenShift Virtualisation has now completed at Acme Financial Services, and the CTO has asked for a final presentation from the local Red Hat pre-sales team.
|
||||
|
||||
The Acme team have talked about modernisation throughout the proof of concept, so for the final demo your local Red Hat team agree to once again showcase how container workloads can run alongside virtual machines, and even how traffic can be balanced across both.
|
||||
|
||||
This is it. No pressure but we need to nail this!
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *"The best of both worlds!"* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 6.1 - Deploy workloads
|
||||
|
||||
Your first challenge for this exercise is to deploy two workloads within the `acme-bank` namespace on your cluster:
|
||||
|
||||
1. The container image `quay.io/redhattraining/hello-world-nginx` running as a standard `deployment` named `acme-pod`, listening on port `8080`.
|
||||
|
||||
2. A linux virtual machine named `acme-vm` running that same workload on port `8080` in podman for simplicity.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/applications/creating_applications/odc-creating-applications-using-developer-perspective.html#odc-deploying-container-image_odc-creating-applications-using-developer-perspective
|
||||
- https://podman.io/docs/installation#installing-on-linux
|
||||
- https://developers.redhat.com/cheat-sheets/podman-cheat-sheet
|
||||
|
||||
## 6.2 - Establish networking
|
||||
|
||||
Once the workloads are deployed your challenge is to create one service named `acme-balancer` that will load balance traffic across the virtual machine and regular container.
|
||||
|
||||
You'll know if this is working correctly when you can see two pods appearing in your service pod listing:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *"One service balancing traffic across a vm and standard pod!"* |
|
||||
</Zoom>
|
||||
|
||||
Documentation you may find helpful is:
|
||||
- https://docs.openshift.com/container-platform/4.15/virt/vm_networking/virt-exposing-vm-with-service.html
|
||||
- https://kubevirt.io/user-guide/virtual_machines/service_objects
|
||||
- https://kubernetes.io/docs/concepts/services-networking/service
|
||||
|
||||
|
||||
## 6.3 - Check your work
|
||||
|
||||
If you've successfully prepared the demo environment workloads and networking for the CTO demo please post a message in `#event-anz-ocp-virt-hackathon` with the message:
|
||||
|
||||
> Please review team [name] solution for exercise 6.
|
||||
|
||||
Exercises 6.1 and 6.2 are each worth `15` points, for a total of `30` points. The event team will reply in slack to confirm your updated team total score.
|
||||
@ -1,6 +1,7 @@
|
||||
const headerNavLinks = [
|
||||
{ href: '/workshop', title: 'Exercises' },
|
||||
{ href: 'https://etherpad.wikimedia.org/p/tssc-workshop-bne-dec-23', title: 'Etherpad'}
|
||||
{ href: '/workshop', title: 'Exercises' },
|
||||
{ href: 'https://docs.openshift.com/container-platform/4.17/welcome/index.html', title: 'Documentation' },
|
||||
{ href: 'https://catalog.demo.redhat.com/workshop/w949gy', title: 'Environment login' }
|
||||
]
|
||||
|
||||
export default headerNavLinks
|
||||
|
||||
@ -1,11 +1,11 @@
|
||||
const siteMetadata = {
|
||||
title: 'Red Hat OpenShift Application Delivery Workshop',
|
||||
title: 'Red Hat OpenShift Security Hackathon',
|
||||
author: 'Red Hat',
|
||||
headerTitle: 'Red Hat',
|
||||
description: 'Red Hat OpenShift Application Delivery Workshop',
|
||||
description: 'Red Hat OpenShift Security Hackathon',
|
||||
language: 'en-us',
|
||||
siteUrl: 'https://jmhbnz.github.io/ocp-app-delivery-workshop',
|
||||
siteRepo: 'https://github.com/jmhbnz/ocp-app-delivery-workshop',
|
||||
siteUrl: 'https://rhdemo.win',
|
||||
siteRepo: 'https://github.com/jmhbnz/workshops',
|
||||
siteLogo: '/static/images/redhat.png',
|
||||
image: '/static/images/avatar.png',
|
||||
socialBanner: '/static/images/twitter-card.png',
|
||||
|
||||
53
data/windows/README.org
Normal file
@ -0,0 +1,53 @@
|
||||
#+TITLE: OpenShift Workshops
|
||||
#+AUTHOR: James Blair
|
||||
#+DATE: <2024-05-26 Sun>
|
||||
|
||||
This directory contains the setup instructions for an OpenShift Windows Container Workshop.
|
||||
|
||||
* Pre-requisites
|
||||
|
||||
This guide assumes you have an existing OpenShift 4.15 cluster running in AWS.
|
||||
|
||||
For my purposes I have clusters provisioned via the Red Hat Demo System.
|
||||
|
||||
* Cluster setup
|
||||
|
||||
Follow the steps below to prepare each cluster in advance of the workshop.
|
||||
|
||||
** Login and verify network
|
||||
|
||||
Our first step is to login to the cluster and confirm cluster network details
|
||||
|
||||
#+begin_src tmux
|
||||
oc login --web <api-server>
|
||||
#+end_src
|
||||
|
||||
#+begin_src tmux
|
||||
# Check cluster cidr
|
||||
oc get network.operator cluster -o yaml
|
||||
#+end_src
|
||||
|
||||
** Enable hybrid overlay networking
|
||||
|
||||
https://docs.openshift.com/container-platform/4.15/networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.html#configuring-hybrid-ovnkubernetes
|
||||
|
||||
#+begin_src tmux
|
||||
# Patch the cluster network to enable hybrid overlay networking
|
||||
oc patch networks.operator.openshift.io cluster --type=merge \
|
||||
-p '{
|
||||
"spec":{
|
||||
"defaultNetwork":{
|
||||
"ovnKubernetesConfig":{
|
||||
"hybridOverlayConfig":{
|
||||
"hybridClusterNetwork":[
|
||||
{
|
||||
"cidr": "10.128.0.0/14",
|
||||
"hostPrefix": 23
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
#+end_src
|
||||
55
data/windows/exercise1.mdx
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
title: Understanding the workshop environment
|
||||
exercise: 1
|
||||
date: '2024-05-26'
|
||||
tags: ['openshift','windows','kubernetes','containers']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get underway with the workshop."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift Windows Containers Workshop! Here you'll have a chance to build your windows container prowess.
|
||||
|
||||
With a Red Hat subscription, you can get support for running Windows workloads in OpenShift Container Platform.
|
||||
|
||||
For this workshop you'll be given a fresh OpenShift 4 cluster which currently only runs linux containers. You will complete a series of exercises to transform the cluster so it is capable of running Windows containers.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - Obtaining your environment
|
||||
|
||||
To get underway, open your web browser and navigate to https://demo.redhat.com/workshop/98b7pu to reserve yourself a user. You can reserve an environment by entering any email address along with the password provided by your workshop facilitator.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Obtaining a workshop environment* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Logging into your cluster console
|
||||
|
||||
After entering an email and the provided password you'll be presented with a console url and login credentials for your OpenShift cluster.
|
||||
|
||||
Open the console url and login.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Logging into your cluster console* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.3 - Logging into your bastion host
|
||||
|
||||
Along with the cluster web console we will also use the command line during this workshop. You've been allocated a bastion host that you can ssh to as part of step 1.1.
|
||||
|
||||
Follow the steps below to connect to your environment bastion host:
|
||||
|
||||
1. Open your preferred terminal application.
|
||||
2. Enter `ssh lab-user@<bastion-hostname>` replacing `<bastion-hostname>` with the hostname listed in your **Bastion Access** environment details page.
|
||||
3. Enter `yes` if you receive a host key verification prompt. This only appears as it is the first time you have connected to this host.
|
||||
4. When prompted enter the password mentioned under **Bastion Access** in your environment details page.
|
||||
|
||||
Congratulations, you're now ready to proceed with the next exercise 🎉.
|
||||
102
data/windows/exercise2.mdx
Normal file
@ -0,0 +1,102 @@
|
||||
---
|
||||
title: Installing the windows machine config operator
|
||||
exercise: 2
|
||||
date: '2024-05-26'
|
||||
tags: ['openshift','windows','kubernetes','containers']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Preparing our cluster for windows machines."
|
||||
---
|
||||
|
||||
|
||||
In this first hands-on exercise we will prepare our cluster for running Windows nodes by installing an operator and configuring it.
|
||||
|
||||
[Operators](https://docs.openshift.com/container-platform/4.15/operators/index.html) are among the most important components of OpenShift Container Platform. Operators are the preferred method of packaging, deploying, and managing additional cluster services or applications.
|
||||
|
||||
To install Operators on OpenShift we use Operator Hub. A simplistic way of thinking about Operator Hub is as the "App Store" for your OpenShift cluster.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *OpenShift Operator Hub* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.1 - Enable hybrid networking
|
||||
|
||||
Before installing the windows machine config operator, our first step as a cluster administrator is to configure our OpenShift cluster network to allow Linux and Windows nodes to host Linux and Windows workloads, respectively.
|
||||
|
||||
This requires enabling a feature called **[hybrid overlay networking](https://docs.openshift.com/container-platform/4.15/networking/ovn_kubernetes_network_provider/configuring-hybrid-networking.html#configuring-hybrid-ovnkubernetes)**.
|
||||
|
||||
To configure hybrid overlay networking, run the following command in your bastion host terminal:
|
||||
|
||||
```bash
|
||||
oc patch networks.operator.openshift.io cluster --type=merge \
|
||||
-p '{
|
||||
"spec":{
|
||||
"defaultNetwork":{
|
||||
"ovnKubernetesConfig":{
|
||||
"hybridOverlayConfig":{
|
||||
"hybridClusterNetwork":[
|
||||
{
|
||||
"cidr": "10.132.0.0/14",
|
||||
"hostPrefix": 23
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Patching an OpenShift cluster network to enable hybrid networking* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.2 - Install the windows machine config operator
|
||||
|
||||
If you have a running OpenShift cluster and have enabled hybrid overlay networking, you can then install the optional **Windows Machine Config Operator**. This operator will configure any Windows machines we add to the cluster, enabling Windows container workloads to be run within your OpenShift cluster.
|
||||
|
||||
Windows instances can be added either by creating a `MachineSet`, or by specifying existing instances through a `ConfigMap`. The operator will do all the necessary steps to configure the instance so that it can join the cluster as a worker node.
|
||||
|
||||
Follow the steps below to install the operator:
|
||||
1. Navigate to **Operators** > **OperatorHub** in the left menu.
|
||||
2. Search for `Windows`.
|
||||
3. Click on **Windows Machine Config Operator** provided by Red Hat and click **Install**.
|
||||
4. Leave all settings as the default and click **Install** once more.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Installing the windows machine config operator* |
|
||||
</Zoom>
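If you prefer to drive the operator installation from your bastion host terminal instead of the console, the snippet below is a rough CLI equivalent. Treat the package and channel names as assumptions, and confirm them with `oc get packagemanifests -n openshift-marketplace | grep -i windows` before relying on this:

```bash
# Sketch only: CLI equivalent of the console based operator install above
oc create namespace openshift-windows-machine-config-operator

cat << EOF | oc apply --filename -
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: windows-machine-config-operator
  namespace: openshift-windows-machine-config-operator
spec:
  targetNamespaces:
    - openshift-windows-machine-config-operator
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: windows-machine-config-operator
  namespace: openshift-windows-machine-config-operator
spec:
  channel: stable
  installPlanApproval: Automatic
  name: windows-machine-config-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOF
```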
|
||||
|
||||
> Note: The operator installation may take several minutes to complete. Wait for the status of `✅ succeeded` before continuing with the following step.
|
||||
>
|
||||
|
||||
## 2.3 - Create configuration secrets
|
||||
|
||||
The windows machine config operator expects a secret named `cloud-private-key`, containing a private key, to be present in its namespace. This private key will be used to log into the soon-to-be-provisioned Windows machine and set it up as an OpenShift node.
|
||||
|
||||
Run the commands below from your bastion host to create the required secret.
|
||||
|
||||
1. Generate a new ssh key with `ssh-keygen -t rsa -f ${HOME}/.ssh/winkey -q -N ''`
|
||||
2. Run the command below to create the required secret from the public key you just created.
|
||||
|
||||
```bash
|
||||
oc create secret generic cloud-private-key \
|
||||
--from-file=private-key.pem=${HOME}/.ssh/winkey \
|
||||
--namespace openshift-windows-machine-config-operator
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Create a private key secret* |
|
||||
</Zoom>
|
||||
|
||||
Once your network configuration, operator installation and secret creation are complete, you're ready to move on to the next exercise 🎉
|
||||
135
data/windows/exercise3.mdx
Normal file
@ -0,0 +1,135 @@
|
||||
---
|
||||
title: Provisioning a windows worker node
|
||||
exercise: 3
|
||||
date: '2024-05-26'
|
||||
tags: ['openshift','windows','kubernetes','containers']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Auto scaling nodes with machine sets!"
|
||||
---
|
||||
|
||||
|
||||
Now that our cluster is ready to support Windows nodes, let's provision one through the Machine API.
|
||||
|
||||
The Machine API is a combination of primary resources that are based on the upstream [Cluster API](https://github.com/kubernetes-sigs/cluster-api) project and custom OpenShift Container Platform resources.
|
||||
|
||||
The two primary resources are:
|
||||
|
||||
**1. Machines**
|
||||
|
||||
> A fundamental unit that describes the host for a Node. A machine has a providerSpec, which describes the types of compute nodes that are offered for different cloud platforms. For example, a machine type for a worker node on Amazon Web Services (AWS) might define a specific machine type and required metadata.
|
||||
|
||||
**2. MachineSets**
|
||||
|
||||
> Groups of machines. MachineSets are to machines as ReplicaSets are to Pods. If you need more machines or must scale them down, you change the **replicas** field on the MachineSet to meet your compute need.
|
||||
|
||||
|
||||
## 3.1 Create a single replica machineset
|
||||
|
||||
In this exercise we will create a `MachineSet`. Once created, this will automatically begin provisioning a Windows machine and adding it to our cluster as a worker node.
|
||||
|
||||
Below is a YAML snippet we will use as a base to create our `MachineSet`:
|
||||
|
||||
```yaml
|
||||
apiVersion: machine.openshift.io/v1beta1
|
||||
kind: MachineSet
|
||||
metadata:
|
||||
name: cluster-<id>-windows-ap-southeast-<zone>
|
||||
namespace: openshift-machine-api
|
||||
labels:
|
||||
machine.openshift.io/cluster-api-cluster: cluster-<id>
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
machine.openshift.io/cluster-api-cluster: cluster-<id>
|
||||
machine.openshift.io/cluster-api-machineset: cluster-<id>-worker-ap-southeast-<zone>
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
machine.openshift.io/cluster-api-cluster: cluster-<id>
|
||||
machine.openshift.io/cluster-api-machine-role: worker
|
||||
machine.openshift.io/cluster-api-machine-type: worker
|
||||
machine.openshift.io/cluster-api-machineset: cluster-<id>-worker-ap-southeast-<zone>
|
||||
machine.openshift.io/os-id: Windows
|
||||
spec:
|
||||
lifecycleHooks: {}
|
||||
metadata:
|
||||
labels:
|
||||
node-role.kubernetes.io/worker: ''
|
||||
providerSpec:
|
||||
value:
|
||||
userDataSecret:
|
||||
name: windows-user-data
|
||||
placement:
|
||||
availabilityZone: ap-southeast-<zone>
|
||||
region: ap-southeast-1
|
||||
credentialsSecret:
|
||||
name: aws-cloud-credentials
|
||||
instanceType: m5a.4xlarge
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
blockDevices:
|
||||
- ebs:
|
||||
iops: 0
|
||||
kmsKey: {}
|
||||
volumeSize: 120
|
||||
volumeType: gp2
|
||||
securityGroups:
|
||||
- filters:
|
||||
- name: 'tag:Name'
|
||||
values:
|
||||
- cluster-<id>-worker-sg
|
||||
kind: AWSMachineProviderConfig
|
||||
metadataServiceOptions: {}
|
||||
tags:
|
||||
- name: kubernetes.io/cluster/cluster-<id>
|
||||
value: owned
|
||||
deviceIndex: 0
|
||||
ami:
|
||||
id: ami-0e76083a67107f741
|
||||
subnet:
|
||||
filters:
|
||||
- name: 'tag:Name'
|
||||
values:
|
||||
- cluster-<id>-private-ap-southeast-<zone>
|
||||
apiVersion: awsproviderconfig.openshift.io/v1beta1
|
||||
iamInstanceProfile:
|
||||
id: cluster-<id>-worker-profile
|
||||
```
|
||||
|
||||
There are ten references to `<id>` in the sample that we need to find and replace with the actual cluster id for the cluster we have been allocated for the workshop, and five references to the availability `<zone>` for our cluster nodes that we also need to update with the actual zone in use.
|
||||
|
||||
Run the following command in your bastion host terminal session to find your cluster id and zone:
|
||||
|
||||
```bash
|
||||
name=$(oc get machineset -A -o jsonpath={.items[0].metadata.name})
|
||||
echo "Cluster id is: ${name:8:11}"
|
||||
echo "Cluster availability zone is: ${name:40:2}"
|
||||
```
|
||||
|
||||
After retrieving your cluster id and zone, update the sample `MachineSet` using your preferred text editor, then select and copy all of the text to your clipboard.
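If you'd rather script the substitution than edit by hand, something like the following should work, assuming the `name` variable from the previous command is still set in your shell and you have saved the sample above to a file named `windows-machineset.yaml` (a hypothetical file name); you can then paste the resulting file into the web console as described next.

```bash
# Substitute the cluster id and availability zone into the saved sample
sed -i "s/<id>/${name:8:11}/g; s/<zone>/${name:40:2}/g" windows-machineset.yaml
```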
|
||||
|
||||
Within OpenShift you can then click the ➕ button in the top right hand corner, paste in your YAML and click **Create**.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Create a windows machineset* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.2 Verify windows machine status
|
||||
|
||||
After creating the `MachineSet`, a new Windows machine will be automatically provisioned and added to our OpenShift cluster, as we set our desired replicas in the YAML to `1`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Check the status of the new windows machine* |
|
||||
</Zoom>
|
||||
|
||||
Creating, provisioning and configuring a new Windows host can be a lengthy process, taking 15-30 minutes, so now is a good time to take a break ☕.
|
||||
|
||||
You can keep an eye on the status of your Machine in the OpenShift web console. Once it reaches the **✅ Provisioned as node** status you are ready to proceed to the next exercise.
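If you prefer the terminal, a rough CLI equivalent of that check is:

```bash
# Watch the Windows machine move through its phases; it should eventually reach Running
oc get machines -n openshift-machine-api -w

# Once configured, the new worker should also appear as a node with the Windows OS label
oc get nodes -l kubernetes.io/os=windows
```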
|
||||
|
||||
90
data/windows/exercise4.mdx
Normal file
@ -0,0 +1,90 @@
|
||||
---
|
||||
title: Deploying a windows workload
|
||||
exercise: 4
|
||||
date: '2024-05-26'
|
||||
tags: ['openshift','windows','kubernetes','containers']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Putting our new cluster windows node to work 🚀"
|
||||
---
|
||||
|
||||
|
||||
With our cluster now having both Windows and Linux worker nodes, let's deploy a hybrid workload that will make use of both.
|
||||
|
||||
**The NetCandy Store**
|
||||
|
||||
You will be deploying a sample application stack that delivers an eCommerce site, The NetCandy Store. This application is built using Windows Containers working together with Linux Containers.
|
||||
|
||||
This application consists of:
|
||||
|
||||
1. Windows Container running a .NET v4 frontend, which is consuming a backend service.
|
||||
2. Linux Container running a .NET Core backend service, which is using a database.
|
||||
3. Linux Container running an MSSQL database 🤯.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Mixed workload architecture diagram* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.1 Add helm repository
|
||||
|
||||
In this exercise we will deploy the NetCandy Store application using `helm`. You can deliver your Windows workloads in the same way you deliver your Linux workloads. Since everything is just YAML, the workflow is the same whether that be via Helm, an Operator, or Ansible.
|
||||
|
||||
We'll get started by creating a project and adding a helm repository that our application helm chart will be sourced from.
|
||||
|
||||
Follow the steps below to add the repository:
|
||||
|
||||
1. Switch from **Administrator** to **Developer** view in the top left web console dropdown menu.
|
||||
2. Click on **+Add** in the left menu.
|
||||
3. Click on the **Project** dropdown at the top and click **Create Project**.
|
||||
4. Enter the name `netcandystore` and click **Create**.
|
||||
5. Click on **Helm Chart repositories**.
|
||||
6. Enter the name `redhat-demos` and url `https://redhat-developer-demos.github.io/helm-repo` then click **Create**.
|
||||
|
||||
This will allow us to deploy any helm charts available in this repository.
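Under the hood the console creates a `HelmChartRepository` resource for us. A minimal sketch of the equivalent YAML, per the OpenShift Helm documentation, would look something like this:

```yaml
apiVersion: helm.openshift.io/v1beta1
kind: HelmChartRepository
metadata:
  name: redhat-demos
spec:
  name: redhat-demos
  connectionConfig:
    url: https://redhat-developer-demos.github.io/helm-repo
```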
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Creating a project and adding a helm repository* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.2 Deploy candystore helm chart
|
||||
|
||||
With our helm chart repository added, let's deploy our application! This is as simple as following the three steps below to create a helm release.
|
||||
|
||||
1. Search for `candy` on the **Helm charts** screen.
|
||||
2. Click on **Netcandystore** and then click **Create**.
|
||||
3. Review the chart settings and click **Create** once more.
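The console flow above is equivalent to a plain `helm` release from the CLI. A hedged sketch is shown below; the exact chart name in the repository may differ, so confirm it with `helm search repo` first:

```bash
helm repo add redhat-demos https://redhat-developer-demos.github.io/helm-repo
helm repo update
helm search repo candy                          # confirm the exact chart name
helm install netcandystore redhat-demos/netcandystore --namespace netcandystore
```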
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Create mixed architecture application via helm* |
|
||||
</Zoom>
|
||||
|
||||
> Note: The application can take a few minutes to finish deploying, so it's time for another coffee ☕.
|
||||
|
||||
## 4.3 Review deployed windows application
|
||||
|
||||
After creating our helm release we can see the status of the application from the **Topology** screen in the **Developer** view.
|
||||
|
||||
We can verify our Windows Container is running by:
|
||||
|
||||
1. Clicking on the **netcandystore** frontend Windows Container.
|
||||
2. Selecting the **Resources** tab on the right hand panel and clicking on the pod name.
|
||||
3. Clicking the **Terminal** tab and verifying that a Windows command prompt displays.
|
||||
4. Returning to the **Topology** screen and opening the URL for the **netcandystore** application to confirm the application is running.
|
||||
|
||||
> Note: You may need to change `https://` to `http://` in your browser address bar when opening the application URL, as some browsers now automatically attempt to redirect to HTTPS; however, this application route is currently only served over HTTP.
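You can also confirm from the CLI that the frontend pod was scheduled onto the Windows worker; for example:

```bash
# The NODE column for the frontend pod should show your Windows worker node
oc get pods -n netcandystore -o wide
oc get nodes -l kubernetes.io/os=windows
```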
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Confirm Windows container status* |
|
||||
</Zoom>
|
||||
|
||||
Congratulations! You've taken an existing OpenShift 4 cluster, set it up for running Windows workloads, then deployed a Windows app 🎉.
|
||||
171
data/workshop/README.org
Normal file
@ -0,0 +1,171 @@
|
||||
#+TITLE: Openshift security hackathon
|
||||
#+DATE: <2024-09-26 Thu>
|
||||
#+AUTHOR: James Blair
|
||||
|
||||
|
||||
This document captures the steps required to set up an instance of the workshop.
|
||||
|
||||
* Log in to cluster
|
||||
|
||||
#+begin_src tmux
|
||||
oc login --web https://api.cluster-bcfz8.bcfz8.sandbox1805.opentlc.com:6443
|
||||
#+end_src
|
||||
|
||||
* Update cluster logo
|
||||
|
||||
#+begin_src tmux
|
||||
oc create configmap console-custom-logo --from-file=/home/james/Downloads/logo.png -n openshift-config
|
||||
|
||||
cat << EOF | oc apply --filename -
|
||||
apiVersion: operator.openshift.io/v1
|
||||
kind: Console
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
customization:
|
||||
customLogoFile:
|
||||
key: logo.png
|
||||
name: console-custom-logo
|
||||
customProductName: ACME Financial Services OpenShift Console
|
||||
perspectives:
|
||||
- id: admin
|
||||
visibility:
|
||||
state: Disabled
|
||||
- id: dev
|
||||
visibility:
|
||||
state: Enabled
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
* Add an interesting notification banner
|
||||
|
||||
#+begin_src tmux
|
||||
cat << EOF | oc apply --filename -
|
||||
apiVersion: console.openshift.io/v1
|
||||
kind: ConsoleNotification
|
||||
metadata:
|
||||
name: acme-banner
|
||||
spec:
|
||||
text: ACME Financial Services Production OpenShift
|
||||
location: BannerTop
|
||||
link:
|
||||
href: 'https://www.youtube.com/watch?v=W31e9meX9S4'
|
||||
text: Cluster Security Dashboard
|
||||
color: '#fff'
|
||||
backgroundColor: '#0000FF'
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
* Deploy the vulnerable workload
|
||||
|
||||
#+begin_src tmux
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: prd-acme-payments
|
||||
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: prd-acme-payments-processor
|
||||
namespace: prd-acme-payments
|
||||
labels:
|
||||
app: payments-processor
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
deployment: prd-acme-payments-processor
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
deployment: prd-acme-payments-processor
|
||||
spec:
|
||||
containers:
|
||||
- name: literally-log4shell
|
||||
image: quay.io/smileyfritz/log4shell-app:v0.5
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_ADMIN
|
||||
- NET_ADMIN
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: unix-socket
|
||||
mountPath: /var/run/crio/crio.sock
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
schedulerName: default-scheduler
|
||||
volumes:
|
||||
- name: unix-socket
|
||||
hostPath:
|
||||
path: /var/run/crio/crio.sock
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 25%
|
||||
maxSurge: 25%
|
||||
revisionHistoryLimit: 10
|
||||
progressDeadlineSeconds: 600
|
||||
EOF
|
||||
|
||||
oc adm policy add-scc-to-user privileged -z default -n prd-acme-payments
|
||||
#+end_src
|
||||
|
||||
* Add spicy cluster users
|
||||
|
||||
#+begin_src tmux
|
||||
# Create the namespace for the exercise
|
||||
oc new-project prd-acme-experimental
|
||||
|
||||
# Retrieve the existing users htpasswd file
|
||||
oc get secret htpasswd -ojsonpath={.data.htpasswd} -n openshift-config | base64 --decode > ${HOME}/Downloads/users.htpasswd
|
||||
|
||||
# Add additional users
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd specific-enhanced-ocelot admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd upset-benevolent-hacker admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd beaming-aggressive-squid admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd tame-threatening-otter admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd rebuked-placid-engineer admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd expert-invasive-meerkat admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd childish-shifty-caterpillar admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd silent-lively-heron admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd bountiful-soaked-crab admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd alienated-proud-snail admin
|
||||
|
||||
# Replace the secret
|
||||
oc create secret generic htpasswd --from-file=htpasswd=${HOME}/Downloads/users.htpasswd --dry-run=client --output yaml --namespace openshift-config | oc replace --filename -
|
||||
sleep 30
|
||||
|
||||
# Login as a specified user
|
||||
oc login --username alienated-proud-snail --password admin
|
||||
oc login --username bountiful-soaked-crab --password admin
|
||||
oc login --username silent-lively-heron --password admin
|
||||
oc login --username childish-shifty-caterpillar --password admin
|
||||
oc login --username expert-invasive-meerkat --password admin
|
||||
oc login --username rebuked-placid-engineer --password admin
|
||||
oc login --username tame-threatening-otter --password admin
|
||||
oc login --username beaming-aggressive-squid --password admin
|
||||
oc login --username upset-benevolent-hacker --password admin
|
||||
oc login --username specific-enhanced-ocelot --password admin
|
||||
|
||||
# Log back in as admin
|
||||
oc login --username admin
|
||||
|
||||
# Grant user permission on project
|
||||
oc adm policy add-role-to-user admin childish-shifty-caterpillar --namespace prd-acme-experimental
|
||||
|
||||
# Delete the namespace as a particular user
|
||||
oc delete project prd-acme-experimental --as childish-shifty-caterpillar
|
||||
#+end_src
|
||||
@ -1,190 +1,72 @@
|
||||
---
|
||||
title: Getting familiar with OpenShift
|
||||
title: Understanding our hackathon environment
|
||||
exercise: 1
|
||||
date: '2023-12-04'
|
||||
tags: ['openshift','containers','kubernetes']
|
||||
date: '2024-10-14'
|
||||
tags: ['openshift','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "In this first exercise we'll get familiar with OpenShift."
|
||||
summary: "Let's get familiar with our hackathon setup."
|
||||
---
|
||||
|
||||
Red Hat [OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. In this first excercise we'll get logged into our cluster and familarise ourselves with the OpenShift web console and web terminal.
|
||||
Welcome to the OpenShift 4 security hackathon! Here you'll be able to practice your prowess operating a secure and compliant OpenShift 4 cluster. Exercises will award points for each correct solution.
|
||||
|
||||
The OpenShift Container Platform web console is a feature-rich web console with both an Administrator perspective and a Developer perspective accessible through any modern web browser. You can use the web console to visualize, browse, and manage your OpenShift cluster and the applications running on it.
|
||||
|
||||
In addition to the web console, OpenShift includes command line tools to provide users with a nice interface to work with applications deployed to the platform. The `oc` command line tool is available for Linux, macOS or Windows.
|
||||
You're in a race to reach the highest score before the session concludes! If multiple teams complete all exercises and so share the same points total, a further ranking will be done by elapsed time, based on when Slack messages are sent.
|
||||
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
## 1.1 - Login to lab environment
|
||||
|
||||
An OpenShift `4.14` cluster has already been provisioned for you to complete these excercises. Open your web browser and navigate to the workshop login page https://demo.redhat.com/workshop/vq5abz.
|
||||
## 1.1 - The hackathon scenario
|
||||
|
||||
Once the page loads you can login with the details provided by your workshop facilitator.
|
||||
We're returning to ACME Financial Services, a large bank based in Australia. Thanks to the efforts of the local account team, after a long procurement journey Red Hat has landed a massive **$5m AUD** deal including a significant portion of Red Hat Services 🚀.
|
||||
|
||||
Your hackathon team are the post-sales consultants engaging with ACME to improve their OpenShift platform security hardening. The bank have been running OpenShift for a while but the account team have said *"they are basically YOLO'ing it"* from a security perspective. Thankfully you're on site now to help iron things out!
|
||||
|
||||

|
||||
|
||||
|
||||
## 1.2 - Understanding the environment
|
||||
|
||||
For this challenge you'll be given access to the ACME Financial Services OpenShift `4.17` cluster which is not currently operating in a secure and compliant manner. All challenge tasks must be performed on this cluster so your solutions can be graded successfully.
|
||||
|
||||
You can and are encouraged to use any supporting documentation or other resources in order to tackle each of the challenge tasks.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop login page* |
|
||||
| *OpenShift cluster console* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Login to the cluster web console
|
||||
## 1.3 - Obtain your environment
|
||||
|
||||
Once you're logged into the lab environnment we can open up the OpenShift web console and login with the credentials provided.
|
||||
Working in a small team, you will have one cluster shared between your team members. Your team will have a name allocated already.
|
||||
|
||||
When first logging in you will be prompted to take a tour of the **Developer** console view, let's do that now.
|
||||
To get underway open your web browser and navigate to this link to allocate an environment for your team https://catalog.demo.redhat.com/workshop/w949gy.
|
||||
|
||||
Register for an environment using the team email address and password provided by your hackathon organisers. Registering with a team email will mean all your team members will be able to see the same cluster details for your shared team cluster.
|
||||
|
||||
<Zoom>
|
||||
|  |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Developer perspective web console tour* |
|
||||
| *Hackathon team registration page* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.3 - Understanding projects
|
||||
## 1.4 - Confirm environment access
|
||||
|
||||
Projects are a logical boundary to help you organize your applications. An OpenShift project allows a community of users (or a single user) to organize and manage their work in isolation from other projects.
|
||||
If your team have secured an environment and are ready to start the challenge please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
Each project has its own resources, role based access control (who can or cannot perform actions), and constraints (quotas and limits on resources, etc).
|
||||
> [team name] have logged into an environment and are starting the challenge!
|
||||
|
||||
Projects act as a "wrapper" around all the application services you (or your teams) are using for your work.
|
||||
|
||||
In this lab environment, you already have access to single project: `userX` (Where X is the number of your user allocted for the workshop.)
|
||||
|
||||
Let's click into our `Project` from the left hand panel of the **Developer** web console perspective. We should be able to see that our project has no `Deployments` and there are no compute cpu or memory resources currently being consumed.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Developer perspective project view* |
|
||||
</Zoom>
|
||||
The event team will reply in slack to confirm your team has been recorded and start you with a base score of `10` points.
|
||||
|
||||
|
||||
## 1.4 - Switching between perspectives
|
||||
## 1.5 - Hints!
|
||||
|
||||
Different roles have different needs when it comes to viewing details within the OpenShift web console. At the top of the left navigation menu, you can toggle between the Administrator perspective and the Developer perspective.
|
||||
If you get stuck on a question, fear not, perhaps try a different approach. If you have tried everything you can think of and are still stuck you can unlock a hint for `5` points by posting a message in the `#event-anz-ocp-security-hackathon` channel with the message:
|
||||
|
||||
Select **Administrator** to switch to the Administrator perspective.
|
||||
> [team name] are stuck on [exercise] and are unlocking a hint.
|
||||
|
||||
Once the Administrator perspective loads, you should be in the "Home" view and see a wider array of menu sections in the left hand navigation panel.
|
||||
|
||||
Switch back to the **Developer** perspective. Once the Developer perspective loads, select the **Topology** view.
|
||||
|
||||
Right now, there are no applications or components to view in your `userX` project, but once you begin working on the lab, you’ll be able to visualize and interact with the components in your application here.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Switching web console perspectives* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
|
||||
## 1.5 - Launching a web terminal
|
||||
|
||||
While web interfaces are comfortable and easy to use, sometimes we want to quickly run commands to get things done. That is where the `oc` command line utility comes in.
|
||||
|
||||
One handy feature of the OpenShift web console is we can launch a web terminal that will create a browser based terminal that already has the `oc` command logged in and ready to use.
|
||||
|
||||
Let's launch a web terminal now by clicking the terminal button in the top right hand corner and then clicking **Start** with our `userX` project selected.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Launching your web terminal* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.6 - Running oc commands
|
||||
|
||||
The [`oc` command line utility](https://docs.openshift.com/container-platform/4.14/cli_reference/openshift_cli/getting-started-cli.html#creating-a-new-app) is a superset of the upstream kubernetes `kubectl` command line utility. This means it can do everything that `kubectl` can do, plus some additional OpenShift specific commands.
|
||||
|
||||
Let's try a few commands now:
|
||||
|
||||
|
||||
### Checking our current project
|
||||
|
||||
Most actions we take in OpenShift will be in relation to a particular project. We can check which project we are currently actively using by running the `oc project` command.
|
||||
|
||||
We should see output similar to below showing we are currently using our `userX` project:
|
||||
|
||||
```bash
|
||||
bash-4.4 ~ $ oc project
|
||||
Using project "user1" from context named "user1-context" on server "https://172.31.0.1:443".
|
||||
```
|
||||
|
||||
### Getting help and explaining concepts
|
||||
|
||||
As with any command line utility, there can be complexity that quickly surfaces. Thankfully the `oc` command line utility has excellent built in help.
|
||||
|
||||
Let's take a look at that now.
|
||||
|
||||
To get an understanding of all the options available, try running `oc help`. You should see options similar to the below sample:
|
||||
|
||||
```text
|
||||
bash-4.4 ~ $ oc help
|
||||
OpenShift Client
|
||||
|
||||
This client helps you develop, build, deploy, and run your applications on any
|
||||
OpenShift or Kubernetes cluster. It also includes the administrative
|
||||
commands for managing a cluster under the 'adm' subcommand.
|
||||
|
||||
Basic Commands:
|
||||
login Log in to a server
|
||||
new-project Request a new project
|
||||
new-app Create a new application
|
||||
status Show an overview of the current project
|
||||
project Switch to another project
|
||||
projects Display existing projects
|
||||
explain Get documentation for a resource
|
||||
|
||||
Build and Deploy Commands:
|
||||
rollout Manage a Kubernetes deployment or OpenShift deployment config
|
||||
rollback Revert part of an application back to a previous deployment
|
||||
new-build Create a new build configuration
|
||||
start-build Start a new build
|
||||
cancel-build Cancel running, pending, or new builds
|
||||
import-image Import images from a container image registry
|
||||
tag Tag existing images into image streams
|
||||
|
||||
```
|
||||
|
||||
|
||||
To get a more detailed explanataion about a specific concept we can use the `oc explain` command.
|
||||
|
||||
Let's run `oc explain project` now to learn more about the concept of a project we introduced earlier:
|
||||
|
||||
```text
|
||||
bash-4.4 ~ $ oc explain project
|
||||
KIND: Project
|
||||
VERSION: project.openshift.io/v1
|
||||
|
||||
DESCRIPTION:
|
||||
Projects are the unit of isolation and collaboration in OpenShift. A
|
||||
project has one or more members, a quota on the resources that the project
|
||||
may consume, and the security controls on the resources in the project.
|
||||
Within a project, members may have different roles - project administrators
|
||||
can set membership, editors can create and manage the resources, and
|
||||
viewers can see but not access running containers. In a normal cluster
|
||||
project administrators are not able to alter their quotas - that is
|
||||
restricted to cluster administrators.
|
||||
|
||||
Listing or watching projects will return only projects the user has the
|
||||
reader role on.
|
||||
|
||||
An OpenShift project is an alternative representation of a Kubernetes
|
||||
namespace. Projects are exposed as editable to end users while namespaces
|
||||
are not. Direct creation of a project is typically restricted to
|
||||
administrators, while end users should use the requestproject resource.
|
||||
```
|
||||
|
||||
|
||||
That's a quick introduction to the `oc` command line utility. Let's close our web terminal now so we can move on to the next excercise.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Closing your web terminal* |
|
||||
</Zoom>
|
||||
A hackathon organiser will join your breakout room to share the hint with you 🤫.
|
||||
|
||||
|
||||
@ -1,115 +1,99 @@
|
||||
---
|
||||
title: Deploying your first application
|
||||
title: Laying the foundations for cluster security
|
||||
exercise: 2
|
||||
date: '2023-12-05'
|
||||
tags: ['openshift','containers','kubernetes','deployments','images']
|
||||
date: '2024-10-17'
|
||||
tags: ['openshift','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to deploy your first app!"
|
||||
summary: "Can't have security without a security platform"
|
||||
---
|
||||
|
||||
It’s your first day of the consulting engagement with ACME. You’ve paired up with one of their Senior Platform Engineers, Angie, who has just given you a tour of their newly deployed OpenShift cluster, which is looking healthy 🥦 (whew!).
|
||||
|
||||
Now that we have had a tour of the OpenShift web console to get familar, let's use the web console to deploy our first application.
|
||||
Time to tackle the first task on our consulting engagement list, installing [Red Hat Advanced Cluster Security](https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index) via the operator.
|
||||
|
||||
Let’s start by doing the simplest thing possible - get a plain old Docker-formatted container image to run on OpenShift. This is incredibly simple to do. With OpenShift it can be done directly from the web console.
|
||||
Ultimately the ACME team wants to manage everything with GitOps, but for today Angie would prefer a guided walkthrough on how to do things using the OpenShift Web Console so she has an opportunity to learn more about each step of the process.
|
||||
|
||||
Before we begin, if you would like a bit more background on what a container is or why they are important click the following link to learn more: https://www.redhat.com/en/topics/containers#overview
|
||||

|
||||
|
||||
|
||||
## 2.1 - Deploying the container image
|
||||
## 2.1 - Installing the rhacs operator
|
||||
|
||||
In this exercise, we’re going to deploy the **web** component of the ParksMap application which uses OpenShift's service discovery mechanism to discover any accompanying backend services deployed and shows their data on the map. Below is a visual overview of the complete ParksMap application.
|
||||
You’re in front of a screen together with the Web Console open. The first step of installing the operator should be easy, so you'd better get started!
|
||||
|
||||
The only requirement Angie has requested for the Advanced Cluster Security operator installation is that all future operator updates must be approved **Manually**. She explains that several platform team members have PTSD from previous upgrades happening automatically and bringing down ACME's EFTPOS platform so now automatic updates are disabled everywhere.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#install-acs-operator_install-central-ocp
|
||||
|
||||
|
||||
## 2.2 - Deploying central services
|
||||
|
||||
With the operator installed and healthy we now need to deploy an instance of **Central** for Angie. This Central instance will provide the management interface and API, and will secure the full fleet of ACME’s OpenShift clusters along with some EKS clusters ACME are currently running in AWS.
|
||||
|
||||
Angie has shared a high level design with you that states the Central resources need to be deployed to the `prd-acme-rhacs` namespace.
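A minimal sketch of the `Central` custom resource the operator expects is shown below, using the default resource name from the RHACS documentation and accepting the operator defaults for everything else; treat it as a starting point rather than the definitive configuration.

```yaml
apiVersion: platform.stackrox.io/v1alpha1
kind: Central
metadata:
  name: stackrox-central-services
  namespace: prd-acme-rhacs
# Accepting operator defaults; central and scanner resources can be tuned under spec as required
spec: {}
```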
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *ParksMap application architecture* |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Architecture for Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Within the **Developer** perspective, click the **+Add** entry on the left hand menu.
|
||||
After deploying Central ensure you can log in to the web console using the automatically generated credentials.
|
||||
|
||||
Once on the **+Add** page, click **Container images** to open a dialog that will allow you to quickly deploy an image.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
In the **Image name** field enter the following:
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#install-central-operator_install-central-ocp
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#verify-central-install-operator_install-central-ocp
|
||||
|
||||
```text
|
||||
quay.io/openshiftroadshow/parksmap:latest
|
||||
```
|
||||
|
||||
Leave all other fields at their defaults (but take your time to scroll down and review each one to familarise yourself! 🎓)
|
||||
## 2.3 - Generating an init bundle
|
||||
|
||||
Click **Create** to deploy the application.
|
||||
Alright, you've given Angie a quick tour around the Red Hat Advanced Cluster Security Console. Now it's time to secure this hub cluster by generating an init bundle named `prd-acme-hub`.
|
||||
|
||||
OpenShift will pull this container image if it does not exist already on the cluster and then deploy a container based on this image. You will be taken back to the **Topology** view in the **Developer** perspective which will show the new "Parksmap" application.
|
||||
You remember from the documentation that before you install the `SecuredCluster` resource on a cluster, you must create an init bundle. The cluster that has the `SecuredCluster` resource then uses this bundle to authenticate with Central.
|
||||
|
||||
Angie would prefer to use the **Operator** method for these tasks, as she explains she has repressed memories of trying to find indentation issues in helm chart templates and never wants to touch helm ever again.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *Deploying the container image* |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Create an init bundle in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
## 2.2 - Reviewing our deployed application
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#portal-generate-init-bundle_init-bundle-ocp
|
||||
|
||||
If you click on the **parksmap** entry in the **Topology** view, you will see some information about that deployed application.
|
||||
|
||||
The **Resources** tab may be displayed by default. If so, click on the **Details** tab. On that tab, you will see that there is a single **Pod** that was created by your actions.
|
||||
## 2.4 - Securing the hub cluster
|
||||
|
||||
The pair session is going well and Angie is impressed by how quickly you got to this point. You now have the init bundle downloaded and explain to her that you just need to import it on the cluster and create the `SecuredCluster` resource to finish the process.
|
||||
|
||||
Consulting the high level design she lets you know the init bundle and `SecuredCluster` resources need to be deployed to the `prd-acme-secured` namespace, with the cluster being named `prd-acme-hub` within RHACS.
|
||||
|
||||
Reading further in the design Angie points out that the **Contact Image Scanners** setting should be set to `ScanIfMissing` as this makes the admission control process more secure by ensuring all images are scanned before they can be admitted to the cluster.
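Pulling those requirements together, a sketch of the `SecuredCluster` resource could look like the following; the field names are based on the RHACS operator documentation and the Central endpoint host is a placeholder you'll need to replace with your own route:

```yaml
apiVersion: platform.stackrox.io/v1alpha1
kind: SecuredCluster
metadata:
  name: stackrox-secured-cluster-services
  namespace: prd-acme-secured
spec:
  clusterName: prd-acme-hub
  # Placeholder - use the route host of your Central instance, keeping the :443 suffix
  centralEndpoint: central-prd-acme-rhacs.apps.<cluster-domain>:443
  admissionControl:
    contactImageScanners: ScanIfMissing
```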
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#installing-sc-operator
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *Deploying the container image* |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Secured cluster list in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
> Note: A pod is the smallest deployable unit in Kubernetes and is effectively a grouping of one or more individual containers. Any containers deployed within a pod are guaranteed to run on the same machine. It is very common for pods in kubernetes to only hold a single container, although sometimes auxiliary services can be included as additional containers in a pod when we want them to run alongside our application.
|
||||
> **Hint**: If your `SecuredCluster` pods are in the right namespace but are not all starting successfully, a common cause is that you have missed appending `:443` to your Central endpoint in the `SecuredCluster` resource.
|
||||
|
||||
|
||||
## 2.2 - Accessing the application
|
||||
|
||||
Now that we have the ParksMap application deployed. How do we access it??
|
||||
|
||||
This is where OpenShift **Routes** and **Services** come in.
|
||||
|
||||
While **Services** provide internal abstraction and load balancing within an OpenShift cluster, sometimes clients outside of the OpenShift cluster need to access an application. The way that external clients are able to access applications running in OpenShift is through an OpenShift **Route**.
|
||||
|
||||
You may remember that when we deployed the ParksMap application, there was a checkbox ticked to automatically create a **Route**. Thanks to this, all we need to do to access the application is go the **Resources** tab of the application details pane and click the url shown under the **Routes** header.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *Opening ParksMap application Route* |
|
||||
</Zoom>
|
||||
|
||||
Clicking the link you should now see the ParksMap application frontend 🎉
|
||||
|
||||
> Note: If this is the first time opening this page, the browser will ask permission to get your position. This is needed by the Frontend app to center the world map to your location, if you don’t allow it, it will just use a default location.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *ParksMap application frontend* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.3 - Checking application logs
|
||||
|
||||
If we deploy an application and something isn't working the way we expect, reviewing the application logs can often be helpful. OpenShift includes built in support features for reviewing application logs.
|
||||
|
||||
Let's try it now for our ParksMap frontend.
|
||||
|
||||
In the **Developer** perspective, open the **Topology** view.
|
||||
|
||||
Click your "Parksmap" application icon then click on the **Resources** tab.
|
||||
|
||||
From the **Resources** tab click **View logs**
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-------------------------------------------------------------------:|
|
||||
| *Accessing the ParksMap application logs* |
|
||||
</Zoom>
|
||||
|
||||
## 2.5 - Check your work
|
||||
|
||||
If your pair session with Angie has finished and the hub cluster is secured please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 2, we have laid the foundations for cluster security.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||

|
||||
|
||||
66
data/workshop/exercise3.mdx
Normal file
@ -0,0 +1,66 @@
|
||||
---
|
||||
title: Encrypting cluster internal network traffic
|
||||
exercise: 3
|
||||
date: '2024-10-18'
|
||||
tags: ['openshift','security','ipsec','encryption']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Is OpenShift secure by default?"
|
||||
---
|
||||
|
||||
Day one with Angie went great. After a refreshing overnight break spent watching the cinematic masterpiece of Shrek 2 you're back on site with the ACME team for day two of the consulting engagement.
|
||||
|
||||
Your first task is to address a complaint from Brent in the ACME Security team, who has done some initial cluster security checks to get a baseline. Brent is upset that OpenShift internal network traffic is currently unencrypted and has been ever since their cluster was deployed!
|
||||
|
||||
Brent is pretty annoyed because the Red Hat sales team told him that OpenShift was **"secure by default"**, so he wasn't expecting to see internal cluster traffic viewable in plain text between nodes in the cluster, as this is a big no-no for the bank 🤬🙅
|
||||
|
||||
You manage to talk him down by explaining how easily encryption can be turned on and how well OpenShift supports the feature. Whew. You note down to give some feedback to the local sales team to be more careful with the assurances they give.
|
||||
|
||||
You decide to make enabling encryption top of your list for the morning to try and keep Brent happy.
|
||||
|
||||

|
||||
|
||||
|
||||
## 3.1 - Encrypting internal cluster traffic
|
||||
|
||||
With IPsec enabled, you can encrypt internal pod-to-pod cluster traffic on the OVN-Kubernetes cluster network between nodes.
|
||||
|
||||
You confirm the required mode with Angie & Brent as `Full` and then run the `oc patch` command to get the job done after giving Angie a heads up there will be some brief disruption on the cluster while the change is rolled out.
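As a sketch, that patch could look like the command below; the `ipsecConfig` field layout follows the OVN-Kubernetes IPsec documentation for recent OpenShift releases, so verify it against the docs linked below for your exact cluster version.

```bash
oc patch networks.operator.openshift.io cluster --type=merge \
  -p '{"spec":{"defaultNetwork":{"ovnKubernetesConfig":{"ipsecConfig":{"mode":"Full"}}}}}'
```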
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Encryption implications when enabling pod-to-pod IPSec* |
|
||||
</Zoom>
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.openshift.com/container-platform/4.17/networking/network_security/configuring-ipsec-ovn.html
|
||||
|
||||
|
||||
## 3.2 - Observing cluster network rollout
|
||||
|
||||
Your change window on the ACME cluster is 30 minutes for the cluster network update. You've advised the ACME team there could be some minor disruption to the cluster while the cluster network operator is progressing the update.
|
||||
|
||||
The cluster network update can take around ten minutes to complete. Observe the progress of the operator using the **Administration** > **Cluster Settings** > **Cluster Operators** view.
|
||||
|
||||
You can also verify IPsec status using the following command:
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-ovn-kubernetes rsh ovnkube-node-<XXXXX> ovn-nbctl --no-leader-only get nb_global . ipsec
|
||||
```
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Cluster operators administration* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.3 - Check your work
|
||||
|
||||
If you've kept Brent happy by enabling encryption for internal cluster traffic please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 3, our cluster internal traffic is now encrypted with cipher [cipher].
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
55
data/workshop/exercise4.mdx
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
title: Securing vulnerable workloads
|
||||
exercise: 4
|
||||
date: '2024-10-19'
|
||||
tags: ['openshift','security','cve management','rhacs']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "How do we deal with vulnerable workloads we can't patch?"
|
||||
---
|
||||
|
||||
IPSec was a quick job and the cluster is looking good after enabling it. Your afternoon job is to pair up with Angie again and review the vulnerability status of the ACME Financial Services workloads that are deployed on the cluster so far.
|
||||
|
||||
Angie is really keen to tap into your knowledge on what she can do to make the most of the Red Hat Advanced Cluster Security Platform. This new security insight is something ACME have not really had access to historically for their container workloads.
|
||||
|
||||
You're in a meeting room going over things together, so far so good.
|
||||
|
||||
|
||||
## 4.1 - Ruh roh...
|
||||
|
||||
You're looking over the RHACS Dashboard together in the RHACS console.
|
||||
|
||||
You and Angie both spot it at the same time...
|
||||
|
||||
The core banking payments processor namespace `prd-acme-payments` is vulnerable to the critical log4shell vulnerability 😱
|
||||
|
||||

|
||||
|
||||
|
||||
## 4.2 - What the %$^& do we do????
|
||||
|
||||
In the minutes following the alarming discovery you observe a series of rushed conversations and Microsoft Skype for Business™ chats between Angie and various security team members, service owners and incident management team members.
|
||||
|
||||
A critical incident has been raised but at this point the consensus is the application simply cannot be turned off. It's a core component of the bank's payments processing and must continue running.
|
||||
|
||||
The ACME team now turn to you, seeking advice on how they could secure this existing vulnerable deployment in place, without scaling down the application, so that any attempt at exploiting the vulnerability would be automatically thwarted.
|
||||
|
||||
The clock's ticking, how will you respond?
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/operating/evaluate-security-risks#use-process-baselines_evaluate-security-risks
|
||||
|
||||
|
||||
## 4.3 - Check your work
|
||||
|
||||
If you've successfully secured the bank's vulnerable payments processor please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 4, our payments processor application is now unhackable.
|
||||
|
||||
**WARNING: The hackathon team will perform a brief penetration test of the application. If your application is not actually secured and remains exploitable by the log4shell vulnerability one of your OpenShift cluster nodes will be deleted for the lulz. No pressure!**
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||

|
||||
|
||||
68
data/workshop/exercise5.mdx
Normal file
@ -0,0 +1,68 @@
|
||||
---
|
||||
title: Understanding cluster compliance
|
||||
exercise: 5
|
||||
date: '2024-10-23'
|
||||
tags: ['openshift','compliance','nist','rhacs']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's apply an industry benchmark!"
|
||||
---
|
||||
|
||||
The first two days of the consulting engagement at ACME have whirled by. You're working remotely today for day three and are pairing up with Melissa from the bank's compliance squad.
|
||||
|
||||
On the agenda today is to harden the `prd-acme-hub` cluster by understanding and remediating compliance against the [NIST 800-53 moderate benchmark](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-53r5.pdf).
|
||||
|
||||
The bank must comply with this specific benchmark to meet the requirements of their regulatory legislation, known as APRA (ACME Penny Regulation Act, 1998).
|
||||
|
||||

|
||||
|
||||
|
||||
## 5.1 - Installing the compliance operator
|
||||
|
||||
You’ve got an upcoming Microsoft Skype for Business™ video call with Melissa in 30 minutes to show her how compliant the cluster currently is.
|
||||
|
||||
Time to quickly get the [OpenShift Compliance Operator](https://docs.openshift.com/container-platform/4.17//security/compliance_operator/co-overview.html) installed and run a scan via Red Hat Advanced Cluster Security. Better hurry!
|
||||
|
||||
As with last time, to limit PTSD-induced panic attacks among the ACME platform team, the operator must be set to update mode `Manual`.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/security_and_compliance/compliance-operator#installing-compliance-operator-web-console_compliance-operator-installation
|
||||
|
||||
|
||||
## 5.2 - Scheduling a compliance scan
|
||||
|
||||
With the operator installed, it's time to join the virtual meeting with Melissa and step her through how to run a compliance scan against NIST 800-53 moderate and visualise the results using the Red Hat Advanced Cluster Security Dashboard.
|
||||
|
||||
Create a new scan schedule named `prd-acme-hub-nist-daily` targeting the appropriate benchmarks.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Viewing a compliance report in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/operating/managing-compliance#scheduling-compliance-scans-and-assessing-profile-compliance
|
||||
|
||||
|
||||
## 5.3 - Remediating a compliance issue
|
||||
|
||||
With the scan finished, you begin stepping Melissa through the individual results, inspecting `ComplianceCheckResult` and `ComplianceRemediation` resources.
|
||||
|
||||
To demonstrate to her how the compliance operator can make automated remediation of compliance issues easy, you pick out the `ocp4-moderate-oauth-or-oauthclient-token-maxage` compliance remediation and apply it, then trigger a re-scan from the compliance operator to validate that this issue is now remediated on the cluster.
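A rough sketch of those two steps from the CLI is shown below; the scan name used in the re-scan annotation is an assumption, so list the actual scans with `oc get compliancescan -n openshift-compliance` first.

```bash
# Apply the remediation by setting spec.apply to true on the ComplianceRemediation object
oc -n openshift-compliance patch complianceremediation ocp4-moderate-oauth-or-oauthclient-token-maxage \
  --type merge -p '{"spec":{"apply":true}}'

# Trigger a re-scan of the relevant scan (scan name is an assumption)
oc -n openshift-compliance annotate compliancescan/ocp4-moderate compliance.openshift.io/rescan=
```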
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.openshift.com/container-platform/4.17//security/compliance_operator/co-scans/compliance-operator-remediation.html#compliance-applying_compliance-remediation
|
||||
|
||||
|
||||
## 5.4 - Check your work
|
||||
|
||||
If you've successfully run the compliance scan and remediated the compliance issue to show Melissa how things work please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 5, our cluster is now [percentage] compliant against NIST 800-53 moderate at a cluster level.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||
55
data/workshop/exercise6.mdx
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
title: Inspecting audit logs
|
||||
exercise: 6
|
||||
date: '2024-10-31'
|
||||
tags: ['openshift','audit','logging']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Ahh the classic who dunnit!?!??"
|
||||
---
|
||||
|
||||
|
||||
You're about to finish up day three of the engagement at ACME and have the lid halfway closed on your ACME-provided CrapPhablet7000™ laptop for the day when you hear it. An incoming Skype for Business call 😰
|
||||
|
||||
Here we go...
|
||||
|
||||
Lifting the lid with a resigned sigh you answer. It's Angie. She's looking aggrieved and in a huff explains that someone has apparently deleted an important company project and she needs to figure out who. She's worried someone has permissions they shouldn't, or that there is an inside threat actor.
|
||||
|
||||
Fear not you tell Angie, Kubernetes auditing provides a security-relevant, chronological set of records documenting the sequence of actions in a cluster. The cluster audits the activities generated by users, by applications that use the Kubernetes API, and by the control plane itself.
|
||||
|
||||
So we just need to inspect the audit logs and we should be able to find our culprit!
|
||||
|
||||

|
||||
|
||||
|
||||
## 6.1 - Needle in a haystack
|
||||
|
||||
On the call Angie starts sharing her screen and logging into the ACME Elasticsearch instance to query the audit logs but you interrupt her and explain that the cluster hasn't yet been configured to ship logs to an external aggregator.
|
||||
|
||||
Despite this, you explain how the internal audit logs can still be queried using the `oc` CLI and fire up your own screen share to step her through how it's done.
|
||||
|
||||
The namespace Angie needs to query is `prd-acme-experimental`, can you track down our threat actor??
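One possible starting point is sketched below: pull the API server audit logs from each control plane node and filter for delete events touching that namespace (the `jq` filter is illustrative, and project deletions may also appear in the `openshift-apiserver` audit logs).

```bash
for node in $(oc get nodes -l node-role.kubernetes.io/master -o jsonpath='{.items[*].metadata.name}'); do
  # Also worth repeating with --path=openshift-apiserver/audit.log for Project API events
  oc adm node-logs "$node" --path=kube-apiserver/audit.log \
    | jq -c 'select(.verb=="delete" and (.objectRef.namespace=="prd-acme-experimental" or .objectRef.name=="prd-acme-experimental"))
             | {time: .requestReceivedTimestamp, user: .user.username, resource: .objectRef.resource, name: .objectRef.name}'
done
```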
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.openshift.com/container-platform/4.17/security/audit-log-view.html
|
||||
|
||||
|
||||
## 6.2 - Removing the culprit
|
||||
|
||||
With the culprit identified Angie is aghast to discover it was one of her colleagues in the ACME OpenShift Platform team.
|
||||
|
||||
Angie instructs you to remove their platform access immediately so that they can no longer log in to OpenShift while a formal investigation is initiated to determine why the sensitive project was deleted.
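One possible approach, assuming the cluster uses an htpasswd identity provider like the one shown in the setup notes earlier in this document (the `<username>` and `<idp-name>` values are placeholders):

```bash
# Remove the culprit from the htpasswd secret backing the identity provider
oc get secret htpasswd -n openshift-config -o jsonpath='{.data.htpasswd}' | base64 -d > users.htpasswd
htpasswd -D users.htpasswd <username>
oc create secret generic htpasswd --from-file=htpasswd=users.htpasswd \
  --dry-run=client -o yaml -n openshift-config | oc replace -f -

# Also remove the existing User and Identity objects so no stale mappings remain
oc delete user <username>
oc delete identity <idp-name>:<username>
```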
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://access.redhat.com/solutions/4039941
|
||||
|
||||
|
||||
## 6.3 - Check your work
|
||||
|
||||
If you've successfully identified the culprit and removed their platform access please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 6, the culprit for the project deletion no longer has access to our OpenShift cluster.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
62
data/workshop/exercise7.mdx
Normal file
@ -0,0 +1,62 @@
|
||||
---
|
||||
title: Bonus challenge - Supply chain shmozzle
|
||||
exercise: 7
|
||||
date: '2024-11-08'
|
||||
tags: ['openshift','supply chain','rhtas']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to sign your life away..."
|
||||
---
|
||||
|
||||
|
||||
Whew - it's the last day of this week's scheduled engagement 🥱. Tomorrow you're on leave to play the new Factorio Space Age expansion and you can't wait!
|
||||
|
||||
Brushing aside thoughts of grandiose factories, you review the task list for today. Top of the list is, ironically, a core component of [software factories](https://www.redhat.com/en/resources/benefits-building-software-factory-with-openshift-overview): addressing a supply chain security requirement from Brent about introducing the capability to sign artifacts on premises and store this metadata in a secure, tamper-proof ledger.
|
||||
|
||||
As part of the $5m AUD deal the sales team included [Red Hat Trusted Artifact Signer (RHTAS)](https://access.redhat.com/products/red-hat-trusted-artifact-signer) to enhance software supply chain security by simplifying cryptographic signing and verifying of software artifacts, such as container images, binaries, and Git commits.
|
||||
|
||||
Brent is keen to get this up and running ASAP as the bank have planned to implement this capability in various forms for the past six years, but have always been "busy" with other things.
|
||||
|
||||
Nothing to it but to do it!
|
||||
|
||||
|
||||
## 7.1 - Deploy the signing platform
|
||||
|
||||
Brent's JIRA ticket explains that the signing platform should be deployed to the `prd-acme-rhtas` namespace on the production cluster.
|
||||
|
||||
> **Note** Teams are free to use any OIDC provider from the options of Red Hat Single Sign-on (SSO), Google, Amazon Secure Token Service (STS), or GitHub. Think carefully about which option you pick, as this will impact how long it takes to complete the exercise...

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Installing the Red Hat Trusted Artifact Signer operator* |
</Zoom>

Documentation you may find helpful is:

- https://docs.redhat.com/en/documentation/red_hat_trusted_artifact_signer/1/html-single/deployment_guide/index#installing-trusted-artifact-signer-using-the-operator-lifecycle-manager_deploy
- https://developers.redhat.com/learning/learn:install-sign-verify-using-red-hat-trusted-artifact-signer/resource/resources:install-and-deploy-red-hat-trusted-artifact-signer
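
With the operator installed, a minimal sketch of the remaining steps is shown below. The namespace comes from Brent's ticket; the CRD and route checks are assumptions based on the RHTAS deployment guide, so confirm the exact resource kinds (commonly a `Securesign` instance) against what your operator version actually installs.

```bash
# Create the target namespace for the signing platform
oc new-project prd-acme-rhtas

# Confirm which CRDs the RHTAS operator installed before creating an instance
oc get crds | grep -i -e rhtas -e securesign

# Watch the signing components (Fulcio, Rekor, CTlog, TUF, etc.) come up
oc get pods -n prd-acme-rhtas --watch

# Note the routes - you'll need the Rekor search UI for exercise 7.2
oc get routes -n prd-acme-rhtas
```
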
## 7.2 - Sign a container image

To test the platform out, you join a quick call with Brent to walk him through how to sign a local container image with `cosign` and then inspect the hash in the Rekor immutable ledger web interface.

<Zoom>
| |
|:-----------------------------------------------------------------------------:|
| *Searching for a record in Rekor* |
</Zoom>

Documentation you may find helpful is:

- https://docs.redhat.com/en/documentation/red_hat_trusted_artifact_signer/1/html-single/deployment_guide/index#signing-and-verifying-containers-by-using-cosign-from-the-command-line-interface_deploy
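
A hedged sketch of the client-side flow is below. The route hostnames are placeholders for the values exposed by your `prd-acme-rhtas` deployment, the OIDC issuer depends on the provider you chose in 7.1, and the flags shown are standard `cosign` options - the RHTAS documentation above has the authoritative sequence.

```bash
# Point cosign at the on-cluster TUF, Fulcio, and Rekor services (hostnames
# below are placeholders - take the real values from `oc get routes -n prd-acme-rhtas`)
export TUF_URL=https://tuf-prd-acme-rhtas.apps.example.com
export FULCIO_URL=https://fulcio-server-prd-acme-rhtas.apps.example.com
export REKOR_URL=https://rekor-server-prd-acme-rhtas.apps.example.com
export OIDC_ISSUER_URL=https://<your-oidc-provider>

cosign initialize --mirror=$TUF_URL --root=$TUF_URL/root.json

# Sign a local image and record the signature in the Rekor transparency log
cosign sign --fulcio-url=$FULCIO_URL --rekor-url=$REKOR_URL \
  --oidc-issuer=$OIDC_ISSUER_URL -y <registry>/<image>:<tag>

# Verify the signature - the output includes the Rekor log entry you can then
# look up in the Rekor search UI
cosign verify --rekor-url=$REKOR_URL \
  --certificate-identity=<signing-email> \
  --certificate-oidc-issuer=$OIDC_ISSUER_URL <registry>/<image>:<tag>
```
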
## 7.3 - Check your work

If you've successfully deployed a secure signing platform and showed Brent how it worked, please post in `#event-anz-ocp-security-hackathon` with the message:

> Please review [team name] solution for exercise 7, our Rekor record is [url].

This exercise is worth `25` points. The event team will reply in Slack to confirm your updated team total score. Congratulations - if you have reached this point, you have completed the entire hackathon! 🎉

@ -25,10 +25,10 @@ export default function ListLayout({ posts, title, initialDisplayPosts = [], pag
|
||||
</h1>
|
||||
<div className="relative max-w-lg">
|
||||
<input
|
||||
aria-label="Search articles"
|
||||
aria-label="Search exercises"
|
||||
type="text"
|
||||
onChange={(e) => setSearchValue(e.target.value)}
|
||||
placeholder="Search articles"
|
||||
placeholder="Search exercises"
|
||||
className="block w-full px-4 py-2 text-gray-900 bg-white border border-gray-300 rounded-md dark:border-gray-900 focus:ring-primary-500 focus:border-primary-500 dark:bg-gray-800 dark:text-gray-100"
|
||||
/>
|
||||
<svg
|
||||
|
||||
@ -88,18 +88,6 @@ export default function PostLayout({ frontMatter, authorDetails, next, prev, chi
|
||||
</div>
|
||||
<footer>
|
||||
<div className="divide-gray-200 text-sm font-medium leading-5 dark:divide-gray-700 xl:col-start-1 xl:row-start-2 xl:divide-y">
|
||||
{tags && (
|
||||
<div className="py-4 xl:py-8">
|
||||
<h2 className="text-xs uppercase tracking-wide text-gray-500 dark:text-gray-400">
|
||||
Tags
|
||||
</h2>
|
||||
<div className="flex flex-wrap">
|
||||
{tags.map((tag) => (
|
||||
<Tag key={tag} text={tag} />
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
{(next || prev) && (
|
||||
<div className="flex justify-between py-4 xl:block xl:space-y-8 xl:py-8">
|
||||
{prev && (
|
||||
|
||||
@ -26,8 +26,12 @@ const root = process.cwd()
|
||||
export function getFiles(type) {
|
||||
const prefixPaths = path.join(root, 'data', type)
|
||||
const files = getAllFilesRecursively(prefixPaths)
|
||||
|
||||
// Filter to include only files with .mdx extension
|
||||
const mdxFiles = files.filter(file => file.endsWith('.mdx'));
|
||||
|
||||
// Only want to return workshop/path and ignore root, replace is needed to work on Windows
|
||||
return files.map((file) => file.slice(prefixPaths.length + 1).replace(/\\/g, '/'));
|
||||
return mdxFiles.map((file) => file.slice(prefixPaths.length + 1).replace(/\\/g, '/'));
|
||||
}
|
||||
|
||||
export function formatSlug(slug) {
|
||||
|
||||
@ -11,8 +11,8 @@ module.exports = withBundleAnalyzer({
|
||||
images: {
|
||||
unoptimized: true
|
||||
},
|
||||
basePath: '/ocp-app-delivery-workshop',
|
||||
assetPrefix: '/ocp-app-delivery-workshop/',
|
||||
basePath: '',
|
||||
assetPrefix: '',
|
||||
experimental: { esmExternals: true },
|
||||
webpack: (config, { dev, isServer }) => {
|
||||
config.module.rules.push({
|
||||
@ -44,5 +44,6 @@ module.exports = withBundleAnalyzer({
|
||||
|
||||
return config
|
||||
},
|
||||
output: "standalone"
|
||||
// output: "standalone"
|
||||
output: "export"
|
||||
})
|
||||
|
||||
30241
package-lock.json
generated
92
package.json
@ -1,69 +1,69 @@
|
||||
{
|
||||
"name": "ocp-app-delivery-workshop",
|
||||
"version": "0.0.1",
|
||||
"name": "workshops",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"start": "next-remote-watch ./data",
|
||||
"dev": "next dev",
|
||||
"build": "next build",
|
||||
"test": "next build",
|
||||
"export": "next export",
|
||||
"deploy": "gh-pages -d out -t true",
|
||||
"serve": "next start",
|
||||
"analyze": "cross-env ANALYZE=true next build",
|
||||
"lint": "next lint --fix --dir pages --dir components --dir lib --dir layouts --dir scripts",
|
||||
"prepare": "husky install",
|
||||
"prepare": "husky",
|
||||
"spell": "cspell data/workshop/*"
|
||||
},
|
||||
"dependencies": {
|
||||
"@fontsource/inter": "4.5.2",
|
||||
"@next/bundle-analyzer": "^13.5.6",
|
||||
"@tailwindcss/forms": "^0.5.7",
|
||||
"@tailwindcss/typography": "^0.5.10",
|
||||
"autoprefixer": "^10.4.0",
|
||||
"esbuild": "^0.13.13",
|
||||
"github-slugger": "^1.3.0",
|
||||
"gray-matter": "^4.0.2",
|
||||
"image-size": "1.0.0",
|
||||
"mdx-bundler": "^8.0.0",
|
||||
"next": "13.5.6",
|
||||
"next-themes": "^0.0.14",
|
||||
"postcss": "^8.4.5",
|
||||
"preact": "^10.19.2",
|
||||
"react": "18.2.0",
|
||||
"react-dom": "18.2.0",
|
||||
"@next/bundle-analyzer": "^15.3.4",
|
||||
"@tailwindcss/forms": "^0.5.9",
|
||||
"@tailwindcss/typography": "^0.5.15",
|
||||
"autoprefixer": "^10.4.21",
|
||||
"esbuild": "^0.25.5",
|
||||
"github-slugger": "^2.0.0",
|
||||
"gray-matter": "^4.0.3",
|
||||
"image-size": "1.2.0",
|
||||
"mdx-bundler": "^10.1.1",
|
||||
"next": "^15.3.4",
|
||||
"next-themes": "^0.4.6",
|
||||
"postcss": "^8.5.6",
|
||||
"preact": "^10.26.9",
|
||||
"react": "18.3.1",
|
||||
"react-dom": "18.3.1",
|
||||
"react-medium-image-zoom": "^4.3.5",
|
||||
"reading-time": "1.3.0",
|
||||
"rehype-autolink-headings": "^6.1.0",
|
||||
"rehype-citation": "^0.4.0",
|
||||
"rehype-katex": "^6.0.2",
|
||||
"rehype-preset-minify": "6.0.0",
|
||||
"rehype-prism-plus": "^1.1.3",
|
||||
"rehype-slug": "^5.0.0",
|
||||
"reading-time": "1.5.0",
|
||||
"rehype-autolink-headings": "^7.1.0",
|
||||
"rehype-citation": "^2.3.1",
|
||||
"rehype-katex": "^7.0.1",
|
||||
"rehype-preset-minify": "7.0.1",
|
||||
"rehype-prism-plus": "^2.0.1",
|
||||
"rehype-slug": "^6.0.0",
|
||||
"remark-footnotes": "^4.0.1",
|
||||
"remark-gfm": "^3.0.1",
|
||||
"remark-math": "^5.1.1",
|
||||
"sharp": "^0.33.0",
|
||||
"tailwindcss": "^3.3.5",
|
||||
"unist-util-visit": "^4.0.0"
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.34.2",
|
||||
"tailwindcss": "^3.4.17",
|
||||
"unist-util-visit": "^5.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@svgr/webpack": "^6.1.2",
|
||||
"@svgr/webpack": "^8.1.0",
|
||||
"cross-env": "^7.0.3",
|
||||
"dedent": "^0.7.0",
|
||||
"eslint": "^7.29.0",
|
||||
"eslint-config-next": "12.1.4",
|
||||
"eslint-config-prettier": "^8.3.0",
|
||||
"eslint-plugin-prettier": "^3.3.1",
|
||||
"file-loader": "^6.0.0",
|
||||
"globby": "11.0.3",
|
||||
"husky": "^6.0.0",
|
||||
"inquirer": "^8.1.1",
|
||||
"lint-staged": "^11.0.0",
|
||||
"next-remote-watch": "^1.0.0",
|
||||
"prettier": "^2.5.1",
|
||||
"prettier-plugin-tailwindcss": "^0.1.4",
|
||||
"socket.io": "^4.4.0",
|
||||
"socket.io-client": "^4.4.0"
|
||||
"dedent": "^1.6.0",
|
||||
"eslint": "^9.30.1",
|
||||
"eslint-config-next": "15.3.4",
|
||||
"eslint-config-prettier": "^10.1.5",
|
||||
"eslint-plugin-prettier": "^5.5.1",
|
||||
"file-loader": "^6.2.0",
|
||||
"globby": "14.1.0",
|
||||
"husky": "^9.1.7",
|
||||
"inquirer": "^12.7.0",
|
||||
"lint-staged": "^16.1.2",
|
||||
"next-remote-watch": "^2.0.0",
|
||||
"prettier": "^3.6.2",
|
||||
"prettier-plugin-tailwindcss": "^0.6.13",
|
||||
"socket.io": "^4.8.1",
|
||||
"socket.io-client": "^4.8.1"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.+(js|jsx|ts|tsx)": [
|
||||
|
||||
@ -10,7 +10,7 @@ export default function FourZeroFour() {
|
||||
</div>
|
||||
<div className="max-w-md">
|
||||
<p className="mb-4 text-xl font-bold leading-normal md:text-2xl">
|
||||
Sorry we couldn't find this page.
|
||||
Sorry we couldn't find this page.
|
||||
</p>
|
||||
<p className="mb-8">But dont worry, you can find plenty of other things on our homepage.</p>
|
||||
<Link href="/">
|
||||
|
||||
@ -1,21 +0,0 @@
|
||||
import { MDXLayoutRenderer } from '@/components/MDXComponents'
|
||||
import { getFileBySlug } from '@/lib/mdx'
|
||||
|
||||
const DEFAULT_LAYOUT = 'AuthorLayout'
|
||||
|
||||
export async function getStaticProps() {
|
||||
const authorDetails = await getFileBySlug('authors', ['default'])
|
||||
return { props: { authorDetails } }
|
||||
}
|
||||
|
||||
export default function About({ authorDetails }) {
|
||||
const { mdxSource, frontMatter } = authorDetails
|
||||
|
||||
return (
|
||||
<MDXLayoutRenderer
|
||||
layout={frontMatter.layout || DEFAULT_LAYOUT}
|
||||
mdxSource={mdxSource}
|
||||
frontMatter={frontMatter}
|
||||
/>
|
||||
)
|
||||
}
|
||||
@ -5,7 +5,7 @@ import siteMetadata from '@/data/siteMetadata'
|
||||
import { getAllFilesFrontMatter } from '@/lib/mdx'
|
||||
import formatDate from '@/lib/utils/formatDate'
|
||||
|
||||
const MAX_DISPLAY = 5
|
||||
const MAX_DISPLAY = 10
|
||||
|
||||
export async function getStaticProps() {
|
||||
const posts = await getAllFilesFrontMatter('workshop')
|
||||
@ -26,7 +26,7 @@ export default function Home({ posts }) {
|
||||
<ul className="divide-y divide-gray-200 dark:divide-gray-700">
|
||||
{!posts.length && 'No posts found.'}
|
||||
{posts.slice(0, MAX_DISPLAY).map((frontMatter) => {
|
||||
const { slug, date, title, summary, tags, exercise } = frontMatter
|
||||
const { slug, date, title, summary, exercise } = frontMatter
|
||||
return (
|
||||
<li key={slug} className="py-12">
|
||||
<article>
|
||||
@ -48,11 +48,6 @@ export default function Home({ posts }) {
|
||||
{title}
|
||||
</Link>
|
||||
</h2>
|
||||
<div className="flex flex-wrap">
|
||||
{tags.map((tag) => (
|
||||
<Tag key={tag} text={tag} />
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
<div className="prose text-gray-500 max-w-none dark:text-gray-400">
|
||||
{summary}
|
||||
@ -64,7 +59,7 @@ export default function Home({ posts }) {
|
||||
className="text-primary-800 dark:text-primary-700 hover:text-primary-900 dark:hover:text-primary-400"
|
||||
aria-label={`Read "${title}"`}
|
||||
>
|
||||
Read more →
|
||||
Get started →
|
||||
</Link>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@ -1,44 +0,0 @@
|
||||
import Link from '@/components/Link'
|
||||
import { PageSEO } from '@/components/SEO'
|
||||
import Tag from '@/components/Tag'
|
||||
import siteMetadata from '@/data/siteMetadata'
|
||||
import { getAllTags } from '@/lib/tags'
|
||||
import kebabCase from '@/lib/utils/kebabCase'
|
||||
|
||||
export async function getStaticProps() {
|
||||
const tags = await getAllTags('workshop')
|
||||
|
||||
return { props: { tags } }
|
||||
}
|
||||
|
||||
export default function Tags({ tags }) {
|
||||
const sortedTags = Object.keys(tags).sort((a, b) => tags[b] - tags[a])
|
||||
return (
|
||||
<>
|
||||
<PageSEO title={`Tags - ${siteMetadata.author}`} description="Things I write about" />
|
||||
<div className="flex flex-col items-start justify-start divide-y divide-gray-200 dark:divide-gray-700 md:justify-center md:items-center md:divide-y-0 md:flex-row md:space-x-6 md:mt-24">
|
||||
<div className="pt-6 pb-8 space-x-2 md:space-y-5">
|
||||
<h1 className="text-3xl font-extrabold leading-9 tracking-tight text-gray-900 dark:text-gray-100 sm:text-4xl sm:leading-10 md:text-6xl md:leading-14 md:border-r-2 md:px-6">
|
||||
Tags
|
||||
</h1>
|
||||
</div>
|
||||
<div className="flex flex-wrap max-w-lg">
|
||||
{Object.keys(tags).length === 0 && 'No tags found.'}
|
||||
{sortedTags.map((t) => {
|
||||
return (
|
||||
<div key={t} className="mt-2 mb-2 mr-5">
|
||||
<Tag text={t} />
|
||||
<Link
|
||||
href={`/tags/${kebabCase(t)}`}
|
||||
className="-ml-2 text-sm font-semibold text-gray-600 uppercase dark:text-gray-300"
|
||||
>
|
||||
{` (${tags[t]})`}
|
||||
</Link>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)
|
||||
}
|
||||
@ -1,53 +0,0 @@
|
||||
import { TagSEO } from '@/components/SEO'
|
||||
import siteMetadata from '@/data/siteMetadata'
|
||||
import ListLayout from '@/layouts/ListLayout'
|
||||
import generateRss from '@/lib/generate-rss'
|
||||
import { getAllFilesFrontMatter } from '@/lib/mdx'
|
||||
import { getAllTags } from '@/lib/tags'
|
||||
import kebabCase from '@/lib/utils/kebabCase'
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
|
||||
const root = process.cwd()
|
||||
|
||||
export async function getStaticPaths() {
|
||||
const tags = await getAllTags('workshop')
|
||||
|
||||
return {
|
||||
paths: Object.keys(tags).map((tag) => ({
|
||||
params: {
|
||||
tag,
|
||||
},
|
||||
})),
|
||||
fallback: false,
|
||||
}
|
||||
}
|
||||
|
||||
export async function getStaticProps({ params }) {
|
||||
const allPosts = await getAllFilesFrontMatter('workshop')
|
||||
const filteredPosts = allPosts.filter(
|
||||
(post) => post.draft !== true && post.tags.map((t) => kebabCase(t)).includes(params.tag)
|
||||
)
|
||||
|
||||
// rss
|
||||
const rss = generateRss(filteredPosts, `tags/${params.tag}/feed.xml`)
|
||||
const rssPath = path.join(root, 'public', 'tags', params.tag)
|
||||
fs.mkdirSync(rssPath, { recursive: true })
|
||||
fs.writeFileSync(path.join(rssPath, 'feed.xml'), rss)
|
||||
|
||||
return { props: { posts: filteredPosts, tag: params.tag } }
|
||||
}
|
||||
|
||||
export default function Tag({ posts, tag }) {
|
||||
// Capitalize first letter and convert space to dash
|
||||
const title = tag[0].toUpperCase() + tag.split(' ').join('-').slice(1)
|
||||
return (
|
||||
<>
|
||||
<TagSEO
|
||||
title={`${tag} - ${siteMetadata.author}`}
|
||||
description={`${tag} tags - ${siteMetadata.author}`}
|
||||
/>
|
||||
<ListLayout posts={posts} title={title} />
|
||||
</>
|
||||
)
|
||||
}
|
||||
@ -3,7 +3,7 @@ import siteMetadata from '@/data/siteMetadata'
|
||||
import ListLayout from '@/layouts/ListLayout'
|
||||
import { PageSEO } from '@/components/SEO'
|
||||
|
||||
export const POSTS_PER_PAGE = 5
|
||||
export const POSTS_PER_PAGE = 10
|
||||
|
||||
export async function getStaticProps() {
|
||||
const posts = await getAllFilesFrontMatter('workshop')
|
||||
|
||||
@ -1,33 +1,83 @@
|
||||
|
||||
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
|
||||
<channel>
|
||||
<title>Red Hat OpenShift Application Delivery Workshop</title>
|
||||
<link>https://jmhbnz.github.io/ocp-app-delivery-workshop/workshop</link>
|
||||
<description>Red Hat OpenShift Application Delivery Workshop</description>
|
||||
<title>Red Hat OpenShift Security Hackathon</title>
|
||||
<link>https://rhdemo.win/workshop</link>
|
||||
<description>Red Hat OpenShift Security Hackathon</description>
|
||||
<language>en-us</language>
|
||||
<managingEditor>jablair@redhat.com (Red Hat)</managingEditor>
|
||||
<webMaster>jablair@redhat.com (Red Hat)</webMaster>
|
||||
<lastBuildDate>Mon, 04 Dec 2023 00:00:00 GMT</lastBuildDate>
|
||||
<atom:link href="https://jmhbnz.github.io/ocp-app-delivery-workshop/feed.xml" rel="self" type="application/rss+xml"/>
|
||||
<lastBuildDate>Mon, 14 Oct 2024 00:00:00 GMT</lastBuildDate>
|
||||
<atom:link href="https://rhdemo.win/feed.xml" rel="self" type="application/rss+xml"/>
|
||||
|
||||
<item>
|
||||
<guid>https://jmhbnz.github.io/ocp-app-delivery-workshop/workshop/exercise1</guid>
|
||||
<title>Getting familiar with OpenShift</title>
|
||||
<link>https://jmhbnz.github.io/ocp-app-delivery-workshop/workshop/exercise1</link>
|
||||
<description>In this first exercise we'll get familiar with OpenShift.</description>
|
||||
<pubDate>Mon, 04 Dec 2023 00:00:00 GMT</pubDate>
|
||||
<guid>https://rhdemo.win/workshop/exercise1</guid>
|
||||
<title>Understanding our hackathon environment</title>
|
||||
<link>https://rhdemo.win/workshop/exercise1</link>
|
||||
<description>Let's get familiar with our hackathon setup.</description>
|
||||
<pubDate>Mon, 14 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>containers</category><category>kubernetes</category>
|
||||
<category>openshift</category><category>security</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://jmhbnz.github.io/ocp-app-delivery-workshop/workshop/exercise2</guid>
|
||||
<title>Deploying your first application</title>
|
||||
<link>https://jmhbnz.github.io/ocp-app-delivery-workshop/workshop/exercise2</link>
|
||||
<description>Time to deploy our first app!</description>
|
||||
<pubDate>Tue, 05 Dec 2023 00:00:00 GMT</pubDate>
|
||||
<guid>https://rhdemo.win/workshop/exercise2</guid>
|
||||
<title>Laying the foundations for cluster security</title>
|
||||
<link>https://rhdemo.win/workshop/exercise2</link>
|
||||
<description>Can't have security without a security platform</description>
|
||||
<pubDate>Thu, 17 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>containers</category><category>kubernetes</category><category>deployments</category><category>images</category>
|
||||
<category>openshift</category><category>security</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise3</guid>
|
||||
<title>Encrypting cluster internal network traffic</title>
|
||||
<link>https://rhdemo.win/workshop/exercise3</link>
|
||||
<description>Is OpenShift secure by default?</description>
|
||||
<pubDate>Fri, 18 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>security</category><category>ipsec</category><category>encryption</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise4</guid>
|
||||
<title>Securing vulnerable workloads</title>
|
||||
<link>https://rhdemo.win/workshop/exercise4</link>
|
||||
<description>How do we deal with vulnerable workloads we can't patch?</description>
|
||||
<pubDate>Sat, 19 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>security</category><category>cve management</category><category>rhacs</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise5</guid>
|
||||
<title>Understanding cluster compliance</title>
|
||||
<link>https://rhdemo.win/workshop/exercise5</link>
|
||||
<description>Let's apply an industry benchmark!</description>
|
||||
<pubDate>Wed, 23 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>compliance</category><category>nist</category><category>rhacs</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise6</guid>
|
||||
<title>Inspecting audit logs</title>
|
||||
<link>https://rhdemo.win/workshop/exercise6</link>
|
||||
<description>Ahh the classic who dunnit!?!??</description>
|
||||
<pubDate>Thu, 31 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>audit</category><category>logging</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise7</guid>
|
||||
<title>Bonus challenge - Supply chain shmozzle</title>
|
||||
<link>https://rhdemo.win/workshop/exercise7</link>
|
||||
<description>Time to sign your life away...</description>
|
||||
<pubDate>Fri, 08 Nov 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>supply chain</category><category>rhtas</category>
|
||||
</item>
|
||||
|
||||
</channel>
|
||||
|
||||
BIN
public/static/images/app-crash.gif
Normal file
|
After Width: | Height: | Size: 7.1 MiB |
BIN
public/static/images/app-replicas.gif
Normal file
|
After Width: | Height: | Size: 4.8 MiB |
BIN
public/static/images/app-resources.gif
Normal file
|
After Width: | Height: | Size: 4.2 MiB |
BIN
public/static/images/app-scale.gif
Normal file
|
After Width: | Height: | Size: 9.2 MiB |
BIN
public/static/images/argocd-login.png
Normal file
|
After Width: | Height: | Size: 853 KiB |
BIN
public/static/images/argocd-ui.gif
Normal file
|
After Width: | Height: | Size: 2.2 MiB |
BIN
public/static/images/compliance/acs-architecture-kubernetes.png
Normal file
|
After Width: | Height: | Size: 139 KiB |
BIN
public/static/images/compliance/acs-central-pods.png
Normal file
|
After Width: | Height: | Size: 275 KiB |
BIN
public/static/images/compliance/acs-policies.png
Normal file
|
After Width: | Height: | Size: 211 KiB |
BIN
public/static/images/compliance/acs-risk.png
Normal file
|
After Width: | Height: | Size: 101 KiB |
BIN
public/static/images/compliance/central-login.gif
Normal file
|
After Width: | Height: | Size: 8.1 MiB |
BIN
public/static/images/compliance/check-operators.gif
Normal file
|
After Width: | Height: | Size: 3.0 MiB |
BIN
public/static/images/compliance/compliance-scan-results-1.png
Normal file
|
After Width: | Height: | Size: 323 KiB |
BIN
public/static/images/compliance/compliance-scan-results-2.png
Normal file
|
After Width: | Height: | Size: 626 KiB |
BIN
public/static/images/compliance/compliance-scan-results-3.png
Normal file
|
After Width: | Height: | Size: 342 KiB |
BIN
public/static/images/compliance/compliance-scan-results.gif
Normal file
|
After Width: | Height: | Size: 7.9 MiB |
BIN
public/static/images/compliance/developer-hub-graphic.png
Normal file
|
After Width: | Height: | Size: 190 KiB |
BIN
public/static/images/compliance/developer-hub.gif
Normal file
|
After Width: | Height: | Size: 2.6 MiB |
BIN
public/static/images/compliance/environments.png
Normal file
|
After Width: | Height: | Size: 316 KiB |
BIN
public/static/images/compliance/init-bundle-import.gif
Normal file
|
After Width: | Height: | Size: 7.2 MiB |
BIN
public/static/images/compliance/install-compliance-operator.gif
Normal file
|
After Width: | Height: | Size: 7.6 MiB |
BIN
public/static/images/compliance/installed-operators-1.png
Normal file
|
After Width: | Height: | Size: 257 KiB |
BIN
public/static/images/compliance/installed-operators-2.png
Normal file
|
After Width: | Height: | Size: 314 KiB |
BIN
public/static/images/compliance/operator-framework.png
Normal file
|
After Width: | Height: | Size: 72 KiB |
BIN
public/static/images/compliance/rhacs-violation-exclude.gif
Normal file
|
After Width: | Height: | Size: 10 MiB |
BIN
public/static/images/compliance/securedcluster-completed.png
Normal file
|
After Width: | Height: | Size: 233 KiB |
1
public/static/images/compliance/workshop-environment.svg
Normal file
|
After Width: | Height: | Size: 1.8 MiB |
BIN
public/static/images/disconnected/connect-bastion-ec2.gif
Normal file
|
After Width: | Height: | Size: 1.1 MiB |
35
public/static/images/disconnected/connect-bastion-ec2.tape
Normal file
@ -0,0 +1,35 @@
Output connect-bastion-ec2.gif

Require echo

Set Shell "bash"
Set FontSize 32
Set Width 1920
Set Height 800

# Hidden setup: source the sbp helper script and clear the screen before recording starts
Hide
Type `cd`
Enter 1
Type `SBP_PATH=/home/james/Downloads/sbp`
Enter 1
Type `source /home/james/Downloads/sbp/sbp.bash`
Enter 1
Sleep 5s
Ctrl+L
Show

# Look up the private IP of the highside bastion and the public IP of the prep system
Type `HIGHSIDE_BASTION_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=disco-bastion-server" | jq -r '.Reservations[0].Instances[0].PrivateIpAddress')`
Enter 1 Sleep 4s
Type `echo $HIGHSIDE_BASTION_IP`
Enter 1 Sleep 2s

Type `PREP_SYSTEM_IP=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=disco-prep-system" | jq -r '.Reservations[0].Instances[0].PublicIpAddress')`
Enter 1 Sleep 4s
Type `scp -i disco_key disco_key ec2-user@$PREP_SYSTEM_IP:/home/ec2-user/disco_key`
Enter 3 Sleep 4s

# Record the bastion IP on the prep system, then hop prep system -> highside bastion
Type `ssh -i disco_key ec2-user@$PREP_SYSTEM_IP "echo HIGHSIDE_BASTION_IP=$(echo $HIGHSIDE_BASTION_IP) > highside.env"`
Enter 3 Sleep 4s

Type `ssh -t -i disco_key ec2-user@$PREP_SYSTEM_IP "ssh -t -i disco_key ec2-user@$HIGHSIDE_BASTION_IP"`
Enter 1 Sleep 10s