Compare commits
53 Commits
e539ac1e0b
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 7480632312 | |||
| fe4338721a | |||
| 6e96618617 | |||
| ed46e3b3df | |||
| ec0ad02d70 | |||
| a70d5aed9e | |||
| 8c44988865 | |||
| be702fe47d | |||
| bec9d39ba1 | |||
| 52536b2e3e | |||
| 96af899045 | |||
| 1877d9c0ff | |||
| 730a3f5a5b | |||
| 73d7324927 | |||
| a04af136a7 | |||
| 1224f23c88 | |||
| 7947702050 | |||
| ef78bbdfe9 | |||
| 879aaecdcf | |||
| 638c51d539 | |||
| 04d263374a | |||
| a99499e26b | |||
| 4515f9b096 | |||
| ca73036cd3 | |||
| 4a8d8b409b | |||
| 565330ab50 | |||
| 617fc7bdcc | |||
| 0b061fa8b7 | |||
| f30a8af73f | |||
| e9c4fbd5fc | |||
| 3f0c29fd65 | |||
| 381ebf0da9 | |||
| 94ba768ae1 | |||
| 23b5ea24d8 | |||
| f8dcb947fd | |||
| e8b416180e | |||
| 0640f60ae4 | |||
| 0558a0a947 | |||
| ed36707987 | |||
| 4e48cc4f48 | |||
| 318769929a | |||
| 2368711f07 | |||
| 3512aebbb0 | |||
| 3f6495041c | |||
| 11b8154424 | |||
| 0c75128408 | |||
| 0885136ca9 | |||
| 7ee2a55cdc | |||
| d87cb4a04e | |||
| 47aa8c9e4c | |||
| a09f46b7f7 | |||
| e419983e4d | |||
| ef0c2b0845 |
4
.github/dependabot.yml
vendored
@ -5,9 +5,9 @@ updates:
|
||||
- package-ecosystem: npm
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
interval: monthly
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
interval: monthly
|
||||
|
||||
168
data/compliance/README.org
Normal file
@ -0,0 +1,168 @@
|
||||
#+TITLE: Openshift disconnected security & compliance workshop
|
||||
#+DATE: <2024-08-26 Mon>
|
||||
#+AUTHOR: James Blair
|
||||
|
||||
|
||||
This document captures the steps required to set up an instance of the workshop.
|
||||
|
||||
* Connect to the low side instance
|
||||
|
||||
#+begin_src tmux
|
||||
ssh lab-user@3.143.149.146
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install required tools low side
|
||||
|
||||
#+begin_src tmux
|
||||
cd /mnt/low-side-data/
|
||||
curl -L -o oc-mirror.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/oc-mirror.tar.gz
|
||||
tar -xzf oc-mirror.tar.gz
|
||||
rm -f oc-mirror.tar.gz
|
||||
chmod +x oc-mirror
|
||||
sudo cp -v oc-mirror /bin
|
||||
curl -L -o mirror-registry.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/mirror-registry/latest/mirror-registry.tar.gz
|
||||
curl -L -o openshift-install.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/openshift-install-linux.tar.gz
|
||||
tar -xzf openshift-install.tar.gz openshift-install
|
||||
rm -f openshift-install.tar.gz
|
||||
curl -L -o oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.19/openshift-client-linux.tar.gz
|
||||
tar -xzf oc.tar.gz oc
|
||||
rm -f oc.tar.gz
|
||||
sudo cp -v oc /bin
|
||||
ls -1 /mnt/low-side-data/
|
||||
#+end_src
|
||||
|
||||
|
||||
* Mirror installation content low side
|
||||
|
||||
#+begin_src tmux
|
||||
mkdir -v $HOME/.docker
|
||||
cp -v $HOME/pull-secret-example.json $HOME/.docker/config.json
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
---
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
EOF
|
||||
cd /mnt/low-side-data
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install mirror registry high side
|
||||
|
||||
#+begin_src tmux
|
||||
rsync -avP /mnt/low-side-data/mirror-registry.tar.gz highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
cd /mnt/high-side-data
|
||||
tar -xzvf mirror-registry.tar.gz
|
||||
./mirror-registry install --initPassword discopass
|
||||
#+end_src
|
||||
|
||||
|
||||
* Trust mirror registry high side
|
||||
|
||||
#+begin_src tmux
|
||||
sudo cp -v $HOME/quay-install/quay-rootCA/rootCA.pem /etc/pki/ca-trust/source/anchors/
|
||||
sudo update-ca-trust
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Transfer mirror content from low to high
|
||||
|
||||
#+begin_src tmux
|
||||
exit
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
sudo mv -v /mnt/high-side-data/oc /bin/
|
||||
sudo mv -v /mnt/high-side-data/oc-mirror /bin/
|
||||
sudo mv -v /mnt/high-side-data/openshift-install /bin/
|
||||
cd /mnt/high-side-data
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq1_000000.tar docker://$(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install openshift high side
|
||||
|
||||
#+begin_src tmux
|
||||
cat << EOF > /mnt/high-side-data/install-config.yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: disco
|
||||
baseDomain: lab
|
||||
compute:
|
||||
- architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: worker
|
||||
replicas: 0
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
replicas: 1
|
||||
platform:
|
||||
aws:
|
||||
type: m5.8xlarge
|
||||
networking:
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.0.0.0/16
|
||||
networkType: OVNKubernetes
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
platform:
|
||||
aws:
|
||||
region: us-east-2
|
||||
subnets:
|
||||
- $(aws ec2 describe-subnets --output json | jq '.Subnets[0].SubnetId' -r)
|
||||
publish: Internal
|
||||
additionalTrustBundlePolicy: Always
|
||||
EOF
|
||||
if ! test -f "/mnt/high-side-data/id_rsa"; then
|
||||
ssh-keygen -C "OpenShift Debug" -N "" -f /mnt/high-side-data/id_rsa
|
||||
fi
|
||||
echo "sshKey: $(cat /mnt/high-side-data/id_rsa.pub)" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
echo "pullSecret: '$(jq -c . $XDG_RUNTIME_DIR/containers/auth.json)'" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
if (test -e /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
then
|
||||
echo -e "\n\n Looks good, go ahead! \n\n"
|
||||
else
|
||||
echo -e "\n\n Uh oh, something is wrong... \n\n"
|
||||
fi
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
imageContentSources:
|
||||
$(grep "mirrors:" -A 2 --no-group-separator /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
EOF
|
||||
tail -22 /mnt/high-side-data/install-config.yaml
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
additionalTrustBundle: |
|
||||
$(sed 's/^/ /' /home/lab-user/quay-install/quay-rootCA/rootCA.pem)
|
||||
EOF
|
||||
cat /mnt/high-side-data/install-config.yaml
|
||||
cp -v /mnt/high-side-data/install-config.yaml /mnt/high-side-data/install-config.yaml.backup
|
||||
openshift-install create cluster --dir /mnt/high-side-data
|
||||
#+end_src
|
||||
|
||||
|
||||
* Disable default catalog sources high side
|
||||
|
||||
#+begin_src tmux
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
oc patch OperatorHub cluster --type merge -p '{"spec": {"disableAllDefaultSources": true}}'
|
||||
oc create -f /mnt/high-side-data/oc-mirror-workspace/results-*/catalogSource-cs-redhat-operator-index.yaml
|
||||
#+end_src
|
||||
40
data/compliance/exercise1.mdx
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
title: Understanding our lab environment
|
||||
exercise: 1
|
||||
date: '2024-08-22'
|
||||
tags: ['ssh','novnc','workshop','setup']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get familiar with our lab setup."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift 4 Disconnected security & compliance workshop! Here you'll learn about operating a secure and compliant OpenShift 4 cluster in a disconnected network using the following key OpenShift features:
|
||||
|
||||
- [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes)
|
||||
- [Red Hat OpenShift Compliance Operator](https://www.redhat.com/en/blog/a-guide-to-openshift-compliance-operator-best-practices)
|
||||
|
||||
To level set, [Red Hat OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. OpenShift supports running in disconnected networks, though this does change the way the cluster operates because key ingredients like container images, operator bundles, and helm charts must be brought into the environment from the outside world via mirroring.
|
||||
|
||||
There are of course many different options for installing OpenShift in a restricted network; this workshop will not cover the deployment of a cluster, instead you will have an existing installed cluster allocated to you which has been created in advance. Your tasks during this workshop will be to improve the security and compliance of the cluster and workloads running on it.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - Reserve a lab environment
|
||||
|
||||
An OpenShift `4.14` cluster has already been provisioned for you to complete these exercises. To reserve an environment go to [this Google Sheets spreadsheet](https://docs.google.com/spreadsheets/d/1tddgRA6suefTaITyRx87IoRCfCJ7El9Hdr6HB8K7Mvo/edit?usp=sharing). Update your name next to an `Available` environment and change the status to `Allocated`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment worksheet* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Login via ssh and vnc
|
||||
|
||||
To complete the lab exercises you'll use a mix of an `ssh` terminal session for running OpenShift client `oc` commands, and then a browser based vnc session in order to access the OpenShift cluster web console.
|
||||
|
||||
Links to a browser based terminal and vnc session are available in the spreadsheet, along with any credentials required. You are welcome to use your own terminal or vnc software if you prefer.
|
||||
|
||||
Once you have both a terminal and vnc session working you're ready to get underway with the workshop, please move on to exercise 2 🚀
|
||||
228
data/compliance/exercise2.mdx
Normal file
@ -0,0 +1,228 @@
|
||||
---
|
||||
title: Mirror required content
|
||||
exercise: 2
|
||||
date: '2024-08-23'
|
||||
tags: ['oc-mirror','mirror-registry','openshift','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "You want features? Mirror them in!🪞"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation for a private cluster on AWS using the [IPI install method](https://docs.openshift.com/container-platform/4.14/installing/installing_aws/installing-aws-private.html#installing-aws-private), and does not have any post installation features added.
|
||||
|
||||
During this workshop we want to secure the cluster with Red Hat Advanced Cluster Security, understand our compliance posture against [NIST 800-53](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final) with the OpenShift Compliance Operator and then explore some bonus activities like deploying Red Hat Developer Hub.
|
||||
|
||||
To install and configure these features we first need to mirror some additional content into our disconnected environment, let's get started.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment summary* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 2.1 - Open a terminal on your low side
|
||||
|
||||
Our first step to prepare to mirror content is to get connected to our low side jump host via `ssh`. You can use the web terminal link in your browser or alternatively your own local terminal with the command below (replacing the placeholder ip with the one you have been allocated).
|
||||
|
||||
```bash
|
||||
ssh lab-user@<ip address>
|
||||
```
|
||||
|
||||
You'll be prompted to enter a password which you can find in your allocated environment details.
|
||||
|
||||
After connecting change directory to the low side workspace where the initial cluster installation was already completed for you and review the folder contents:
|
||||
|
||||
```bash
|
||||
cd /mnt/low-side-data
|
||||
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Your workspace will look similar to the one below:
|
||||
|
||||
```bash
|
||||
[lab-user@jump low-side-data]$ ls -lah
|
||||
total 21G
|
||||
drwxr-xr-x. 4 lab-user lab-user 4.0K Sep 2 12:46 .
|
||||
drwxr-xr-x. 3 root root 27 Aug 31 22:00 ..
|
||||
-rw-r--r--. 1 lab-user lab-user 305 Sep 2 12:38 imageset-config.yaml
|
||||
-rw-r--r--. 1 lab-user lab-user 696M Sep 2 12:37 mirror-registry.tar.gz
|
||||
-rw-r--r--. 1 lab-user lab-user 20G Sep 2 12:46 mirror_seq1_000000.tar
|
||||
-rwxr-xr-x. 1 lab-user lab-user 146M Mar 26 22:17 oc
|
||||
-rwxr-x--x. 1 lab-user lab-user 144M Aug 7 06:30 oc-mirror
|
||||
-rw-------. 1 lab-user lab-user 160K Sep 2 12:41 .oc-mirror.log
|
||||
drwxr-xr-x. 3 lab-user lab-user 17 Sep 2 12:38 oc-mirror-workspace
|
||||
-rwxr-xr-x. 1 lab-user lab-user 631M Aug 7 07:40 openshift-install
|
||||
drwxr-x---. 2 lab-user lab-user 28 Sep 2 12:46 publish
|
||||
```
|
||||
|
||||
|
||||
## 2.2 - Get familiar with oc-mirror
|
||||
|
||||
To mirror content into our disconnected environment we will be using the [`oc-mirror`](https://github.com/openshift/oc-mirror) openshift client utility.
|
||||
|
||||
To configure what content `oc-mirror` will download and mirror for us we use a YAML formatted file called an `ImageSetConfiguration`. This file declares:
|
||||
|
||||
1. **What to download** which can include (OpenShift itself, operator bundles, helm charts, or specific container images)
|
||||
2. **What versions of each item to download**
|
||||
3. **Where to store the downloaded content**
|
||||
|
||||
The `oc-mirror` utility also has some features for listing available content for mirroring, let's try that now! Run the following commands in your ssh terminal:
|
||||
|
||||
```bash
|
||||
# List available openshift release versions
|
||||
oc-mirror list releases
|
||||
|
||||
# List operator catalogs for a specific openshift release
|
||||
oc-mirror list operators --catalogs --version=4.14
|
||||
|
||||
# List all operators in a specific catalog
|
||||
oc-mirror list operators --catalog registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
```
|
||||
|
||||
Using the built in help have a go at using `oc-mirror` to identify details of a specific operator.
|
||||
|
||||
We can also use the `oc-mirror` utility to understand the state of any existing mirror content bundles. We have a content bundle called `mirror_seq1_000000.tar` available from the initial installation of your OpenShift cluster, let's inspect that now.
|
||||
|
||||
```bash
|
||||
oc-mirror describe mirror_seq1_000000.tar | more
|
||||
```
|
||||
|
||||
This bundle archive was created by the `oc-mirror` utility using the configuration file called `imageset-config.yaml` which is also in the same directory. Let's review that file:
|
||||
|
||||
```bash
|
||||
cat imageset-config.yaml
|
||||
```
|
||||
|
||||
Your file should look something like the example below, we can see that the `4.14.35` version of OpenShift is specified to be downloaded, along with the `registry.redhat.io/rhel8/support-tools` additional standalone container image.
|
||||
|
||||
```yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
```
|
||||
|
||||
|
||||
## 2.3 - Confirm local cache is up to date
|
||||
|
||||
A local cache of content already exists from when the cluster installation was initially performed in advance of this workshop. Let's confirm everything is still up to date by re-running the `oc-mirror` command specifying our configuration file and the location on our disk.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take several minutes to complete but should complete with `No new images detected, process stopping` to confirm the existing cache is up to date.
|
||||
|
||||
|
||||
## 2.4 - Add new mirror content
|
||||
|
||||
For our workshop exercises today we need to mirror some additional operators, namely the **OpenShift Compliance Operator**, **Red Hat Advanced Cluster Security**, and **Red Hat Developer Hub**. Run the command below to update your `imageset-config.yaml` file to match the example below
|
||||
|
||||
```bash
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
operators:
|
||||
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
packages:
|
||||
- name: rhdh
|
||||
channels:
|
||||
- name: fast
|
||||
minVersion: '1.1.1'
|
||||
maxVersion: '1.1.1'
|
||||
- name: compliance-operator
|
||||
channels:
|
||||
- name: stable
|
||||
- name: rhacs-operator
|
||||
channels:
|
||||
- name: stable
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
helm: {}
|
||||
EOF
|
||||
```
|
||||
|
||||
After updating the configuration file we can re-run our `oc-mirror` command to bring the new content into our local collection on disk in `/mnt/low-side-data`.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take up to 10 minutes to complete depending on connection speeds.
|
||||
|
||||
|
||||
## 2.5 - Mirror updated content to high side registry
|
||||
|
||||
Once the local mirror update has completed we now need to transfer this content to our high side and mirror it from disk into the OpenShift Mirror Registry running in our disconnected high side.
|
||||
|
||||
In this workshop we will use `rsync` to copy our content to our high side system, let's do that now:
|
||||
|
||||
```bash
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
```
|
||||
|
||||
> Note: `oc-mirror` creates incremental mirror content files in order to prevent duplicating content. You will notice your low side mirror workspace includes a new file `mirror_seq2_000000.tar` which is significantly smaller than the original mirror archive.
|
||||
|
||||
Once the transfer has completed we need to log into our high side disconnected system and run `oc-mirror` from that side to upload the content from the new archive into our disconnected container registry
|
||||
|
||||
```bash
|
||||
ssh highside
|
||||
```
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq2_000000.tar docker://$(hostname):8443
|
||||
```
|
||||
|
||||
## 2.6 - Verify new operators are available
|
||||
|
||||
After a couple of minutes the mirror process will complete. We then need to tell OpenShift about the new content that is available by running the commands below.
|
||||
|
||||
```bash
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
for file in $(find ./oc-mirror-workspace -type f -name '*.yaml'); do oc apply -f $file; done
|
||||
```
|
||||
|
||||
> Note: In our `oc-mirror-workspace` directory each time we mirror new content a new `results-<id>` directory will be created which may contain `imageContentSourcePolicy.yaml` or `catalogSource-cs-<index>.yaml` files which we need to apply to our cluster to tell it about the new content that is available.
|
||||
|
||||
Once the updates are applied we can then check that our new operators are available in the OpenShift Web Console using our browser based vnc session:
|
||||
|
||||
1. Open your vnc browser tab
|
||||
2. Use the left menu panel, click **Settings** and then select **Remote Resizing** as the scaling mode to improve viewing experience.
|
||||
3. Click **Connect** and when prompted enter the password in your environment spreadsheet row, then click **Send credentials**.
|
||||
4. A Firefox browser window should already be open, you can manually start it using the top left applications menu if needed.
|
||||
5. Click the bookmark toolbar option for **DISCO - OpenShift**.
|
||||
6. Log in when prompted with the username **kubeadmin** and the kubeadmin password listed in your environment spreadsheet (you can also find this password in your highside bastion ssh session by running `cat /mnt/high-side-data/auth/kubeadmin-password`). Note that to paste in the web based vnc session you need to use the left hand panel to pass the clipboard content through to the session.
|
||||
7. Navigate to **Operators** on the left menu, and then click **OperatorHub**, you should see the newly mirrored operators are now available in your disconnected cluster!
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Check disconnected operator hub* |
|
||||
</Zoom>
|
||||
|
||||
If your mirroring has completed successfully you are ready to move on to exercise 3 and install the three new operators 🎉
|
||||
150
data/compliance/exercise3.mdx
Normal file
@ -0,0 +1,150 @@
|
||||
---
|
||||
title: Install operators on a disconnected cluster
|
||||
exercise: 3
|
||||
date: '2024-08-27'
|
||||
tags: ['openshift','operators','operator-hub','disconnected']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Operators?!? 🤔 - Think app store for Kubernetes 🌟"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation using the IPI install method, and does not have any post installation features added.
|
||||
|
||||
In a broad sense many OpenShift features are added via [Operators](https://www.redhat.com/en/technologies/cloud-computing/openshift/what-are-openshift-operators). Operators automate the creation, configuration, and management of instances of Kubernetes-native applications. Operators can provide automation at every level of the stack—from managing the parts that make up the platform all the way to applications that are provided as a managed service.
|
||||
|
||||
In the previous exercise we mirrored some new operator bundles into our disconnected network. In this exercise we'll install those operators and explore the features they provide us via [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources) they provide.
|
||||
|
||||
> Note: For some trivia, Red Hat created and open sourced the [Operator Framework](https://github.com/operator-framework), then later contributed the project to the Cloud Native Computing Foundation in 2021, ensuring all organisations can benefit from our experience building and supporting operator driven clusters since ~2016.
|
||||
>
|
||||
> 
|
||||
|
||||
|
||||
## 3.1 - Installing compliance operator
|
||||
|
||||
First up let's install the [Red Hat OpenShift Compliance Operator](https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-overview.html).
|
||||
|
||||
For some brief context the Compliance Operator assists platform teams by automating the inspection of numerous technical implementations and compares those against certain aspects of industry standards. For our purposes today that industry standard will be **NIST 800-53**.
|
||||
|
||||
The Compliance Operator assesses compliance of both the Kubernetes API resources of OpenShift Container Platform, as well as the nodes running the cluster. The Compliance Operator uses [OpenSCAP](https://www.open-scap.org), a NIST-certified tool, to scan and enforce security policies provided by the content.
|
||||
|
||||
To install the operator we can use either the OpenShift Web Console, or the terminal with `oc` cli. In this workshop we will install the operator with the Web Console using our vnc browser tab. Thanks to our previous exercise mirroring content and making it available via the cluster disconnected OperatorHub catalogs we can enjoy the same user experience to install the operator as if our cluster was fully connected.
|
||||
|
||||
1. Open your vnc browser tab and return to the OpenShift Web Console browser tab you opened in the previous exercise.
|
||||
2. Click on the **Compliance Operator** in **OperatorHub** to open the right hand panel, then click the blue **Install** button at the top of the panel.
|
||||
3. On the install details screen stick with all the default values and simply click **Install**
|
||||
4. After a short wait the Compliance Operator will be installed and ready for use 🎉
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Install OpenShift Compliance Operator* |
|
||||
</Zoom>
|
||||
|
||||
With the Compliance Operator installed feel free to explore which new Custom Resources the Operator makes available. We'll return to these in future exercises to begin using them.
|
||||
|
||||
|
||||
## 3.2 - Installing the rhacs operator
|
||||
|
||||
Next up we'll install the [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes) Operator.
|
||||
|
||||
Red Hat Advanced Cluster Security (RHACS) has direct integration with the Compliance Operator to provide a frontend user experience for running compliance scans along with viewing results.
|
||||
|
||||
To try the alternative operator installation method this time we will install the operator via the `oc` cli in our terminal.
|
||||
|
||||
Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: rhacs-operator
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhacs-operator.v4.5.1
|
||||
EOF
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Advanced Cluster Security for Kubernetes** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.3 - Installing the developer hub operator
|
||||
|
||||
The final operator we will install for this workshop relates to [Red Hat Developer Hub](https://developers.redhat.com/rhdh/overview).
|
||||
|
||||
Red Hat Developer Hub is an Internal Developer Portal (IDP) based on the upstream [Backstage](https://backstage.io) project initially created at Spotify. With Red Hat Developer Hub combined with Red Hat OpenShift we can enable platform engineering teams to offer software templates and pre-architected and supported approaches to make life easier for development teams, ease onboarding and reduce friction and frustration.
|
||||
|
||||
We'll also install the Red Hat Developer Hub using the `oc` cli in our terminal. Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
namespace: rhdh-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhdh
|
||||
namespace: rhdh-operator
|
||||
spec:
|
||||
channel: fast
|
||||
installPlanApproval: Automatic
|
||||
name: rhdh
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhdh-operator.v1.1.1
|
||||
EOF
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Red Hat Developer Hub** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
</Zoom>
|
||||
|
||||
If all three operators are now installed congratulations you are ready to move on to Exercise 4 🎉
|
||||
|
||||
191
data/compliance/exercise4.mdx
Normal file
@ -0,0 +1,191 @@
|
||||
---
|
||||
title: Deploy advanced cluster security
|
||||
exercise: 4
|
||||
date: '2024-08-31'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to up our security & compliance game! 🔒"
|
||||
---
|
||||
|
||||
With our Red Hat Advanced Cluster Security Operator installed and standing by to do some work for us, let's give it some work to do by telling it to deploy Red Hat Advanced Cluster Security onto our cluster.
|
||||
|
||||
|
||||
## 4.1 - Getting familiar with rhacs
|
||||
|
||||
Before we get into the technical implementation let's take a moment to get up to speed with how Red Hat Advanced Cluster Security works.
|
||||
|
||||
Fundamentally you install RHACS as a set of containers in your OpenShift Container Platform or Kubernetes cluster. RHACS includes the following services:
|
||||
|
||||
1. **Central** services you install on a designated "hub" cluster. Central installs the Central, Scanner, and Scanner DB services. The Central service provides access to a user interface through a web UI or the RHACS portal. It also handles API interactions and provides persistent storage. Scanner analyzes images for known vulnerabilities. It uses Scanner DB as a cache for vulnerability definitions.
|
||||
2. **Secured cluster** services you install on each cluster you want to secure by RHACS. This installs the Collector, Sensor, and Admission Controller services. Collector collects runtime information on container security and network activity. It then sends data to Sensor, which monitors your Kubernetes cluster for policy detection and enforcement. Admission Controller monitors workloads and prevents users from creating them in RHACS when they violate security policies.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security high level architecture* |
|
||||
</Zoom>
|
||||
|
||||
> Note: For an overview of which sources Red Hat Advanced Cluster Security uses for vulnerability information and a more detailed walkthrough of each component, take a moment to review https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/architecture/index.
|
||||
|
||||
|
||||
## 4.2 - Deploying central services
|
||||
|
||||
Let's now create our **Central** services on our cluster by creating a new `Central` custom resource which our newly installed operator will then manage and deploy on our behalf. We'll deploy these services into a new namespace called `acs-central`.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: acs-central
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: Central
|
||||
metadata:
|
||||
name: stackrox-central-services
|
||||
namespace: acs-central
|
||||
spec:
|
||||
central:
|
||||
exposure:
|
||||
route:
|
||||
enabled: true
|
||||
egress:
|
||||
connectivityPolicy: Offline
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: The values we used for the `Central` instance are all defaults, aside from `connectivityPolicy: Offline`, which tells Red Hat Advanced Cluster Security it will be operating in a disconnected environment. For more details on how RHACS works in a disconnected environment refer to https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode.
|
||||
|
||||
Once the `Central` resource has been created you can check the state of the RHACS pods by running `oc get pods -n acs-central` in your highside terminal. Or navigating to **Workloads** > **Pods** for the `acs-central` project in the OpenShift Web Console.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security central pods* |
|
||||
</Zoom>
|
||||
|
||||
Once all pods are `Running` and `Ready` you can move on to the next step.
|
||||
|
||||
|
||||
## 4.3 - Logging into rhacs dashboard
|
||||
|
||||
Time to bring up our RHACS dashboard. We'll first retrieve the `admin` user password which was auto generated by the operator and stored in a **Secret**. Then we can open the **Route** for RHACS in a new browser tab and log in.
|
||||
|
||||
1. Return to your vnc session and the open tab with our OpenShift Web Console.
|
||||
2. Click **Workloads** > **Secrets**, ensuring you are looking at the `acs-central` **Project**.
|
||||
3. Click into the `central-htpasswd` **Secret**
|
||||
4. Scroll down and click **Reveal values** on the right hand side.
|
||||
5. Copy the `password` field, we'll need this shortly.
|
||||
6. Navigate to **Networking** > **Routes** in the left hand menu.
|
||||
7. Click on the **Location** URL for the route named `central`.
|
||||
8. Login with the username `admin` and the password you copied earlier.
|
||||
|
||||
> Note: Ironically (given the subject matter), you may receive a tls verification warning when opening the rhacs dashboard. This is expected in this short lived workshop environment (because James is lazy) and should be accepted (Kids please don't do this at home 😂).
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Logging into Red Hat Advanced Cluster Security dashboard* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 4.4 - Securing our hub cluster
|
||||
|
||||
To begin securing our OpenShift "hub" cluster with RHACS we need to:
|
||||
|
||||
1. Generate an init bundle to download and apply to the cluster.
|
||||
2. Create and apply a `SecuredCluster` custom resource.
|
||||
|
||||
We'll start with generating the init bundle. Just for future familiarity for this step we'll use and follow the official RHACS documentation: https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/installing/installing-rhacs-on-red-hat-openshift#portal-generate-init-bundle_init-bundle-ocp
|
||||
|
||||
Follow the steps in `4.3.1.1` to generate an init bundle named `hub` using the RHACS dashboard, selecting the **Operator** based installation method.
|
||||
|
||||
Once the `hub-Operator-secrets-cluster-init-bundle.yaml` file has been downloaded we'll apply it to the cluster using the OpenShift Web Console **Import YAML** feature.
|
||||
|
||||
1. Create a new project in the Web Console named `acs-securedcluster`.
|
||||
2. Click **Import YAML** in the top right of the OpenShift Web Console.
|
||||
3. Open your **Downloads** directory in the file browser using the **Places** top left menu.
|
||||
4. Open the `hub-Operator-secrets-cluster-init-bundle.yaml` file in a text editor and copy the contents.
|
||||
5. Paste the contents into the **Import YAML** text field and click the blue **Create** button.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Importing an init bundle into our hub cluster* |
|
||||
</Zoom>
|
||||
|
||||
> Note: These init bundles contain secrets enabling a secured cluster to communicate with RHACS Central so it's important to store these securely. For automation purposes you can also generate init bundles with the RHACS API or the `roxctl` CLI, for example `roxctl -e <ACS CONSOLE URL>:443 central init-bundles generate demo-cluster --output-secrets /tmp/demo-cluster.yaml --password <ACS ADMIN PASSWORD>`.
|
||||
|
||||
Once our init bundle has been created we can create our `SecuredCluster` custom resource to complete the cluster onboarding process. We'll do that with our `oc` terminal session.
|
||||
|
||||
Copy the command below and run it in your highside web terminal:
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace acs-securedcluster apply --filename -
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: SecuredCluster
|
||||
metadata:
|
||||
name: stackrox-secured-cluster-services
|
||||
spec:
|
||||
monitoring:
|
||||
openshift:
|
||||
enabled: true
|
||||
auditLogs:
|
||||
collection: Auto
|
||||
network:
|
||||
policies: Enabled
|
||||
admissionControl:
|
||||
listenOnUpdates: true
|
||||
bypass: BreakGlassAnnotation
|
||||
contactImageScanners: ScanIfMissing
|
||||
listenOnCreates: true
|
||||
replicas: 3
|
||||
timeoutSeconds: 10
|
||||
listenOnEvents: true
|
||||
scannerV4:
|
||||
db:
|
||||
persistence:
|
||||
persistentVolumeClaim:
|
||||
claimName: scanner-v4-db
|
||||
indexer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: Default
|
||||
scanner:
|
||||
analyzer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: AutoSense
|
||||
perNode:
|
||||
collector:
|
||||
collection: CORE_BPF
|
||||
forceCollection: false
|
||||
imageFlavor: Regular
|
||||
taintToleration: TolerateTaints
|
||||
clusterName: hub
|
||||
centralEndpoint: 'https://central-acs-central.apps.disco.lab:443'
|
||||
EOF
|
||||
```
|
||||
|
||||
After a short wait for pods to initialise in the `acs-securedcluster` namespace you should be able to see the cluster is now secured in RHACS by checking the **Platform Configuration** > **Clusters** overview which should show the `hub` cluster as `✅ Healthy`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Hub cluster is now secured by Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
If you now have Red Hat Advanced Cluster Security **Central** and **SecuredCluster** components deployed then congratulations your RHACS instance is fully deployed and you're ready to start improving your cluster security and compliance posture in Exercise 5! 🎉
|
||||
|
||||
216
data/compliance/exercise5.mdx
Normal file
@ -0,0 +1,216 @@
|
||||
---
|
||||
title: Running a cluster compliance scan
|
||||
exercise: 5
|
||||
date: '2024-09-01'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's check our cluster compliance against NIST 800-53 👀"
|
||||
---
|
||||
|
||||
We've done the work to set the OpenShift Compliance Operator and Red Hat Advanced Cluster Security up on our cluster, now let's make the most of it by using them to schedule and run a compliance scan on our cluster.
|
||||
|
||||
For the scan we'll be using the included `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift` and `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Node level` scan profiles that are included with the OpenShift Compliance Operator.
|
||||
|
||||
Two scan profiles are required as we need to scan both the OpenShift cluster, as well as each individual node running [RHEL CoreOS](https://docs.openshift.com/container-platform/4.14/architecture/architecture-rhcos.html).
|
||||
|
||||
For more details on these compliance profiles please take some time to review:
|
||||
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate.html
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate-node.html
|
||||
- https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-supported-profiles.html
|
||||
|
||||
|
||||
## 5.1 - Scheduling a scan
|
||||
|
||||
There are two methods you can use to schedule Compliance Operator scans:
|
||||
|
||||
1. Creating a `ScanSetting` and `ScanSettingBinding` custom resource. This does not require Red Hat Advanced Cluster Security, and can be easily managed by GitOps, however is not beginner friendly and lacks any graphical frontend to easily explore cluster compliance status. For an overview of this approach please take a few minutes to review https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-scans.html#compliance-operator-scans
|
||||
2. Creating a **Scan Schedule** in Red Hat Advanced Cluster Security. This is the approach we will be using in this workshop as it is the most intuitive option.
|
||||
|
||||
Complete the steps below to create your scan schedule:
|
||||
|
||||
1. Return to your browser tab in the vnc session with the Red Hat Advanced Cluster Security dashboard open.
|
||||
2. Navigate to **Compliance** > **Schedules** in the left hand menu.
|
||||
3. Click the blue **Create Scan Schedule** button in the middle of the screen.
|
||||
4. Enter the name `daily-nist-800-53-moderate` and set the **Time** field to `00:00` then click **Next**.
|
||||
5. On the next screen select your `hub` cluster, then click **Next**.
|
||||
6. On the profile screen tick `ocp4-moderate` and `ocp4-moderate-node`, then click **Next**.
|
||||
7. Click **Next** once more on the **Reports** screen and then click **Save**.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Creating a compliance scan schedule in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
After creating the scan schedule results will be shortly available in the RHACS console. While we wait for the automatically triggered initial scan to complete, let's use the `oc` cli to review the `ScanSetting` that was created behind the scenes when we created the **Scan Schedule** in the RHACS dashboard.
|
||||
|
||||
Run the commands below to review your `ScanSetting` resource:
|
||||
|
||||
```bash
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate
|
||||
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate --output yaml
|
||||
```
|
||||
|
||||
You should see details output similar to the example below. Notice the more advanced settings available in the custom resource including `rawResultsStorage.rotation` and `roles[]` which you may want to customize in your environment.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ScanSetting
|
||||
maxRetryOnTimeout: 3
|
||||
metadata:
|
||||
annotations:
|
||||
owner: stackrox
|
||||
labels:
|
||||
app.kubernetes.io/created-by: sensor
|
||||
app.kubernetes.io/managed-by: sensor
|
||||
app.kubernetes.io/name: stackrox
|
||||
name: daily-nist-800-53-moderate
|
||||
namespace: openshift-compliance
|
||||
rawResultStorage:
|
||||
pvAccessModes:
|
||||
- ReadWriteOnce
|
||||
rotation: 3
|
||||
size: 1Gi
|
||||
roles:
|
||||
- master
|
||||
- worker
|
||||
scanTolerations:
|
||||
- operator: Exists
|
||||
schedule: 0 0 * * *
|
||||
showNotApplicable: false
|
||||
strictNodeScan: false
|
||||
suspend: false
|
||||
timeout: 30m0s
|
||||
```
|
||||
|
||||
|
||||
## 5.2 - Review cluster compliance
|
||||
|
||||
Once your cluster scan completes return to your vnc browser tab with the Red Hat Advanced Cluster Security Dashboard open. We'll take a look at our overall cluster compliance now against the compliance profile.
|
||||
|
||||
> Note: Please be aware of the usage disclaimer shown at the top of the screen *"Red Hat Advanced Cluster Security, and its compliance scanning implementations, assists users by automating the inspection of numerous technical implementations that align with certain aspects of industry standards, benchmarks, and baselines. It does not replace the need for auditors, Qualified Security Assessors, Joint Authorization Boards, or other industry regulatory bodies."*.
|
||||
|
||||
Navigate to **Compliance** > **Coverage** and review the overall result for the `ocp4-moderate` and `ocp4-moderate-node` profiles. The results should look something similar to the examples below:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Your cluster should come out compliant with ~65% of the `ocp4-moderate` profile and ~93% of the `ocp4-moderate-node` profile. Not a bad start, let's review an example of an individual result now.
|
||||
|
||||
|
||||
## 5.3 - Review individual `Manual` compliance results
|
||||
|
||||
Reviewing the detailed results, any checks that are not passing will either be categorised as `Failing` or `Manual`. While we do everything we can to automate the compliance process there are still a small number of controls you need to manage outside the direct automation of the Compliance Operator.
|
||||
|
||||
Looking at the `ocp4-moderate` results for our `hub` cluster. A good example of a `Manual` check is `ocp4-moderate-accounts-restrict-service-account-tokens`. Let's get an overview of the check, the rationale and our instructions to address it manually by clicking into that check in the list, and opening the **Details** tab. You can jump directly to it with this url: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-accounts-restrict-service-account-tokens?detailsTab=Details
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan result details in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
We can see in this example it's essentially a judgement call. Our instructions are:
|
||||
|
||||
> For each pod in the cluster, review the pod specification and ensure that pods that do not need to explicitly communicate with the API server have `automountServiceAccountToken` configured to `false`.
|
||||
|
||||
Now just because this check is classified as `Manual`, does not mean that we are now all on our own. There are extremely powerful policy engine & policy violation tracking features in RHACS that we can use to investigate the status of this check further.
|
||||
|
||||
A default policy is available out of the box called **Pod Service Account Token Automatically Mounted**. By default this policy is in **Inform only** mode, which means deployments that violate this policy will not be prevented by the RHACS admission controller, or scaled down if already running by the RHACS runtime protection. However we can still use this policy as is to inform on the current state of any cluster in our fleet that is secured by RHACS.
|
||||
|
||||
1. First let's navigate to **Platform Configuration** > **Policy Management** in the left hand menu.
|
||||
2. In the Policy list scroll down to find **Pod Service Account Token Automatically Mounted** and click the policy title.
|
||||
3. Have a read of the policy details, then scroll down to review the **Scope exclusions**. You will see Red Hat has already done some work for you to define some standard OpenShift cluster control plane deployments which do need the token mounted and are safely & intentionally excluded from the policy to save you time.
|
||||
4. The policy should already be enabled so let's click on **Violations** in the left hand menu to review any current instances where this policy is currently being violated. You should have one entry in the list for the `kube-rbac-proxy`. This is actually a standard openshift pod in the `openshift-machine-config-operator` namespace, and does actually require the api token mounted, so we could safely add this deployment to our policy exclusions.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Reviewing a policy & policy violations in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
At this point as a platform engineer we have some flexibility about how we handle this particular compliance check, one option would be to switch the **Pod Service Account Token Automatically Mounted** policy to `Inform & enforce` mode, to prevent any future deployments to any cluster in your fleet secured by RHACS from having this common misconfiguration. As a result of implementing this mitigation you could consider adjusting the compliance profile to remove or change the priority of this `Manual` check as desired. Refer to https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-tailor.html
|
||||
|
||||
## 5.4 - Review individual `Failed` compliance results
|
||||
|
||||
For our last task on this exercise let's review a `Failed` check, and apply the corresponding remediation automatically to improve our compliance posture.
|
||||
|
||||
This time, rather than using the RHACS Dashboard we'll review the check result and apply the remediation using our terminal and `oc` cli.
|
||||
|
||||
Let's start by retrieving one of our failed checks:
|
||||
|
||||
```bash
|
||||
oc get ComplianceCheckResult --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
Each `ComplianceCheckResult` represents a result of one compliance rule check. If the rule can be remediated automatically, a `ComplianceRemediation` object with the same name, owned by the `ComplianceCheckResult` is created. Unless requested, the remediations are not applied automatically, which gives an OpenShift Container Platform administrator the opportunity to review what the remediation does and only apply a remediation once it has been verified.
|
||||
|
||||
> Note: Not all `ComplianceCheckResult` objects create `ComplianceRemediation` objects. Only `ComplianceCheckResult` objects that can be remediated automatically do. A `ComplianceCheckResult` object has a related remediation if it is labeled with the `compliance.openshift.io/automated-remediation` label.
|
||||
|
||||
Let's inspect the corresponding `ComplianceRemediation` for this check:
|
||||
|
||||
```bash
|
||||
oc get ComplianceRemediation --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
You should see output similar to the example below. We can see in the `spec:` that it essentially contains a yaml resource patch for our `APIServer` resource named `cluster` to specify `spec.encryption.type` be set to `aescbc`.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ComplianceRemediation
|
||||
metadata:
|
||||
annotations:
|
||||
compliance.openshift.io/xccdf-value-used: var-apiserver-encryption-type
|
||||
labels:
|
||||
compliance.openshift.io/scan-name: ocp4-moderate
|
||||
compliance.openshift.io/suite: daily-nist-800-53-moderate
|
||||
name: ocp4-moderate-api-server-encryption-provider-cipher
|
||||
namespace: openshift-compliance
|
||||
spec:
|
||||
apply: false
|
||||
current:
|
||||
object:
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
encryption:
|
||||
type: aescbc
|
||||
outdated: {}
|
||||
type: Configuration
|
||||
status:
|
||||
applicationState: NotApplied
|
||||
```
|
||||
|
||||
Let's apply this automatic remediation now:
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance patch complianceremediation/ocp4-moderate-api-server-encryption-provider-cipher --patch '{"spec":{"apply":true}}' --type=merge
|
||||
```
|
||||
|
||||
> Note: This remediation has impacts for pods in the `openshift-apiserver` namespace. If you check those pods quickly with an `oc get pods --namespace openshift-apiserver` you will notice a rolling restart underway.
|
||||
|
||||
Now it's time for some instant gratification. Let's bring up this compliance check in our vnc browser tab with the RHACS dashboard open by going to: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-api-server-encryption-provider-cipher?detailsTab=Results
|
||||
|
||||
You will see it currently shows as `Failed`. We can trigger a re-scan with the `oc` command below in our terminal:
|
||||
|
||||
> Note: Due to the api server rolling restart when this remediation was applied you may need to perform a fresh terminal login with `oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true`
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance annotate compliancescans/ocp4-moderate compliance.openshift.io/rescan=
|
||||
```
|
||||
|
||||
Hitting refresh, the check should now report `Pass`, and our overall percentage compliance against the baseline should have also now increased. Congratulations, time to move on to exercise 6 🚀
|
||||
174
data/compliance/exercise6.mdx
Normal file
@ -0,0 +1,174 @@
|
||||
---
|
||||
title: Retrieving raw compliance results
|
||||
exercise: 6
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Need to integrate results with another platform? No problem!"
|
||||
---
|
||||
|
||||
Often organisations will have dedicated software for managing governance, risk and compliance or need to provide results to external auditors. In these situations while the dashboards within Red Hat Advanced Cluster Security, or `ComplianceCheckResult` objects in the OpenShift APIServer are helpful, what we really need to do is integrate these results into our third party compliance management platform or pass results in a standardised format to third parties.
|
||||
|
||||
In this exercise, we'll briefly step through retrieving raw compliance results, in the well known **Asset Reporting Framework** (ARF) format.
|
||||
|
||||
The Asset Reporting Format is a data model to express the transport format of information about assets, and the relationships between assets and reports. The standardized data model facilitates the reporting, correlating, and fusing of asset information throughout and between organizations. ARF is vendor and technology neutral, flexible, and suited for a wide variety of reporting applications.
|
||||
|
||||
For more details on the format specification refer to https://www.nist.gov/publications/specification-asset-reporting-format-11
|
||||
|
||||
|
||||
## 6.1 - Understanding raw result storage
|
||||
|
||||
When the Compliance Operator runs a scan, raw results are stored in a `PersistentVolume`. The following `oc` command shows the mapping `PersistentVolume` name for a given scan name.
|
||||
|
||||
Let's use our scan name that we set up previously, `daily-nist-800-53-moderate`:
|
||||
|
||||
```bash
|
||||
oc get --namespace openshift-compliance compliancesuites daily-nist-800-53-moderate --output json | jq '.status.scanStatuses[].resultsStorage'
|
||||
```
|
||||
|
||||
We should see results showing the name of each `PersistentVolume` for each profile that was scanned, below is an example:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "ocp4-moderate",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-master",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-worker",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
```
|
||||
|
||||
We can view the details of these `PersistentVolumes` as follows:
|
||||
|
||||
|
||||
```bash
|
||||
oc get pvc --namespace openshift-compliance ocp4-moderate
|
||||
```
|
||||
|
||||
|
||||
## 6.2 - Retrieving results from a volume
|
||||
|
||||
Let's retrieve some specific results files from a volume by mounting the volume into a pod, and then using `oc` to copy the volume contents to our highside ssh host.
|
||||
|
||||
We can create a pod using the `rhel8/support-tools` additional image that was mirrored into our disconnected environment.
|
||||
|
||||
> Note: Notice the use of the pinned sha256 image digest below rather than standard image tags; this is a requirement of the mirroring process.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace openshift-compliance apply --filename -
|
||||
apiVersion: "v1"
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pv-extract
|
||||
spec:
|
||||
containers:
|
||||
- name: pv-extract-pod
|
||||
image: registry.redhat.io/rhel8/support-tools@sha256:ab42416e9e3460f6c6adac4cf09013be6f402810fba452ea95bd717c3ab4076b
|
||||
command: ["sleep", "3000"]
|
||||
volumeMounts:
|
||||
- mountPath: "/ocp4-moderate-scan-results"
|
||||
name: ocp4-moderate-scan-vol
|
||||
volumes:
|
||||
- name: ocp4-moderate-scan-vol
|
||||
persistentVolumeClaim:
|
||||
claimName: ocp4-moderate
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: Spawning a pod that mounts the `PersistentVolume` will keep the claim as `Bound`. If the volume’s storage class in use has permissions set to `ReadWriteOnce`, the volume is only mountable by one pod at a time. You must delete the pod upon completion, or it will not be possible for the Operator to schedule a pod and continue storing results in this location.
|
||||
|
||||
With the volume mounted we can copy the results out to our machine:
|
||||
|
||||
```bash
|
||||
mkdir /mnt/high-side-data/compliance-results
|
||||
oc cp pv-extract:/ocp4-moderate-scan-results /mnt/high-side-data/compliance-results --namespace openshift-compliance
|
||||
```
|
||||
|
||||
After the copy has completed we should delete our helper pod to unbind the volume:
|
||||
|
||||
```bash
|
||||
oc delete pod pv-extract --namespace openshift-compliance
|
||||
```
|
||||
|
||||
|
||||
## 6.3 - Reviewing raw result files
|
||||
|
||||
Now that we have a copy of the raw result files, let's see what they look like.
|
||||
|
||||
Starting with an `ls -lah` in our highside terminal we can see each scan result is stored in a numbered directory, yours should look similar to the example below:
|
||||
|
||||
```bash
|
||||
drwxr-xr-x. 5 lab-user lab-user 42 Sep 1 20:35 .
|
||||
drwxr-xr-x. 7 lab-user lab-user 4.0K Sep 1 20:28 ..
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 0
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 1
|
||||
drwxr-xr-x. 2 lab-user lab-user 6 Sep 1 20:35 lost+found
|
||||
```
|
||||
|
||||
If we take a look at one of the specific directories with `ls -lah compliance-results/1/` we'll see an archive file:
|
||||
|
||||
```bash
|
||||
-rw-r--r--. 1 lab-user lab-user 251K Sep 1 20:35 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
```
|
||||
|
||||
Let's drop into that directory and extract it now to take a look at the contents, run the commands below in your highside ssh terminal:
|
||||
|
||||
> Note: If you get an error from the `bunzip2` command below you may need to first install it with `sudo yum install --yes bzip2`.
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data/compliance-results/1
|
||||
bunzip2 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
mv ocp4-moderate-api-checks-pod.xml.bzip2.out ocp4-moderate-api-checks-pod.xml
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Now we're getting somewhere, we can see we have `.xml` file. Let's take a quick peek at the contents:
|
||||
|
||||
```bash
|
||||
head ocp4-moderate-api-checks-pod.xml
|
||||
```
|
||||
|
||||
You should see an xml document snippet similar to the example below:
|
||||
|
||||
```xml
|
||||
<core:relationships xmlns:arfvocab="http://scap.nist.gov/specifications/arf/vocabulary/relationships/1.0#">
|
||||
<core:relationship type="arfvocab:createdFor" subject="xccdf1">
|
||||
<core:ref>collection1</core:ref>
|
||||
</core:relationship>
|
||||
<core:relationship type="arfvocab:isAbout" subject="xccdf1">
|
||||
<core:ref>asset0</core:ref>
|
||||
</core:relationship>
|
||||
</core:relationships>
|
||||
```
|
||||
|
||||
|
||||
## 6.4 - Generating reports with openscap tooling
|
||||
|
||||
To finish off this exercise let's go one step further and use OpenSCAP tooling to generate an html based report we can open in our vnc Firefox browser.
|
||||
|
||||
Run the commands below in your high side terminal, we'll start by installing the `openscap-scanner` package.
|
||||
|
||||
```bash
|
||||
sudo yum install -y openscap-scanner
|
||||
```
|
||||
|
||||
Once the tooling is installed let's generate the report:
|
||||
|
||||
```bash
|
||||
oscap xccdf generate report ocp4-moderate-api-checks-pod.xml > report.html
|
||||
```
|
||||
|
||||
So far we've done all this on our high side terminal. We need to get this report artifact to our low side server where our Firefox vnc session is running, let's copy it out now:
|
||||
|
||||
```bash
|
||||
exit # Return to low side server
|
||||
rsync highside:/mnt/high-side-data/compliance-results/1/report.html /home/lab-user/Downloads/report.html
|
||||
```
|
||||
|
||||
Finally - we can open up our report in our web based Firefox vnc session! Once you've reviewed the report you can move on to exercise 7 🚀
|
||||
76
data/compliance/exercise7.mdx
Normal file
@ -0,0 +1,76 @@
|
||||
---
|
||||
title: Bonus - Making the most of rhacs
|
||||
exercise: 7
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Optional challenge - if you have time"
|
||||
---
|
||||
|
||||
So you've deployed Red Hat Advanced Cluster Security and completed some day one configuration. Now what?? One of the key day two activities for RHACS in a disconnected environment is ensuring you can keep the vulnerability database up to date.
|
||||
|
||||
At a high level, the RHACS **Scanner** component maintains a database of vulnerabilities. When Red Hat Advanced Cluster Security for Kubernetes (RHACS) runs in normal mode, **Central** retrieves the latest vulnerability data from the internet, and Scanner retrieves vulnerability data from Central.
|
||||
|
||||
However, if you are using RHACS in offline mode, **you must manually update the vulnerability data**. To manually update the vulnerability data, you must upload a definitions file to Central, and Scanner then retrieves the vulnerability data from Central.
|
||||
|
||||
In both online and offline mode, Scanner checks for new data from Central every `5` minutes by default. In online mode, Central also checks for new data from the internet approximately every `5-20` minutes.
|
||||
|
||||
The offline data source is updated approximately every 3 hours. After the data has been uploaded to Central, Scanner downloads the data and updates its local vulnerability database.
|
||||
|
||||
|
||||
## 7.1 - Update rhacs definitions with roxctl
|
||||
|
||||
To update the definitions in offline mode, perform the following steps:
|
||||
|
||||
1. Download the definitions.
|
||||
2. Upload the definitions to Central.
|
||||
|
||||
As a challenge, try following the documentation https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode#download-scanner-definitions_enable-offline-mode to perform the update.
|
||||
|
||||
> Note: I suggest exploring `roxctl` CLI as the method for downloading updates in your low side environment. You could then copy both `roxctl` and the definitions update to your high side environment and use `roxctl` once more (this time with an API token) in order to update the definitions.
|
||||
|
||||
|
||||
## 7.2 - Prioritise security remediation by risk
|
||||
|
||||
Completed your vulnerability definitions update? Awesome! Feel free to explore some of the other features of Red Hat Advanced Cluster Security using your web based vnc session and the RHACS dashboard.
|
||||
|
||||
Let’s take a look at the **Risk** view, where we go beyond the basics of vulnerabilities to understand how deployment configuration and runtime activity impact the likelihood of an exploit occurring and how successful those exploits will be.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Understanding risk exposure in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Risk is also influenced by runtime activity - and Deployments that have activity that could indicate a breach in progress have a red dot on the left. Obviously - the first one in the list should be our first focus.
|
||||
|
||||
The reality of security is that it’s just not possible to tackle all sources of Risk, so organizations end up prioritizing their efforts. We want RHACS to help inform that prioritization.
|
||||
|
||||
As a challenge, have a go at mirroring and deploying a new additional container image into your disconnected environment, repeating steps we completed earlier. Try creating a deployment for that image to bring it up on your cluster; the **Developer** perspective in the OpenShift Web Console can save you some time here.
|
||||
|
||||
Once the container is running, use the RHACS dashboard to check what the deployment's risk level is. What are the factors contributing to that?
|
||||
|
||||
|
||||
## 7.3 - Exploring the rhacs policy engine
|
||||
|
||||
Red Hat Advanced Cluster Security for Kubernetes allows you to use out-of-the-box security policies and define custom multi-factor policies for your container environment.
|
||||
|
||||
Configuring these policies enables you to automatically prevent high-risk service deployments in your environment and respond to runtime security incidents.
|
||||
|
||||
All of the policies that ship with the product are designed with the goal of providing targeted remediation that improves security hardening.
|
||||
|
||||
Take some time to review the default policies by clicking **Platform Configuration** > **Policy Management**. You’ll see this list contains many **Build** and **Deploy** time policies to catch misconfigurations early in the pipeline, but also **Runtime** policies that point back to specific hardening recommendations.
|
||||
|
||||
These policies come from us at Red Hat - our expertise, our interpretation of industry best practice, and our interpretation of common compliance standards, but you can modify them or create your own.
|
||||
|
||||
If you have some time take a look at the options for editing default policies to change their enforcement behavior or scope.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Policy management in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
If you're ready for a different topic, head over to Exercise 8, for the final tasks today to deploy Red Hat Developer Hub 🙂
|
||||
@ -1,7 +1,7 @@
|
||||
const headerNavLinks = [
|
||||
{ href: '/workshop', title: 'Exercises' },
|
||||
{ href: 'https://docs.openshift.com/container-platform/4.15/welcome/index.html', title: 'Documentation' },
|
||||
{ href: 'https://demo.redhat.com/workshop/s72ya3', title: 'Environment login' }
|
||||
{ href: 'https://docs.openshift.com/container-platform/4.17/welcome/index.html', title: 'Documentation' },
|
||||
{ href: 'https://catalog.demo.redhat.com/workshop/w949gy', title: 'Environment login' }
|
||||
]
|
||||
|
||||
export default headerNavLinks
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
const siteMetadata = {
|
||||
title: 'Red Hat OpenShift Security & Compliance Workshop',
|
||||
title: 'Red Hat OpenShift Security Hackathon',
|
||||
author: 'Red Hat',
|
||||
headerTitle: 'Red Hat',
|
||||
description: 'Red Hat OpenShift Security & Compliance Workshop',
|
||||
description: 'Red Hat OpenShift Security Hackathon',
|
||||
language: 'en-us',
|
||||
siteUrl: 'https://rhdemo.win',
|
||||
siteRepo: 'https://github.com/jmhbnz/workshops',
|
||||
|
||||
@ -1,168 +1,171 @@
|
||||
#+TITLE: Openshift disconnected security & compliance workshop
|
||||
#+DATE: <2024-08-26 Mon>
|
||||
#+TITLE: Openshift security hackathon
|
||||
#+DATE: <2024-09-26 Thu>
|
||||
#+AUTHOR: James Blair
|
||||
|
||||
|
||||
This document captures the steps required to set up an instance of the workshop.
|
||||
|
||||
* Connect to the low side instance
|
||||
* Log in to cluster
|
||||
|
||||
#+begin_src tmux
|
||||
ssh lab-user@3.143.149.146
|
||||
oc login --web https://api.cluster-bcfz8.bcfz8.sandbox1805.opentlc.com:6443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install required tools low side
|
||||
* Update cluster logo
|
||||
|
||||
#+begin_src tmux
|
||||
cd /mnt/low-side-data/
|
||||
curl -L -o oc-mirror.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/oc-mirror.tar.gz
|
||||
tar -xzf oc-mirror.tar.gz
|
||||
rm -f oc-mirror.tar.gz
|
||||
chmod +x oc-mirror
|
||||
sudo cp -v oc-mirror /bin
|
||||
curl -L -o mirror-registry.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/mirror-registry/latest/mirror-registry.tar.gz
|
||||
curl -L -o openshift-install.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.35/openshift-install-linux.tar.gz
|
||||
tar -xzf openshift-install.tar.gz openshift-install
|
||||
rm -f openshift-install.tar.gz
|
||||
curl -L -o oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.14.19/openshift-client-linux.tar.gz
|
||||
tar -xzf oc.tar.gz oc
|
||||
rm -f oc.tar.gz
|
||||
sudo cp -v oc /bin
|
||||
ls -1 /mnt/low-side-data/
|
||||
#+end_src
|
||||
oc create configmap console-custom-logo --from-file=/home/james/Downloads/logo.png -n openshift-config
|
||||
|
||||
|
||||
* Mirror installation content low side
|
||||
|
||||
#+begin_src tmux
|
||||
mkdir -v $HOME/.docker
|
||||
cp -v $HOME/pull-secret-example.json $HOME/.docker/config.json
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
---
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
cat << EOF | oc apply --filename -
|
||||
apiVersion: operator.openshift.io/v1
|
||||
kind: Console
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
customization:
|
||||
customLogoFile:
|
||||
key: logo.png
|
||||
name: console-custom-logo
|
||||
customProductName: ACME Financial Services OpenShift Console
|
||||
perspectives:
|
||||
- id: admin
|
||||
visibility:
|
||||
state: Disabled
|
||||
- id: dev
|
||||
visibility:
|
||||
state: Enabled
|
||||
EOF
|
||||
cd /mnt/low-side-data
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install mirror registry high side
|
||||
* Add an interesting notification banner
|
||||
|
||||
#+begin_src tmux
|
||||
rsync -avP /mnt/low-side-data/mirror-registry.tar.gz highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
cd /mnt/high-side-data
|
||||
tar -xzvf mirror-registry.tar.gz
|
||||
./mirror-registry install --initPassword discopass
|
||||
cat << EOF | oc apply --filename -
|
||||
apiVersion: console.openshift.io/v1
|
||||
kind: ConsoleNotification
|
||||
metadata:
|
||||
name: acme-banner
|
||||
spec:
|
||||
text: ACME Financial Services Production OpenShift
|
||||
location: BannerTop
|
||||
link:
|
||||
href: 'https://www.youtube.com/watch?v=W31e9meX9S4'
|
||||
text: Cluster Security Dashboard
|
||||
color: '#fff'
|
||||
backgroundColor: '#0000FF'
|
||||
EOF
|
||||
#+end_src
|
||||
|
||||
|
||||
* Trust mirror registry high side
|
||||
* Deploy the vulnerable workload
|
||||
|
||||
#+begin_src tmux
|
||||
sudo cp -v $HOME/quay-install/quay-rootCA/rootCA.pem /etc/pki/ca-trust/source/anchors/
|
||||
sudo update-ca-trust
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Transfer mirror content from low to high
|
||||
|
||||
#+begin_src tmux
|
||||
exit
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
ssh highside
|
||||
sudo mv -v /mnt/high-side-data/oc /bin/
|
||||
sudo mv -v /mnt/high-side-data/oc-mirror /bin/
|
||||
sudo mv -v /mnt/high-side-data/openshift-install /bin/
|
||||
cd /mnt/high-side-data
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq1_000000.tar docker://$(hostname):8443
|
||||
#+end_src
|
||||
|
||||
|
||||
* Install openshift high side
|
||||
|
||||
#+begin_src tmux
|
||||
cat << EOF > /mnt/high-side-data/install-config.yaml
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: disco
|
||||
baseDomain: lab
|
||||
compute:
|
||||
- architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: worker
|
||||
replicas: 0
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
replicas: 1
|
||||
platform:
|
||||
aws:
|
||||
type: m5.8xlarge
|
||||
networking:
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.0.0.0/16
|
||||
networkType: OVNKubernetes
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
platform:
|
||||
aws:
|
||||
region: us-east-2
|
||||
subnets:
|
||||
- $(aws ec2 describe-subnets --output json | jq '.Subnets[0].SubnetId' -r)
|
||||
publish: Internal
|
||||
additionalTrustBundlePolicy: Always
|
||||
name: prd-acme-payments
|
||||
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: prd-acme-payments-processor
|
||||
namespace: prd-acme-payments
|
||||
labels:
|
||||
app: payments-processor
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
deployment: prd-acme-payments-processor
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
deployment: prd-acme-payments-processor
|
||||
spec:
|
||||
containers:
|
||||
- name: literally-log4shell
|
||||
image: quay.io/smileyfritz/log4shell-app:v0.5
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_ADMIN
|
||||
- NET_ADMIN
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: unix-socket
|
||||
mountPath: /var/run/crio/crio.sock
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
dnsPolicy: ClusterFirst
|
||||
securityContext: {}
|
||||
schedulerName: default-scheduler
|
||||
volumes:
|
||||
- name: unix-socket
|
||||
hostPath:
|
||||
path: /var/run/crio/crio.sock
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 25%
|
||||
maxSurge: 25%
|
||||
revisionHistoryLimit: 10
|
||||
progressDeadlineSeconds: 600
|
||||
EOF
|
||||
if ! test -f "/mnt/high-side-data/id_rsa"; then
|
||||
ssh-keygen -C "OpenShift Debug" -N "" -f /mnt/high-side-data/id_rsa
|
||||
fi
|
||||
echo "sshKey: $(cat /mnt/high-side-data/id_rsa.pub)" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
echo "pullSecret: '$(jq -c . $XDG_RUNTIME_DIR/containers/auth.json)'" | tee -a /mnt/high-side-data/install-config.yaml
|
||||
if (test -e /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
then
|
||||
echo -e "\n\n Looks good, go ahead! \n\n"
|
||||
else
|
||||
echo -e "\n\n Uh oh, something is wrong... \n\n"
|
||||
fi
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
imageContentSources:
|
||||
$(grep "mirrors:" -A 2 --no-group-separator /mnt/high-side-data/oc-mirror-workspace/results-*/imageContentSourcePolicy.yaml)
|
||||
EOF
|
||||
tail -22 /mnt/high-side-data/install-config.yaml
|
||||
cat << EOF >> /mnt/high-side-data/install-config.yaml
|
||||
additionalTrustBundle: |
|
||||
$(sed 's/^/ /' /home/lab-user/quay-install/quay-rootCA/rootCA.pem)
|
||||
EOF
|
||||
cat /mnt/high-side-data/install-config.yaml
|
||||
cp -v /mnt/high-side-data/install-config.yaml /mnt/high-side-data/install-config.yaml.backup
|
||||
openshift-install create cluster --dir /mnt/high-side-data
|
||||
|
||||
oc adm policy add-scc-to-user privileged -z default -n prd-acme-payments
|
||||
#+end_src
|
||||
|
||||
|
||||
* Disable default catalog sources high side
|
||||
* Add spicy cluster users
|
||||
|
||||
#+begin_src tmux
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
oc patch OperatorHub cluster --type merge -p '{"spec": {"disableAllDefaultSources": true}}'
|
||||
oc create -f /mnt/high-side-data/oc-mirror-workspace/results-*/catalogSource-cs-redhat-operator-index.yaml
|
||||
# Create the namespace for the exercise
|
||||
oc new-project prd-acme-experimental
|
||||
|
||||
# Retrieve existing users htpasswd file
|
||||
oc get secret htpasswd -ojsonpath={.data.htpasswd} -n openshift-config | base64 --decode > ${HOME}/Downloads/users.htpasswd
|
||||
|
||||
# Add additional users
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd specific-enhanced-ocelot admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd upset-benevolent-hacker admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd beaming-aggressive-squid admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd tame-threatening-otter admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd rebuked-placid-engineer admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd expert-invasive-meerkat admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd childish-shifty-caterpillar admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd silent-lively-heron admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd bountiful-soaked-crab admin
|
||||
htpasswd -bB ${HOME}/Downloads/users.htpasswd alienated-proud-snail admin
|
||||
|
||||
# Replace the secret
|
||||
oc create secret generic htpasswd --from-file=htpasswd=${HOME}/Downloads/users.htpasswd --dry-run=client --output yaml --namespace openshift-config | oc replace --filename -
|
||||
sleep 30
|
||||
|
||||
# Login as a specified user
|
||||
oc login --username alienated-proud-snail --password admin
|
||||
oc login --username bountiful-soaked-crab --password admin
|
||||
oc login --username silent-lively-heron --password admin
|
||||
oc login --username childish-shifty-caterpillar --password admin
|
||||
oc login --username expert-invasive-meerkat --password admin
|
||||
oc login --username rebuked-placid-engineer --password admin
|
||||
oc login --username tame-threatening-otter --password admin
|
||||
oc login --username beaming-aggressive-squid --password admin
|
||||
oc login --username upset-benevolent-hacker --password admin
|
||||
oc login --username specific-enhanced-ocelot --password admin
|
||||
|
||||
# Log back in as admin
|
||||
oc login --username admin
|
||||
|
||||
# Grant user permission on project
|
||||
oc adm policy add-role-to-user admin childish-shifty-caterpillar --namespace prd-acme-experimental
|
||||
|
||||
# Delete the namespace as a particular user
|
||||
oc delete project prd-acme-experimental --as childish-shifty-caterpillar
|
||||
#+end_src
|
||||
|
||||
@ -1,40 +1,72 @@
|
||||
---
|
||||
title: Understanding our lab environment
|
||||
title: Understanding our hackathon environment
|
||||
exercise: 1
|
||||
date: '2024-08-22'
|
||||
tags: ['ssh','novnc','workshop','setup']
|
||||
date: '2024-10-14'
|
||||
tags: ['openshift','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's get familiar with our lab setup."
|
||||
summary: "Let's get familiar with our hackathon setup."
|
||||
---
|
||||
|
||||
Welcome to the OpenShift 4 Disconnected security & compliance workshop! Here you'll learn about operating a secure and compliant OpenShift 4 cluster in a disconnected network using the following key OpenShift features:
|
||||
Welcome to the OpenShift 4 security hackathon! Here you'll be able to practice your prowess operating a secure and compliant OpenShift 4 cluster. Exercises will award points for each correct solution.
|
||||
|
||||
- [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes)
|
||||
- [Red Hat OpenShift Compliance Operator](https://www.redhat.com/en/blog/a-guide-to-openshift-compliance-operator-best-practices)
|
||||
You're in a race to reach the highest score before the session concludes! If multiple teams complete all exercises and share the same points total, a further ranking will be done by elapsed time based on when Slack messages are sent.
|
||||
|
||||
To level set, [Red Hat OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift) is a unified platform to build, modernize, and deploy applications at scale. OpenShift supports running in disconnected networks, though this does change the way the cluster operates because key ingredients like container images, operator bundles, and helm charts must be brought into the environment from the outside world via mirroring.
|
||||
|
||||
There are of course many different options for installing OpenShift in a restricted network; this workshop will not cover the deployment of a cluster, instead you will have an existing installed cluster allocated to you which has been created in advance. Your tasks during this workshop will be to improve the security and compliance of the cluster and workloads running on it.
|
||||
|
||||
**Let's get started!**
|
||||
|
||||
|
||||
## 1.1 - Reserve a lab environment
|
||||
## 1.1 - The hackathon scenario
|
||||
|
||||
An OpenShift `4.14` cluster has already been provisioned for you to complete these exercises. To reserve an environment go to [this Google Sheets spreadsheet](https://docs.google.com/spreadsheets/d/1tddgRA6suefTaITyRx87IoRCfCJ7El9Hdr6HB8K7Mvo/edit?usp=sharing). Update your name next to an `Available` environment and change the status to `Allocated`.
|
||||
We're returning to ACME Financial Services, a large bank based in Australia. Thanks to the efforts of the local account team after a long procurement journey Red Hat has landed a massive **$5m AUD** deal including a significant portion of Red Hat Services 🚀.
|
||||
|
||||
Your hackathon team are the post-sales consultants engaging with ACME to improve their OpenShift platform security hardening. The bank have been running OpenShift for a while but the account team have said *"they are basically YOLO'ing it"* from a security perspective. Thankfully you're on site now to help iron things out!
|
||||
|
||||

|
||||
|
||||
|
||||
## 1.2 - Understanding the environment
|
||||
|
||||
For this challenge you'll be given access to the ACME Financial Services OpenShift `4.17` cluster which is not currently operating in a secure and compliant manner. All challenge tasks must be performed on this cluster so your solutions can be graded successfully.
|
||||
|
||||
You can and are encouraged to use any supporting documentation or other resources in order to tackle each of the challenge tasks.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment worksheet* |
|
||||
| *OpenShift cluster console* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.2 - Login via ssh and vnc
|
||||
## 1.2 - Obtain your environment
|
||||
|
||||
To complete the lab exercises you'll use a mix of an `ssh` terminal session for running OpenShift client `oc` commands, and then a browser based vnc session in order to access the OpenShift cluster web console.
|
||||
Working in a small team you will have one shared cluster for team members to share. Your team will have a name allocated already.
|
||||
|
||||
Links to a browser based terminal and vnc session are available in the spreadsheet, along with any credentials required. You are welcome to use your own terminal or vnc software if you prefer.
|
||||
To get underway open your web browser and navigate to this link to allocate an environment for your team https://catalog.demo.redhat.com/workshop/w949gy.
|
||||
|
||||
Register for an environment using the team email address and password provided by your hackathon organisers. Registering with a team email will mean all your team members will be able to see the same cluster details for your shared team cluster.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Hackathon team registration page* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 1.4 - Confirm environment access
|
||||
|
||||
If your team have secured an environment and are ready to start the challenge please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> [team name] have logged into an environment and are starting the challenge!
|
||||
|
||||
The event team will reply in slack to confirm your team has been recorded and start you with a base score of `10` points.
|
||||
|
||||
|
||||
## 1.5 - Hints!
|
||||
|
||||
If you get stuck on a question, fear not, perhaps try a different approach. If you have tried everything you can think of and are still stuck you can unlock a hint for `5` points by posting a message in the `#event-anz-ocp-security-hackathon` channel with the message:
|
||||
|
||||
> [team name] are stuck on [exercise] and are unlocking a hint.
|
||||
|
||||
A hackathon organiser will join your breakout room to share the hint with you 🤫.
|
||||
|
||||
Once you have both a terminal and vnc session working you're ready to get underway with the workshop, please move on to exercise 2 🚀
|
||||
|
||||
@ -1,228 +1,99 @@
|
||||
---
|
||||
title: Mirror required content
|
||||
title: Laying the foundations for cluster security
|
||||
exercise: 2
|
||||
date: '2024-08-23'
|
||||
tags: ['oc-mirror','mirror-registry','openshift','disconnected']
|
||||
date: '2024-10-17'
|
||||
tags: ['openshift','security']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "You want features? Mirror them in!🪞"
|
||||
summary: "Can't have security without a security platform"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation for a private cluster on AWS using the [IPI install method](https://docs.openshift.com/container-platform/4.14/installing/installing_aws/installing-aws-private.html#installing-aws-private), and does not have any post installation features added.
|
||||
It’s your first day of the consulting engagement with ACME. You’ve paired up with one of their Senior Platform Engineers Angie who has just given you a tour of their newly deployed OpenShift cluster which is looking healthy 🥦 (whew!).
|
||||
|
||||
During this workshop we want to secure the cluster with Red Hat Advanced Cluster Security, understand our compliance posture against [NIST 800-53](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final) with the OpenShift Compliance Operator and then explore some bonus activities like deploying Red Hat Developer Hub.
|
||||
Time to tackle the first task on our consulting engagement list, installing [Red Hat Advanced Cluster Security](https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index) via the operator.
|
||||
|
||||
To install and configure these features we first need to mirror some additional content into our disconnected environment, let's get started.
|
||||
Ultimately the ACME team wants to manage everything with GitOps, but for today Angie would prefer a guided walkthrough on how to do things using the OpenShift Web Console so she has an opportunity to learn more about each step of the process.
|
||||
|
||||

|
||||
|
||||
|
||||
## 2.1 - Installing the rhacs operator
|
||||
|
||||
You’re in front of a screen together with the Web Console open. The first step of installing the operator should be easy, better get started!
|
||||
|
||||
The only requirement Angie has requested for the Advanced Cluster Security operator installation is that all future operator updates must be approved **Manually**. She explains that several platform team members have PTSD from previous upgrades happening automatically and bringing down ACME's EFTPOS platform so now automatic updates are disabled everywhere.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#install-acs-operator_install-central-ocp
|
||||
|
||||
|
||||
## 2.2 - Deploying central services
|
||||
|
||||
With the operator installed and healthy we now need to deploy an instance of **Central** for Angie. This Central instance will provide the management interface, API and secure the full fleet of ACME’s OpenShift clusters along with some EKS clusters ACME are currently running in AWS.
|
||||
|
||||
Angie has shared a high level design with you that states the Central resources need to be deployed to the `prd-acme-rhacs` namespace.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Workshop environment summary* |
|
||||
| *Architecture for Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
After deploying Central ensure you can log in to the web console using the automatically generated credentials.
|
||||
|
||||
## 2.1 - Open a terminal on your low side
|
||||
Documentation you may find helpful is:
|
||||
|
||||
Our first step to prepare to mirror content is to get connected to our low side jump host via `ssh`. You can use the web terminal link in your browser or alternatively your own local terminal with the command below (replacing the placeholder ip with the one you have been allocated).
|
||||
|
||||
```bash
|
||||
ssh lab-user@<ip address>
|
||||
```
|
||||
|
||||
You'll be prompted to enter a password which you can find in your allocated environment details.
|
||||
|
||||
After connecting, change directory to the low side workspace where the initial cluster installation was already completed for you and review the folder contents:
|
||||
|
||||
```bash
|
||||
cd /mnt/low-side-data
|
||||
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Your workspace will look similar to the one below:
|
||||
|
||||
```bash
|
||||
[lab-user@jump low-side-data]$ ls -lah
|
||||
total 21G
|
||||
drwxr-xr-x. 4 lab-user lab-user 4.0K Sep 2 12:46 .
|
||||
drwxr-xr-x. 3 root root 27 Aug 31 22:00 ..
|
||||
-rw-r--r--. 1 lab-user lab-user 305 Sep 2 12:38 imageset-config.yaml
|
||||
-rw-r--r--. 1 lab-user lab-user 696M Sep 2 12:37 mirror-registry.tar.gz
|
||||
-rw-r--r--. 1 lab-user lab-user 20G Sep 2 12:46 mirror_seq1_000000.tar
|
||||
-rwxr-xr-x. 1 lab-user lab-user 146M Mar 26 22:17 oc
|
||||
-rwxr-x--x. 1 lab-user lab-user 144M Aug 7 06:30 oc-mirror
|
||||
-rw-------. 1 lab-user lab-user 160K Sep 2 12:41 .oc-mirror.log
|
||||
drwxr-xr-x. 3 lab-user lab-user 17 Sep 2 12:38 oc-mirror-workspace
|
||||
-rwxr-xr-x. 1 lab-user lab-user 631M Aug 7 07:40 openshift-install
|
||||
drwxr-x---. 2 lab-user lab-user 28 Sep 2 12:46 publish
|
||||
```
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#install-central-operator_install-central-ocp
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#verify-central-install-operator_install-central-ocp
|
||||
|
||||
|
||||
## 2.2 - Get familiar with oc-mirror
|
||||
## 2.3 - Generating an init bundle
|
||||
|
||||
To mirror content into our disconnected environment we will be using the [`oc-mirror`](https://github.com/openshift/oc-mirror) openshift client utility.
|
||||
Alright, you've given Angie a quick tour around the Red Hat Advanced Cluster Security Console, now it's time to secure this hub cluster by generating an init bundle named `prd-acme-hub`.
|
||||
|
||||
To configure what content `oc-mirror` will download and mirror for us we use a YAML formatted file called an `ImageSetConfiguration`. This file declares:
|
||||
You remember from the documentation that before you install the `SecuredCluster` resource on a cluster, you must create an init bundle. The cluster that has `SecuredCluster` resource then uses this bundle to authenticate with Central.
|
||||
|
||||
1. **What to download** which can include (OpenShift itself, operator bundles, helm charts, or specific container images)
|
||||
2. **What versions of each item to download**
|
||||
3. **Where to store the downloaded content**
|
||||
|
||||
The `oc-mirror` utility also has some features for listing available content for mirroring, let's try that now! Run the following commands in your ssh terminal:
|
||||
|
||||
```bash
|
||||
# List available openshift release versions
|
||||
oc-mirror list releases
|
||||
|
||||
# List operator catalogs for a specific openshift release
|
||||
oc-mirror list operators --catalogs --version=4.14
|
||||
|
||||
# List all operators in a specific catalogs
|
||||
oc-mirror list operators --catalog registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
```
|
||||
|
||||
Using the built in help have a go at using `oc-mirror` to identify details of a specific operator.
|
||||
|
||||
We can also use the `oc-mirror` utility to understand the state of any existing mirror content bundles. We have a content bundle called `mirror_seq1_000000.tar` available from the initial installation of your OpenShift cluster, let's inspect that now.
|
||||
|
||||
```bash
|
||||
oc-mirror describe mirror_seq1_000000.tar | more
|
||||
```
|
||||
|
||||
This bundle archive was created by the `oc-mirror` utility using the configuration file called `imageset-config.yaml` which is also in the same directory. Let's review that file:
|
||||
|
||||
```bash
|
||||
cat imageset-config.yaml
|
||||
```
|
||||
|
||||
Your file should look something like the example below. We can see the `4.14.35` version of OpenShift is specified to be downloaded, along with the `registry.redhat.io/rhel8/support-tools` additional standalone container image.
|
||||
|
||||
```yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
```
|
||||
|
||||
|
||||
## 2.3 - Confirm local cache is up to date
|
||||
|
||||
A local cache of content already exists from when the cluster installation was initially performed in advance of this workshop. Let's confirm everything is still up to date by re-running the `oc-mirror` command specifying our configuration file and the location on our disk.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take several minutes to complete but should complete with `No new images detected, process stopping` to confirm the existing cache is up to date.
|
||||
|
||||
|
||||
## 2.4 - Add new mirror content
|
||||
|
||||
For our workshop exercises today we need to mirror some additional operators, namely the **OpenShift Compliance Operator**, **Red Hat Advanced Cluster Security**, and **Red Hat Developer Hub**. Run the command below to update your `imageset-config.yaml` file to match the example below
|
||||
|
||||
```bash
|
||||
cat << EOF > /mnt/low-side-data/imageset-config.yaml
|
||||
kind: ImageSetConfiguration
|
||||
apiVersion: mirror.openshift.io/v1alpha2
|
||||
storageConfig:
|
||||
local:
|
||||
path: ./
|
||||
mirror:
|
||||
platform:
|
||||
channels:
|
||||
- name: stable-4.14
|
||||
type: ocp
|
||||
minVersion: 4.14.35
|
||||
maxVersion: 4.14.35
|
||||
operators:
|
||||
- catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14
|
||||
packages:
|
||||
- name: rhdh
|
||||
channels:
|
||||
- name: fast
|
||||
minVersion: '1.1.1'
|
||||
maxVersion: '1.1.1'
|
||||
- name: compliance-operator
|
||||
channels:
|
||||
- name: stable
|
||||
- name: rhacs-operator
|
||||
channels:
|
||||
- name: stable
|
||||
additionalImages:
|
||||
- name: registry.redhat.io/rhel8/support-tools
|
||||
helm: {}
|
||||
EOF
|
||||
```
|
||||
|
||||
After updating the configuration file we can re-run our `oc-mirror` command to bring the new content into our local collection on disk in `/mnt/low-side-data`.
|
||||
|
||||
```bash
|
||||
oc-mirror --config imageset-config.yaml file:///mnt/low-side-data --verbose 3
|
||||
```
|
||||
|
||||
> Note: This command may take up to 10 minutes to complete depending on connection speeds.
|
||||
|
||||
|
||||
## 2.5 - Mirror updated content to high side registry
|
||||
|
||||
Once the local mirror update has completed we now need to transfer this content to our high side and mirror it from disk into the OpenShift Mirror Registry running in our disconnected high side.
|
||||
|
||||
In this workshop we will use `rsync` to copy our content to our high side system, let's do that now:
|
||||
|
||||
```bash
|
||||
rsync -avP /mnt/low-side-data/ highside:/mnt/high-side-data/
|
||||
```
|
||||
|
||||
> Note: `oc-mirror` creates incremental mirror content files in order to prevent duplicating content. You will notice your low side mirror workspace includes a new file `mirror_seq2_000000.tar` which is significantly smaller than the original mirror archive.
|
||||
|
||||
Once the transfer has completed we need to log into our high side disconnected system and run `oc-mirror` from that side to upload the content from the new archive into our disconnected container registry
|
||||
|
||||
```bash
|
||||
ssh highside
|
||||
```
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data
|
||||
podman login -u init -p discopass $(hostname):8443
|
||||
oc-mirror --from=/mnt/high-side-data/mirror_seq2_000000.tar docker://$(hostname):8443
|
||||
```
|
||||
|
||||
## 2.6 - Verify new operators are available
|
||||
|
||||
After a couple of minutes the mirror process will complete. We then need to tell OpenShift about the new content that is available by running the commands below.
|
||||
|
||||
```bash
|
||||
oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true
|
||||
for file in $(find ./oc-mirror-workspace -type f -name '*.yaml'); do oc apply -f $file; done
|
||||
```
|
||||
|
||||
> Note: In our `oc-mirror-workspace` directory each time we mirror new content a new `results-<id>` directory will be created which may contain `imageContentSourcePolicy.yaml` or `catalogSource-cs-<index>.yaml` files which we need to apply to our cluster to tell it about the new content that is available.
|
||||
|
||||
Once the updates are applied we can then check that our new operators are available in the OpenShift Web Console using our browser based vnc session:
|
||||
|
||||
1. Open your vnc browser tab
|
||||
2. Use the left menu panel, click **Settings** and then select **Remote Resizing** as the scaling mode to improve viewing experience.
|
||||
3. Click **Connect** and when prompted enter the password in your environment spreadsheet row, then click **Send credentials**.
|
||||
4. A Firefox browser window should already be open, you can manually start it using the top left applications menu if needed.
|
||||
5. Click the bookmark toolbar option for **DISCO - OpenShift**.
|
||||
6. Log in when prompted with the username **kubeadmin** and the kubeadmin password listed in your environment spreadsheet (you can also find this password in your highside bastion ssh session by running `cat /mnt/high-side-data/auth/kubeadmin-password`). Note that to paste in the web based vnc session you need to use the left hand panel to pass the clipboard content through to the session.
|
||||
7. Navigate to **Operators** on the left menu, and then click **OperatorHub**, you should see the newly mirrored operators are now available in your disconnected cluster!
|
||||
Angie would prefer to use the **Operator** method for these tasks as she explains having repressed memories of trying to find indentation issues in helm chart templates and never ever wanting to touch helm ever again.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Check disconnected operator hub* |
|
||||
| *Create an init bundle in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
If your mirroring has completed successfully you are ready to move on to exercise 3 and install the three new operators 🎉
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#portal-generate-init-bundle_init-bundle-ocp
|
||||
|
||||
|
||||
## 2.4 - Securing the hub cluster
|
||||
|
||||
The pair session is going well, Angie is impressed how quickly you got to this point. You now have the init bundle downloaded and explain to her that you just need to import it on the cluster and create the `SecuredCluster` resource to finish the process.
|
||||
|
||||
Consulting the high level design she lets you know the init bundle and `SecuredCluster` resources need to be deployed to the `prd-acme-secured` namespace, with the cluster being named `prd-acme-hub` within RHACS.
|
||||
|
||||
Reading further in the design Angie points out that the **Contact Image Scanners** setting should be set to `ScanIfMissing` as this makes the admission control process more secure by ensuring all images are scanned before they can be admitted to the cluster.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/installing/index#installing-sc-operator
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Secured cluster list in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
> **Hint** If your SecuredCluster pods are in the right namespace and are not all starting successfully this can commonly occur because you have missed appending the `:443` to your central endpoint in the `SecuredCluster` resource.
|
||||
|
||||
|
||||
## 2.5 - Check your work
|
||||
|
||||
If your pair session with Angie has finished and the hub cluster is secured please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 2, we have laid the foundations for cluster security.
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||

|
||||
|
||||
@ -1,150 +1,66 @@
|
||||
---
|
||||
title: Install operators on a disconnected cluster
|
||||
title: Encrypting cluster internal network traffic
|
||||
exercise: 3
|
||||
date: '2024-08-27'
|
||||
tags: ['openshift','operators','operator-hub','disconnected']
|
||||
date: '2024-10-18'
|
||||
tags: ['openshift','security','ipsec','encryption']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Operators?!? 🤔 - Think app store for Kubernetes 🌟"
|
||||
summary: "Is OpenShift secure by default?"
|
||||
---
|
||||
|
||||
The disconnected OpenShift cluster you have been allocated is the result of a standard installation using the IPI install method, and does not have any post installation features added.
|
||||
Day one with Angie went great. After a refreshing overnight break spent watching the cinematic masterpiece of Shrek 2 you're back on site with the ACME team for day two of the consulting engagement.
|
||||
|
||||
In a broad sense many OpenShift features are added via [Operators](https://www.redhat.com/en/technologies/cloud-computing/openshift/what-are-openshift-operators). Operators automate the creation, configuration, and management of instances of Kubernetes-native applications. Operators can provide automation at every level of the stack—from managing the parts that make up the platform all the way to applications that are provided as a managed service.
|
||||
Your first task is to address a complaint from Brent in the ACME Security team who has done some initial cluster security checks to get a baseline. Brent is upset that OpenShift internal network traffic is currently un-encrypted and has been ever since their cluster was deployed!
|
||||
|
||||
In the previous exercise we mirrored some new operator bundles into our disconnected network. In this exercise we'll install those operators and explore the features they provide us via [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources) they provide.
|
||||
Brent is pretty annoyed because the Red Hat sales team told him that OpenShift was **"secure by default"** so he wasn't expecting to see internal cluster traffic viewable in plain text between nodes in the cluster as this is a big no-no for the bank 🤬🙅
|
||||
|
||||
> Note: For some trivia, Red Hat created and open sourced the [Operator Framework](https://github.com/operator-framework), then later contributed the project to the Cloud Native Computing Foundation in 2021, ensuring all organisations can benefit from our experience building and supporting operator driven clusters since ~2016.
|
||||
>
|
||||
> 
|
||||
You manage to talk him down by explaining how easily encryption can be turned on and how well OpenShift supports the feature. Whew. You note down to give some feedback to the local sales team to be more careful with the assurances they give.
|
||||
|
||||
You decide to make enabling encryption top of your list for the morning to try and keep Brent happy.
|
||||
|
||||

|
||||
|
||||
|
||||
## 3.1 - Installing compliance operator
|
||||
## 3.1 - Encrypting internal cluster traffic
|
||||
|
||||
First up let's install the [Red Hat OpenShift Compliance Operator](https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-overview.html).
|
||||
With IPsec enabled, you can encrypt internal pod-to-pod cluster traffic on the OVN-Kubernetes cluster network between nodes.
|
||||
|
||||
For some brief context the Compliance Operator assists platform teams by automating the inspection of numerous technical implementations and compares those against certain aspects of industry standards. For our purposes today that industry standard will be **NIST 800-53**.
|
||||
|
||||
The Compliance Operator assesses compliance of both the Kubernetes API resources of OpenShift Container Platform, as well as the nodes running the cluster. The Compliance Operator uses [OpenSCAP](https://www.open-scap.org), a NIST-certified tool, to scan and enforce security policies provided by the content.
|
||||
|
||||
To install the operator we can use either the OpenShift Web Console, or the terminal with `oc` cli. In this workshop we will install the operator with the Web Console using our vnc browser tab. Thanks to our previous exercise mirroring content and making it available via the cluster disconnected OperatorHub catalogs we can enjoy the same user experience to install the operator as if our cluster was fully connected.
|
||||
|
||||
1. Open your vnc browser tab and return to the OpenShift Web Console browser tab you opened in the previous exercise.
|
||||
2. Click on the **Compliance Operator** in **OperatorHub** to open the right hand panel, then click the blue **Install** button at the top of the panel.
|
||||
3. On the install details screen stick with all the default values and simply click **Install**
|
||||
4. After a short wait the Compliance Operator will be installed and ready for use 🎉
|
||||
You confirm the required mode with Angie & Brent as `Full` and then run the `oc patch` command to get the job done after giving Angie a heads up there will be some brief disruption on the cluster while the change is rolled out.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Install OpenShift Compliance Operator* |
|
||||
| *Encryption implications when enabling pod-to-pod IPSec* |
|
||||
</Zoom>
|
||||
|
||||
With the Compliance Operator installed feel free to explore which new Custom Resources the Operator makes available. We'll return to these in future exercises to begin using them.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.openshift.com/container-platform/4.17/networking/network_security/configuring-ipsec-ovn.html
|
||||
|
||||
|
||||
## 3.2 - Installing the rhacs operator
|
||||
## 3.2 - Observing cluster network rollout
|
||||
|
||||
Next up we'll install the [Red Hat Advanced Cluster Security](https://www.redhat.com/en/technologies/cloud-computing/openshift/advanced-cluster-security-kubernetes) Operator.
|
||||
Your change window on the ACME cluster is 30 minutes for the cluster network update. You've advised the ACME team there could be some minor disruption to the cluster while the cluster network operator is progressing the update.
|
||||
|
||||
Red Hat Advanced Cluster Security (RHACS) has direct integration with the Compliance Operator to provide a frontend user experience for running compliance scans along with viewing results.
|
||||
The cluster network update can take around ten minutes to complete. Observe the progress of the operator using the **Administration** > **Cluster Settings** > **Cluster Operators** view.
|
||||
|
||||
To try the alternative operator installation method this time we will install the operator via the `oc` cli in our terminal.
|
||||
|
||||
Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
You can also verify ipsec status using the following command:
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhacs-operator
|
||||
namespace: rhacs-operator
|
||||
spec:
|
||||
channel: stable
|
||||
installPlanApproval: Automatic
|
||||
name: rhacs-operator
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhacs-operator.v4.5.1
|
||||
EOF
|
||||
oc --namespace openshift-ovn-kubernetes rsh ovnkube-node-<XXXXX> ovn-nbctl --no-leader-only get nb_global . ipsec
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Advanced Cluster Security for Kubernetes** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
| *Cluster operators administration* |
|
||||
</Zoom>
|
||||
|
||||
|
||||
## 3.3 - Installing the developer hub operator
|
||||
## 3.3 - Check your work
|
||||
|
||||
The final operator we will install for this workshop relates to [Red Hat Developer Hub](https://developers.redhat.com/rhdh/overview).
|
||||
If you've kept Brent happy by enabling encryption for internal cluster traffic please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
Red Hat Developer Hub is an Internal Developer Portal (IDP) based on the upstream [Backstage](https://backstage.io) project initially created at Spotify. With Red Hat Developer Hub combined with Red Hat OpenShift we can enable platform engineering teams to offer software templates and pre-architected and supported approaches to make life easier for development teams, ease onboarding and reduce friction and frustration.
|
||||
|
||||
We'll also install the Red Hat Developer Hub using the `oc` cli in our terminal. Run the commands below in your terminal session to create the required `Namespace` and `Subscription` resources which will trigger the operator installation.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: rhdh-operator
|
||||
namespace: rhdh-operator
|
||||
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: rhdh
|
||||
namespace: rhdh-operator
|
||||
spec:
|
||||
channel: fast
|
||||
installPlanApproval: Automatic
|
||||
name: rhdh
|
||||
source: cs-redhat-operator-index
|
||||
sourceNamespace: openshift-marketplace
|
||||
startingCSV: rhdh-operator.v1.1.1
|
||||
EOF
|
||||
```
|
||||
|
||||
If you check back on your web console, after a short wait the **Red Hat Developer Hub** operator should now show as `✅ Succeeded`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *List of installed operators* |
|
||||
</Zoom>
|
||||
|
||||
If all three operators are now installed congratulations you are ready to move on to Exercise 4 🎉
|
||||
> Please review [team name] solution for exercise 3, our cluster internal traffic is now encrypted with cipher [cipher].
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||
@ -1,191 +1,55 @@
|
||||
---
|
||||
title: Deploy advanced cluster security
|
||||
title: Securing vulnerable workloads
|
||||
exercise: 4
|
||||
date: '2024-08-31'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
date: '2024-10-19'
|
||||
tags: ['openshift','security','cve management','rhacs']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Time to up our security & compliance game! 🔒"
|
||||
summary: "How do we deal with vulnerable workloads we can't patch?"
|
||||
---
|
||||
|
||||
With our Red Hat Advanced Cluster Security Operator installed and standing by to do some work for us, let's give it some work to do by telling it to deploy Red Hat Advanced Cluster Security onto our cluster.
|
||||
IPSec was a quick job and the cluster is looking good after enabling it. Your afternoon job is to pair up with Angie again and review the vulnerability status of the ACME Financial Services workloads that are deployed on the cluster so far.
|
||||
|
||||
Angie is really keen to tap into your knowledge on what she can do to make the most of the Red Hat Advanced Cluster Security Platform. This new security insight is something ACME have not really had access to historically for their container workloads.
|
||||
|
||||
You're in a meeting room going over things together, so far so good.
|
||||
|
||||
|
||||
## 4.1 - Getting familiar with rhacs
|
||||
## 4.1 - Ruh roh...
|
||||
|
||||
Before we get into the technical implementation let's take a moment to get up to speed with how Red Hat Advanced Cluster Security works.
|
||||
You're looking over the RHACS Dashboard together in the RHACS console.
|
||||
|
||||
Fundamentally you install RHACS as a set of containers in your OpenShift Container Platform or Kubernetes cluster. RHACS includes the following services:
|
||||
You and Angie both spot it at the same time...
|
||||
|
||||
1. **Central** services you install on a designated "hub" cluster. Central installs the Central, Scanner, and Scanner DB services. The Central service provides access to a user interface through a web UI or the RHACS portal. It also handles API interactions and provides persistent storage. Scanner analyzes images for known vulnerabilities. It uses Scanner DB as a cache for vulnerability definitions.
|
||||
2. **Secured cluster** services you install on each cluster you want to secure by RHACS. This installs the Collector, Sensor, and Admission Controller services. Collector collects runtime information on container security and network activity. It then sends data to Sensor, which monitors your Kubernetes cluster for policy detection and enforcement. Admission Controller monitors workloads and prevents users from creating them in RHACS when they violate security policies.
|
||||
The core banking payments processor namespace `prd-acme-payments` is vulnerable to the critical log4shell vulnerability 😱
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security high level architecture* |
|
||||
</Zoom>
|
||||
|
||||
> Note: For an overview of which sources Red Hat Advanced Cluster Security uses for vulnerability information and a more detailed walkthrough of each component, take a moment to review https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html-single/architecture/index.
|
||||

|
||||
|
||||
|
||||
## 4.2 - Deploying central services
|
||||
## 4.2 - What the %$^& do we do????
|
||||
|
||||
Let's now create our **Central** services on our cluster by creating a new `Central` custom resource which our newly installed operator will then manage and deploy on our behalf. We'll deploy these services into a new namespace called `acs-central`.
|
||||
In the minutes following the alarming discovery you observe a series of rushed conversations and Microsoft Skype for Business™ chats between Angie and various security team members, service owners and incident management team members.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc apply --filename -
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: acs-central
|
||||
spec:
|
||||
finalizers:
|
||||
- kubernetes
|
||||
A critical incident has been raised but at this point the consensus is the application simply cannot be turned off. It's a core component of the bank's payments processing and must continue running.
|
||||
|
||||
---
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: Central
|
||||
metadata:
|
||||
name: stackrox-central-services
|
||||
namespace: acs-central
|
||||
spec:
|
||||
central:
|
||||
exposure:
|
||||
route:
|
||||
enabled: true
|
||||
egress:
|
||||
connectivityPolicy: Offline
|
||||
EOF
|
||||
```
|
||||
The ACME team now turn to you, seeking advice on how they could secure this existing vulnerable deployment in place, without scaling down the application, so that any attempt at exploiting the vulnerability would be automatically thwarted.
|
||||
|
||||
> Note: The values we used for the `Central` instance are all defaults, aside from `connectivityPolicy: Offline`, which tells Red Hat Advanced Cluster Security it will be operating in a disconnected environment. For more details on how RHACS works in a disconnected environment refer to https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode.
|
||||
The clock's ticking, how will you respond?
|
||||
|
||||
Once the `Central` resource has been created you can check the state of the RHACS pods by running `oc get pods -n acs-central` in your highside terminal. Or navigating to **Workloads** > **Pods** for the `acs-central` project in the OpenShift Web Console.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Red Hat Advanced Cluster Security central pods* |
|
||||
</Zoom>
|
||||
|
||||
Once all pods are `Running` and `Ready` you can move on to the next step.
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/operating/evaluate-security-risks#use-process-baselines_evaluate-security-risks
|
||||
|
||||
|
||||
## 4.3 - Logging into rhacs dashboard
|
||||
## 4.3 - Check your work
|
||||
|
||||
Time to bring up our RHACS dashboard. We'll first retrieve the `admin` user password which was auto generated by the operator and stored in a **Secret**. Then we can open the **Route** for RHACS in a new browser tab and log in.
|
||||
If you've successfully secured the bank's vulnerable payments processor please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
1. Return to your vnc session and the open tab with our OpenShift Web Console.
|
||||
2. Click **Workloads** > **Secrets**, ensuring you are looking at the `acs-central` **Project**.
|
||||
3. Click into the `central-htpasswd` **Secret**
|
||||
4. Scroll down and click **Reveal values** on the right hand side.
|
||||
5. Copy the `password` field, we'll need this shortly.
|
||||
6. Navigate to **Networking** > **Routes** in the left hand menu.
|
||||
7. Click on the **Location** URL for the route named `central`.
|
||||
8. Login with the username `admin` and the password you copied earlier.
|
||||
> Please review [team name] solution for exercise 4, our payments processor application is now unhackable.
|
||||
|
||||
> Note: Ironically (given the subject matter), you may receive a tls verification warning when opening the rhacs dashboard. This is expected in this short lived workshop environment (because James is lazy) and should be accepted (Kids please don't do this at home 😂).
|
||||
**WARNING: The hackathon team will perform a brief penetration test of the application. If your application is not actually secured and remains exploitable by the log4shell vulnerability one of your OpenShift cluster nodes will be deleted for the lulz. No pressure!**
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Logging into Red Hat Advanced Cluster Security dashboard* |
|
||||
</Zoom>
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||
|
||||
## 4.4 - Securing our hub cluster
|
||||
|
||||
To begin securing our OpenShift "hub" cluster with RHACS we need to:
|
||||
|
||||
1. Generate an init bundle to download and apply to the cluster.
|
||||
2. Create and apply a `SecuredCluster` custom resource.
|
||||
|
||||
We'll start with generating the init bundle. Just for future familiarity for this step we'll use and follow the official RHACS documentation: https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/installing/installing-rhacs-on-red-hat-openshift#portal-generate-init-bundle_init-bundle-ocp
|
||||
|
||||
Follow the steps in `4.3.1.1` to generate an init bundle named `hub` using the RHACS dashboard, selecting the **Operator** based installation method.
|
||||
|
||||
Once the `hub-Operator-secrets-cluster-init-bundle.yaml` file has been downloaded we'll apply it to the cluster using the OpenShift Web Console **Import YAML** feature.
|
||||
|
||||
1. Create a new project in the Web Console named `acs-securedcluster`.
|
||||
2. Click **Import YAML** in the top right of the OpenShift Web Console.
|
||||
3. Open your **Downloads** directory in the file browser using the **Places** top left menu.
|
||||
4. Open the `hub-Operator-secrets-cluster-init-bundle.yaml` file in a text editor and copy the contents.
|
||||
5. Paste the contents into the **Import YAML** text field and click the blue **Create** button.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Importing an init bundle into our hub cluster* |
|
||||
</Zoom>
|
||||
|
||||
> Note: These init bundles contain secrets enabling a secured cluster to communicate with RHACS Central so it's important to store these securely. For automation purposes you can also generate init bundles with the RHACS API or the `roxctl` CLI, for example `roxctl -e <ACS CONSOLE URL>:443 central init-bundles generate demo-cluster --output-secrets /tmp/demo-cluster.yaml --password <ACS ADMIN PASSWORD>`.
|
||||
|
||||
Once our init bundle has been created we can create our `SecuredCluster` custom resource to complete the cluster onboarding process. We'll do that with our `oc` terminal session.
|
||||
|
||||
Copy the command below and run it in your highside web terminal:
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace acs-securedcluster apply --filename -
|
||||
apiVersion: platform.stackrox.io/v1alpha1
|
||||
kind: SecuredCluster
|
||||
metadata:
|
||||
name: stackrox-secured-cluster-services
|
||||
spec:
|
||||
monitoring:
|
||||
openshift:
|
||||
enabled: true
|
||||
auditLogs:
|
||||
collection: Auto
|
||||
network:
|
||||
policies: Enabled
|
||||
admissionControl:
|
||||
listenOnUpdates: true
|
||||
bypass: BreakGlassAnnotation
|
||||
contactImageScanners: ScanIfMissing
|
||||
listenOnCreates: true
|
||||
replicas: 3
|
||||
timeoutSeconds: 10
|
||||
listenOnEvents: true
|
||||
scannerV4:
|
||||
db:
|
||||
persistence:
|
||||
persistentVolumeClaim:
|
||||
claimName: scanner-v4-db
|
||||
indexer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: Default
|
||||
scanner:
|
||||
analyzer:
|
||||
scaling:
|
||||
autoScaling: Enabled
|
||||
maxReplicas: 5
|
||||
minReplicas: 2
|
||||
replicas: 3
|
||||
scannerComponent: AutoSense
|
||||
perNode:
|
||||
collector:
|
||||
collection: CORE_BPF
|
||||
forceCollection: false
|
||||
imageFlavor: Regular
|
||||
taintToleration: TolerateTaints
|
||||
clusterName: hub
|
||||
centralEndpoint: 'https://central-acs-central.apps.disco.lab:443'
|
||||
EOF
|
||||
```
|
||||
|
||||
After a short wait for pods to initialise in the `acs-securedcluster` namespace you should be able to see the cluster is now secured in RHACS by checking the **Platform Configuration** > **Clusters** overview which should show the `hub` cluster as `✅ Healthy`.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Hub cluster is now secured by Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
If you now have Red Hat Advanced Cluster Security **Central** and **SecuredCluster** components deployed then congratulations your RHACS instance is fully deployed and you're ready to start improving your cluster security and compliance posture in Exercise 5! 🎉
|
||||

|
||||
|
||||
|
||||
@ -1,216 +1,68 @@
|
||||
---
|
||||
title: Running a cluster compliance scan
|
||||
title: Understanding cluster compliance
|
||||
exercise: 5
|
||||
date: '2024-09-01'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
date: '2024-10-23'
|
||||
tags: ['openshift','compliance','nist','rhacs']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Let's check our cluster compliance against NIST 800-53 👀"
|
||||
summary: "Let's apply an industry benchmark!"
|
||||
---
|
||||
|
||||
We've done the work to set the OpenShift Compliance Operator and Red Hat Advanced Cluster Security up on our cluster, now let's make the most of it by using them to schedule and run a compliance scan on our cluster.
|
||||
The first two days of the consulting engagement at ACME have whirled by. You're working remotely today for day three and are pairing up with Melissa from the bank's compliance squad.
|
||||
|
||||
For the scan we'll be using the included `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift` and `NIST 800-53 Moderate-Impact Baseline for Red Hat OpenShift - Node level` scan profiles that are included with the OpenShift Compliance Operator.
|
||||
On the agenda today is to harden the `prd-acme-hub` cluster by understanding and remediating compliance against the [NIST 800-53 moderate benchmark](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-53r5.pdf).
|
||||
|
||||
Two scan profiles are required as we need to scan both the OpenShift cluster, as well as each individual node running [RHEL CoreOS](https://docs.openshift.com/container-platform/4.14/architecture/architecture-rhcos.html).
|
||||
The bank must comply with this specific benchmark to meet the requirements of their regulation legislation known as APRA (ACME Penny Regulation Act, 1998).
|
||||
|
||||
For more details on these compliance profiles please take some time to review:
|
||||
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate.html
|
||||
- https://static.open-scap.org/ssg-guides/ssg-ocp4-guide-moderate-node.html
|
||||
- https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-supported-profiles.html
|
||||

|
||||
|
||||
|
||||
## 5.1 - Scheduling a scan
|
||||
## 5.1 - Installing the compliance operator
|
||||
|
||||
There are two methods you can use to schedule Compliance Operator scans:
|
||||
You've got an upcoming Microsoft Skype for Business™ video call with Melissa in 30 minutes to show her how compliant the cluster is currently.
|
||||
|
||||
1. Creating a `ScanSetting` and `ScanSettingBinding` custom resource. This does not require Red Hat Advanced Cluster Security, and can be easily managed by GitOps, however is not beginner friendly and lacks any graphical frontend to easily explore cluster compliance status. For an overview of this approach please take a few minutes to review https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-scans.html#compliance-operator-scans
|
||||
2. Creating a **Scan Schedule** in Red Hat Advanced Cluster Security. This is the approach we will be using in this workshop as it is the most intuitive option.
|
||||
Time to quickly get the [OpenShift Compliance Operator](https://docs.openshift.com/container-platform/4.17//security/compliance_operator/co-overview.html) installed and run a scan via Red Hat Advanced Cluster Security. Better hurry!
|
||||
|
||||
Complete the steps below to create your scan schedule:
|
||||
As with last time, to limit PTSD induced panic attacks among the ACME platform team the operator must be set to update mode `Manual`.
|
||||
|
||||
1. Return to your browser tab in the vnc session with the Red Hat Advanced Cluster Security dashboard open.
|
||||
2. Navigate to **Compliance** > **Schedules** in the left hand menu.
|
||||
3. Click the blue **Create Scan Schedule** button in the middle of the screen.
|
||||
4. Enter the name `daily-nist-800-53-moderate` and set the **Time** field to `00:00` then click **Next**.
|
||||
5. On the next screen select your `hub` cluster, then click **Next**.
|
||||
6. On the profile screen tick `ocp4-moderate` and `ocp4-moderate-node`, then click **Next**.
|
||||
7. Click **Next** once more on the **Reports** screen and the click **Save**.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/security_and_compliance/compliance-operator#installing-compliance-operator-web-console_compliance-operator-installation
|
||||
|
||||
|
||||
## 5.2 - Scheduling a compliance scan
|
||||
|
||||
Operator installed, it's time to join the virtual meeting with Melissa and step her through how to run a compliance scan against NIST 800-53 moderate and visualise results using the Red Hat Advanced Cluster Security Dashboard.
|
||||
|
||||
Create a new scan schedule named `prd-acme-hub-nist-daily` targeting the appropriate benchmarks.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Creating a compliance scan schedule in Red Hat Advanced Cluster Security* |
|
||||
| *Viewing a compliance report in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
After creating the scan schedule results will be shortly available in the RHACS console. While we wait for the automatically triggered initial scan to complete, let's use the `oc` cli to review the `ScanSetting` that was created behind the scenes when we created the **Scan Schedule** in the RHACS dashboard.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
Run the commands below to review your `ScanSetting` resource:
|
||||
|
||||
```bash
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate
|
||||
|
||||
oc get scansetting --namespace openshift-compliance daily-nist-800-53-moderate --output yaml
|
||||
```
|
||||
|
||||
You should see details output similar to the example below. Notice the more advanced settings available in the custom resource including `rawResultsStorage.rotation` and `roles[]` which you may want to customize in your environment.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ScanSetting
|
||||
maxRetryOnTimeout: 3
|
||||
metadata:
|
||||
annotations:
|
||||
owner: stackrox
|
||||
labels:
|
||||
app.kubernetes.io/created-by: sensor
|
||||
app.kubernetes.io/managed-by: sensor
|
||||
app.kubernetes.io/name: stackrox
|
||||
name: daily-nist-800-53-moderate
|
||||
namespace: openshift-compliance
|
||||
rawResultStorage:
|
||||
pvAccessModes:
|
||||
- ReadWriteOnce
|
||||
rotation: 3
|
||||
size: 1Gi
|
||||
roles:
|
||||
- master
|
||||
- worker
|
||||
scanTolerations:
|
||||
- operator: Exists
|
||||
schedule: 0 0 * * *
|
||||
showNotApplicable: false
|
||||
strictNodeScan: false
|
||||
suspend: false
|
||||
timeout: 30m0s
|
||||
```
|
||||
- https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/operating/managing-compliance#scheduling-compliance-scans-and-assessing-profile-compliance
|
||||
|
||||
|
||||
## 5.2 - Review cluster compliance
|
||||
## 5.3 - Remediating a compliance issue
|
||||
|
||||
Once your cluster scan completes return to your vnc browser tab with the Red Hat Advanced Cluster Security Dashboard open. We'll take a look at our overall cluster compliance now against the compliance profile.
|
||||
Scan finished, you begin stepping Melissa through the individual results, inspecting `ComplianceCheckResult` and `ComplianceRemediation` resources.
|
||||
|
||||
> Note: Please be aware of the usage disclaimer shown at the top of the screen *"Red Hat Advanced Cluster Security, and its compliance scanning implementations, assists users by automating the inspection of numerous technical implementations that align with certain aspects of industry standards, benchmarks, and baselines. It does not replace the need for auditors, Qualified Security Assessors, Joint Authorization Boards, or other industry regulatory bodies."*.
|
||||
To demonstrate to her how the compliance operator can make automated remediation of compliance issues easy you pick out the `ocp4-moderate-oauth-or-oauthclient-token-maxage` compliance remediation and apply it, then trigger a re-scan from the compliance operator to validate this issue is now remediated on the cluster.
|
||||
|
||||
Navigate to **Compliance** > **Coverage** and review the overall result for the `ocp4-moderate` and `ocp4-moderate-node` profiles. The results should look something similar to the examples below:
|
||||
Documentation you may find helpful is:
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan results in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
Your cluster should come out compliant with ~65% of the `ocp4-moderate` profile and ~93% of the `ocp4-moderate-node` profile. Not a bad start, let's review an example of an individual result now.
|
||||
- https://docs.openshift.com/container-platform/4.17//security/compliance_operator/co-scans/compliance-operator-remediation.html#compliance-applying_compliance-remediation
|
||||
|
||||
|
||||
## 5.3 - Review individual `Manual` compliance results
|
||||
## 5.4 - Check your work
|
||||
|
||||
Reviewing the detailed results any checks that are not passing will either be categorised as `Failing` or `Manual`. While we do everything we can to automate the compliance process there are still a small number of controls you need to manage outside the direct automation of the Compliance Operator.
|
||||
If you've successfully run the compliance scan and remediated the compliance issue to show Melissa how things work please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
Looking at the `ocp4-moderate` results for our `hub` cluster. A good example of a `Manual` check is `ocp4-moderate-accounts-restrict-service-account-tokens`. Let's get an overview of the check, the rationale and our instructions to address it manually by clicking into that check in the list, and opening the **Details** tab. You can jump directly to it with this url: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-accounts-restrict-service-account-tokens?detailsTab=Details
|
||||
> Please review [team name] solution for exercise 5, our cluster is now [percentage] compliant against NIST 800-53 moderate at a cluster level.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Compliance scan result details in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||
We can see in this example it's essentially a judgement call. Our instructions are:
|
||||
|
||||
> For each pod in the cluster, review the pod specification and ensure that pods that do not need to explicitly communicate with the API server have `automountServiceAccountToken` configured to `false`.
|
||||
|
||||
Now just because this check is classified as `Manual`, does not mean that we are now all on our own. There are extremely powerful policy engine & policy violation tracking features in RHACS that we can use investigate the status of this check further.
|
||||
|
||||
A default policy is available out of the box called **Pod Service Account Token Automatically Mounted**. By default this policy is in **Inform only** mode, which means deployments that violate this policy will not be prevented by the RHACS admission controller, or scaled down if already running by the RHACS runtime protection. However we can still use this policy as is to inform on the current state of any cluster in our fleet that is secured by RHACS.
|
||||
|
||||
1. First let's navigate to **Platform Configuration** > **Policy Management** in the left hand menu.
|
||||
2. In the Policy list scroll down to find **Pod Service Account Token Automatically Mounted** and click the policy title.
|
||||
3. Have a read of the policy details, then scroll down to review the **Scope exclusions**. You will see Red Hat has already done some work for you to define some standard OpenShift cluster control plane deployments which do need the token mounted and are safely & intentionally excluded from the policy to save you time.
|
||||
4. The policy should already be enabled so let's click on **Violations** in the left hand menu to review any current instances where this policy is currently being violated. You should have one entry in the list for the `kube-rbac-proxy`. This is actually a standard openshift pod in the `openshift-machine-config-operator` namespace, and does actually require the api token mounted, so we could safely add this deployment to our policy exclusions.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Reviewing a policy & policy violations in Red Hat Advanced Cluster Security* |
|
||||
</Zoom>
|
||||
|
||||
At this point as a platform engineer we have some flexibility about how we handle this particular compliance check, one option would be to switch the **Pod Service Account Token Automatically Mounted** policy to `Inform & enforce` mode, to prevent any future deployments to any cluster in your fleet secured by RHACS from having this common misconfiguration. As a result of implementing this mitigation you could consider adjusting the compliance profile to remove or change the priority of this `Manual` check as desired. Refer to https://docs.openshift.com/container-platform/4.14/security/compliance_operator/co-scans/compliance-operator-tailor.html
|
||||
|
||||
## 5.4 - Review individual `Failed` compliance results
|
||||
|
||||
For our last task on this exercise let's review a `Failed` check, and apply the corresponding remediation automatically to improve our compliance posture.
|
||||
|
||||
This time, rather than using the RHACS Dashboard we'll review the check result and apply the remediation using our terminal and `oc` cli.
|
||||
|
||||
Let's start by retrieving one of our failed checks:
|
||||
|
||||
```bash
|
||||
oc get ComplianceCheckResult --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
Each `ComplianceCheckResult` represents a result of one compliance rule check. If the rule can be remediated automatically, a `ComplianceRemediation` object with the same name, owned by the `ComplianceCheckResult` is created. Unless requested, the remediations are not applied automatically, which gives an OpenShift Container Platform administrator the opportunity to review what the remediation does and only apply a remediation once it has been verified.
|
||||
|
||||
> Note: Not all `ComplianceCheckResult` objects create `ComplianceRemediation` objects. Only `ComplianceCheckResult` objects that can be remediated automatically do. A `ComplianceCheckResult` object has a related remediation if it is labeled with the `compliance.openshift.io/automated-remediation` label.
|
||||
|
||||
Let's inspect the corresponding `ComplianceRemediation` for this check:
|
||||
|
||||
```bash
|
||||
oc get ComplianceRemediation --namespace openshift-compliance ocp4-moderate-api-server-encryption-provider-cipher --output yaml
|
||||
```
|
||||
|
||||
You should see output similar to the example below. We can see in the `spec:` that it essentially contains a yaml resource patch for our `APIServer` resource named `cluster` to specify `spec.encryption.type` be set to `aescbc`.
|
||||
|
||||
```yaml
|
||||
apiVersion: compliance.openshift.io/v1alpha1
|
||||
kind: ComplianceRemediation
|
||||
metadata:
|
||||
annotations:
|
||||
compliance.openshift.io/xccdf-value-used: var-apiserver-encryption-type
|
||||
labels:
|
||||
compliance.openshift.io/scan-name: ocp4-moderate
|
||||
compliance.openshift.io/suite: daily-nist-800-53-moderate
|
||||
name: ocp4-moderate-api-server-encryption-provider-cipher
|
||||
namespace: openshift-compliance
|
||||
spec:
|
||||
apply: false
|
||||
current:
|
||||
object:
|
||||
apiVersion: config.openshift.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
encryption:
|
||||
type: aescbc
|
||||
outdated: {}
|
||||
type: Configuration
|
||||
status:
|
||||
applicationState: NotApplied
|
||||
```
|
||||
|
||||
Let's apply this automatic remediation now:
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance patch complianceremediation/ocp4-moderate-api-server-encryption-provider-cipher --patch '{"spec":{"apply":true}}' --type=merge
|
||||
```
|
||||
|
||||
> Note: This remediation has impacts for pods in the `openshift-apiserver` namespace. If you check those pods quickly with an `oc get pods --namespace openshift-apiserver` you will notice a rolling restart underway.
|
||||
|
||||
Now it's time for some instant gratification. Let's bring up this compliance check in our vnc browser tab with the RHACS dashboard open by going to: https://central-acs-central.apps.disco.lab/main/compliance/coverage/profiles/ocp4-moderate/checks/ocp4-moderate-api-server-encryption-provider-cipher?detailsTab=Results
|
||||
|
||||
You will see it currently shows as `Failed`. We can trigger a re-scan with the `oc` command below in our terminal:
|
||||
|
||||
> Note: Due to the api server rolling restart when this remediation was applied you may need to perform a fresh terminal login with `oc login https://api.disco.lab:6443 --username kubeadmin -p "$(more /mnt/high-side-data/auth/kubeadmin-password)" --insecure-skip-tls-verify=true`
|
||||
|
||||
```bash
|
||||
oc --namespace openshift-compliance annotate compliancescans/ocp4-moderate compliance.openshift.io/rescan=
|
||||
```
|
||||
|
||||
Hitting refresh, the check should now report `Pass`, and our overall percentage compliance against the baseline should have also now increased. Congratulations, time to move on to exercise 6 🚀
|
||||
|
||||
@ -1,174 +1,55 @@
|
||||
---
|
||||
title: Retrieving raw compliance results
|
||||
title: Inspecting audit logs
|
||||
exercise: 6
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','compliance','nist-800-53','scanning']
|
||||
date: '2024-10-31'
|
||||
tags: ['openshift','audit','logging']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Need to integrate results with another platform? No problem!"
|
||||
summary: "Ahh the classic who dunnit!?!??"
|
||||
---
|
||||
|
||||
Often organisations will have dedicated software for managing governance, risk and compliance or need to provide results to external auditors. In these situations while the dashboards within Red Hat Advanced Cluster Security, or `ComplianceCheckResult` objects in the OpenShift APIServer are helpful, what we really need to do is integrate these results into our third party compliance management platform or pass results in a standardised format to third parties.
|
||||
|
||||
In this exercise, we'll briefly step through retrieving raw compliance results, in the well known **Asset Reporting Framework** (ARF) format.
|
||||
You're about to finish up day three of the engagement at ACME and have the lid halfway closed on your ACME provided CrapPhablet7000™ laptop for the day when you hear it. An incoming Skype for Business call 😰
|
||||
|
||||
The Asset Reporting Format is a data model to express the transport format of information about assets, and the relationships between assets and reports. The standardized data model facilitates the reporting, correlating, and fusing of asset information throughout and between organizations. ARF is vendor and technology neutral, flexible, and suited for a wide variety of reporting applications.
|
||||
Here we go...
|
||||
|
||||
For more details on the format specification refer to https://www.nist.gov/publications/specification-asset-reporting-format-11
|
||||
Lifting the lid with a resigned sigh you answer. It's Angie. She's looking aggrieved and in a huff explains that someone has apparently deleted an important company project and she needs to figure out who. She's worried someone has permissions they shouldn't or there is an inside threat actor.
|
||||
|
||||
Fear not you tell Angie, Kubernetes auditing provides a security-relevant, chronological set of records documenting the sequence of actions in a cluster. The cluster audits the activities generated by users, by applications that use the Kubernetes API, and by the control plane itself.
|
||||
|
||||
So we just need to inspect the audit logs and we should be able to find our culprit!
|
||||
|
||||

|
||||
|
||||
|
||||
## 6.1 - Understanding raw result storage
|
||||
## 6.1 - Needle in a haystack
|
||||
|
||||
When the Compliance Operator runs a scan, raw results are stored in a `PersistentVolume`. The following `oc` command shows the mapping `PersistentVolume` name for a given scan name.
|
||||
On the call Angie starts sharing her screen and logging into the ACME Elasticsearch instance to query the audit logs but you interrupt her and explain that the cluster hasn't yet been configured to ship logs to an external aggregator.
|
||||
|
||||
Let's use our scan name that we set up previously, `daily-nist-800-53-moderate`:
|
||||
Despite this, you explain how the internal audit logs can still be queried using the `oc` CLI and fire up your own screen share to step her through how it's done.
|
||||
|
||||
```bash
|
||||
oc get --namespace openshift-compliance compliancesuites daily-nist-800-53-moderate --output json | jq '.status.scanStatuses[].resultsStorage'
|
||||
```
|
||||
The namespace Angie needs to query is `prd-acme-experimental`, can you track down our threat actor??
|
||||
|
||||
We should see results showing the name of each `PersistentVolume` for each profile that was scanned, below is an example:
|
||||
Documentation you may find helpful is:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "ocp4-moderate",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-master",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
{
|
||||
"name": "ocp4-moderate-node-worker",
|
||||
"namespace": "openshift-compliance"
|
||||
}
|
||||
```
|
||||
|
||||
We can view the details of these `PersistentVolumes` as follows:
|
||||
- https://docs.openshift.com/container-platform/4.17/security/audit-log-view.html
|
||||
|
||||
|
||||
```bash
|
||||
oc get pvc --namespace openshift-compliance ocp4-moderate
|
||||
```
|
||||
## 6.2 - Removing the culprit
|
||||
|
||||
With the culprit identified Angie is aghast to discover it was one of her colleagues in the ACME OpenShift Platform team.
|
||||
|
||||
Angie instructs you to remove their platform access immediately so that they can no longer log in to OpenShift while a formal investigation can be initiated to determine why the sensitive project was deleted.
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
- https://access.redhat.com/solutions/4039941
|
||||
|
||||
|
||||
## 6.2 - Retrieving results from a volume
|
||||
## 6.3 - Check your work
|
||||
|
||||
Let's retrieve some specific results files from a volume by mounting the volume into a pod, and then using `oc` to copy the volume contents to our highside ssh host.
|
||||
If you've successfully identified the culprit and removed their platform access please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
We can create a pod using the `rhel8/support-tools` additional image that was mirrored into our disconnected environment.
|
||||
> Please review [team name] solution for exercise 6, the culprit for the project deletion no longer has access to our OpenShift cluster.
|
||||
|
||||
> Note: Note the use of the pinned sha256 image digest below rather than standard image tags, this is a requirement of the mirroring process.
|
||||
|
||||
```bash
|
||||
cat << EOF | oc --namespace openshift-compliance apply --filename -
|
||||
apiVersion: "v1"
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pv-extract
|
||||
spec:
|
||||
containers:
|
||||
- name: pv-extract-pod
|
||||
image: registry.redhat.io/rhel8/support-tools@sha256:ab42416e9e3460f6c6adac4cf09013be6f402810fba452ea95bd717c3ab4076b
|
||||
command: ["sleep", "3000"]
|
||||
volumeMounts:
|
||||
- mountPath: "/ocp4-moderate-scan-results"
|
||||
name: ocp4-moderate-scan-vol
|
||||
volumes:
|
||||
- name: ocp4-moderate-scan-vol
|
||||
persistentVolumeClaim:
|
||||
claimName: ocp4-moderate
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: Spawning a pod that mounts the `PersistentVolume` will keep the claim as `Bound`. If the volume’s storage class in use has permissions set to `ReadWriteOnce`, the volume is only mountable by one pod at a time. You must delete the pod upon completion, or it will not be possible for the Operator to schedule a pod and continue storing results in this location.
|
||||
|
||||
With the volume mounted we can copy the results out to our machine:
|
||||
|
||||
```bash
|
||||
mkdir /mnt/high-side-data/compliance-results
|
||||
oc cp pv-extract:/ocp4-moderate-scan-results --namespace openshift-compliance .
|
||||
```
|
||||
|
||||
After the copy has completed we should delete our helper pod to unbind the volume:
|
||||
|
||||
```bash
|
||||
oc delete pod pv-extract --namespace openshift-compliance
|
||||
```
|
||||
|
||||
|
||||
## 6.3 - Reviewing raw result files
|
||||
|
||||
Now that we have a copy of the raw result files, let's see what they look like.
|
||||
|
||||
Starting with an `ls -lah` in our highside terminal we can see each scan result is stored in a numbered directory, yours should look similar to the example below:
|
||||
|
||||
```bash
|
||||
drwxr-xr-x. 5 lab-user lab-user 42 Sep 1 20:35 .
|
||||
drwxr-xr-x. 7 lab-user lab-user 4.0K Sep 1 20:28 ..
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 0
|
||||
drwxr-xr-x. 2 lab-user lab-user 52 Sep 1 20:35 1
|
||||
drwxr-xr-x. 2 lab-user lab-user 6 Sep 1 20:35 lost+found
|
||||
```
|
||||
|
||||
If we take a look at one of the specific directories with `ls -lah compliance-results/1/` we'll see an archive file:
|
||||
|
||||
```bash
|
||||
-rw-r--r--. 1 lab-user lab-user 251K Sep 1 20:35 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
```
|
||||
|
||||
Let's drop into that directory and extract it now to take a look at the contents, run the commands below in your highside ssh terminal:
|
||||
|
||||
> Note: If you get an error from the `bunzip2` command below you may need to first install it with `sudo yum install --yes bzip2`.
|
||||
|
||||
```bash
|
||||
cd /mnt/high-side-data/compliance-results/1
|
||||
bunzip2 ocp4-moderate-api-checks-pod.xml.bzip2
|
||||
mv ocp4-moderate-api-checks-pod.xml.bzip2.out ocp4-moderate-api-checks-pod.xml
|
||||
ls -lah
|
||||
```
|
||||
|
||||
Now we're getting somewhere, we can see we have an `.xml` file. Let's take a quick peek at the contents:
|
||||
|
||||
```bash
|
||||
head ocp4-moderate-api-checks-pod.xml
|
||||
```
|
||||
|
||||
You should see an xml document snippet similar to the example below:
|
||||
|
||||
```xml
|
||||
<core:relationships xmlns:arfvocab="http://scap.nist.gov/specifications/arf/vocabulary/relationships/1.0#">
|
||||
<core:relationship type="arfvocab:createdFor" subject="xccdf1">
|
||||
<core:ref>collection1</core:ref>
|
||||
</core:relationship>
|
||||
<core:relationship type="arfvocab:isAbout" subject="xccdf1">
|
||||
<core:ref>asset0</core:ref>
|
||||
</core:relationship>
|
||||
</core:relationships>
|
||||
```
|
||||
|
||||
|
||||
## 6.4 - Generating reports with openscap tooling
|
||||
|
||||
To finish off this exercise let's go one step further and use OpenSCAP tooling to generate an html based report we can open in our vnc Firefox browser.
|
||||
|
||||
Run the commands below in your high side terminal, we'll start by installing the `openscap-scanner` package.
|
||||
|
||||
```bash
|
||||
sudo yum install -y openscap-scanner
|
||||
```
|
||||
|
||||
Once the tooling is installed let's generate the report:
|
||||
|
||||
```bash
|
||||
oscap xccdf generate report ocp4-moderate-api-checks-pod.xml > report.html
|
||||
```
|
||||
|
||||
So far we've done all this on our high side terminal. We need to get this report artifact to our low side server where our Firefox vnc session is running, let's copy it out now:
|
||||
|
||||
```bash
|
||||
exit # Return to low side server
|
||||
rsync highside:/mnt/high-side-data/compliance-results/1/report.html /home/lab-user/Downloads/report.html
|
||||
```
|
||||
|
||||
Finally - we can open up our report in our web based Firefox vnc session! Once you've reviewed the report you can move on to exercise 7 🚀
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score 🎉
|
||||
|
||||
@ -1,76 +1,62 @@
|
||||
---
|
||||
title: Bonus - Making the most of rhacs
|
||||
title: Bonus challenge - Supply chain shmozzle
|
||||
exercise: 7
|
||||
date: '2024-09-02'
|
||||
tags: ['openshift','rhacs','container','security']
|
||||
date: '2024-11-08'
|
||||
tags: ['openshift','supply chain','rhtas']
|
||||
draft: false
|
||||
authors: ['default']
|
||||
summary: "Optional challenge - if you have time"
|
||||
summary: "Time to sign your life away..."
|
||||
---
|
||||
|
||||
So you've deployed Red Hat Advanced Cluster Security and completed some day one configuration. Now what?? One of the key day two activities for RHACS in a disconnected environment is ensuring you can keep the vulnerability database up to date.
|
||||
|
||||
At a high level, the RHACS **Scanner** component maintains a database of vulnerabilities. When Red Hat Advanced Cluster Security for Kubernetes (RHACS) runs in normal mode, **Central** retrieves the latest vulnerability data from the internet, and Scanner retrieves vulnerability data from Central.
|
||||
Whew - it's the last day of this weeks scheduled engagement 🥱. Tomorrow you're on leave to play the new Factorio Space Age expansion and you can't wait!
|
||||
|
||||
However, if you are using RHACS in offline mode, **you must manually update the vulnerability data**. To manually update the vulnerability data, you must upload a definitions file to Central, and Scanner then retrieves the vulnerability data from Central.
|
||||
Brushing aside thoughts of grandiose factories you review the task list for today. Top of the list is ironically a core component of [software factories](https://www.redhat.com/en/resources/benefits-building-software-factory-with-openshift-overview), addressing a supply chain security requirement from Brent about introducing capability to sign artifacts on premises and store this metadata in a secure tamper proof ledger.
|
||||
|
||||
In both online and offline mode, Scanner checks for new data from Central every `5` minutes by default. In online mode, Central also checks for new data from the internet approximately every `5-20` minutes.
|
||||
As part of the $5m AUD deal the sales team included [Red Hat Trusted Artifact Signer (RHTAS)](https://access.redhat.com/products/red-hat-trusted-artifact-signer) to enhance software supply chain security by simplifying cryptographic signing and verifying of software artifacts, such as container images, binaries, and Git commits.
|
||||
|
||||
The offline data source is updated approximately every 3 hours. After the data has been uploaded to Central, Scanner downloads the data and updates its local vulnerability database.
|
||||
Brent is keen to get this up and running ASAP as the bank have planned to implement this capability for the prior 6 years in various forms, but always been "busy" with other things.
|
||||
|
||||
Nothing to it but to do it!
|
||||
|
||||
|
||||
## 7.1 - Update rhacs definitions with roxctl
|
||||
## 7.1 - Deploy the signing platform
|
||||
|
||||
To update the definitions in offline mode, perform the following steps:
|
||||
Brent's JIRA ticket explains that the signing platform should be deployed to the `prd-acme-rhtas` namespace on the production cluster.
|
||||
|
||||
1. Download the definitions.
|
||||
2. Upload the definitions to Central.
|
||||
|
||||
As a challenge, try following the documentation https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/configuring/enable-offline-mode#download-scanner-definitions_enable-offline-mode to perform the update.
|
||||
|
||||
> Note: I suggest exploring `roxctl` CLI as the method for downloading updates in your low side environment. You could then copy both `roxctl` and the definitions update to your high side environment and use `roxtctl` once more (this time with an API token) in order to update the definitions.
|
||||
|
||||
|
||||
## 7.2 - Prioritise security remediation by risk
|
||||
|
||||
Completed your vulnerability definitions update? Awesome! Feel free to explore some of the other features of Red Hat Advanced Cluster Security using your web based vnc session and the RHACS dashboard.
|
||||
|
||||
Let’s take a look at the **Risk** view, where we go beyond the basics of vulnerabilities to understand how deployment configuration and runtime activity impact the likelihood of an exploit occurring and how successful those exploits will be.
|
||||
> **Note** Teams are free to use any OIDC provider from the options of Red Hat Single Sign-on (SSO), Google, Amazon Secure Token Service (STS), or GitHub. Think carefully which option you pick as this will impact how long it takes to complete the exercise...
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Understanding risk exposure in Red Hat Advanced Cluster Security* |
|
||||
| *Installing the Red Hat Trusted Artifact Signer operator* |
|
||||
</Zoom>
|
||||
|
||||
Risk is also influenced by runtime activity - and Deployments that have activity that could indicate a breach in progress have a red dot on the left. Obviously - the first one in the list should be our first focus.
|
||||
Documentation you may find helpful is:
|
||||
|
||||
The reality of security is that it’s just not possible to tackle all sources of Risk, so organizations end up prioritizing their efforts. We want RHACS to help inform that prioritization.
|
||||
|
||||
As a challenge have a go at mirroring and deploying a new additional container image into your disconnected environment repeating steps we completed earlier. Try creating a deployment for that image to bring it up on your cluster, the **Developer** perspective in the OpenShift Web Console can save you some time here.
|
||||
|
||||
Once the container is running, use the RHACS dashboard to check what the deployments risk level is? What are the factors contributing to that?
|
||||
- https://docs.redhat.com/en/documentation/red_hat_trusted_artifact_signer/1/html-single/deployment_guide/index#installing-trusted-artifact-signer-using-the-operator-lifecycle-manager_deploy
|
||||
- https://developers.redhat.com/learning/learn:install-sign-verify-using-red-hat-trusted-artifact-signer/resource/resources:install-and-deploy-red-hat-trusted-artifact-signer
|
||||
|
||||
|
||||
## 7.3 - Exploring the rhacs policy engine
|
||||
## 7.2 - Sign a container image
|
||||
|
||||
Red Hat Advanced Cluster Security for Kubernetes allows you to use out-of-the-box security policies and define custom multi-factor policies for your container environment.
|
||||
|
||||
Configuring these policies enables you to automatically prevent high-risk service deployments in your environment and respond to runtime security incidents.
|
||||
|
||||
All of the policies that ship with the product are designed with the goal of providing targeted remediation that improves security hardening.
|
||||
|
||||
Take some time to review the default policies by clicking **Platform Configuration** > **Policy Management**. You’ll see this list contains many **Build** and **Deploy** time policies to catch misconfigurations early in the pipeline, but also **Runtime** policies that point back to specific hardening recommendations.
|
||||
|
||||
These policies come from us at Red Hat - our expertise, our interpretation of industry best practice, and our interpretation of common compliance standards, but you can modify them or create your own.
|
||||
|
||||
If you have some time take a look at the options for editing default policies to change their enforcement behavior or scope.
|
||||
To test the platform out you join a quick call with Brent to walk him through how to sign a local container image with `cosign` and then inspect the hash in the Rekor immutable ledger web interface.
|
||||
|
||||
<Zoom>
|
||||
| |
|
||||
| |
|
||||
|:-----------------------------------------------------------------------------:|
|
||||
| *Policy management in Red Hat Advanced Cluster Security* |
|
||||
| *Searching for a record in Rekor* |
|
||||
</Zoom>
|
||||
|
||||
Documentation you may find helpful is:
|
||||
|
||||
If you're ready for a different topic, head over to Exercise 8, for the final tasks today to deploy Red Hat Developer Hub 🙂
|
||||
- https://docs.redhat.com/en/documentation/red_hat_trusted_artifact_signer/1/html-single/deployment_guide/index#signing-and-verifying-containers-by-using-cosign-from-the-command-line-interface_deploy
|
||||
|
||||
|
||||
## 7.3 - Check your work
|
||||
|
||||
If you've successfully deployed a secure signing platform and showed Brent how it worked please post in `#event-anz-ocp-security-hackathon` with the message:
|
||||
|
||||
> Please review [team name] solution for exercise 7, our Rekor record is [url].
|
||||
|
||||
This exercise is worth `25` points. The event team will reply in slack to confirm your updated team total score. Congratulations if you have reached this point you have completed the entire hackathon! 🎉
|
||||
|
||||
10099
package-lock.json
generated
54
package.json
@ -16,54 +16,54 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@fontsource/inter": "4.5.2",
|
||||
"@next/bundle-analyzer": "^14.2.11",
|
||||
"@next/bundle-analyzer": "^15.3.4",
|
||||
"@tailwindcss/forms": "^0.5.9",
|
||||
"@tailwindcss/typography": "^0.5.15",
|
||||
"autoprefixer": "^10.4.20",
|
||||
"esbuild": "^0.14.0",
|
||||
"autoprefixer": "^10.4.21",
|
||||
"esbuild": "^0.25.5",
|
||||
"github-slugger": "^2.0.0",
|
||||
"gray-matter": "^4.0.3",
|
||||
"image-size": "1.1.1",
|
||||
"mdx-bundler": "^8.0.0",
|
||||
"next": "^14.2.11",
|
||||
"next-themes": "^0.3.0",
|
||||
"postcss": "^8.4.47",
|
||||
"preact": "^10.24.0",
|
||||
"image-size": "1.2.0",
|
||||
"mdx-bundler": "^10.1.1",
|
||||
"next": "^15.3.4",
|
||||
"next-themes": "^0.4.6",
|
||||
"postcss": "^8.5.6",
|
||||
"preact": "^10.26.9",
|
||||
"react": "18.3.1",
|
||||
"react-dom": "18.3.1",
|
||||
"react-medium-image-zoom": "^4.3.5",
|
||||
"reading-time": "1.5.0",
|
||||
"rehype-autolink-headings": "^7.1.0",
|
||||
"rehype-citation": "^2.1.1",
|
||||
"rehype-citation": "^2.3.1",
|
||||
"rehype-katex": "^7.0.1",
|
||||
"rehype-preset-minify": "7.0.0",
|
||||
"rehype-prism-plus": "^2.0.0",
|
||||
"rehype-preset-minify": "7.0.1",
|
||||
"rehype-prism-plus": "^2.0.1",
|
||||
"rehype-slug": "^6.0.0",
|
||||
"remark-footnotes": "^4.0.1",
|
||||
"remark-gfm": "^3.0.1",
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.33.5",
|
||||
"tailwindcss": "^3.4.11",
|
||||
"sharp": "^0.34.2",
|
||||
"tailwindcss": "^3.4.17",
|
||||
"unist-util-visit": "^5.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@svgr/webpack": "^8.1.0",
|
||||
"cross-env": "^7.0.3",
|
||||
"dedent": "^1.5.3",
|
||||
"eslint": "^8.0.0",
|
||||
"eslint-config-next": "14.2.11",
|
||||
"eslint-config-prettier": "^9.1.0",
|
||||
"eslint-plugin-prettier": "^5.2.1",
|
||||
"dedent": "^1.6.0",
|
||||
"eslint": "^9.30.1",
|
||||
"eslint-config-next": "15.3.4",
|
||||
"eslint-config-prettier": "^10.1.5",
|
||||
"eslint-plugin-prettier": "^5.5.1",
|
||||
"file-loader": "^6.2.0",
|
||||
"globby": "14.0.2",
|
||||
"husky": "^9.1.6",
|
||||
"inquirer": "^11.0.1",
|
||||
"lint-staged": "^15.2.10",
|
||||
"globby": "14.1.0",
|
||||
"husky": "^9.1.7",
|
||||
"inquirer": "^12.7.0",
|
||||
"lint-staged": "^16.1.2",
|
||||
"next-remote-watch": "^2.0.0",
|
||||
"prettier": "^3.3.3",
|
||||
"prettier-plugin-tailwindcss": "^0.6.6",
|
||||
"socket.io": "^4.7.5",
|
||||
"socket.io-client": "^4.7.5"
|
||||
"prettier": "^3.6.2",
|
||||
"prettier-plugin-tailwindcss": "^0.6.13",
|
||||
"socket.io": "^4.8.1",
|
||||
"socket.io-client": "^4.8.1"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.+(js|jsx|ts|tsx)": [
|
||||
|
||||
@ -1,93 +1,83 @@
|
||||
|
||||
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
|
||||
<channel>
|
||||
<title>Red Hat OpenShift Security & Compliance Workshop</title>
|
||||
<title>Red Hat OpenShift Security Hackathon</title>
|
||||
<link>https://rhdemo.win/workshop</link>
|
||||
<description>Red Hat OpenShift Security & Compliance Workshop</description>
|
||||
<description>Red Hat OpenShift Security Hackathon</description>
|
||||
<language>en-us</language>
|
||||
<managingEditor>jablair@redhat.com (Red Hat)</managingEditor>
|
||||
<webMaster>jablair@redhat.com (Red Hat)</webMaster>
|
||||
<lastBuildDate>Thu, 22 Aug 2024 00:00:00 GMT</lastBuildDate>
|
||||
<lastBuildDate>Mon, 14 Oct 2024 00:00:00 GMT</lastBuildDate>
|
||||
<atom:link href="https://rhdemo.win/feed.xml" rel="self" type="application/rss+xml"/>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise1</guid>
|
||||
<title>Understanding our lab environment</title>
|
||||
<title>Understanding our hackathon environment</title>
|
||||
<link>https://rhdemo.win/workshop/exercise1</link>
|
||||
<description>Let's get familiar with our lab setup.</description>
|
||||
<pubDate>Thu, 22 Aug 2024 00:00:00 GMT</pubDate>
|
||||
<description>Let's get familiar with our hackathon setup.</description>
|
||||
<pubDate>Mon, 14 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>ssh</category><category>novnc</category><category>workshop</category><category>setup</category>
|
||||
<category>openshift</category><category>security</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise2</guid>
|
||||
<title>Mirror required content</title>
|
||||
<title>Laying the foundations for cluster security</title>
|
||||
<link>https://rhdemo.win/workshop/exercise2</link>
|
||||
<description>You want features? Mirror them in!🪞</description>
|
||||
<pubDate>Fri, 23 Aug 2024 00:00:00 GMT</pubDate>
|
||||
<description>Can't have security without a security platform</description>
|
||||
<pubDate>Thu, 17 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>oc-mirror</category><category>mirror-registry</category><category>openshift</category><category>disconnected</category>
|
||||
<category>openshift</category><category>security</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise3</guid>
|
||||
<title>Install operators on a disconnected cluster</title>
|
||||
<title>Encrypting cluster internal network traffic</title>
|
||||
<link>https://rhdemo.win/workshop/exercise3</link>
|
||||
<description>Operators?!? 🤔 - Think app store for Kubernetes 🌟</description>
|
||||
<pubDate>Tue, 27 Aug 2024 00:00:00 GMT</pubDate>
|
||||
<description>Is OpenShift secure by default?</description>
|
||||
<pubDate>Fri, 18 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>operators</category><category>operator-hub</category><category>disconnected</category>
|
||||
<category>openshift</category><category>security</category><category>ipsec</category><category>encryption</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise4</guid>
|
||||
<title>Deploy advanced cluster security</title>
|
||||
<title>Securing vulnerable workloads</title>
|
||||
<link>https://rhdemo.win/workshop/exercise4</link>
|
||||
<description>Time to up our security & compliance game! 🔒</description>
|
||||
<pubDate>Sat, 31 Aug 2024 00:00:00 GMT</pubDate>
|
||||
<description>How do we deal with vulnerable workloads we can't patch?</description>
|
||||
<pubDate>Sat, 19 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>rhacs</category><category>container</category><category>security</category>
|
||||
<category>openshift</category><category>security</category><category>cve management</category><category>rhacs</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise5</guid>
|
||||
<title>Running a cluster compliance scan</title>
|
||||
<title>Understanding cluster compliance</title>
|
||||
<link>https://rhdemo.win/workshop/exercise5</link>
|
||||
<description>Let's check our cluster compliance against NIST 800-53 👀</description>
|
||||
<pubDate>Sun, 01 Sep 2024 00:00:00 GMT</pubDate>
|
||||
<description>Let's apply an industry benchmark!</description>
|
||||
<pubDate>Wed, 23 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>compliance</category><category>nist-800-53</category><category>scanning</category>
|
||||
<category>openshift</category><category>compliance</category><category>nist</category><category>rhacs</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise6</guid>
|
||||
<title>Retrieving raw compliance results</title>
|
||||
<title>Inspecting audit logs</title>
|
||||
<link>https://rhdemo.win/workshop/exercise6</link>
|
||||
<description>Need to integrate results with another platform? No problem!</description>
|
||||
<pubDate>Mon, 02 Sep 2024 00:00:00 GMT</pubDate>
|
||||
<description>Ahh the classic who dunnit!?!??</description>
|
||||
<pubDate>Thu, 31 Oct 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>compliance</category><category>nist-800-53</category><category>scanning</category>
|
||||
<category>openshift</category><category>audit</category><category>logging</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise7</guid>
|
||||
<title>Bonus - Making the most of rhacs</title>
|
||||
<title>Bonus challenge - Supply chain shmozzle</title>
|
||||
<link>https://rhdemo.win/workshop/exercise7</link>
|
||||
<description>Optional challenge - if you have time</description>
|
||||
<pubDate>Mon, 02 Sep 2024 00:00:00 GMT</pubDate>
|
||||
<description>Time to sign your life away...</description>
|
||||
<pubDate>Fri, 08 Nov 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>rhacs</category><category>container</category><category>security</category>
|
||||
</item>
|
||||
|
||||
<item>
|
||||
<guid>https://rhdemo.win/workshop/exercise8</guid>
|
||||
<title>Bonus - Installing red hat developer hub</title>
|
||||
<link>https://rhdemo.win/workshop/exercise8</link>
|
||||
<description>Upping our dx in a disconnected environment</description>
|
||||
<pubDate>Mon, 02 Sep 2024 00:00:00 GMT</pubDate>
|
||||
<author>jablair@redhat.com (Red Hat)</author>
|
||||
<category>openshift</category><category>backstage</category><category>developer-hub</category><category>operator</category>
|
||||
<category>openshift</category><category>supply chain</category><category>rhtas</category>
|
||||
</item>
|
||||
|
||||
</channel>
|
||||
|
||||
BIN
public/static/images/security/acme.png
Normal file
|
After Width: | Height: | Size: 1.6 MiB |
BIN
public/static/images/security/audit-logs.png
Normal file
|
After Width: | Height: | Size: 251 KiB |
BIN
public/static/images/security/brent.png
Normal file
|
After Width: | Height: | Size: 1.7 MiB |
BIN
public/static/images/security/central.png
Normal file
|
After Width: | Height: | Size: 183 KiB |
BIN
public/static/images/security/cluster-network.png
Normal file
|
After Width: | Height: | Size: 124 KiB |
BIN
public/static/images/security/cluster.png
Normal file
|
After Width: | Height: | Size: 204 KiB |
BIN
public/static/images/security/completed.png
Normal file
|
After Width: | Height: | Size: 146 KiB |
BIN
public/static/images/security/hack-prevented.png
Normal file
|
After Width: | Height: | Size: 229 KiB |
BIN
public/static/images/security/init-bundle.png
Normal file
|
After Width: | Height: | Size: 161 KiB |
BIN
public/static/images/security/ipsec.png
Normal file
|
After Width: | Height: | Size: 62 KiB |
BIN
public/static/images/security/meeting.png
Normal file
|
After Width: | Height: | Size: 176 KiB |
BIN
public/static/images/security/pairing.png
Normal file
|
After Width: | Height: | Size: 1.7 MiB |
BIN
public/static/images/security/panik.png
Normal file
|
After Width: | Height: | Size: 1.8 MiB |
BIN
public/static/images/security/rekor.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
public/static/images/security/report.png
Normal file
|
After Width: | Height: | Size: 260 KiB |
BIN
public/static/images/security/rhtas.png
Normal file
|
After Width: | Height: | Size: 304 KiB |
BIN
public/static/images/security/secured-cluster.png
Normal file
|
After Width: | Height: | Size: 160 KiB |
BIN
public/static/images/security/workshop.png
Normal file
|
After Width: | Height: | Size: 725 KiB |