Remove defunct ssh key instructions and reference kubeconfigs.
@@ -63,16 +63,6 @@ Open the [[https://console.redhat.com/openshift/create/local][Console]] and clic
 Once the file downloads ensure it is copied or moved to the directory you will be running the remaining commands on this guide from.
 
 
-** 3.3 Create ssh key
-
-For access to our soon to be created cluster nodes we need an ssh key, let's generate those now via ~ssh-keygen~.
-
-#+begin_src tmux
-ssh-keygen -t rsa -b 4096 -f ~/.ssh/hubkey -q -N "" <<< y
-ssh-keygen -t rsa -b 4096 -f ~/.ssh/snokey -q -N "" <<< y
-#+end_src
-
-
 ** 3.3 Initiate the hub cluster install
 
 Once our install tooling is available, let's kick off the installation of our hub cluster by creating a configuration file and then running ~openshift-install~.
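
For readers following along, that step boils down to something like the following sketch (the ~hub~ directory name is an inference from the ~hub/auth/kubeconfig~ paths referenced throughout this diff; ~install-config.yaml~ is the installer's standard config file name):

#+begin_src tmux
# The installer consumes the config from --dir and writes
# cluster credentials to <dir>/auth/kubeconfig when done.
mkdir hub
cp install-config.yaml hub/
openshift-install create cluster --dir hub
#+end_src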
@@ -179,9 +169,9 @@ To make use of the Red Hat Advanced Cluster Management Observability feature we
 Let's get started by creating an ~OperatorGroup~ and ~Subscription~ which will install the operator.
 
 #+begin_src tmux
-oc create namespace open-cluster-management
+oc --kubeconfig hub/auth/kubeconfig create namespace open-cluster-management
 
-cat << EOF | oc apply --filename -
+cat << EOF | oc --kubeconfig hub/auth/kubeconfig apply --filename -
 apiVersion: operators.coreos.com/v1
 kind: OperatorGroup
 metadata:
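
While the ~Subscription~ does its work, install progress can be checked by watching the operator's ~ClusterServiceVersion~ appear (a sketch; the exact CSV name and version will vary):

#+begin_src tmux
# Succeeded in the PHASE column means the operator is installed.
oc --kubeconfig hub/auth/kubeconfig get csv -n open-cluster-management
#+end_src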
@@ -212,7 +202,7 @@ Once the operator is installed we can create the ~MultiClusterHub~ resource to i
 Note: It can take up to ten minutes for this to complete.
 
 #+begin_src tmux
-cat << EOF | oc apply --filename -
+cat << EOF | oc --kubeconfig hub/auth/kubeconfig apply --filename -
 apiVersion: operator.open-cluster-management.io/v1
 kind: MultiClusterHub
 metadata:
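
Given the ten-minute note above, a simple way to poll for completion is to check the resource's status (a sketch; a healthy install should eventually report a ~Running~ phase):

#+begin_src tmux
# STATUS should settle on Running once the hub components are up.
oc --kubeconfig hub/auth/kubeconfig get multiclusterhub -n open-cluster-management
#+end_src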
@@ -230,17 +220,17 @@ Now, with our clusters deployed and acm installed we can enable the observabilit
 Our first step towards this is to create two secrets.
 
 #+begin_src tmux
-oc create namespace open-cluster-management-observability
+oc --kubeconfig hub/auth/kubeconfig create namespace open-cluster-management-observability
 
-DOCKER_CONFIG_JSON=`oc extract secret/pull-secret -n openshift-config --to=-`
+DOCKER_CONFIG_JSON=`oc --kubeconfig hub/auth/kubeconfig extract secret/pull-secret -n openshift-config --to=-`
 
-oc create secret generic multiclusterhub-operator-pull-secret \
+oc --kubeconfig hub/auth/kubeconfig create secret generic multiclusterhub-operator-pull-secret \
     -n open-cluster-management-observability \
     --from-literal=.dockerconfigjson="$DOCKER_CONFIG_JSON" \
     --type=kubernetes.io/dockerconfigjson
 
 
-cat << EOF | oc apply --filename -
+cat << EOF | oc --kubeconfig hub/auth/kubeconfig apply --filename -
 apiVersion: v1
 kind: Secret
 metadata:
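
Before moving on it may be worth confirming both secrets landed in the namespace (a sketch):

#+begin_src tmux
# Expect multiclusterhub-operator-pull-secret alongside the second
# secret from the truncated YAML above.
oc --kubeconfig hub/auth/kubeconfig get secrets -n open-cluster-management-observability
#+end_src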
@@ -263,7 +253,7 @@ EOF
 Once the two required secrets exist we can create the ~MultiClusterObservability~ resource as follows:
 
 #+begin_src tmux
-cat << EOF | oc apply --filename -
+cat << EOF | oc --kubeconfig hub/auth/kubeconfig apply --filename -
 apiVersion: observability.open-cluster-management.io/v1beta2
 kind: MultiClusterObservability
 metadata:
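
After applying the resource, the observability stack can be watched as its pods come up (a sketch):

#+begin_src tmux
oc --kubeconfig hub/auth/kubeconfig get pods -n open-cluster-management-observability
#+end_src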
@@ -280,19 +270,19 @@ EOF
 After creating the resource and waiting briefly we can access the grafana console via the ~Route~ to confirm everything is running:
 
 #+begin_src tmux
-echo "https://$(oc get route -n open-cluster-management-observability grafana -o jsonpath={.spec.host})"
+echo "https://$(oc --kubeconfig hub/auth/kubeconfig get route -n open-cluster-management-observability grafana -o jsonpath={.spec.host})"
 #+end_src
 
 
 * 6 - Import the single node openshift cluster into acm
 
 #+begin_src tmux
-oc new-project sno
-oc label namespace sno cluster.open-cluster-management.io/managedCluster=sno
+oc --kubeconfig hub/auth/kubeconfig new-project sno
+oc --kubeconfig hub/auth/kubeconfig label namespace sno cluster.open-cluster-management.io/managedCluster=sno
 #+end_src
 
 #+begin_src tmux
-cat << EOF | oc apply --filename -
+cat << EOF | oc --kubeconfig hub/auth/kubeconfig apply --filename -
 apiVersion: cluster.open-cluster-management.io/v1
 kind: ManagedCluster
 metadata:
@@ -330,14 +320,14 @@ The ManagedCluster-Import-Controller will generate a secret named ~sno-import~.
 
 
 #+begin_src tmux
-oc get secret sno-import -n sno -o jsonpath={.data.crds\\.yaml} | base64 --decode > klusterlet-crd.yaml
-oc get secret sno-import -n sno -o jsonpath={.data.import\\.yaml} | base64 --decode > import.yaml
+oc --kubeconfig hub/auth/kubeconfig get secret sno-import -n sno -o jsonpath={.data.crds\\.yaml} | base64 --decode > klusterlet-crd.yaml
+oc --kubeconfig hub/auth/kubeconfig get secret sno-import -n sno -o jsonpath={.data.import\\.yaml} | base64 --decode > import.yaml
 
 oc --kubeconfig sno/auth/kubeconfig apply --filename klusterlet-crd.yaml
 oc --kubeconfig sno/auth/kubeconfig apply --filename import.yaml
 #+end_src
 
-If everything works fine you should see JOINED and AVAILABLE sno cluster from within your hub cluster
+If everything works fine you should see a ~JOINED~ and ~AVAILABLE~ sno cluster from within your hub cluster.
 
 #+begin_src tmux
 ❯ kubectl get managedcluster -n sno
@@ -346,11 +336,14 @@ local-cluster   true           https://api.hub.<yourdomain>.com:6443
 sno             true           https://api.cluster-vzmvz.<yourdomain>.com:6443                True     True        31m
 #+end_src
 
-* 7 - Creating the edge workload on SNO
-For edge scenarios we only send metrics to the hub cluster if certain thresholds are hit for a certain period of time (here 70% for more than 2 minutes - you can see this configuration in the open-cluster-management-addon-observability namespace under ConfigMaps observability-metrics-allowlist in the collect_rules section under SNOHighCPUUsage).
+* 7 - Creating the edge workload
+
+For edge scenarios we only send metrics to the hub cluster if certain thresholds are hit for a certain period of time (here ~70%~ CPU for more than 2 minutes) - you can see this configuration in the ~open-cluster-management-addon-observability~ namespace under the ~observability-metrics-allowlist~ ConfigMap, in the ~collect_rules~ section under ~SNOHighCPUUsage~.
+
+In order to hit that trigger we now deploy a cpu-heavy workload so that sno-cluster metrics are sent to the ACM hub cluster.
 
 Let's get started by creating a new project on the sno cluster:
 
 #+begin_src tmux
 oc new-project cpu-load-test
 #+end_src
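
The ~SNOHighCPUUsage~ collect rule mentioned above lives on the managed cluster, so it can be inspected with the sno kubeconfig used earlier in this diff (a sketch):

#+begin_src tmux
oc --kubeconfig sno/auth/kubeconfig get configmap observability-metrics-allowlist \
    -n open-cluster-management-addon-observability -o yaml
#+end_src

Since the diff ends right after the project is created, a cpu-heavy workload could be as simple as a busy-loop ~Deployment~ (purely illustrative - the guide's actual workload is not shown here; scaling ~replicas~ toward the node's core count pushes sustained usage past the 70% threshold):

#+begin_src tmux
cat << EOF | oc --kubeconfig sno/auth/kubeconfig apply --filename -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cpu-load
  namespace: cpu-load-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cpu-load
  template:
    metadata:
      labels:
        app: cpu-load
    spec:
      containers:
      - name: spin
        image: registry.access.redhat.com/ubi9/ubi-minimal
        # Busy loop: pegs one core to drive sustained cpu usage.
        command: ["/bin/sh", "-c", "while true; do :; done"]
EOF
#+end_src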