Working progressive migration demo.
@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: payment-processor
  name: payment-processor
spec:
  replicas: 3
  selector:
    matchLabels:
      app: payment-processor
  template:
    metadata:
      labels:
        app: payment-processor
    spec:
      containers:
      - name: payment-processor
        image: quay.io/skupper/patient-portal-payment-processor
        ports:
        - containerPort: 8080

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: payment-processor
  name: payment-processor
  namespace: demo-onprem
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: payment-processor
  sessionAffinity: None
  type: ClusterIP
@@ -0,0 +1,40 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: database
  name: database
spec:
  replicas: 1
  selector:
    matchLabels:
      app: database
  template:
    metadata:
      labels:
        app: database
    spec:
      containers:
      - name: database
        image: quay.io/skupper/patient-portal-database
        ports:
        - containerPort: 5432

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: database
  name: database
  namespace: demo-onprem
spec:
  ports:
  - port: 5432
    protocol: TCP
    targetPort: 5432
  selector:
    app: database
  sessionAffinity: None
  type: ClusterIP
@@ -0,0 +1,46 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: frontend
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: frontend
        image: quay.io/skupper/patient-portal-frontend
        env:
        - name: DATABASE_SERVICE_HOST
          value: database
        - name: DATABASE_SERVICE_PORT
          value: "5432"
        - name: PAYMENT_PROCESSOR_SERVICE_HOST
          value: payment-processor
        - name: PAYMENT_PROCESSOR_SERVICE_PORT
          value: "8080"
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: frontend
  name: frontend
spec:
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: frontend
  type: ClusterIP
@@ -0,0 +1,14 @@
apiVersion: v1
data:
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lRU1Q3UXlDNjBDaTcxSTBZYmlPWU1QREFOQmdrcWhraUc5dzBCQVFzRkFEQWEKTVJnd0ZnWURWUVFERXc5emEzVndjR1Z5TFhOcGRHVXRZMkV3SGhjTk1qTXdNakl4TURnME9UQTJXaGNOTWpndwpNakl3TURnME9UQTJXakFhTVJnd0ZnWURWUVFERXc5emEzVndjR1Z5TFhOcGRHVXRZMkV3Z2dFaU1BMEdDU3FHClNJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUNsZDVIdXNmY1VVRkQwV3ROS3BIU0dnRmk1T2dkdSt1em8KMHQwQllRcnBHYVJHVlhUenZLR1RMVmVBbnZGWTJ0QjJEQkoyd2xEVFNjL1Q3YUFDaFhXTnhIcEdyU1c3TmZ6dwpLenNPWnNLa1k1TEtHbC9NTGhlRWU2UnZoUnlYTXBGRXAxQUpuQlRvWmhwYUJncmFWdlZOUGxBK3krNjRrQ0tiCmhjQXZQdjd3OTFHTE8zZ0ZlbWlpbWdxOGw2alRKOGJiMlVGVG1EZUczUkZOd01VL2pPOGN6WGRnMXNNbE5TM2sKSDRHQmFTcVVCTTN4TkRqcTRDaWozcHRBRFhNbGhpalNxSldlSEF1SnBSakRoM1E1eWlRb0c0QjJxZ0Y5dUVqQQpaQzJiNFRuQVBwWG5SYnc5ZnVzTVNjc2Flb0hvTzgwODJQTmR2VUJqUGFzYzE0dk9zRDZwQWdNQkFBR2piakJzCk1BNEdBMVVkRHdFQi93UUVBd0lDcERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjREFRWUlLd1lCQlFVSEF3SXcKRHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVNkwyTmhBVktNOWY0WDZOOFNNbkExTTlFazJFdwpDd1lEVlIwUkJBUXdBb0lBTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUUJhZ0g1bDkwVjNFam51Z3JrNWIwCldiVmRReFNMSVZ5WDBPcStYRVNvL0RGOElFUlFBdEVHMytqRS9uZlFLM1lVZk1PTzBNUllVN1p4RjNPeVVURVYKRVZ0UCtNL3B5UjZHVnFmYlVvSTYrcTFqMEloUHdEYnVROVl1cGdIU1Vkb2k3dUVyMmFoK2tyWU8zT1RhVDNjSgpyUG5DRndwTTBDcGhwRW5mRjhRcGdDVDI2TURzYzNBYjRFUnpRUVFubFpSanAvTVV6REZqVEtOVklnTUhEOWNtCjlUOFIvTlhpQ2kzT2xJTlZuN05wVkNnSUw5ZXVLTGpKb1o1NFVvZklnWVlKQWpYUGx5Ym9EcDRtZ2l0STNzNUsKaU1mcHYxR3dMVXlyODRnVzM1THFGeHh5aE00Qmw5M2ZkTndzSGliSWYrY2NSNDE0UDhxUm9teEJQdFNLYU9aZQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  password: SDJrMFdDamU4Z1JuZVoyaE5ySlV0ZTg2
kind: Secret
metadata:
  annotations:
    skupper.io/generated-by: 154bd5bc-6d1c-4f38-8805-d30c848fb948
    skupper.io/site-version: 1.2.3
    skupper.io/url: https://claims-demo-public.apps.rosa-mgmwm.c4s2.p1.openshiftapps.com:443/be666c93-b1c9-11ed-83a2-f44637808126
  creationTimestamp: null
  labels:
    skupper.io/type: token-claim
  name: be666c93-b1c9-11ed-83a2-f44637808126
@@ -17,7 +17,7 @@ For our first demo we will highlight the possibility of progressive migrations,

** Install skupper cli

The skupper command-line tool is the primary entry point for installing and configuring the Skupper infrastructure. You only need to install the skupper cli once for each development environment.

We can use the provided install script to install skupper:
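
The install block itself is unchanged by this commit (it appears as context on the next hunk header); for reference, a minimal sketch of it, using the same tmate socket as the other blocks in this document:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Download and run the skupper install script, then confirm the cli is available
curl https://skupper.io/install.sh | sh && skupper version
#+end_src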
@@ -29,16 +29,9 @@ curl https://skupper.io/install.sh | sh && skupper version

** Deploy demo workload on premises

Before we get into deploying skupper, let's get familiar with our demo workload: a traditional three-tier, container-based application for a medical clinic, consisting of a PostgreSQL database, a Java backend service and a web frontend.

#+NAME: Deploy demo workload on premises
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Set kubeconfig
export KUBECONFIG=$HOME/.kube/config
@@ -47,7 +40,30 @@
kubectl create namespace demo-onprem --dry-run=client -o yaml | kubectl apply -f -
kubectl config set-context --current --namespace demo-onprem

# Create deployments and services
kubectl create -f 1-progressive-migration/database.yaml
kubectl rollout status deployment/database

kubectl create -f 1-progressive-migration/backend.yaml
kubectl rollout status deployment/payment-processor

kubectl create -f 1-progressive-migration/frontend.yaml
kubectl rollout status deployment/frontend

# Start port forward
kubectl port-forward --pod-running-timeout=10s deployment/frontend 8080 &

# Launch application in browser
flatpak run org.chromium.Chromium --new-window "http://localhost:8080"
#+end_src


** Initialise skupper on premises

Once we have the skupper client installed and a workload running, let's initialise skupper in the kubernetes cluster running on our local machine; this will be our "private" / "on premise" cluster for the purposes of the demo.

#+NAME: Initialise skupper on local cluster
#+begin_src tmate :socket /tmp/james.tmate.tmate
skupper init && skupper status
#+end_src
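
~skupper status~ prints a one-line summary of the site; if you also want to see the pods that ~skupper init~ created, a plain kubectl check works. This step is an addition to the original demo, and the component names vary by skupper version:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Optional check (not in the original demo): list the components skupper init deployed
kubectl get pods --namespace demo-onprem
#+end_src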
@@ -67,7 +83,78 @@
#+end_src


** Initialise skupper in public cluster

So we've been tasked with migrating this application to the public cloud. Rather than doing a big-bang migration, let's use skupper to perform a progressive migration. Our first step is to set up skupper in our public cloud cluster.

#+NAME: Initialise skupper on public cluster
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Ensure namespace exists
kubectl --kubeconfig=$HOME/.kube/rosa create namespace demo-public --dry-run=client -o yaml | kubectl --kubeconfig=$HOME/.kube/rosa apply -f -

# Initialise skupper
skupper --kubeconfig=$HOME/.kube/rosa --namespace demo-public init
#+end_src
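
Before creating any links it is worth confirming that the public site came up cleanly. This check is not part of the original demo script, but it only reuses the flags shown above:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Optional check (not in the original demo): confirm the public skupper site is up
skupper --kubeconfig=$HOME/.kube/rosa --namespace demo-public status
#+end_src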

** Link public and private clusters

Creating a link requires the use of two skupper commands in conjunction: ~skupper token create~ and ~skupper link create~.

The ~skupper token create~ command generates a secret token that signifies permission to create a link, and that also carries the link details. Then, in a remote namespace, the ~skupper link create~ command uses the token to create a link to the namespace that generated it.

First, use ~skupper token create~ in one namespace to generate the token. Then, use ~skupper link create~ in the other to create the link.

#+NAME: Establish link between clusters
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Create the token on public
skupper --kubeconfig=$HOME/.kube/rosa --namespace demo-public token create 1-progressive-migration/secret.token

# Initiate the link from private
skupper link create --name "van" 1-progressive-migration/secret.token
#+end_src
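
The link can also be checked from the private side with ~skupper link status~, a standard skupper command, though this step is not in the original walkthrough:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Optional check (not in the original demo): confirm the link from the private cluster
skupper link status
#+end_src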

Now that we have linked our clusters, let's review the skupper console to confirm that the new link is present.

#+NAME: Review skupper console
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Open skupper console
flatpak run org.chromium.Chromium --new-window "https://admin:${password}@${console}:8080"
#+end_src

** Expose backend services to public cluster

With a virtual application network in place, let's use it to expose our backend services to our public cluster.

#+NAME: Expose payment-processor and database services
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Show list of services on public cluster
kubectl get svc --kubeconfig $HOME/.kube/rosa --namespace demo-public

# Expose the services to the skupper network
skupper expose deployment/payment-processor --port 8080
skupper expose deployment/database --port 5432

# Show list of services after expose
kubectl get svc --kubeconfig $HOME/.kube/rosa --namespace demo-public

# Describe the new service
kubectl describe svc --kubeconfig $HOME/.kube/rosa --namespace demo-public payment-processor
#+end_src
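
To prove that traffic really flows over the skupper network, you can call the proxied service from inside the public namespace. This is not part of the original demo; the pod name, image and request path below are illustrative choices:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Optional check (not in the original demo): call payment-processor from demo-public.
# Pod name, image and request path are illustrative.
kubectl --kubeconfig $HOME/.kube/rosa --namespace demo-public run curl-check --rm -it \
  --image=curlimages/curl --restart=Never --command -- curl -sv http://payment-processor:8080/
#+end_src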

** Migrate frontend to public cluster

Our backend services are now available in our public cluster thanks to our skupper virtual application network, so let's proceed with the cloud migration of our frontend.

#+NAME: Migrate frontend to the public cluster
#+begin_src tmate :socket /tmp/james.tmate.tmate
# Deploy a fresh set of frontend replicas on public cluster
kubectl --kubeconfig $HOME/.kube/rosa --namespace demo-public create -f 1-progressive-migration/frontend.yaml
kubectl --kubeconfig $HOME/.kube/rosa --namespace demo-public rollout status deployment/frontend

# Tear down the old frontend on premises
kubectl delete -f 1-progressive-migration/frontend.yaml
#+end_src
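
To keep browsing the app after the cutover, the same port-forward approach used on premises works against the public cluster. This step is not in the original demo; it only reuses commands shown above:

#+begin_src tmate :socket /tmp/james.tmate.tmate
# Optional (not in the original demo): reach the migrated frontend and reopen the browser
kubectl --kubeconfig $HOME/.kube/rosa --namespace demo-public port-forward --pod-running-timeout=10s deployment/frontend 8080 &
flatpak run org.chromium.Chromium --new-window "http://localhost:8080"
#+end_src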