
GCLOUD CLI

How to create common resources with the gcloud CLI
Cloud Shell direct URL: https://shell.cloud.google.com/?show=terminal

Install gcloud CLI - Linux
  # More options : https://cloud.google.com/sdk/docs/install#linux

  curl https://sdk.cloud.google.com | bash

  # List installed components in Cloud SDK
  gcloud components list

  # Enable Compute Engine API
  gcloud services enable compute.googleapis.com
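
  # (Not in the original) First-time setup sketch: initialize defaults and add extra components
  gcloud init
  gcloud components install kubectl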

Log In & Authentication

Authentication
      gcloud auth login
      # To generate a browser link and open it manually
      gcloud auth login --no-launch-browser

      # SA file
      gcloud auth activate-service-account --key-file credentials.json

      # Switch back to default user
      gcloud config configurations activate default
      # Switch to existing user2
      gcloud config configurations activate user2

IAM & SA

Code
  # Get token
  gcloud auth print-identity-token

  gcloud iam roles list | grep "name:"
  gcloud iam roles describe roles/compute.instanceAdmin

  ############### Manage access/roles for user2

  export USERID2="student-01-10f77b9af8fe@qwiklabs.net"
  # Grant the viewer role on project2
  gcloud projects add-iam-policy-binding $PROJECTID2 --member user:$USERID2 --role=roles/viewer

  # Create custom role devops
  gcloud iam roles create devops --project $PROJECTID2 \
  --permissions "compute.instances.create,compute.instances.delete,compute.instances.start,compute.instances.stop,compute.instances.update,compute.disks.create,compute.subnetworks.use,compute.subnetworks.useExternalIp,compute.instances.setMetadata,compute.instances.setServiceAccount"

  # Bind the role of iam.serviceAccountUser to the second user onto the second project.
  # user2 needs this role to be able to use the existing SA attached to the instance it creates
  gcloud projects add-iam-policy-binding $PROJECTID2 --member user:$USERID2 --role=roles/iam.serviceAccountUser

  # Bind the custom role devops to the second user onto the second project
  gcloud projects add-iam-policy-binding $PROJECTID2 --member user:$USERID2 --role=projects/$PROJECTID2/roles/devops
  # Creating an instance now works
  gcloud compute instances create lab-2
Service Account (SA)
  gcloud config configurations activate default
  gcloud config set project $PROJECTID2
  gcloud iam service-accounts create devops --display-name devops
  # List SA
  gcloud iam service-accounts list  --filter "displayName=devops"
  # Get the SA email and store it in $SA
  SA=$(gcloud iam service-accounts list --format="value(email)" --filter "displayName=devops")

  # Give the service account the role of iam.serviceAccountUser
  gcloud projects add-iam-policy-binding $PROJECTID2 --member serviceAccount:$SA --role=roles/iam.serviceAccountUser

  # Give the service account the role of compute.instanceAdmin
  gcloud projects add-iam-policy-binding $PROJECTID2 --member serviceAccount:$SA --role=roles/compute.instanceAdmin

  # Create an instance with the devops service account attached
  # also have to specify an access scope that defines the API calls that the instance can make

  gcloud compute instances create lab-3 --service-account $SA --scopes "https://www.googleapis.com/auth/compute"

  ##### Testing the SA attached to instance lab-3
  gcloud compute ssh lab-3
  # Inside lab-3
  gcloud config list

  # Instance lab-3, with the SA devops attached, can create an instance
  gcloud compute instances create lab-4

GKE

gcloud container ...
      # https://cloud.google.com/kubernetes-engine/docs/deploy-app-cluster#autopilot

      # Set default settings for gcloud
      gcloud config set project PROJECT_ID
      gcloud config set compute/zone COMPUTE_ZONE
      gcloud config set compute/region COMPUTE_REGION

      # Autopilot
      gcloud container clusters create-auto hello-cluster  --region=COMPUTE_REGION

      # Standard
      gcloud container clusters create hello-cluster --num-nodes=X --zone=COMPUTE_ZONE

      # With autoscaler, Ip-alias, release-channel
      gcloud beta container clusters create  hello-cluster \
          --enable-ip-alias \
          --release-channel=stable \
          --machine-type=e2-standard-2 \
          --enable-autoscaling --min-nodes=1 --max-nodes=10 \
          --num-nodes=1 \
          --autoscaling-profile=optimize-utilization

      # GET KUBECONFIG
      gcloud container clusters get-credentials hello-cluster

      # Delete
      gcloud container clusters delete lab-cluster
Create cluster, add new larger nodepool, move workload and delete default/old nodepool
    # Create
    gcloud container clusters create onlineboutique-cluster-296 --machine-type=n1-standard-2 \
    --num-nodes=2 --zone us-central1-a --release-channel rapid

    # Create & Associate new nodepool
    gcloud container node-pools create optimized-pool-4407  --cluster=onlineboutique-cluster-296  \
    --machine-type=custom-2-3584 --num-nodes=2 --zone=us-central1-a

    # Cordon & drain old nodes to move the workload onto the new nodes

    for node in $(kubectl get nodes -l cloud.google.com/gke-nodepool=default-pool -o=name); do
      kubectl cordon "$node";
    done

    for node in $(kubectl get nodes -l cloud.google.com/gke-nodepool=default-pool -o=name); do
      # Note: newer kubectl versions replace --delete-local-data with --delete-emptydir-data
      kubectl drain --force --ignore-daemonsets --delete-local-data --grace-period=10 "$node";
    done

    # Check workload is on new nodes
    kubectl get pods -o=wide

    # Delete old nodepool
    gcloud container node-pools delete default-pool --cluster onlineboutique-cluster-296  --zone us-central1-a

    # Cluster AutoScaler
      #### Workload autoscaling: HPA (Horizontal Pod Autoscaler)

      kubectl autoscale deployment/frontend --min=1 --max=12 --cpu-percent=50 -n dev
      # But what if the spike exceeds the compute resources you currently have provisioned? You may need to add additional compute nodes.

    gcloud beta container clusters update onlineboutique-cluster-296 --enable-autoscaling --min-nodes 1 \
    --max-nodes 6 --zone us-central1-a # will use default profile= balanced

    ## Change auto-scaler profile
    gcloud beta container clusters update onlineboutique-cluster-296 --autoscaling-profile optimize-utilization --zone us-central1-a

Create VM Instance

gcloud compute instances...
      gcloud compute project-info describe --project $(gcloud config get-value project)
      gcloud config set compute/region us-west3
      gcloud config get-value compute/region
      gcloud config set compute/zone us-west3-c

      gcloud config list --all
      gcloud components list

      export PROJECT_ID=$(gcloud config get-value project)
      export ZONE=$(gcloud config get-value compute/zone)
      gcloud compute instances create gcelab2 --machine-type e2-medium --zone $ZONE

      # Full version example
      # For No external IP, add flag: --no-address

      gcloud compute instances create privatenet-us-vm --zone=us-central1-c --machine-type=f1-micro --subnet=privatesubnet-us \
      --image-family=debian-10 --image-project=debian-cloud --boot-disk-size=10GB --boot-disk-type=pd-standard \
      --boot-disk-device-name=privatenet-us-vm --tags ssh,http,rules

      # filtering
      gcloud compute instances list --filter="name=('gcelab2')"
      gcloud compute firewall-rules list --filter="network='default'"
      gcloud compute firewall-rules list --filter="NETWORK:'default' AND ALLOW:'icmp'"

      # Connect to VM and install nginx
      gcloud compute ssh gcelab2 --zone $ZONE

      # Add tag to VM
      gcloud compute instances add-tags gcelab2 --tags http-server,https-server
      gcloud compute firewall-rules create default-allow-http --direction=INGRESS --priority=1000 --network=default --action=ALLOW \
      --rules=tcp:80 --source-ranges=0.0.0.0/0 --target-tags=http-server

      gcloud compute firewall-rules list --filter=ALLOW:'80'
        NAME: default-allow-http
        NETWORK: default
        DIRECTION: INGRESS
        PRIORITY: 1000
        ALLOW: tcp:80
        DENY:
        DISABLED: False

      # Test
      curl http://$(gcloud compute instances list --filter=name:gcelab2 --format='value(EXTERNAL_IP)')

Connect to VM Instance

Linux SSH
  # SSH
  gcloud compute ssh gcelab2 --zone us-west4-c
Windows RDP
  # check if RDP is ready to use 
  gcloud compute instances get-serial-port-output instance-1 --zone us-east1-b
  # Reset default password
  gcloud compute reset-windows-password instance-1  --zone us-east1-b --user admin
      ip_address: 35.237.40.98
      password:   xxx
      username:   admin
  # then use a RDP tool to connect
Resize VM Persistent Disk
# Format & mount the disk

sudo mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/disk/by-id/google-minecraft-disk

sudo mkdir -p /home/minecraft
sudo mount -o discard,defaults /dev/disk/by-id/google-minecraft-disk /home/minecraft
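
# (Hedged sketch, not in the commands above) The resize step itself; disk name from above, zone assumed
gcloud compute disks resize minecraft-disk --size=250GB --zone=us-central1-a
# Grow the ext4 filesystem to fill the resized disk
sudo resize2fs /dev/disk/by-id/google-minecraft-disk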

Cloud VPN

gcloud compute
    # reserved static IP
      # vpn-1-static-ip: 34.170.172.203
      # vpn-2-static-ip: 35.187.34.239
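
    # (Assumption) The static IPs above would have been reserved with commands like:
    gcloud compute addresses create vpn-1-static-ip --region=us-central1
    gcloud compute addresses create vpn-2-static-ip --region=europe-west1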

    #### Create the vpn-1 gateway and tunnel1to2

    gcloud compute target-vpn-gateways create vpn-1 --project=qwiklabs-gcp-02-e7823821d2d9 --region=us-central1 --network=vpn-network-1

    gcloud compute forwarding-rules create vpn-1-rule-esp --project=qwiklabs-gcp-02-e7823821d2d9 --region=us-central1 \
    --address=34.170.172.203 --ip-protocol=ESP --target-vpn-gateway=vpn-1

    gcloud compute forwarding-rules create vpn-1-rule-udp500 --project=qwiklabs-gcp-02-e7823821d2d9 --region=us-central1 \
    --address=34.170.172.203 --ip-protocol=UDP --ports=500 --target-vpn-gateway=vpn-1

    gcloud compute forwarding-rules create vpn-1-rule-udp4500 --project=qwiklabs-gcp-02-e7823821d2d9 --region=us-central1 \
    --address=34.170.172.203 --ip-protocol=UDP --ports=4500 --target-vpn-gateway=vpn-1

    gcloud compute vpn-tunnels create tunnel1to2 --project=qwiklabs-gcp-02-e7823821d2d9 --region=us-central1 --peer-address=35.187.34.239 \
    --shared-secret=gcprocks --ike-version=2 --local-traffic-selector=0.0.0.0/0 --remote-traffic-selector=0.0.0.0/0 --target-vpn-gateway=vpn-1

    gcloud compute routes create tunnel1to2-route-1 --project=qwiklabs-gcp-02-e7823821d2d9 --network=vpn-network-1 --priority=1000 \
    --destination-range=10.1.3.0/24 --next-hop-vpn-tunnel=tunnel1to2 --next-hop-vpn-tunnel-region=us-central1

    #### Create the vpn-2 gateway and tunnel2to1

    gcloud compute target-vpn-gateways create vpn-2 --project=qwiklabs-gcp-02-e7823821d2d9 --region=europe-west1 --network=vpn-network-1

    gcloud compute forwarding-rules create vpn-2-rule-esp --project=qwiklabs-gcp-02-e7823821d2d9 --region=europe-west1 \
    --address=35.187.34.239 --ip-protocol=ESP --target-vpn-gateway=vpn-2

    gcloud compute forwarding-rules create vpn-2-rule-udp500 --project=qwiklabs-gcp-02-e7823821d2d9 --region=europe-west1 \
    --address=35.187.34.239 --ip-protocol=UDP --ports=500 --target-vpn-gateway=vpn-2

    gcloud compute forwarding-rules create vpn-2-rule-udp4500 --project=qwiklabs-gcp-02-e7823821d2d9 --region=europe-west1 \
    --address=35.187.34.239 --ip-protocol=UDP --ports=4500 --target-vpn-gateway=vpn-2

    gcloud compute vpn-tunnels create tunnel2to1 --project=qwiklabs-gcp-02-e7823821d2d9 --region=europe-west1 --peer-address=34.170.172.203 \
    --shared-secret=gcprocks --ike-version=2 --local-traffic-selector=0.0.0.0/0 --remote-traffic-selector=0.0.0.0/0 --target-vpn-gateway=vpn-2

    gcloud compute routes create tunnel2to1-route-1 --project=qwiklabs-gcp-02-e7823821d2d9 --network=vpn-network-1 --priority=1000 \
    --destination-range=10.5.4.0/24 --next-hop-vpn-tunnel=tunnel2to1 --next-hop-vpn-tunnel-region=europe-west1

VPC Create & Manage Custom Network

Custom VPC
      # Create custom network with initial subnet   managementsubnet-us
      gcloud compute networks create managementnet --project=qwiklabs-gcp-01-6acb8053ba4b --subnet-mode=custom --mtu=1460 --bgp-routing-mode=regional

      gcloud compute networks subnets create managementsubnet-us --project=qwiklabs-gcp-01-6acb8053ba4b --range=10.130.0.0/20 \
      --network=managementnet --region=us-central1

      gcloud compute networks list
      gcloud compute networks subnets list --sort-by=NETWORK

      # create fw rule
      gcloud compute --project=qwiklabs-gcp-01-6acb8053ba4b firewall-rules create managementnet-allow-icmp-ssh-rdp --direction=INGRESS --priority=1000  \
      --network=managementnet --action=ALLOW --rules=tcp:22,tcp:3389 --source-ranges=0.0.0.0/0

      gcloud compute firewall-rules list --sort-by=NETWORK
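
      # (Optional check, assumed names) Create a test VM in the new subnet
      gcloud compute instances create managementnet-us-vm --zone=us-central1-c \
      --machine-type=f1-micro --subnet=managementsubnet-us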

VPC Access Connector - Cloud Run

Create a serverless VPC Access connector for Cloud Run Service
  export LOCATION="us-central1"

  # Create a subnet mysubnet
  gcloud compute networks subnets create mysubnet \
  --range=192.168.0.0/28 --network=default --region=$LOCATION

  # Create a VPC Access connector named myconnector
  # using the previously created subnet mysubnet
  gcloud compute networks vpc-access connectors create myconnector \
    --region=$LOCATION \
    --subnet-project=$GOOGLE_CLOUD_PROJECT \
    --subnet=mysubnet
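
  # (Not in the original) Verify the connector is ready before wiring it to Cloud Run
  gcloud compute networks vpc-access connectors describe myconnector --region=$LOCATION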

  # Configure NAT

  gcloud compute routers create myrouter \
    --network=default \
    --region=$LOCATION

  gcloud compute addresses create myoriginip --region=$LOCATION

  gcloud compute routers nats create mynat \
    --router=myrouter \
    --region=$LOCATION \
    --nat-custom-subnet-ip-ranges=mysubnet \
    --nat-external-ip-pool=myoriginip

  # Route Cloud Run traffic through the VPC network

  gcloud run deploy sample-go \
    --image=gcr.io/$GOOGLE_CLOUD_PROJECT/sample-go \
    --region=$LOCATION \
    --vpc-connector=myconnector \
    --vpc-egress=all-traffic

Cloud Run

gcloud run
      # https://cloud.google.com/sdk/gcloud/reference/run/deploy

      # Enable API: gcloud services enable run.googleapis.com

      gcloud run deploy hello-nginx  --image nginx

      gcloud run deploy pdf-converter \
        --image gcr.io/qwiklabs-gcp-02-d7248d83166f/pdf-converter \
        --platform managed  --region us-east1 --max-instances=1 \
        --set-env-vars PDF_BUCKET=$GOOGLE_CLOUD_PROJECT-processed \
        --memory=2Gi  --no-allow-unauthenticated

      # Manage SA authentication when using the --no-allow-unauthenticated option
      # (For user authentication, use Cloud IAP)
      ## Create SA
      gcloud iam service-accounts create cloud-run-invoker --display-name "Cloud Run Invoker"
      ## Give Access to SA
      gcloud run services add-iam-policy-binding pdf-converter \
        --member=serviceAccount:cloud-run-invoker@$GOOGLE_CLOUD_PROJECT.iam.gserviceaccount.com \
        --role=roles/run.invoker \
        --region us-east1
      ## Get service Url
      SERVICE_URL=$(gcloud run services describe pdf-converter \
        --platform managed \
        --region us-east1 \
        --format "value(status.url)")

      # Perform a call
      curl -X GET -H "Authoricurlzation: Bearer $(gcloud auth print-identity-token)" $SERVICE_URL
Deploy Multiple Revisions and Traffic Migration
      LOCATION="us-central1"

      # version 0.0.1 - tag test1
      gcloud run deploy product-service \
        --image gcr.io/qwiklabs-resources/product-status:0.0.1 \
        --tag test1 \
        --region $LOCATION \
        --allow-unauthenticated

      TEST1_PRODUCT_SERVICE_URL=$(gcloud run services describe product-service \
      --platform managed --region us-central1 --format="value(status.address.url)")

      curl $TEST1_PRODUCT_SERVICE_URL/help -w "\n"
        API Microservice example: v1

      # version 0.0.2 - tag test2 with no traffic by default
      gcloud run deploy product-service \
        --image gcr.io/qwiklabs-resources/product-status:0.0.2 \
        --no-traffic \
        --tag test2 \
        --region=$LOCATION \
        --allow-unauthenticated

      TEST2_PRODUCT_STATUS_URL=$(gcloud run services describe product-service \
      --platform managed --region=us-central1 --format="value(status.traffic[2].url)")

      curl $TEST2_PRODUCT_STATUS_URL/help -w "\n"
        API Microservice example: v2

      # Migrate 50% of the traffic to the revision with tag test2

      gcloud run services update-traffic product-service \
        --to-tags test2=50 \
        --region=$LOCATION

      # Confirm the service endpoint now splits traffic between test1 and test2
      for i in {1..10}; do curl $TEST1_PRODUCT_SERVICE_URL/help -w "\n"; done
        API Microservice example: v1
        API Microservice example: v2
        API Microservice example: v1
        ...

      # Deploy versions 0.0.3 and 0.0.4 with the --no-traffic option
      # Output the list of revisions
      gcloud run services describe product-service \
      --region=$LOCATION \
      --format='value(status.traffic.revisionName)'
         product-service-00001-gak;product-service-00002-bil;product-service-00001-gak;
         product-service-00002-bil;product-service-00004-kow;product-service-00005-cam

      # Get the revisions and assign each 25% of traffic
      LIST=$(gcloud run services describe product-service --platform=managed \
      --region=$LOCATION --format='value[delimiter="=25,"](status.traffic.revisionName)')"=25"

      # Split traffic between the four services using the LIST
      gcloud run services update-traffic product-service \
        --to-revisions $LIST --region=$LOCATION

      # Test
      for i in {1..10}; do curl $TEST1_PRODUCT_SERVICE_URL/help -w "\n"; done

      # Reset the service traffic profile to use the latest deployment
      # Send all traffic to the latest

      gcloud run services update-traffic product-service --to-latest --platform=managed --region=$LOCATION
          OK Updating traffic... Done.
          OK Routing traffic... Done.
          URL: https://product-service-3qgre62cbq-uc.a.run.app
          Traffic:
            0%   product-service-00001-gak
                  test1: https://test1---product-service-3qgre62cbq-uc.a.run.app
            0%   product-service-00002-bil
                  test2: https://test2---product-service-3qgre62cbq-uc.a.run.app
            0%   product-service-00004-kow
                  test3: https://test3---product-service-3qgre62cbq-uc.a.run.app
            0%   product-service-00005-cam
                  test4: https://test4---product-service-3qgre62cbq-uc.a.run.app
            100% LATEST (currently product-service-00005-cam)

      LATEST_PRODUCT_STATUS_URL=$(gcloud run services describe product-service \
      --platform managed --region=$LOCATION --format="value(status.address.url)")

      curl $LATEST_PRODUCT_STATUS_URL/help -w "\n"
        API Microservice example: v4

Using Global LB with Cloud Run

Deploy Service
  gcloud services enable run.googleapis.com
  gcloud config set compute/region us-central1
  gcloud config set run/region us-central1
  export LOCATION="us-central1"

  # Build image from the current directory
  gcloud builds submit --tag gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld

  # Deploy container
  gcloud run deploy helloworld --image gcr.io/$GOOGLE_CLOUD_PROJECT/helloworld

  # Reserve an external IP address
  gcloud compute addresses create example-ip \
      --ip-version=IPV4 --global

  gcloud compute addresses describe example-ip \
      --format="get(address)" \
      --global

    34.111.226.135

Create the external HTTP Load Balancer

Load balancers use a serverless Network Endpoint Group (NEG) backend
to direct requests to a serverless Cloud Run service.

Reserve IP and Create LB
  # Create a serverless NEG with a Cloud Run service
  gcloud compute network-endpoint-groups create myneg \
    --region=$LOCATION \
    --network-endpoint-type=serverless  \
    --cloud-run-service=helloworld

  # Create a backend service
  gcloud compute backend-services create mybackendservice \
      --global
  # Add the serverless NEG as a backend to this backend service
  gcloud compute backend-services add-backend mybackendservice \
      --global \
      --network-endpoint-group=myneg \
      --network-endpoint-group-region=$LOCATION

  # Create a URL map to route incoming requests to the backend service  
  gcloud compute url-maps create myurlmap \
      --default-service mybackendservice

  # Create a target HTTP(S) proxy to route requests to your URL map
  gcloud compute target-http-proxies create mytargetproxy \
      --url-map=myurlmap

  # Create a global forwarding rule to route incoming requests to the proxy
  gcloud compute forwarding-rules create myforwardingrule \
      --address=34.111.226.135 \
      --target-http-proxy=mytargetproxy \
      --global \
      --ports=80

Buildpacks

pack build
  git clone https://github.com/GoogleCloudPlatform/buildpack-samples.git

  cd buildpack-samples/sample-python
  pack build --builder=gcr.io/buildpacks/builder sample-python
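
  # (Assumption) Test the built image locally before deploying
  docker run --rm -p 8080:8080 sample-python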

  # Push the image to Artifact Registry and deploy it at the same time
  # This command is equivalent to running `gcloud builds submit --pack image=[IMAGE] .` and `gcloud run deploy sample-python --image [IMAGE]`
  gcloud beta run deploy --source .

APP Engine

gcloud app
    # An App Engine application has to be created/enabled first
    gcloud app create --region=us-central

    # Deploy the app
    gcloud app deploy app.yaml
    # Get url
    gcloud app browse
    # Redeploy after code modifications
    gcloud app deploy app.yaml --quiet
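
    # (Not in the original) Tail logs of the default service
    gcloud app logs tail -s default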

Cloud Function

Helloworld example
      mkdir gcf_hello_world
      cd gcf_hello_world

      cat > index.js <<EOF
      /**
      * Background Cloud Function to be triggered by Pub/Sub.
      * This function is exported by index.js, and executed when
      * the trigger topic receives a message.
      *
      * @param {object} data The event payload.
      * @param {object} context The event metadata.
      */
      exports.helloWorld = (data, context) => {
      const pubSubMessage = data;
      const name = pubSubMessage.data
          ? Buffer.from(pubSubMessage.data, 'base64').toString() : "Hello World";
      console.log(`My Cloud Function: ${name}`);
      };
      EOF

      # Create a bucket (used below as the function's stage bucket)
      gsutil mb -p qwiklabs-gcp-04-b5177f5b8912 gs://qwiklabs-gcp-04-b5177f5b8912

      # Deploy the function, staged in the bucket, triggered by the pub/sub topic hello_world
      # When deploying a new function, you must specify --trigger-topic, --trigger-bucket, or --trigger-http

      gcloud functions deploy helloWorld --stage-bucket qwiklabs-gcp-04-b5177f5b8912  --trigger-topic hello_world  --runtime nodejs8
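
      # (Assumption, not in the lab) An HTTP-triggered variant would look like:
      gcloud functions deploy helloHttp --runtime nodejs8 --trigger-http --allow-unauthenticated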

      # Check Function
      gcloud functions describe helloWorld

      # Test the function
      DATA=$(printf 'Hello World!'|base64) && gcloud functions call helloWorld --data '{"data":"'$DATA'"}'
      executionId: fgy5naxgkpx3

      # View log
      gcloud functions logs read helloWorld

Cloud Build

gcloud builds
      gcloud builds submit -t gcr.io/$DEVSHELL_PROJECT_ID/quiz-backend ./backend/

      gcloud builds submit --tag gcr.io/$GOOGLE_CLOUD_PROJECT/pdf-converter
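
      # (Assumption) With a build config file instead of a single tag:
      gcloud builds submit --config cloudbuild.yaml .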

Logging

gcloud logging
      gcloud logging logs list
      gcloud logging logs list --filter="compute"
      gcloud logging read "resource.type=gce_instance" --limit 5 # resource type gce_instance
      gcloud logging read "resource.type=gce_instance AND labels.instance_name='gcelab2'" --limit 5

Pub/Sub

Topic
      # create Topic
      gcloud pubsub topics create myTopic
      gcloud pubsub topics create Test1

      # List them
      gcloud pubsub topics list

      # Delete
      gcloud pubsub topics delete Test1

      # Create Subscription to myTopic
      gcloud  pubsub subscriptions create --topic myTopic mySubscription
      gcloud  pubsub subscriptions create --topic myTopic Test1

      # List subscriptions
      gcloud pubsub topics list-subscriptions myTopic

      # Publish message
      gcloud pubsub topics publish myTopic --message "Hello"
      gcloud pubsub topics publish myTopic --message "Publisher thinks Pub/Sub is awesome"

      # Pull/Get messages (one at a time)
      gcloud pubsub subscriptions pull mySubscription --auto-ack

      ## To pull many messages at the same time use: --limit
      gcloud pubsub subscriptions pull mySubscription --auto-ack --limit=3

LoadBalancing

Network LB: Create multiple web server instances & LB
    gcloud config set compute/region us-east4
    gcloud config set compute/zone us-east4-b

    # www1
      gcloud compute instances create www1 \
        --zone=us-east4-b \
        --tags=network-lb-tag \
        --machine-type=e2-medium \
        --image-family=debian-11 \
        --image-project=debian-cloud \
        --metadata=startup-script='#!/bin/bash
          apt-get update
          apt-get install apache2 -y
          service apache2 restart
          echo "
    <h3>Web Server: www1</h3>" | tee /var/www/html/index.html'

    # www2
      gcloud compute instances create www2 \
        --zone=us-east4-b \
        --tags=network-lb-tag \
        --machine-type=e2-medium \
        --image-family=debian-11 \
        --image-project=debian-cloud \
        --metadata=startup-script='#!/bin/bash
          apt-get update
          apt-get install apache2 -y
          service apache2 restart
          echo "
    <h3>Web Server: www2</h3>" | tee /var/www/html/index.html'

    # www3
      gcloud compute instances create www3 \
        --zone=us-east4-b \
        --tags=network-lb-tag \
        --machine-type=e2-medium \
        --image-family=debian-11 \
        --image-project=debian-cloud \
        --metadata=startup-script='#!/bin/bash
          apt-get update
          apt-get install apache2 -y
          service apache2 restart
          echo "
    <h3>Web Server: www3</h3>" | tee /var/www/html/index.html'

    # create fw rule
    gcloud compute firewall-rules create www-firewall-network-lb \
        --target-tags network-lb-tag --allow tcp:80

    ### Configure the load balancing service

    # Create a static external IP address for your load balancer:
    gcloud compute addresses create network-lb-ip-1 \
        --region us-east4

    # create health check
    gcloud compute http-health-checks create basic-check

    # Create target pool (UI equivalent: TCP/UDP LB with backend type Target Pool or Target Instance)
    gcloud compute target-pools create www-pool \
        --region us-east4 --http-health-check basic-check

    # Add instances to the target pool
    gcloud compute target-pools add-instances www-pool \
        --instances www1,www2,www3

    # Forward static external IP to the LB www-pool
    gcloud compute forwarding-rules create www-rule \
        --region  us-east4 \
        --ports 80 \
        --address network-lb-ip-1 \
        --target-pool www-pool
    gcloud compute forwarding-rules describe www-rule --region us-east4
    IPADDRESS=$(gcloud compute forwarding-rules describe www-rule --region us-east4 --format="json" | jq -r .IPAddress)
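
    # (Hedged check) Repeated requests should rotate across www1/www2/www3
    while true; do curl -m1 $IPADDRESS; done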
Create an HTTP LB for Managed Group Instances
    # Instance template

    gcloud compute instance-templates create lb-backend-template \
      --region=us-east4 \
      --network=default \
      --subnet=default \
      --tags=allow-health-check \
      --machine-type=e2-medium \
      --image-family=debian-11 \
      --image-project=debian-cloud \
      --metadata=startup-script='#!/bin/bash
        apt-get update
        apt-get install apache2 -y
        a2ensite default-ssl
        a2enmod ssl
        vm_hostname="$(curl -H "Metadata-Flavor:Google" \
        http://169.254.169.254/computeMetadata/v1/instance/name)"
        echo "Page served from: $vm_hostname" | \
        tee /var/www/html/index.html
        systemctl restart apache2'

    # It is also possible to create an instance template from an existing instance

    # gcloud compute instance-templates create fancy-fe --source-instance=frontend

    # create managed Instance Group

    gcloud compute instance-groups managed create lb-backend-group \
      --template=lb-backend-template --size=2 --zone=us-east4-b

    # create fw rule to allow port 80

    gcloud compute firewall-rules create fw-allow-health-check \
      --network=default \
      --action=allow \
      --direction=ingress \
      --source-ranges=130.211.0.0/22,35.191.0.0/16 \
      --target-tags=allow-health-check \
      --rules=tcp:80

    # create/Reserve External IP

    gcloud compute addresses create lb-ipv4-1 \
      --ip-version=IPV4 \
      --global

    gcloud compute addresses describe lb-ipv4-1 \
      --format="get(address)" \
      --global
    34.110.245.188

    # Create healthcheck

    gcloud compute health-checks create http http-basic-check \
      --port 80

    # create LB backend service
    gcloud compute backend-services create web-backend-service \
      --protocol=HTTP \
      --port-name=http \
      --health-checks=http-basic-check \
      --global

    # Add the instance group as the backend to the backend service:
    gcloud compute backend-services add-backend web-backend-service \
      --instance-group=lb-backend-group \
      --instance-group-zone=us-east4-b \
      --global

    # Create a URL map to route incoming requests to the default backend service (UI equivalent: creates the LB without a frontend; the map is attached to the backend)

    gcloud compute url-maps create web-map-http \
        --default-service web-backend-service

    # Create a target HTTP proxy to route requests to your URL map:
    gcloud compute target-http-proxies create http-lb-proxy \
        --url-map web-map-http

    # Create a global forwarding rule to route incoming requests to the proxy => LB frontend
    gcloud compute forwarding-rules create http-content-rule \
        --address=lb-ipv4-1 \
        --global \
        --target-http-proxy=http-lb-proxy \
        --ports=80
    # Add CDN afterwards: gcloud compute backend-services update web-backend-service --enable-cdn --global

Bucket

Create - Manage Bucket
    # create bucket
    gsutil mb gs://qwiklabs-gcp-01-199cf6f21349

    #upload
    gsutil cp ada.jpg gs://YOUR-BUCKET-NAME

    # download 
    gsutil cp -r gs://YOUR-BUCKET-NAME/ada.jpg .

    # Create a folder called image-folder and copy the image into it
    gsutil cp gs://qwiklabs-gcp-01-199cf6f21349/ada.jpg gs://qwiklabs-gcp-01-199cf6f21349/image-folder/

    gsutil ls  gs://qwiklabs-gcp-01-199cf6f21349/
      gs://qwiklabs-gcp-01-199cf6f21349/ada.jpg
      gs://qwiklabs-gcp-01-199cf6f21349/image-folder/

    gsutil ls -l gs://qwiklabs-gcp-01-199cf6f21349/image-folder
      368723  2022-09-28T17:58:45Z  gs://qwiklabs-gcp-01-199cf6f21349/image-folder/ada.jpg
      TOTAL: 1 objects, 368723 bytes (360.08 KiB)

    # Make the object publicly readable
    gsutil acl ch -u AllUsers:R gs://qwiklabs-gcp-01-199cf6f21349/ada.jpg
    # remove access
    gsutil acl ch -d AllUsers  gs://qwiklabs-gcp-01-199cf6f21349/ada.jpg

    # Synchronize a directory to a bucket
    gsutil rsync -r ./firstlevel gs://qwiklabs-gcp-01-199cf6f21349/firstlevel

    # Notification: Tell Cloud Storage to send a Pub/Sub notification whenever a new file has finished uploading to the bucket
    # The notifications will be labeled with the topic "new-doc-notif".
    gsutil notification create -t new-doc-notif -f json -e OBJECT_FINALIZE gs://qwiklabs-gcp-02-d7248d83166f-upload

Fine-Grained ACL

Bucket with fine-grained access (object-level permissions in addition to bucket-level permissions) instead of uniform; a sketch to switch is shown below.
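
A minimal sketch, assuming uniform bucket-level access is currently enabled, to switch the bucket to fine-grained:

    gsutil ubla set off gs://$BUCKET_NAME_1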

Bucket ACL
    # Upload file
    gsutil cp setup.html gs://$BUCKET_NAME_1/

    # To get the default access list that's been assigned to setup.html
    gsutil acl get gs://$BUCKET_NAME_1/setup.html
        [
          {
            "entity": "project-owners-472917764794",
            "projectTeam": {
              "projectNumber": "472917764794",
              "team": "owners"
            },
            "role": "OWNER"
          },
          {
            "entity": "project-editors-472917764794",
            "projectTeam": {
              "projectNumber": "472917764794",
              "team": "editors"
            },
            "role": "OWNER"
          },
          {
            "entity": "project-viewers-472917764794",
            "projectTeam": {
              "projectNumber": "472917764794",
              "team": "viewers"
            },
            "role": "READER"
          },
          {
            "email": "student-02-d26551da7aba@qwiklabs.net",
            "entity": "user-student-02-d26551da7aba@qwiklabs.net",
            "role": "OWNER"
          }
        ]

    # Set access list to private
    gsutil acl set private gs://$BUCKET_NAME_1/setup.html
    # Get the new acl assigned
    gsutil acl get gs://$BUCKET_NAME_1/setup.html
        [
          {
            "email": "student-02-d26551da7aba@qwiklabs.net",
            "entity": "user-student-02-d26551da7aba@qwiklabs.net",
            "role": "OWNER"
          }
        ]

    # Make it public
    gsutil acl ch -u AllUsers:R gs://$BUCKET_NAME_1/setup.html
    gsutil acl get gs://$BUCKET_NAME_1/setup.html
        [
          {
            "email": "student-02-d26551da7aba@qwiklabs.net",
            "entity": "user-student-02-d26551da7aba@qwiklabs.net",
            "role": "OWNER"
          },
          {
            "entity": "allUsers",
            "role": "READER"
          }
        ]
    # Make it private
    gsutil acl set private  gs://$BUCKET_NAME_1/setup.html

Customer-Encrypted files

Custom Encryption
    ############# Generate a CSEK key

    python3 -c 'import base64; import os; print(base64.encodebytes(os.urandom(32)))'
    b'6zK7GJ/6PBX1BJGgzz1sm0OdlslfREGX8AUeNM6b3pQ=\n'
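
    # (Assumption) An alternative one-liner that prints a clean base64 string, without the b'...' wrapper:
    python3 -c 'import base64, os; print(base64.b64encode(os.urandom(32)).decode())'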

    # The encryption controls are contained in a gsutil configuration file named .boto.
    # If .boto is empty or does not exist, create it with: gsutil config -n. If it is still empty, locate it with: gsutil version -l
    vim .boto
      # Locate "#encryption_key=", uncomment and add CSEK key
      # encryption_key=6zK7GJ/6PBX1BJGgzz1sm0OdlslfREGX8AUeNM6b3pQ=

    gsutil cp setup2.html gs://$BUCKET_NAME_1/
      # File content in the bucket: the target object is encrypted with the customer-supplied encryption key.
      # The file is automatically decrypted when downloaded (using the key in .boto)

    ########### Rotate CSEK keys
    vim .boto
      ## Comment encryption_key=6zK7GJ/6PBX1BJGgzz1sm0OdlslfREGX8AUeNM6b3pQ=
      ## Uncomment decryption_key1 and set value as 6zK7GJ/6PBX1BJGgzz1sm0OdlslfREGX8AUeNM6b3pQ=

    # Generate new CSEK key
    python3 -c 'import base64; import os; print(base64.encodebytes(os.urandom(32)))'
    b'ktcnriC3NHPAGqMlXkg2aMzk/JOmBXfKn19eWoBqgSU=\n'

    vim .boto
      ## uncomment encryption_key and set new value as ktcnriC3NHPAGqMlXkg2aMzk/JOmBXfKn19eWoBqgSU=

    # rewrite file
    gsutil rewrite -k gs://$BUCKET_NAME_1/setup2.html

    vim .boto
      # Comment decryption_key1

Lifecycle Management

Bucket LifeCycle
    gsutil lifecycle get gs://$BUCKET_NAME_1
      gs://qwiklabs-gcp-02-adc472149aa9/ has no lifecycle configuration.

    # Create lifecycle config file
    cat > life.json << EOF
    {
      "rule":
      [
        {
          "action": {"type": "Delete"},
          "condition": {"age": 31}
        }
      ]
    }
    EOF

    # apply life.json
    gsutil lifecycle set life.json gs://$BUCKET_NAME_1

    # Check
    gsutil lifecycle get gs://$BUCKET_NAME_1
      {"rule": [{"action": {"type": "Delete"}, "condition": {"age": 31}}]}

    ####### enable versioning
    # Check versioning
    gsutil versioning get gs://$BUCKET_NAME_1
      gs://qwiklabs-gcp-02-adc472149aa9: Suspended # means not enabled
    # Enable it
    gsutil versioning set on gs://$BUCKET_NAME_1
    gsutil versioning get gs://$BUCKET_NAME_1
      gs://qwiklabs-gcp-02-adc472149aa9: Enabled

    # List all versions of given file
    gsutil ls -a gs://$BUCKET_NAME_1/setup.html
        gs://qwiklabs-gcp-02-adc472149aa9/setup.html#1664716793954699
        gs://qwiklabs-gcp-02-adc472149aa9/setup.html#1664719521003657
        gs://qwiklabs-gcp-02-adc472149aa9/setup.html#1664719563246198
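
    # (Hedged example) Recover a specific generation by appending #GENERATION to the object name
    gsutil cp gs://$BUCKET_NAME_1/setup.html#1664716793954699 recovered-setup.html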

Database Migration Service

Database Migration Service provides options for one-time and continuous jobs to migrate data to Cloud SQL
using different connectivity options, including IP allowlists, VPC peering, and reverse SSH tunnels
https://cloud.google.com/database-migration/docs/postgres/configure-connectivity

In this lab, you migrate a stand-alone PostgreSQL database (running on a virtual machine) to Cloud SQL for PostgreSQL
using a continuous Database Migration Service job and VPC peering for connectivity.

Details

  Migrating a database via Database Migration Service requires some preparation of
  the source database, including creating a dedicated user with replication rights,
  adding the pglogical database extension to the source database and granting rights
  to the schemata and tables in the database to be migrated, as well as the postgres
  database, to that user.

    Enable:
    * Database Migration API
    * Service Networking API
Prepare the source database for migration
# SSH into the Postgres VM

# Install pglogical
sudo apt install postgresql-13-pglogical

# Download and apply some additions to the PostgreSQL configuration files (to enable the pglogical extension), then restart the postgresql service
sudo su - postgres -c "gsutil cp gs://cloud-training/gsp918/pg_hba_append.conf ."
sudo su - postgres -c "gsutil cp gs://cloud-training/gsp918/postgresql_append.conf ."
sudo su - postgres -c "cat pg_hba_append.conf >> /etc/postgresql/13/main/pg_hba.conf"
sudo su - postgres -c "cat postgresql_append.conf >> /etc/postgresql/13/main/postgresql.conf"
sudo systemctl restart postgresql@13-main
  ## In pg_hba.conf these commands added a rule to allow access to all hosts
      #GSP918 - allow access to all hosts
      host    all all 0.0.0.0/0   md5
  ## In postgresql.conf, these commands set the minimal configuration for pglogical to configure it to listen on all addresses
      #GSP918 - added configuration for pglogical database extension
      wal_level = logical         # minimal, replica, or logical
      max_worker_processes = 10   # one per database needed on provider node
                                  # one per node needed on subscriber node
      max_replication_slots = 10  # one per node needed on provider node
      max_wal_senders = 10        # one per node needed on provider node
      shared_preload_libraries = 'pglogical'
      max_wal_size = 1GB
      min_wal_size = 80MB
      listen_addresses = '*'         # what IP address(es) to listen on, '*' is all

# Launch psql
sudo su - postgres
psql

# Add the pglogical database extension to the postgres, orders and gmemegen_db databases.
\c postgres;
CREATE EXTENSION pglogical;
\c orders;
CREATE EXTENSION pglogical;
\c gmemegen_db;
CREATE EXTENSION pglogical;

# List DB to check
\l

# Create the database migration user
CREATE USER migration_admin PASSWORD 'DMS_1s_cool!';
ALTER DATABASE orders OWNER TO migration_admin;
ALTER ROLE migration_admin WITH REPLICATION;

# Assign permissions to the migration user
### In psql, grant permissions to the pglogical schema and tables for the postgres database.
\c postgres;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;

### In psql, grant permissions to the pglogical schema and tables for the orders database
\c orders;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;

### In psql, grant permissions to the public schema and tables for the orders database
GRANT USAGE ON SCHEMA public TO migration_admin;
GRANT ALL ON SCHEMA public TO migration_admin;
GRANT SELECT ON public.distribution_centers TO migration_admin;
GRANT SELECT ON public.inventory_items TO migration_admin;
GRANT SELECT ON public.order_items TO migration_admin;
GRANT SELECT ON public.products TO migration_admin;
GRANT SELECT ON public.users TO migration_admin;

### In psql, grant permissions to the pglogical schema and tables for the gmemegen_db database.
\c gmemegen_db;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;

### In psql, grant permissions to the public schema and tables for the gmemegen_db database.

GRANT USAGE ON SCHEMA public TO migration_admin;
GRANT ALL ON SCHEMA public TO migration_admin;
GRANT SELECT ON public.meme TO migration_admin;

# The source databases are now prepared for migration. The permissions you have granted
# to the migration_admin user are all that is required for 
# Database Migration Service to migrate the postgres, orders and gmemegen_db databases.

# Make the migration_admin user the owner of the tables in the orders database,
# so that you can edit the source data later, when you test the migration

# In psql, run the following commands
\c orders;
\dt
ALTER TABLE public.distribution_centers OWNER TO migration_admin;
ALTER TABLE public.inventory_items OWNER TO migration_admin;
ALTER TABLE public.order_items OWNER TO migration_admin;
ALTER TABLE public.products OWNER TO migration_admin;
ALTER TABLE public.users OWNER TO migration_admin;
\dt
                        List of relations
    Schema |         Name         | Type  |      Owner      
    --------+----------------------+-------+-----------------
    public | distribution_centers | table | migration_admin
    public | inventory_items      | table | migration_admin
    public | order_items          | table | migration_admin
    public | products             | table | migration_admin
    public | users                | table | migration_admin