Sunday, August 3, 2025

OCP k8s CRC local monitoring

 HOWTO



Add memory 


$ crc stop
INFO Stopping kubelet and all containers...       
INFO Stopping the instance, this may take a few minutes... 
Stopped the instance
dave@fedora:~/Downloads$ crc config set memory 14336
Changes to configuration property 'memory' are only applied when the CRC instance is started.
If you already have a running CRC instance, then for this configuration change to take effect, stop the CRC instance with 'crc stop' and restart it with 'crc start'.
dave@fedora:~/Downloads$  crc config set enable-cluster-monitoring true
Successfully configured enable-cluster-monitoring to true
dave@fedora:~/Downloads$ crc start

Monitoring 


dave@fedora:~/Downloads$ oc config use-context crc-admin
Switched to context "crc-admin".
dave@fedora:~/Downloads$ oc whoami
kubeadmin
dave@fedora:~/Downloads$ oc get clusterversion version -ojsonpath='{range .spec.overrides[*]}{.name}{"\n"}{end}' | nl -v -2
    -2    cluster-monitoring-operator
    -1    monitoring
     0    cloud-credential-operator
     1    cloud-credential
     2    cluster-autoscaler-operator
     3    cluster-autoscaler
     4    cluster-cloud-controller-manager-operator
     5    cloud-controller-manager
dave@fedora:~/Downloads$ crc config set enable-cluster-monitoring true
Successfully configured enable-cluster-monitoring to true




Pod YAML

# Pod object captured from the live cluster (output of `oc get pod demo -n demo -o yaml`),
# not an authored manifest: server-populated, read-only fields (uid, resourceVersion,
# managedFields, status) are therefore present.
kind: Pod
apiVersion: v1
metadata:
  name: demo
  namespace: demo
  # Identity/versioning fields assigned by the API server.
  uid: 6be00704-f3ca-4d4b-9af6-f6bee0494ba1
  resourceVersion: '37632'
  creationTimestamp: '2025-08-03T09:24:59Z'
  labels:
    run: demo  # set by `kubectl run` (see the kubectl-run entry in managedFields)
  # Networking annotations written by OVN-Kubernetes and Multus (see the crc and
  # multus-daemon entries in managedFields): pod interface eth0, IP 10.217.0.66/23,
  # gateway 10.217.0.1.
  annotations:
    k8s.ovn.org/pod-networks: '{"default":{"ip_addresses":["10.217.0.66/23"],"mac_address":"0a:58:0a:d9:00:42","gateway_ips":["10.217.0.1"],"routes":[{"dest":"10.217.0.0/22","nextHop":"10.217.0.1"},{"dest":"10.217.4.0/23","nextHop":"10.217.0.1"},{"dest":"169.254.0.5/32","nextHop":"10.217.0.1"},{"dest":"100.64.0.0/16","nextHop":"10.217.0.1"}],"ip_address":"10.217.0.66/23","gateway_ip":"10.217.0.1","role":"primary"}}'
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "ovn-kubernetes",
          "interface": "eth0",
          "ips": [
              "10.217.0.66"
          ],
          "mac": "0a:58:0a:d9:00:42",
          "default": true,
          "dns": {}
      }]
    openshift.io/scc: anyuid  # security context constraint applied to this pod
  # Server-side-apply bookkeeping: records which field manager (crc, kubectl-run,
  # multus-daemon, kubelet) last wrote each field. Safe to ignore when reading.
  managedFields:
    - manager: crc
      operation: Update
      apiVersion: v1
      time: '2025-08-03T09:24:59Z'
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            'f:k8s.ovn.org/pod-networks': {}
      subresource: status
    - manager: kubectl-run
      operation: Update
      apiVersion: v1
      time: '2025-08-03T09:24:59Z'
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:labels':
            .: {}
            'f:run': {}
        'f:spec':
          'f:containers':
            'k:{"name":"demo"}':
              .: {}
              'f:command': {}
              'f:image': {}
              'f:imagePullPolicy': {}
              'f:name': {}
              'f:resources': {}
              'f:terminationMessagePath': {}
              'f:terminationMessagePolicy': {}
          'f:dnsPolicy': {}
          'f:enableServiceLinks': {}
          'f:restartPolicy': {}
          'f:schedulerName': {}
          'f:securityContext': {}
          'f:terminationGracePeriodSeconds': {}
    - manager: multus-daemon
      operation: Update
      apiVersion: v1
      time: '2025-08-03T09:24:59Z'
      fieldsType: FieldsV1
      fieldsV1:
        'f:metadata':
          'f:annotations':
            'f:k8s.v1.cni.cncf.io/network-status': {}
      subresource: status
    - manager: kubelet
      operation: Update
      apiVersion: v1
      time: '2025-08-03T09:55:05Z'
      fieldsType: FieldsV1
      fieldsV1:
        'f:status':
          'f:conditions':
            'k:{"type":"ContainersReady"}':
              .: {}
              'f:lastProbeTime': {}
              'f:lastTransitionTime': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Initialized"}':
              .: {}
              'f:lastProbeTime': {}
              'f:lastTransitionTime': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"PodReadyToStartContainers"}':
              .: {}
              'f:lastProbeTime': {}
              'f:lastTransitionTime': {}
              'f:status': {}
              'f:type': {}
            'k:{"type":"Ready"}':
              .: {}
              'f:lastProbeTime': {}
              'f:lastTransitionTime': {}
              'f:status': {}
              'f:type': {}
          'f:containerStatuses': {}
          'f:hostIP': {}
          'f:hostIPs': {}
          'f:phase': {}
          'f:podIP': {}
          'f:podIPs':
            .: {}
            'k:{"ip":"10.217.0.66"}':
              .: {}
              'f:ip': {}
          'f:startTime': {}
      subresource: status
# Desired state. The single container only runs `sleep 600s`, so it exits
# roughly every 10 minutes and restartPolicy: Always restarts it — this is
# what produces restartCount: 3 in status below.
spec:
  restartPolicy: Always
  serviceAccountName: default
  imagePullSecrets:
    - name: default-dockercfg-p7cfm
  priority: 0
  schedulerName: default-scheduler
  enableServiceLinks: true
  terminationGracePeriodSeconds: 30
  preemptionPolicy: PreemptLowerPriority
  nodeName: crc  # CRC is a single-node cluster; node name is "crc"
  securityContext:
    seLinuxOptions:
      level: 's0:c26,c0'
  containers:
    - resources: {}  # no requests/limits — drives qosClass: BestEffort in status
      terminationMessagePath: /dev/termination-log
      name: demo
      command:
        - sleep
        - 600s
      securityContext:
        capabilities:
          drop:
            - MKNOD
      imagePullPolicy: Always
      volumeMounts:
        - name: kube-api-access-z7xrc
          readOnly: true
          mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      terminationMessagePolicy: File
      # Image pulled by digest from the cluster-internal OpenShift registry.
      image: 'image-registry.openshift-image-registry.svc:5000/demo/ubi8@sha256:0686ee6a1b9f7a4eb706b3562e50bbf55b929a573f6055a1128052b4b2266a2c'
  serviceAccount: default
  # Projected service-account volume: token plus CA bundles plus namespace,
  # mounted read-only at the volumeMounts path above.
  volumes:
    - name: kube-api-access-z7xrc
      projected:
        sources:
          - serviceAccountToken:
              expirationSeconds: 3607
              path: token
          - configMap:
              name: kube-root-ca.crt
              items:
                - key: ca.crt
                  path: ca.crt
          - downwardAPI:
              items:
                - path: namespace
                  fieldRef:
                    apiVersion: v1
                    fieldPath: metadata.namespace
          - configMap:
              name: openshift-service-ca.crt
              items:
                - key: service-ca.crt
                  path: service-ca.crt
        defaultMode: 420  # 420 decimal == 0644 octal
  dnsPolicy: ClusterFirst
  tolerations:
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300
# Observed state reported by the kubelet (read-only).
status:
  containerStatuses:
    - restartCount: 3  # the 600 s sleep keeps completing; restartPolicy: Always restarts it
      started: true
      ready: true
      name: demo
      state:
        running:
          startedAt: '2025-08-03T09:55:04Z'
      volumeMounts:
        - name: kube-api-access-z7xrc
          mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          readOnly: true
          recursiveReadOnly: Disabled
      imageID: 'image-registry.openshift-image-registry.svc:5000/demo/ubi8@sha256:0686ee6a1b9f7a4eb706b3562e50bbf55b929a573f6055a1128052b4b2266a2c'
      image: 'image-registry.openshift-image-registry.svc:5000/demo/ubi8@sha256:0686ee6a1b9f7a4eb706b3562e50bbf55b929a573f6055a1128052b4b2266a2c'
      # Previous run: clean exit after exactly 10 minutes (09:45:03 -> 09:55:03),
      # consistent with `sleep 600s` finishing.
      lastState:
        terminated:
          exitCode: 0
          reason: Completed
          startedAt: '2025-08-03T09:45:03Z'
          finishedAt: '2025-08-03T09:55:03Z'
          containerID: 'cri-o://fe191488478ade4fb4fbd6f045f4896cdd3128a02cd23c9163dea85039de6efc'
      containerID: 'cri-o://f9351ee373330f42397d46be58820ae2d34616afcddf9be923e30bbd906b5238'
  qosClass: BestEffort  # no resource requests/limits set on the container
  hostIPs:
    - ip: 192.168.126.11
  podIPs:
    - ip: 10.217.0.66
  podIP: 10.217.0.66
  hostIP: 192.168.126.11
  startTime: '2025-08-03T09:24:59Z'
  conditions:
    - type: PodReadyToStartContainers
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2025-08-03T09:25:03Z'
    - type: Initialized
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2025-08-03T09:24:59Z'
    - type: Ready
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2025-08-03T09:55:05Z'
    - type: ContainersReady
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2025-08-03T09:55:05Z'
    - type: PodScheduled
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2025-08-03T09:24:59Z'
  phase: Running



Deploying a sample application into OCP CRC local with odo

HOWTO

See also


 Prerequisites


Installing odo



dave@fedora:~/Downloads$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/v3.16.1/odo-linux-amd64 -o odo
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 90.1M  100 90.1M    0     0  4255k      0  0:00:21  0:00:21 --:--:-- 4785k
dave@fedora:~/Downloads$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/v3.16.1/odo-linux-amd64.sha256 -o odo.sha256
echo "$(<odo.sha256)  odo" | shasum -a 256 --check
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100    64  100    64    0     0    181      0 --:--:-- --:--:-- --:--:--   181
bash: shasum: command not found...
Install package 'perl-Digest-SHA' to provide command 'shasum'? [N/y] y


 * Waiting in queue... 
 * Loading list of packages.... 
The following packages have to be installed:
 perl-Digest-SHA-1:6.04-513.fc42.x86_64    Perl extension for SHA-1/224/256/384/512
Proceed with changes? [N/y] y


 * Waiting in queue... 
 * Waiting for authentication... 
 * Waiting in queue... 
 * Downloading packages... 
 * Requesting data... 
 * Testing changes... 
 * Installing packages... 
shasum: standard input: no properly formatted SHA checksum lines found

dave@fedora:~/Downloads$ curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/odo/v3.16.1/odo-linux-amd64.sha256 -o odo.sha256
echo "$(<odo.sha256)  odo" | shasum -a 256 --check
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100    64  100    64    0     0    385      0 --:--:-- --:--:-- --:--:--   385
odo: OK
dave@fedora:~/Downloads$ sudo install -o root -g root -m 0755 odo /usr/local/bin/odo
[sudo] password for dave: 

dave@fedora:~/Downloads$  odo login -u developer -p developer
Connecting to the OpenShift cluster

Login successful.

You don't have any projects. You can try to create a new project, by running

    odo create project <projectname>

dave@fedora:~/Downloads$ odo create project sample-app
 ✓  Creating the project "sample-app" [80ms]
 ✓  Project "sample-app" is ready for use
 ✓  New project created and now using project: sample-app
dave@fedora:~/Downloads$ mkdir sample-app
dave@fedora:~/Downloads$ cd sample-app
dave@fedora:~/Downloads/sample-app$ git clone https://github.com/openshift/nodejs-ex
Cloning into 'nodejs-ex'...
remote: Enumerating objects: 836, done.
remote: Counting objects: 100% (1/1), done.
remote: Total 836 (delta 0), reused 0 (delta 0), pack-reused 835 (from 2)
Receiving objects: 100% (836/836), 773.00 KiB | 908.00 KiB/s, done.
Resolving deltas: 100% (321/321), done.
dave@fedora:~/Downloads/sample-app$ cd nodejs-ex




Install local Openshift - CRC

 HOWTO



Install prerequisites


[sudo] password for dave: 
Updating and loading repositories:
Repositories loaded.
Package "NetworkManager-1:1.52.1-1.fc42.x86_64" is already installed.

Package                                                Arch          Version                                                Repository                         Size
Installing:
 libvirt                                               x86_64        11.0.0-3.fc42                                          updates                         0.0   B
Installing dependencies:
 libvirt-client-qemu                                   x86_64        11.0.0-3.fc42                                          updates                        64.0 KiB
 libvirt-daemon-config-nwfilter                        x86_64        11.0.0-3.fc42                                          updates                        20.2 KiB
 libvirt-daemon-driver-ch                              x86_64        11.0.0-3.fc42                                          updates                       838.1 KiB
 libvirt-daemon-driver-libxl                           x86_64        11.0.0-3.fc42                                          updates                         1.0 MiB
 libvirt-daemon-driver-lxc                             x86_64        11.0.0-3.fc42                                          updates                         1.1 MiB
 libvirt-daemon-driver-vbox                            x86_64        11.0.0-3.fc42                                          updates                       949.7 KiB
 python3-libvirt                                       x86_64        11.0.0-1.fc42                                          fedora                          2.0 MiB

Transaction Summary:
 Installing:         8 packages

Total size of inbound packages is 1 MiB. Need to download 1 MiB.
After this operation, 6 MiB extra will be used (install 6 MiB, remove 0 B).
Is this ok [y/N]: y
[1/8] libvirt-0:11.0.0-3.fc42.x86_64                                                                                       100% |  52.9 KiB/s |  10.8 KiB |  00m00s
[2/8] libvirt-client-qemu-0:11.0.0-3.fc42.x86_64                                                                           100% | 135.2 KiB/s |  31.2 KiB |  00m00s
[3/8] libvirt-daemon-config-nwfilter-0:11.0.0-3.fc42.x86_64                                                                100% |  98.9 KiB/s |  23.2 KiB |  00m00s
[4/8] libvirt-daemon-driver-ch-0:11.0.0-3.fc42.x86_64                                                                      100% | 772.5 KiB/s | 227.1 KiB |  00m00s
[5/8] libvirt-daemon-driver-libxl-0:11.0.0-3.fc42.x86_64                                                                   100% |   1.1 MiB/s | 299.4 KiB |  00m00s
[6/8] libvirt-daemon-driver-lxc-0:11.0.0-3.fc42.x86_64                                                                     100% | 969.8 KiB/s | 313.2 KiB |  00m00s
[7/8] python3-libvirt-0:11.0.0-1.fc42.x86_64                                                                               100% |   1.4 MiB/s | 363.5 KiB |  00m00s
[8/8] libvirt-daemon-driver-vbox-0:11.0.0-3.fc42.x86_64                                                                    100% |   1.0 MiB/s | 267.1 KiB |  00m00s
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
[8/8] Total                                                                                                                100% |   1.4 MiB/s |   1.5 MiB |  00m01s
Running transaction
[ 1/10] Verify package files                                                                                               100% |   1.3 KiB/s |   8.0   B |  00m00s
[ 2/10] Prepare transaction                                                                                                100% |  41.0   B/s |   8.0   B |  00m00s
[ 3/10] Installing python3-libvirt-0:11.0.0-1.fc42.x86_64                                                                  100% | 137.1 MiB/s |   2.1 MiB |  00m00s
[ 4/10] Installing libvirt-client-qemu-0:11.0.0-3.fc42.x86_64                                                              100% |   2.5 MiB/s |  64.7 KiB |  00m00s
[ 5/10] Installing libvirt-daemon-driver-vbox-0:11.0.0-3.fc42.x86_64                                                       100% |  33.2 MiB/s | 952.0 KiB |  00m00s
[ 6/10] Installing libvirt-daemon-driver-lxc-0:11.0.0-3.fc42.x86_64                                                        100% |  33.1 MiB/s |   1.1 MiB |  00m00s
[ 7/10] Installing libvirt-daemon-driver-libxl-0:11.0.0-3.fc42.x86_64                                                      100% |  33.5 MiB/s |   1.0 MiB |  00m00s
[ 8/10] Installing libvirt-daemon-driver-ch-0:11.0.0-3.fc42.x86_64                                                         100% |  32.8 MiB/s | 840.2 KiB |  00m00s
[ 9/10] Installing libvirt-daemon-config-nwfilter-0:11.0.0-3.fc42.x86_64                                                   100% | 115.1 KiB/s |  14.2 KiB |  00m00s
[10/10] Installing libvirt-0:11.0.0-3.fc42.x86_64                                                                          100% | 147.0   B/s | 124.0   B |  00m01s
>>> Running %posttrans scriptlet: libvirt-daemon-driver-vbox-0:11.0.0-3.fc42.x86_64                                                                                
>>> Finished %posttrans scriptlet: libvirt-daemon-driver-vbox-0:11.0.0-3.fc42.x86_64                                                                               
>>> Scriptlet output:                                                                                                                                              
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtvboxd.socket' → '/usr/lib/systemd/system/virtvboxd.socket'.                                      
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtvboxd-ro.socket' → '/usr/lib/systemd/system/virtvboxd-ro.socket'.                                
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtvboxd-admin.socket' → '/usr/lib/systemd/system/virtvboxd-admin.socket'.                          
>>> Created symlink '/etc/systemd/system/multi-user.target.wants/virtvboxd.service' → '/usr/lib/systemd/system/virtvboxd.service'.                                 
>>>                                                                                                                                                                
>>> Running %posttrans scriptlet: libvirt-daemon-driver-lxc-0:11.0.0-3.fc42.x86_64                                                                                 
>>> Finished %posttrans scriptlet: libvirt-daemon-driver-lxc-0:11.0.0-3.fc42.x86_64                                                                                
>>> Scriptlet output:                                                                                                                                              
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtlxcd.socket' → '/usr/lib/systemd/system/virtlxcd.socket'.                                        
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtlxcd-ro.socket' → '/usr/lib/systemd/system/virtlxcd-ro.socket'.                                  
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtlxcd-admin.socket' → '/usr/lib/systemd/system/virtlxcd-admin.socket'.                            
>>> Created symlink '/etc/systemd/system/multi-user.target.wants/virtlxcd.service' → '/usr/lib/systemd/system/virtlxcd.service'.                                   
>>>                                                                                                                                                                
>>> Running %posttrans scriptlet: libvirt-daemon-driver-libxl-0:11.0.0-3.fc42.x86_64                                                                               
>>> Finished %posttrans scriptlet: libvirt-daemon-driver-libxl-0:11.0.0-3.fc42.x86_64                                                                              
>>> Scriptlet output:                                                                                                                                              
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtxend.socket' → '/usr/lib/systemd/system/virtxend.socket'.                                        
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtxend-ro.socket' → '/usr/lib/systemd/system/virtxend-ro.socket'.                                  
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtxend-admin.socket' → '/usr/lib/systemd/system/virtxend-admin.socket'.                            
>>> Created symlink '/etc/systemd/system/multi-user.target.wants/virtxend.service' → '/usr/lib/systemd/system/virtxend.service'.                                   
>>> Created symlink '/etc/systemd/system/sockets.target.wants/virtlockd-admin.socket' → '/usr/lib/systemd/system/virtlockd-admin.socket'.                          
>>>                                                                                                                                                                
Complete!

Download installation 






Install CRC



 
dave@fedora:~$ cd ~/Downloads/
dave@fedora:~/Downloads$ ls -l crc-linux-amd64.tar.xz 
-rw-r--r--. 1 dave dave 37031432 Aug  3 09:32 crc-linux-amd64.tar.xz
dave@fedora:~/Downloads$ tar xvf crc-linux-amd64.tar.xz
crc-linux-2.53.0-amd64/
crc-linux-2.53.0-amd64/LICENSE
crc-linux-2.53.0-amd64/crc
dave@fedora:~/Downloads$ mkdir -p ~/bin
dave@fedora:~/Downloads$  cp ~/Downloads/crc-linux-*-amd64/crc ~/bin
dave@fedora:~/Downloads$ export PATH=$PATH:$HOME/bin
dave@fedora:~/Downloads$ echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc
dave@fedora:~/Downloads$ find ~/bin/
/home/dave/bin/
/home/dave/bin/crc

Creating CRC
$ crc delete # Remove previous cluster (if present)
$ crc config set preset openshift # Configure to use openshift preset
$ crc setup # Initialize environment for cluster
$ crc start # Start the cluster

Setup CRC


dave@fedora:~/Downloads$ crc config set preset openshift 
To confirm your system is ready, and you have the needed system bundle, please run 'crc setup' before 'crc start'.
dave@fedora:~/Downloads$ crc setup 
CRC is constantly improving and we would like to know more about usage (more details at https://developers.redhat.com/article/tool-data-collection)
Your preference can be changed manually if desired using 'crc config set consent-telemetry <yes/no>'
Would you like to contribute anonymous usage statistics? [y/N]: y
Thanks for helping us! You can disable telemetry with the command 'crc config set consent-telemetry no'.
INFO Using bundle path /home/dave/.crc/cache/crc_libvirt_4.19.3_amd64.crcbundle 
INFO Checking if running as non-root              
INFO Checking if running inside WSL2              
INFO Checking if crc-admin-helper executable is cached 
INFO Caching crc-admin-helper executable          
INFO Using root access: Changing ownership of /home/dave/.crc/bin/crc-admin-helper-linux-amd64 
[sudo] password for dave: 
INFO Using root access: Setting suid for /home/dave/.crc/bin/crc-admin-helper-linux-amd64 
INFO Checking if running on a supported CPU architecture 
INFO Checking if crc executable symlink exists    
INFO Creating symlink for crc executable          
INFO Checking minimum RAM requirements            
INFO Check if Podman binary exists in: /home/dave/.crc/bin/oc 
INFO Checking if Virtualization is enabled        
INFO Checking if KVM is enabled                   
INFO Checking if libvirt is installed             
INFO Checking if user is part of libvirt group    
INFO Adding user to libvirt group                 
INFO Using root access: Adding user to the libvirt group 
INFO Checking if active user/process is currently part of the libvirt group 
INFO Checking if libvirt daemon is running        
INFO Checking if a supported libvirt version is installed 
INFO Checking if crc-driver-libvirt is installed  
INFO Installing crc-driver-libvirt                
INFO Checking crc daemon systemd service          
INFO Setting up crc daemon systemd service        
INFO Checking crc daemon systemd socket units     
INFO Setting up crc daemon systemd socket units   
INFO Checking if vsock is correctly configured    
INFO Setting up vsock support                     
INFO Using root access: Setting CAP_NET_BIND_SERVICE capability for /home/dave/bin/crc executable 
INFO Using root access: Creating udev rule for /dev/vsock 
INFO Using root access: Changing permissions for /etc/udev/rules.d/99-crc-vsock.rules to 644  
INFO Using root access: Reloading udev rules database 
INFO Using root access: Loading vhost_vsock kernel module 
INFO Using root access: Creating file /etc/modules-load.d/vhost_vsock.conf 
INFO Using root access: Changing permissions for /etc/modules-load.d/vhost_vsock.conf to 644  
INFO Checking if CRC bundle is extracted in '$HOME/.crc' 
INFO Checking if /home/dave/.crc/cache/crc_libvirt_4.19.3_amd64.crcbundle exists 
INFO Getting bundle for the CRC executable        
INFO Downloading bundle: /home/dave/.crc/cache/crc_libvirt_4.19.3_amd64.crcbundle... 

Start CRC


dave@fedora:~/Downloads$ crc start
INFO Using bundle path /home/dave/.crc/cache/crc_libvirt_4.19.3_amd64.crcbundle 
INFO Checking if running as non-root              
INFO Checking if running inside WSL2              
INFO Checking if crc-admin-helper executable is cached 
INFO Checking if running on a supported CPU architecture 
INFO Checking if crc executable symlink exists    
INFO Checking minimum RAM requirements            
INFO Check if Podman binary exists in: /home/dave/.crc/bin/oc 
INFO Checking if Virtualization is enabled        
INFO Checking if KVM is enabled                   
INFO Checking if libvirt is installed             
INFO Checking if user is part of libvirt group    
INFO Checking if active user/process is currently part of the libvirt group 
INFO Checking if libvirt daemon is running        
INFO Checking if a supported libvirt version is installed 
INFO Checking if crc-driver-libvirt is installed  
INFO Checking crc daemon systemd socket units     
INFO Checking if vsock is correctly configured    
INFO Loading bundle: crc_libvirt_4.19.3_amd64...  
CRC requires a pull secret to download content from Red Hat.
You can copy it from the Pull Secret section of https://console.redhat.com/openshift/create/local.
? Please enter the pull secret *********
X Sorry, your reply was invalid: invalid pull secret: invalid character 'c' looking for beginning of value
? Please enter the pull secret ******************************************************************************************************************
INFO Creating CRC VM for OpenShift 4.19.3...      
INFO Generating new SSH key pair...               
INFO Generating new password for the kubeadmin user 
INFO Starting CRC VM for openshift 4.19.3...      
INFO CRC instance is running with IP 127.0.0.1    
INFO CRC VM is running                            
INFO Updating authorized keys...                  
INFO Configuring shared directories               
INFO Check internal and public DNS query...       
INFO Check DNS query from host...                 
INFO Verifying validity of the kubelet certificates... 
INFO Starting kubelet service                     
INFO Waiting for kube-apiserver availability... [takes around 2min] 
INFO Adding user's pull secret to the cluster...  
INFO Updating SSH key to machine config resource... 
INFO Waiting until the user's pull secret is written to the instance disk... 



Started OCP cluster 


Started the OpenShift cluster.

The server is accessible via web console at:
  https://console-openshift-console.apps-crc.testing

Log in as administrator:
  Username: kubeadmin
  Password: SOME-PASSWORD

Log in as user:
  Username: developer
  Password: developer

Use the 'oc' command line interface:
  $ eval $(crc oc-env)
  $ oc login -u developer https://api.crc.testing:6443

oc projects
dave@fedora:~/Downloads$ oc projects
You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    hostpath-provisioner
    kube-node-lease
    kube-public
    kube-system
    openshift
    openshift-apiserver
    openshift-apiserver-operator
    openshift-authentication
    openshift-authentication-operator
    openshift-cloud-network-config-controller
    openshift-cloud-platform-infra
    openshift-cluster-machine-approver
    openshift-cluster-samples-operator
    openshift-cluster-storage-operator
    openshift-cluster-version
    openshift-config
    openshift-config-managed
    openshift-config-operator
    openshift-console
    openshift-console-operator
    openshift-console-user-settings
    openshift-controller-manager
    openshift-controller-manager-operator
    openshift-dns
    openshift-dns-operator
    openshift-etcd
    openshift-etcd-operator
    openshift-host-network
    openshift-image-registry
    openshift-infra
    openshift-ingress
    openshift-ingress-canary
    openshift-ingress-operator
    openshift-kni-infra
    openshift-kube-apiserver
    openshift-kube-apiserver-operator
    openshift-kube-controller-manager
    openshift-kube-controller-manager-operator
    openshift-kube-scheduler
    openshift-kube-scheduler-operator
    openshift-kube-storage-version-migrator
    openshift-kube-storage-version-migrator-operator
    openshift-machine-api
    openshift-machine-config-operator
    openshift-marketplace
    openshift-monitoring
    openshift-multus
    openshift-network-console
    openshift-network-diagnostics
    openshift-network-node-identity
    openshift-network-operator
    openshift-node
    openshift-nutanix-infra
    openshift-oauth-apiserver
    openshift-openstack-infra
    openshift-operator-lifecycle-manager
    openshift-operators
    openshift-ovirt-infra
    openshift-ovn-kubernetes
    openshift-route-controller-manager
    openshift-service-ca
    openshift-service-ca-operator
    openshift-user-workload-monitoring
    openshift-vsphere-infra

Using project "default" on server "https://api.crc.testing:6443".

Login to OCP console



Login using CLI


dave@fedora:~/Downloads$ oc login -u developer https://api.crc.testing:6443
Logged into "https://api.crc.testing:6443" as "developer" using existing credentials.

You don't have any projects. You can try to create a new project, by running

    oc new-project <projectname>

dave@fedora:~/Downloads$ oc whoami
developer

Become admin via CLI

dave@fedora:~/Downloads$ oc whoami
developer
dave@fedora:~/Downloads$ oc config use-context crc-admin
$ oc whoami
Switched to context "crc-admin".
bash: $: command not found...
dave@fedora:~/Downloads$ oc whoami
kubeadmin
dave@fedora:~/Downloads$ oc get co
NAME                                       VERSION   AVAILABLE   PROGRESSING   DEGRADED   SINCE   MESSAGE
authentication                             4.19.3    True        False         False      12m     
config-operator                            4.19.3    True        False         False      23d     
console                                    4.19.3    True        False         False      15m     
control-plane-machine-set                  4.19.3    True        False         False      23d     
dns                                        4.19.3    True        False         False      16m     
etcd                                       4.19.3    True        False         False      23d     
image-registry                             4.19.3    True        False         False      15m     
ingress                                    4.19.3    True        False         False      23d     
kube-apiserver                             4.19.3    True        False         False      23d     
kube-controller-manager                    4.19.3    True        False         False      23d     
kube-scheduler                             4.19.3    True        False         False      23d     
kube-storage-version-migrator              4.19.3    True        False         False      16m     
machine-api                                4.19.3    True        False         False      23d     
machine-approver                           4.19.3    True        False         False      23d     
machine-config                             4.19.3    True        False         False      23d     
marketplace                                4.19.3    True        False         False      23d     
network                                    4.19.3    True        False         False      23d     
openshift-apiserver                        4.19.3    True        False         False      16m     
openshift-controller-manager               4.19.3    True        False         False      5m57s   
openshift-samples                          4.19.3    True        False         False      23d     
operator-lifecycle-manager                 4.19.3    True        False         False      23d     
operator-lifecycle-manager-catalog         4.19.3    True        False         False      23d     
operator-lifecycle-manager-packageserver   4.19.3    True        False         False      16m     
service-ca                                 4.19.3    True        False         False      23d     
dave@fedora:~/Downloads$ 


Create demo project via CLI

dave@fedora:~/Downloads$ oc whoami
kubeadmin
dave@fedora:~/Downloads$  oc registry login --insecure=true
info: Using registry public hostname default-route-openshift-image-registry.apps-crc.testing
Saved credentials for default-route-openshift-image-registry.apps-crc.testing into /run/user/1000/containers/auth.json
dave@fedora:~/Downloads$  oc new-project demo
Now using project "demo" on server "https://api.crc.testing:6443".

You can add applications to this project with the 'new-app' command. For example, try:

    oc new-app rails-postgresql-example

to build a new example application in Ruby. Or use kubectl to deploy a simple Kubernetes application:

    kubectl create deployment hello-node --image=registry.k8s.io/e2e-test-images/agnhost:2.43 -- /agnhost serve-hostname

dave@fedora:~/Downloads$ oc image mirror registry.access.redhat.com/ubi8/ubi:latest=default-route-openshift-image-registry.apps-crc.testing/demo/ubi8:latest --insecure=true --filter-by-os=linux/amd64
default-route-openshift-image-registry.apps-crc.testing/
  demo/ubi8
    blobs:
      registry.access.redhat.com/ubi8/ubi sha256:2ff9823b0bdc42e2d925ed7de8114a50e10c0efc5ba3b71f9828cdd8b4463294 5.018KiB
      registry.access.redhat.com/ubi8/ubi sha256:6a22c1a537480d7699f4f391f3810de860e3dbe23b3fc4128ed78dda4189dda4 74.23MiB
    manifests:
      sha256:0686ee6a1b9f7a4eb706b3562e50bbf55b929a573f6055a1128052b4b2266a2c -> latest
  stats: shared=0 unique=2 size=74.24MiB ratio=1.00

phase 0:
  default-route-openshift-image-registry.apps-crc.testing demo/ubi8 blobs=2 mounts=0 manifests=1 shared=0

info: Planning completed in 1.09s
uploading: default-route-openshift-image-registry.apps-crc.testing/demo/ubi8 sha256:6a22c1a537480d7699f4f391f3810de860e3dbe23b3fc4128ed78dda4189dda4 74.23MiB
sha256:0686ee6a1b9f7a4eb706b3562e50bbf55b929a573f6055a1128052b4b2266a2c default-route-openshift-image-registry.apps-crc.testing/demo/ubi8:latest
info: Mirroring completed in 20.67s (3.765MB/s)
dave@fedora:~/Downloads$ oc get is
NAME   IMAGE REPOSITORY                                                    TAGS     UPDATED
ubi8   default-route-openshift-image-registry.apps-crc.testing/demo/ubi8   latest   8 seconds ago
dave@fedora:~/Downloads$ oc set image-lookup ubi8
imagestream.image.openshift.io/ubi8 image lookup updated
dave@fedora:~/Downloads$ oc run demo --image=ubi8 --command -- sleep 600s
Warning: would violate PodSecurity "restricted:latest": allowPrivilegeEscalation != false (container "demo" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container "demo" must set securityContext.capabilities.drop=["ALL"]), runAsNonRoot != true (pod or container "demo" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "demo" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
pod/demo created

Check demo app via OCP console


Project 


Topology

Pod 


Events