time="2023-11-06T16:07:33+01:00" level=info msg="Starting k3s v1.25.4+k3s- ()" time="2023-11-06T16:07:33+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" time="2023-11-06T16:07:33+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." time="2023-11-06T16:07:33+01:00" level=info msg="Database tables and indexes are up to date" time="2023-11-06T16:07:33+01:00" level=info msg="Kine available at unix://kine.sock" time="2023-11-06T16:07:33+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" time="2023-11-06T16:07:33+01:00" level=info msg="Tunnel server egress proxy mode: agent" time="2023-11-06T16:07:33+01:00" level=info msg="Tunnel server egress proxy waiting for runtime core to become available" time="2023-11-06T16:07:33+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --etcd-servers=unix://kine.sock --feature-gates=JobTrackingWithFinalizers=true --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" time="2023-11-06T16:07:33+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" time="2023-11-06T16:07:33+01:00" level=info msg="Waiting for API server to become available" time="2023-11-06T16:07:33+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 
--cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --feature-gates=JobTrackingWithFinalizers=true --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" I1106 16:07:33.311250 13653 server.go:581] external host was not specified, using 192.168.0.103 time="2023-11-06T16:07:33+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" I1106 16:07:33.312446 13653 server.go:171] Version: v1.25.4+k3s- I1106 16:07:33.312498 13653 server.go:173] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" time="2023-11-06T16:07:33+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" time="2023-11-06T16:07:33+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.0.103:6443 -t ${SERVER_NODE_TOKEN}" time="2023-11-06T16:07:33+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" time="2023-11-06T16:07:33+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.0.103:6443 -t ${AGENT_NODE_TOKEN}" time="2023-11-06T16:07:33+01:00" level=info msg="Wrote kubeconfig /etc/rancher/k3s/k3s.yaml" time="2023-11-06T16:07:33+01:00" level=info msg="Run: k3s kubectl" I1106 16:07:33.336065 13653 shared_informer.go:255] Waiting for caches to sync for node_authorizer I1106 16:07:33.337185 13653 plugins.go:158] Loaded 12 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionWebhook. 
I1106 16:07:33.337199 13653 plugins.go:161] Loaded 11 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,ValidatingAdmissionWebhook,ResourceQuota.
I1106 16:07:33.338448 13653 plugins.go:158] Loaded 12 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionWebhook.
I1106 16:07:33.338461 13653 plugins.go:161] Loaded 11 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,ValidatingAdmissionWebhook,ResourceQuota.
{"level":"warn","ts":"2023-11-06T16:07:33.340+0100","logger":"etcd-client","caller":"v3/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"etcd-endpoints://0xc000121dc0/kine.sock","attempt":0,"error":"rpc error: code = Unknown desc = no such table: dbstat"}
time="2023-11-06T16:07:33+01:00" level=info msg="certificate CN=flawless signed by CN=k3s-server-ca@1699221619: notBefore=2023-11-05 22:00:19 +0000 UTC notAfter=2024-11-05 15:07:33 +0000 UTC"
time="2023-11-06T16:07:33+01:00" level=info msg="certificate CN=system:node:flawless,O=system:nodes signed by CN=k3s-client-ca@1699221619: notBefore=2023-11-05 22:00:19 +0000 UTC notAfter=2024-11-05 15:07:33 +0000 UTC"
time="2023-11-06T16:07:33+01:00" level=info msg="Module overlay was already loaded"
time="2023-11-06T16:07:33+01:00" level=info msg="Module nf_conntrack was already loaded"
time="2023-11-06T16:07:33+01:00" level=info msg="Module br_netfilter was already loaded"
time="2023-11-06T16:07:33+01:00" level=info msg="Module iptable_nat was already loaded"
W1106 16:07:33.363768 13653 genericapiserver.go:656] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources.
I1106 16:07:33.364543 13653 instance.go:261] Using reconciler: lease
time="2023-11-06T16:07:33+01:00" level=warning msg="Flannel is using external addresses with an insecure backend: vxlan. Please consider using an encrypting flannel backend."
time="2023-11-06T16:07:33+01:00" level=info msg="Logging containerd to /var/lib/rancher/k3s/agent/containerd/containerd.log"
time="2023-11-06T16:07:33+01:00" level=info msg="Running containerd -c /var/lib/rancher/k3s/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/k3s/agent/containerd"
I1106 16:07:33.454158 13653 instance.go:574] API group "internal.apiserver.k8s.io" is not enabled, skipping.
W1106 16:07:33.592015 13653 genericapiserver.go:656] Skipping API authentication.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.593507 13653 genericapiserver.go:656] Skipping API authorization.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.596470 13653 genericapiserver.go:656] Skipping API autoscaling/v2beta1 because it has no resources.
W1106 16:07:33.601061 13653 genericapiserver.go:656] Skipping API batch/v1beta1 because it has no resources.
W1106 16:07:33.602605 13653 genericapiserver.go:656] Skipping API certificates.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.603858 13653 genericapiserver.go:656] Skipping API coordination.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.603903 13653 genericapiserver.go:656] Skipping API discovery.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.607084 13653 genericapiserver.go:656] Skipping API networking.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.607094 13653 genericapiserver.go:656] Skipping API networking.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.608218 13653 genericapiserver.go:656] Skipping API node.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.608227 13653 genericapiserver.go:656] Skipping API node.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.608261 13653 genericapiserver.go:656] Skipping API policy/v1beta1 because it has no resources.
W1106 16:07:33.611752 13653 genericapiserver.go:656] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.611766 13653 genericapiserver.go:656] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.612867 13653 genericapiserver.go:656] Skipping API scheduling.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.612876 13653 genericapiserver.go:656] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.616192 13653 genericapiserver.go:656] Skipping API storage.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.619254 13653 genericapiserver.go:656] Skipping API flowcontrol.apiserver.k8s.io/v1alpha1 because it has no resources.
W1106 16:07:33.622549 13653 genericapiserver.go:656] Skipping API apps/v1beta2 because it has no resources.
W1106 16:07:33.622562 13653 genericapiserver.go:656] Skipping API apps/v1beta1 because it has no resources.
W1106 16:07:33.624108 13653 genericapiserver.go:656] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources.
W1106 16:07:33.625626 13653 genericapiserver.go:656] Skipping API events.k8s.io/v1beta1 because it has no resources.
I1106 16:07:33.626483 13653 plugins.go:158] Loaded 12 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionWebhook.
I1106 16:07:33.626494 13653 plugins.go:161] Loaded 11 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,ValidatingAdmissionWebhook,ResourceQuota.
W1106 16:07:33.640137 13653 genericapiserver.go:656] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources.
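
The flannel warning above ("insecure backend: vxlan. Please consider using an encrypting flannel backend") can be acted on through the k3s server configuration. A minimal sketch, assuming a k3s release that ships the wireguard-native flannel backend (v1.23.6+k3s1 and later) and the standard config file location; this is not taken from the log itself:

# Hypothetical: switch flannel to an encrypted backend, then restart the k3s service.
# (wireguard-native requires the wireguard kernel module on every node.)
cat >> /etc/rancher/k3s/config.yaml <<'EOF'
flannel-backend: wireguard-native
EOF
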
time="2023-11-06T16:07:34+01:00" level=info msg="Containerd is now running" time="2023-11-06T16:07:34+01:00" level=info msg="Connecting to proxy" url="wss://127.0.0.1:6443/v1-k3s/connect" time="2023-11-06T16:07:34+01:00" level=info msg="Running kubelet --address=0.0.0.0 --allowed-unsafe-sysctls=net.ipv4.ip_forward,net.ipv6.conf.all.forwarding --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override=flawless --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --node-labels= --pod-infra-container-image=rancher/mirrored-pause:3.6 --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --read-only-port=0 --resolv-conf=/etc/resolv.conf --serialize-image-pulls=false --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" time="2023-11-06T16:07:34+01:00" level=info msg="Handling backend connection request [flawless]" time="2023-11-06T16:07:34+01:00" level=info msg="Waiting to retrieve kube-proxy configuration; server is not ready: https://127.0.0.1:6443/v1-k3s/readyz: 500 Internal Server Error" I1106 16:07:34.725997 13653 dynamic_cafile_content.go:157] "Starting controller" name="request-header::/var/lib/rancher/k3s/server/tls/request-header-ca.crt" I1106 16:07:34.726031 13653 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/var/lib/rancher/k3s/server/tls/client-ca.crt" I1106 16:07:34.726152 13653 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt::/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" I1106 16:07:34.726518 13653 secure_serving.go:210] Serving securely on 127.0.0.1:6444 I1106 16:07:34.726558 13653 tlsconfig.go:240] "Starting DynamicServingCertificateController" I1106 16:07:34.726594 13653 dynamic_serving_content.go:132] "Starting controller" name="aggregator-proxy-cert::/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt::/var/lib/rancher/k3s/server/tls/client-auth-proxy.key" I1106 16:07:34.726632 13653 apf_controller.go:300] Starting API Priority and Fairness config controller I1106 16:07:34.726681 13653 autoregister_controller.go:141] Starting autoregister controller I1106 16:07:34.726691 13653 cache.go:32] Waiting for caches to sync for autoregister controller I1106 16:07:34.726713 13653 crdregistration_controller.go:111] Starting crd-autoregister controller I1106 16:07:34.726720 13653 shared_informer.go:255] Waiting for caches to sync for crd-autoregister I1106 16:07:34.726759 13653 apiservice_controller.go:97] Starting APIServiceRegistrationController I1106 16:07:34.726783 13653 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller I1106 16:07:34.726802 13653 controller.go:83] Starting OpenAPI AggregationController I1106 16:07:34.726815 13653 available_controller.go:491] Starting AvailableConditionController I1106 16:07:34.726821 13653 cache.go:32] Waiting for caches to sync for AvailableConditionController controller I1106 
16:07:34.726932 13653 cluster_authentication_trust_controller.go:440] Starting cluster_authentication_trust_controller controller I1106 16:07:34.726941 13653 shared_informer.go:255] Waiting for caches to sync for cluster_authentication_trust_controller I1106 16:07:34.726961 13653 controller.go:85] Starting OpenAPI controller I1106 16:07:34.726972 13653 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/var/lib/rancher/k3s/server/tls/client-ca.crt" I1106 16:07:34.726988 13653 controller.go:85] Starting OpenAPI V3 controller I1106 16:07:34.727003 13653 naming_controller.go:291] Starting NamingConditionController I1106 16:07:34.727023 13653 establishing_controller.go:76] Starting EstablishingController I1106 16:07:34.727047 13653 nonstructuralschema_controller.go:192] Starting NonStructuralSchemaConditionController I1106 16:07:34.727065 13653 apiapproval_controller.go:186] Starting KubernetesAPIApprovalPolicyConformantConditionController I1106 16:07:34.727086 13653 crd_finalizer.go:266] Starting CRDFinalizer I1106 16:07:34.727265 13653 controller.go:80] Starting OpenAPI V3 AggregationController I1106 16:07:34.728062 13653 dynamic_cafile_content.go:157] "Starting controller" name="request-header::/var/lib/rancher/k3s/server/tls/request-header-ca.crt" I1106 16:07:34.726936 13653 customresource_discovery_controller.go:209] Starting DiscoveryController I1106 16:07:34.826935 13653 cache.go:39] Caches are synced for AvailableConditionController controller I1106 16:07:34.826958 13653 cache.go:39] Caches are synced for APIServiceRegistrationController controller I1106 16:07:34.827125 13653 shared_informer.go:262] Caches are synced for cluster_authentication_trust_controller I1106 16:07:34.827149 13653 shared_informer.go:262] Caches are synced for crd-autoregister I1106 16:07:34.827226 13653 apf_controller.go:305] Running API Priority and Fairness config worker I1106 16:07:34.827244 13653 cache.go:39] Caches are synced for autoregister controller I1106 16:07:34.836228 13653 shared_informer.go:262] Caches are synced for node_authorizer I1106 16:07:35.503555 13653 controller.go:132] OpenAPI AggregationController: action for item k8s_internal_local_delegation_chain_0000000000: Nothing (removed from the queue). I1106 16:07:35.729356 13653 storage_scheduling.go:111] all system priority classes are created successfully or already exist. Flag --cloud-provider has been deprecated, will be removed in 1.25 or later, in favor of removing cloud provider code from Kubelet. Flag --containerd has been deprecated, This is a cadvisor flag that was mistakenly registered with the Kubelet. Due to legacy concerns, it will follow the standard CLI deprecation timeline before being removed. Flag --pod-infra-container-image has been deprecated, will be removed in 1.27. Image garbage collector will get sandbox image information from CRI. 
I1106 16:07:36.381341 13653 server.go:199] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
I1106 16:07:36.382668 13653 server.go:408] "Kubelet version" kubeletVersion="v1.25.4+k3s-"
I1106 16:07:36.382684 13653 server.go:410] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1106 16:07:36.383595 13653 dynamic_cafile_content.go:157] "Starting controller" name="client-ca-bundle::/var/lib/rancher/k3s/agent/client-ca.crt"
W1106 16:07:36.383635 13653 manager.go:159] Cannot detect current cgroup on cgroup v2
time="2023-11-06T16:07:36+01:00" level=info msg="Annotations and labels have already set on node: flawless"
time="2023-11-06T16:07:36+01:00" level=info msg="Starting flannel with backend vxlan"
I1106 16:07:36.389801 13653 server.go:655] "--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /"
I1106 16:07:36.389964 13653 container_manager_linux.go:262] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
I1106 16:07:36.390013 13653 container_manager_linux.go:267] "Creating Container Manager object based on Node Config" nodeConfig={RuntimeCgroupsName: SystemCgroupsName: KubeletCgroupsName: KubeletOOMScoreAdj:-999 ContainerRuntime: CgroupsPerQOS:true CgroupRoot:/ CgroupDriver:cgroupfs KubeletRootDir:/var/lib/kubelet ProtectKernelDefaults:false NodeAllocatableConfig:{KubeReservedCgroupName: SystemReservedCgroupName: ReservedSystemCPUs: EnforceNodeAllocatable:map[pods:{}] KubeReserved:map[] SystemReserved:map[] HardEvictionThresholds:[{Signal:imagefs.available Operator:LessThan Value:{Quantity: Percentage:0.05} GracePeriod:0s MinReclaim:} {Signal:nodefs.available Operator:LessThan Value:{Quantity: Percentage:0.05} GracePeriod:0s MinReclaim:}]} QOSReserved:map[] ExperimentalCPUManagerPolicy:none ExperimentalCPUManagerPolicyOptions:map[] ExperimentalTopologyManagerScope:container ExperimentalCPUManagerReconcilePeriod:10s ExperimentalMemoryManagerPolicy:None ExperimentalMemoryManagerReservedMemory:[] ExperimentalPodPidsLimit:-1 EnforceCPULimits:true CPUCFSQuotaPeriod:100ms ExperimentalTopologyManagerPolicy:none}
I1106 16:07:36.390038 13653 topology_manager.go:134] "Creating topology manager with policy per scope" topologyPolicyName="none" topologyScopeName="container"
I1106 16:07:36.390047 13653 container_manager_linux.go:302] "Creating device plugin manager" devicePluginEnabled=true
I1106 16:07:36.390076 13653 state_mem.go:36] "Initialized new in-memory state store"
I1106 16:07:36.392841 13653 kubelet.go:381] "Attempting to sync node with API server"
I1106 16:07:36.392873 13653 kubelet.go:270] "Adding static pod path" path="/var/lib/rancher/k3s/agent/pod-manifests"
I1106 16:07:36.392897 13653 kubelet.go:281] "Adding apiserver pod source"
I1106 16:07:36.392916 13653 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
I1106 16:07:36.393235 13653 kuberuntime_manager.go:240] "Container runtime initialized" containerRuntime="containerd" version="v1.6.8-k3s1" apiVersion="v1"
I1106 16:07:36.393553 13653 server.go:1170] "Started kubelet"
I1106 16:07:36.393589 13653 server.go:155] "Starting to listen" address="0.0.0.0" port=10250
E1106 16:07:36.393839 13653 cri_stats_provider.go:452] "Failed to get the info of the filesystem with mountpoint" err="unable to find data in memory cache" mountpoint="/var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.overlayfs"
E1106 16:07:36.393862 13653 kubelet.go:1317] "Image garbage collection failed once. Stats initialization may not have completed yet" err="invalid capacity 0 on image filesystem"
I1106 16:07:36.394117 13653 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
I1106 16:07:36.394167 13653 volume_manager.go:293] "Starting Kubelet Volume Manager"
I1106 16:07:36.394232 13653 desired_state_of_world_populator.go:149] "Desired state populator starts to run"
I1106 16:07:36.394440 13653 server.go:438] "Adding debug handlers to kubelet server"
I1106 16:07:36.398250 13653 controller.go:616] quota admission added evaluator for: leases.coordination.k8s.io
I1106 16:07:36.405781 13653 cpu_manager.go:213] "Starting CPU manager" policy="none"
I1106 16:07:36.405802 13653 cpu_manager.go:214] "Reconciling" reconcilePeriod="10s"
I1106 16:07:36.405815 13653 state_mem.go:36] "Initialized new in-memory state store"
I1106 16:07:36.405924 13653 state_mem.go:88] "Updated default CPUSet" cpuSet=""
I1106 16:07:36.405936 13653 state_mem.go:96] "Updated CPUSet assignments" assignments=map[]
I1106 16:07:36.405942 13653 policy_none.go:49] "None policy: Start"
I1106 16:07:36.406207 13653 memory_manager.go:168] "Starting memorymanager" policy="None"
I1106 16:07:36.406222 13653 state_mem.go:35] "Initializing new in-memory state store"
I1106 16:07:36.406337 13653 state_mem.go:75] "Updated machine memory state"
I1106 16:07:36.411393 13653 kubelet_network_linux.go:63] "Initialized iptables rules." protocol=IPv4
I1106 16:07:36.422059 13653 kubelet_network_linux.go:63] "Initialized iptables rules." protocol=IPv6
I1106 16:07:36.422075 13653 status_manager.go:161] "Starting to sync pod status with apiserver"
I1106 16:07:36.422103 13653 kubelet.go:2010] "Starting kubelet main sync loop"
E1106 16:07:36.422137 13653 kubelet.go:2034] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
E1106 16:07:36.448592 13653 cgroup_manager_linux.go:473] cgroup manager.Set failed: openat2 /sys/fs/cgroup/kubepods/cpu.weight: no such file or directory
E1106 16:07:36.448660 13653 kubelet.go:1397] "Failed to start ContainerManager" err="failed to initialize top level QOS containers: root container [kubepods] doesn't exist"
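
The two fatal errors at the end (openat2 on /sys/fs/cgroup/kubepods/cpu.weight failing, then "root container [kubepods] doesn't exist"), together with the earlier "Cannot detect current cgroup on cgroup v2" warning and the kubelet running with --cgroup-driver=cgroupfs, point at the cpu controller not being enabled for child cgroups on this cgroup v2 host: the kubelet creates the kubepods cgroup, but without the cpu controller delegated there is no cpu.weight file to write. A minimal diagnostic sketch, assuming a unified cgroup v2 hierarchy mounted at /sys/fs/cgroup (these are standard kernel interfaces, not k3s-specific; run as root):

# List the controllers available on the host, and those delegated to children of the root:
cat /sys/fs/cgroup/cgroup.controllers
cat /sys/fs/cgroup/cgroup.subtree_control
# If "cpu" appears in cgroup.controllers but not in cgroup.subtree_control, a child
# cgroup such as /sys/fs/cgroup/kubepods gets no cpu.weight file, matching the
# openat2 error above. Enabling the controllers for children tests that hypothesis
# (only list controllers that actually appear in cgroup.controllers):
echo "+cpu +cpuset +memory +pids" > /sys/fs/cgroup/cgroup.subtree_control

On systemd-managed hosts, controller delegation is normally configured through systemd (e.g. the Delegate= unit option) rather than by writing to cgroup.subtree_control directly, so a change made this way may not survive a reboot.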