diff --git a/.golangci.yml b/.golangci.yml index d23ce5f0..8c44044d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -58,7 +58,6 @@ linters-settings: local-prefixes: github.com/rancher/cluster-api-provider-rke2 gci: custom-order: true - #local-prefixes: github.com/rancher/cluster-api-provider-rke2 sections: - "standard" - "blank" @@ -68,6 +67,38 @@ linters-settings: - "prefix(github.com/rancher/cluster-api-provider-rke2)" wsl: force-err-cuddling: false + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr linters: enable-all: true disable: diff --git a/go.mod b/go.mod index 5f267335..7f83cc42 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/coreos/butane v0.19.0 github.com/coreos/ignition/v2 v2.18.0 + github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 github.com/go-logr/logr v1.4.2 github.com/google/gofuzz v1.2.0 github.com/onsi/ginkgo/v2 v2.20.1 @@ -56,7 +57,6 @@ require ( github.com/docker/docker v25.0.6+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect - github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect diff --git a/test/e2e/common.go b/test/e2e/common.go index 0fc514a6..8de682b9 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -45,6 +45,8 @@ const ( KubernetesVersionUpgradeTo = "KUBERNETES_VERSION_UPGRADE_TO" CPMachineTemplateUpgradeTo = "CONTROL_PLANE_MACHINE_TEMPLATE_UPGRADE_TO" WorkersMachineTemplateUpgradeTo = "WORKERS_MACHINE_TEMPLATE_UPGRADE_TO" + ControlPlaneMachineCount = "CONTROL_PLANE_MACHINE_COUNT" + WorkerMachineCount = "WORKER_MACHINE_COUNT" IPFamily = "IP_FAMILY" KindImageVersion = "KIND_IMAGE_VERSION" ) diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml index 9aff6ea4..8fb8c221 100644 --- a/test/e2e/config/e2e_conf.yaml +++ b/test/e2e/config/e2e_conf.yaml @@ -119,14 +119,15 @@ variables: KUBERNETES_VERSION: "v1.28.1" KIND_IMAGE_VERSION: "v1.28.0" NODE_DRAIN_TIMEOUT: "60s" - CONFORMANCE_WORKER_MACHINE_COUNT: "2" - CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT: "1" + WORKER_MACHINE_COUNT: "2" + CONTROL_PLANE_MACHINE_COUNT: "1" KUBERNETES_VERSION_UPGRADE_TO: "v1.28.12" KUBERNETES_UPGRADE_OCI_IMAGE_ID: "${KUBERNETES_UPGRADE_OCI_IMAGE_ID}" IP_FAMILY: "IPv4" EXP_CLUSTER_RESOURCE_SET: "true" EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true" EXP_MACHINE_POOL: "true" + CLUSTER_TOPOLOGY: "true" intervals: default/wait-controllers: ["3m", "10s"] diff --git a/test/e2e/const.go b/test/e2e/const.go new file mode 100644 index 00000000..ee144cb9 --- /dev/null 
+++ b/test/e2e/const.go @@ -0,0 +1,31 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2024 SUSE. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + _ "embed" +) + +var ( + //go:embed data/infrastructure/clusterclass-template-docker.yaml + ClusterClassDocker []byte + //go:embed data/infrastructure/cluster-from-clusterclass-template-docker.yaml + ClusterFromClusterClassDocker []byte +) diff --git a/test/e2e/data/infrastructure/cluster-from-clusterclass-template-docker.yaml b/test/e2e/data/infrastructure/cluster-from-clusterclass-template-docker.yaml new file mode 100644 index 00000000..005e6906 --- /dev/null +++ b/test/e2e/data/infrastructure/cluster-from-clusterclass-template-docker.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.45.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.46.0.0/16 + topology: + class: "${CLASS_NAME}" + version: ${KUBERNETES_VERSION}+rke2r1 + controlPlane: + metadata: {} + replicas: ${CABPR_CP_REPLICAS} + workers: + machineDeployments: + - class: default-worker + name: md-0 + replicas: ${CABPR_WK_REPLICAS} + variables: + - name: dockerKindImage + value: kindest/node:${KIND_IMAGE_VERSION} diff --git a/test/e2e/data/infrastructure/clusterclass-template-docker.yaml b/test/e2e/data/infrastructure/clusterclass-template-docker.yaml new file mode 100644 index 00000000..66079e04 --- /dev/null +++ b/test/e2e/data/infrastructure/clusterclass-template-docker.yaml @@ -0,0 +1,205 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: "${CLASS_NAME}" + namespace: "${NAMESPACE}" +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: RKE2ControlPlaneTemplate + name: ${CLASS_NAME}-control-plane + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: ${CLASS_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: ${CLASS_NAME}-cluster + workers: + machineDeployments: + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: RKE2ConfigTemplate + name: ${CLASS_NAME}-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: ${CLASS_NAME}-default-worker-machinetemplate + variables: + - name: dockerKindImage + required: true + schema: + openAPIV3Schema: + type: string + default: kindest/node:v1.28.12 + patches: + - name: controlPlaneDockerKindImage + definitions: + - selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: /spec/template/spec/customImage + valueFrom: + variable: dockerKindImage + - name: workerDockerKindImage + definitions: + - 
selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + matchResources: + machineDeploymentClass: + names: + - default-worker + jsonPatches: + - op: add + path: "/spec/template/spec/customImage" + valueFrom: + variable: dockerKindImage +--- +apiVersion: v1 +data: + value: |- + # generated by kind + global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + # (see https://github.com/kubernetes-sigs/kind/pull/3115) + maxconn 100000 + + resolvers docker + nameserver dns 127.0.0.11:53 + + defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + + frontend stats + bind *:8404 + stats enable + stats uri / + stats refresh 10s + + frontend control-plane + bind *:{{ .FrontendControlPlanePort }} + {{ if .IPv6 -}} + bind :::{{ .FrontendControlPlanePort }}; + {{- end }} + default_backend kube-apiservers + + backend kube-apiservers + option httpchk GET /healthz + http-check expect status 401 + # TODO: we should be verifying (!) + {{range $server, $address := .BackendServers}} + server {{ $server }} {{ JoinHostPort $address $.BackendControlPlanePort }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + {{- end}} + + frontend rke2-join + bind *:9345 + {{ if .IPv6 -}} + bind :::9345; + {{- end }} + default_backend rke2-servers + + backend rke2-servers + option httpchk GET /v1-rke2/readyz + http-check expect status 403 + {{range $server, $address := .BackendServers}} + server {{ $server }} {{ $address }}:9345 check check-ssl verify none + {{- end}} +kind: ConfigMap +metadata: + name: ${CLASS_NAME}-lb-config + namespace: "${NAMESPACE}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerClusterTemplate +metadata: + name: ${CLASS_NAME}-cluster + namespace: "${NAMESPACE}" +spec: + template: + spec: + loadBalancer: + customHAProxyConfigTemplateRef: + name: ${CLASS_NAME}-lb-config +--- +kind: RKE2ControlPlaneTemplate +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: ${CLASS_NAME}-control-plane + namespace: "${NAMESPACE}" +spec: + template: + spec: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: ${CLASS_NAME}-control-plane + serverConfig: + cni: calico + disableComponents: + kubernetesComponents: [ "cloudController"] + nodeDrainTimeout: 2m + rolloutStrategy: + type: "RollingUpdate" + rollingUpdate: + maxSurge: 1 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: ${CLASS_NAME}-control-plane + namespace: "${NAMESPACE}" +spec: + template: + spec: + customImage: kindest/node:v1.28.0 # will be replaced by the patch + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" + bootstrapTimeout: 15m +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: ${CLASS_NAME}-default-worker-machinetemplate + namespace: "${NAMESPACE}" +spec: + template: + spec: + customImage: kindest/node:v1.28.0 # will be replaced by the patch + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" + bootstrapTimeout: 15m +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: RKE2ConfigTemplate +metadata: + name: 
${CLASS_NAME}-default-worker-bootstraptemplate
+  namespace: "${NAMESPACE}"
+spec:
+  template:
+    spec: {}
diff --git a/test/e2e/e2e_clusterclass_test.go b/test/e2e/e2e_clusterclass_test.go
new file mode 100644
index 00000000..7c48d9d3
--- /dev/null
+++ b/test/e2e/e2e_clusterclass_test.go
@@ -0,0 +1,142 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2023 SUSE.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/drone/envsubst/v2"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+
+	"sigs.k8s.io/cluster-api/util"
+)
+
+var _ = Describe("Workload cluster creation", func() {
+	var (
+		specName = "create-workload-cluster"
+		namespace *corev1.Namespace
+		cancelWatches context.CancelFunc
+		result *ApplyCustomClusterTemplateAndWaitResult
+		clusterName string
+		clusterClassName string
+		clusterctlLogFolder string
+	)
+
+	BeforeEach(func() {
+		Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
+		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
+
+		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))
+
+		By("Initializing the bootstrap cluster")
+		initBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder)
+
+		clusterName = fmt.Sprintf("caprke2-e2e-%s-clusterclass", util.RandomString(6))
+		clusterClassName = "rke2-class"
+
+		// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
+		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
+
+		result = new(ApplyCustomClusterTemplateAndWaitResult)
+
+		// We need to override the clusterctl apply log folder to avoid exposing our credentials.
+ clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) + }) + + AfterEach(func() { + err := CollectArtifacts(ctx, bootstrapClusterProxy.GetKubeconfigPath(), filepath.Join(artifactFolder, bootstrapClusterProxy.GetName(), clusterName+specName)) + Expect(err).ToNot(HaveOccurred()) + + cleanInput := cleanupInput{ + SpecName: specName, + Cluster: result.Cluster, + ClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + CancelWatches: cancelWatches, + IntervalsGetter: e2eConfig.GetIntervals, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactFolder, + AdditionalCleanup: cleanupInstallation(ctx, clusterctlLogFolder, clusterctlConfigPath, bootstrapClusterProxy), + } + + dumpSpecResourcesAndCleanup(ctx, cleanInput) + }) + + Context("Creating a Cluster using ClusterClass", func() { + It("Should deploy a ClusterClass and create a Docker Cluster based on it", func() { + By("Apply ClusterClass template") + + clusterClassConfig, err := envsubst.Eval(string(ClusterClassDocker), func(s string) string { + switch s { + case "CLASS_NAME": + return clusterClassName + case "NAMESPACE": + return namespace.Name + default: + return os.Getenv(s) + } + }) + Expect(err).ToNot(HaveOccurred()) + Expect(bootstrapClusterProxy.Apply(ctx, []byte(clusterClassConfig))).To(Succeed(), "Failed to apply ClusterClass definition") + + By("Create a Docker Cluster from topology") + + clusterConfig, err := envsubst.Eval(string(ClusterFromClusterClassDocker), func(s string) string { + switch s { + case "CLUSTER_NAME": + return clusterName + case "CLASS_NAME": + return clusterClassName + case "NAMESPACE": + return namespace.Name + case "KUBERNETES_VERSION": + return e2eConfig.GetVariable(KubernetesVersion) + case "KIND_IMAGE_VERSION": + return e2eConfig.GetVariable(KindImageVersion) + case "CABPR_CP_REPLICAS": + return e2eConfig.GetVariable(ControlPlaneMachineCount) + case "CABPR_WK_REPLICAS": + return e2eConfig.GetVariable(WorkerMachineCount) + default: + return os.Getenv(s) + } + }) + Expect(err).ToNot(HaveOccurred()) + + ApplyCustomClusterTemplateAndWait(ctx, ApplyCustomClusterTemplateAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + CustomTemplateYAML: []byte(clusterConfig), + ClusterName: clusterName, + Namespace: namespace.Name, + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + }, result) + }) + }) +}) diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go index 76a02908..480c7cea 100644 --- a/test/e2e/helpers.go +++ b/test/e2e/helpers.go @@ -29,6 +29,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" @@ -51,19 +52,41 @@ const ( // ApplyClusterTemplateAndWaitInput is the input type for ApplyClusterTemplateAndWait. 
 type ApplyClusterTemplateAndWaitInput struct {
-	ClusterProxy framework.ClusterProxy
-	ConfigCluster clusterctl.ConfigClusterInput
-	WaitForClusterIntervals []interface{}
-	WaitForControlPlaneIntervals []interface{}
-	WaitForMachineDeployments []interface{}
-	Args []string // extra args to be used during `kubectl apply`
-	PreWaitForCluster func()
-	PostMachinesProvisioned func()
-	WaitForControlPlaneInitialized Waiter
+	ClusterProxy framework.ClusterProxy
+	ConfigCluster clusterctl.ConfigClusterInput
+	WaitForClusterIntervals []interface{}
+	WaitForControlPlaneIntervals []interface{}
+	WaitForMachineDeployments []interface{}
+	Args []string // extra args to be used during `kubectl apply`
+	PreWaitForCluster func()
+	PostMachinesProvisioned func()
+	ControlPlaneWaiters
+}
+
+// ApplyCustomClusterTemplateAndWaitInput is the input type for ApplyCustomClusterTemplateAndWait.
+type ApplyCustomClusterTemplateAndWaitInput struct {
+	ClusterProxy framework.ClusterProxy
+	CustomTemplateYAML []byte
+	ClusterName string
+	Namespace string
+	Flavor string
+	WaitForClusterIntervals []interface{}
+	WaitForControlPlaneIntervals []interface{}
+	WaitForMachineDeployments []interface{}
+	Args []string // extra args to be used during `kubectl apply`
+	PreWaitForCluster func()
+	PostMachinesProvisioned func()
+	ControlPlaneWaiters
 }
 
 // Waiter is a function that runs and waits for a long-running operation to finish and updates the result.
-type Waiter func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult)
+type Waiter func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult)
+
+// ControlPlaneWaiters are Waiter functions for the control plane.
+type ControlPlaneWaiters struct {
+	WaitForControlPlaneInitialized Waiter
+	WaitForControlPlaneMachinesReady Waiter
+}
 
 // ApplyClusterTemplateAndWaitResult is the output type for ApplyClusterTemplateAndWait.
 type ApplyClusterTemplateAndWaitResult struct {
@@ -73,10 +96,17 @@ type ApplyClusterTemplateAndWaitResult struct {
 	MachineDeployments []*clusterv1.MachineDeployment
 }
 
+// ApplyCustomClusterTemplateAndWaitResult is the output type for ApplyCustomClusterTemplateAndWait.
+type ApplyCustomClusterTemplateAndWaitResult struct {
+	ClusterClass *clusterv1.ClusterClass
+	Cluster *clusterv1.Cluster
+	ControlPlane *controlplanev1.RKE2ControlPlane
+	MachineDeployments []*clusterv1.MachineDeployment
+}
+
 // ApplyClusterTemplateAndWait gets a managed cluster template using clusterctl, and waits for the cluster to be ready.
 // Important! This method assumes the cluster uses a RKE2ControlPlane and MachineDeployment.
 func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) {
-	setDefaults(&input)
 	Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait")
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait")
 	Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyClusterTemplateAndWait")
@@ -108,37 +138,93 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate
 	})
 	Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template")
 
-	By("Applying the cluster template yaml to the cluster")
+	// Ensure we have a Cluster for dump and cleanup steps in AfterEach even if ApplyClusterTemplateAndWait fails.
+	result.Cluster = &clusterv1.Cluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: input.ConfigCluster.ClusterName,
+			Namespace: input.ConfigCluster.Namespace,
+		},
+	}
+
+	ApplyCustomClusterTemplateAndWait(ctx, ApplyCustomClusterTemplateAndWaitInput{
+		ClusterProxy: input.ClusterProxy,
+		CustomTemplateYAML: workloadClusterTemplate,
+		ClusterName: input.ConfigCluster.ClusterName,
+		Namespace: input.ConfigCluster.Namespace,
+		Flavor: input.ConfigCluster.Flavor,
+		WaitForClusterIntervals: input.WaitForClusterIntervals,
+		WaitForControlPlaneIntervals: input.WaitForControlPlaneIntervals,
+		WaitForMachineDeployments: input.WaitForMachineDeployments,
+		PreWaitForCluster: input.PreWaitForCluster,
+		PostMachinesProvisioned: input.PostMachinesProvisioned,
+		ControlPlaneWaiters: input.ControlPlaneWaiters,
+	}, (*ApplyCustomClusterTemplateAndWaitResult)(result))
+}
+
+// ApplyCustomClusterTemplateAndWait deploys a cluster from a custom yaml file, and waits for the cluster to be ready.
+// Important! This method assumes the cluster uses a RKE2ControlPlane and MachineDeployment.
+func ApplyCustomClusterTemplateAndWait(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) {
+	setDefaults(&input)
+	Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyCustomClusterTemplateAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyCustomClusterTemplateAndWait")
+	Expect(input.CustomTemplateYAML).NotTo(BeEmpty(), "Invalid argument. input.CustomTemplateYAML can't be empty when calling ApplyCustomClusterTemplateAndWait")
+	Expect(input.ClusterName).NotTo(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling ApplyCustomClusterTemplateAndWait")
+	Expect(input.Namespace).NotTo(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling ApplyCustomClusterTemplateAndWait")
+	Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyCustomClusterTemplateAndWait")
+
+	Byf("Creating the workload cluster with name %q from the provided yaml", input.ClusterName)
+
+	// Ensure we have a Cluster for dump and cleanup steps in AfterEach even if ApplyCustomClusterTemplateAndWait fails.
+	result.Cluster = &clusterv1.Cluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: input.ClusterName,
+			Namespace: input.Namespace,
+		},
+	}
+
+	Byf("Applying the cluster template yaml of cluster %s", klog.KRef(input.Namespace, input.ClusterName))
 	Eventually(func() error {
-		return input.ClusterProxy.Apply(ctx, workloadClusterTemplate, input.Args...)
-	}, 10*time.Second).Should(Succeed(), "Failed to apply the cluster template")
+		return input.ClusterProxy.Apply(ctx, input.CustomTemplateYAML, input.Args...)
+	}, 1*time.Minute).Should(Succeed(), "Failed to apply the cluster template")
 
 	// Once we have applied the cluster template, we can run PreWaitForCluster.
 	// Note: This can e.g. be used to verify the BeforeClusterCreate lifecycle hook is executed
 	// and blocking correctly.
 	if input.PreWaitForCluster != nil {
-		By("Calling PreWaitForCluster")
+		Byf("Calling PreWaitForCluster for cluster %s", klog.KRef(input.Namespace, input.ClusterName))
 		input.PreWaitForCluster()
 	}
 
-	By("Waiting for the cluster infrastructure to be provisioned")
+	Byf("Waiting for the cluster infrastructure of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName))
 	result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
 		Getter: input.ClusterProxy.GetClient(),
-		Namespace: input.ConfigCluster.Namespace,
-		Name: input.ConfigCluster.ClusterName,
+		Namespace: input.Namespace,
+		Name: input.ClusterName,
 	}, input.WaitForClusterIntervals...)
 
-	By("Waiting for RKE2 control plane to be initialized")
+	if result.Cluster.Spec.Topology != nil {
+		result.ClusterClass = framework.GetClusterClassByName(ctx, framework.GetClusterClassByNameInput{
+			Getter: input.ClusterProxy.GetClient(),
+			Namespace: input.Namespace,
+			Name: result.Cluster.Spec.Topology.Class,
+		})
+	}
+
+	Byf("Waiting for control plane of cluster %s to be initialized", klog.KRef(input.Namespace, input.ClusterName))
 	input.WaitForControlPlaneInitialized(ctx, input, result)
 
-	Byf("Waiting for the machine deployments to be provisioned")
-	result.MachineDeployments = DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
+	Byf("Waiting for control plane of cluster %s to be ready", klog.KRef(input.Namespace, input.ClusterName))
+	input.WaitForControlPlaneMachinesReady(ctx, input, result)
+
+	Byf("Waiting for the machine deployments of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName))
+	result.MachineDeployments = framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
 		Lister: input.ClusterProxy.GetClient(),
 		Cluster: result.Cluster,
 	}, input.WaitForMachineDeployments...)
 
 	if input.PostMachinesProvisioned != nil {
-		By("Calling PostMachinesProvisioned")
+		Byf("Calling PostMachinesProvisioned for cluster %s", klog.KRef(input.Namespace, input.ClusterName))
 		input.PostMachinesProvisioned()
 	}
 }
@@ -231,6 +317,75 @@ func GetRKE2ControlPlaneByCluster(ctx context.Context, input GetRKE2ControlPlane
 	return nil
 }
 
+// WaitForControlPlaneAndMachinesReadyInput is the input type for WaitForControlPlaneAndMachinesReady.
+type WaitForControlPlaneAndMachinesReadyInput struct {
+	GetLister framework.GetLister
+	Cluster *clusterv1.Cluster
+	ControlPlane *controlplanev1.RKE2ControlPlane
+}
+
+// WaitForControlPlaneAndMachinesReady waits for a RKE2ControlPlane object to be ready (all the machines provisioned and one node ready).
+func WaitForControlPlaneAndMachinesReady(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneAndMachinesReady")
+	Expect(input.GetLister).ToNot(BeNil(), "Invalid argument. input.GetLister can't be nil when calling WaitForControlPlaneAndMachinesReady")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForControlPlaneAndMachinesReady")
+	Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling WaitForControlPlaneAndMachinesReady")
+
+	if input.ControlPlane.Spec.Replicas != nil && int(*input.ControlPlane.Spec.Replicas) > 1 {
+		Byf("Waiting for the remaining control plane machines managed by %s to be provisioned", klog.KObj(input.ControlPlane))
+		WaitForRKE2ControlPlaneMachinesToExist(ctx, WaitForRKE2ControlPlaneMachinesToExistInput{
+			Lister: input.GetLister,
+			Cluster: input.Cluster,
+			ControlPlane: input.ControlPlane,
+		}, intervals...)
+	}
+
+	Byf("Waiting for control plane %s to be ready (implies underlying nodes to be ready as well)", klog.KObj(input.ControlPlane))
+	waitForControlPlaneToBeReadyInput := WaitForControlPlaneToBeReadyInput{
+		Getter: input.GetLister,
+		ControlPlane: client.ObjectKeyFromObject(input.ControlPlane),
+	}
+	WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput, intervals...)
+
+	framework.AssertControlPlaneFailureDomains(ctx, framework.AssertControlPlaneFailureDomainsInput{
+		Lister: input.GetLister,
+		Cluster: input.Cluster,
+	})
+}
+
+// WaitForRKE2ControlPlaneMachinesToExistInput is the input for WaitForRKE2ControlPlaneMachinesToExist.
+type WaitForRKE2ControlPlaneMachinesToExistInput struct {
+	Lister framework.Lister
+	Cluster *clusterv1.Cluster
+	ControlPlane *controlplanev1.RKE2ControlPlane
+}
+
+// WaitForRKE2ControlPlaneMachinesToExist will wait until all control plane machines have node refs.
+func WaitForRKE2ControlPlaneMachinesToExist(ctx context.Context, input WaitForRKE2ControlPlaneMachinesToExistInput, intervals ...interface{}) {
+	By("Waiting for all control plane nodes to exist")
+	inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
+	// ControlPlane labels
+	matchClusterListOption := client.MatchingLabels{
+		clusterv1.MachineControlPlaneLabel: "",
+		clusterv1.ClusterNameLabel: input.Cluster.Name,
+	}
+
+	Eventually(func() (int, error) {
+		machineList := &clusterv1.MachineList{}
+		if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
+			Byf("Failed to list the machines: %+v", err)
+			return 0, err
+		}
+		count := 0
+		for _, machine := range machineList.Items {
+			if machine.Status.NodeRef != nil {
+				count++
+			}
+		}
+		return count, nil
+	}, intervals...).Should(Equal(int(*input.ControlPlane.Spec.Replicas)), "Timed out waiting for %d control plane machines to exist", int(*input.ControlPlane.Spec.Replicas))
+}
+
 // WaitForControlPlaneToBeReadyInput is the input for WaitForControlPlaneToBeReady.
 type WaitForControlPlaneToBeReadyInput struct {
 	Getter framework.Getter
@@ -328,15 +483,27 @@ func WaitForClusterToUpgrade(ctx context.Context, input WaitForClusterToUpgradeI
 	}, intervals...).Should(BeTrue(), framework.PrettyPrint(input.ControlPlane))
 }
 
-func setDefaults(input *ApplyClusterTemplateAndWaitInput) {
+// setDefaults sets the default values for ApplyCustomClusterTemplateAndWaitInput if not set.
+// Currently, we set the default ControlPlaneWaiters here, which are implemented for RKE2ControlPlane.
+func setDefaults(input *ApplyCustomClusterTemplateAndWaitInput) { if input.WaitForControlPlaneInitialized == nil { - input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) { + input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) { result.ControlPlane = DiscoveryAndWaitForRKE2ControlPlaneInitialized(ctx, DiscoveryAndWaitForRKE2ControlPlaneInitializedInput{ Lister: input.ClusterProxy.GetClient(), Cluster: result.Cluster, }, input.WaitForControlPlaneIntervals...) } } + + if input.WaitForControlPlaneMachinesReady == nil { + input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) { + WaitForControlPlaneAndMachinesReady(ctx, WaitForControlPlaneAndMachinesReadyInput{ + GetLister: input.ClusterProxy.GetClient(), + Cluster: result.Cluster, + ControlPlane: result.ControlPlane, + }, input.WaitForControlPlaneIntervals...) + } + } } var secrets = []string{}
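The new ClusterClass e2e test renders its embedded YAML with drone/envsubst rather than going through clusterctl, which is why go.mod promotes github.com/drone/envsubst/v2 from an indirect to a direct dependency. Below is a minimal standalone sketch of that substitution pattern; the inline template and the hard-coded values are illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"os"

	"github.com/drone/envsubst/v2"
)

func main() {
	// Illustrative template; the e2e test embeds full YAML manifests via //go:embed instead.
	tmpl := "cluster ${CLUSTER_NAME} in namespace ${NAMESPACE}"

	// envsubst.Eval resolves each ${VAR} through the mapping function, so the
	// test can inject values it computes at runtime and fall back to the
	// process environment for everything else.
	rendered, err := envsubst.Eval(tmpl, func(s string) string {
		switch s {
		case "CLUSTER_NAME":
			return "caprke2-e2e-demo" // e.g. generated with util.RandomString(6)
		default:
			return os.Getenv(s)
		}
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(rendered)
}
```

This mirrors how e2e_clusterclass_test.go resolves CLASS_NAME, NAMESPACE, and the replica counts before applying the rendered manifests with bootstrapClusterProxy.Apply.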