diff --git a/.dmtlint.yaml b/.dmtlint.yaml index 4e66aa7425..2e27a1ee4d 100644 --- a/.dmtlint.yaml +++ b/.dmtlint.yaml @@ -1,3 +1,7 @@ +global: + linters-settings: + documentation: + impact: error linters-settings: openapi: exclude-rules: @@ -44,8 +48,6 @@ linters-settings: - kubevirt-internal-virtualization-controller - kubevirt-internal-virtualization-handler module: - oss: - disable: true exclude-rules: license: files: @@ -53,6 +55,10 @@ linters-settings: - images/hooks/pkg/hooks/tls-certificates-audit/hook.go directories: - tests/ + - api/client/generated + - hack/ + - images/ + - tools/ images: # CDI patches are soon to be phased out by moving them to 3p repo patches: diff --git a/.github/workflows/dev_module_build-and-registration.yml b/.github/workflows/dev_module_build-and-registration.yml index d6e38f0cf2..edbc197e66 100644 --- a/.github/workflows/dev_module_build-and-registration.yml +++ b/.github/workflows/dev_module_build-and-registration.yml @@ -35,7 +35,10 @@ on: workflow_dispatch: inputs: tag: - description: "Input existing tag, example v0.16.1. Image module tag in dev-registry will have suffix -dev. For example: v0.16.1-dev" + description: | + Enter a tag of the form vX.Y.Z (release tag) or vX.Y.Z-rc.N (release candidate tag). + + Example: v1.0.0 or v1.0.0-rc.1 type: string required: true @@ -75,7 +78,7 @@ jobs: # Check if tag matches vX.Y.Z pattern (release) if echo "$TAG" | grep -P '^v\d+\.\d+\.\d+$' > /dev/null; then echo "Release tag detected" - echo "MODULES_MODULE_TAG=${TAG}-dev" >> $GITHUB_OUTPUT + echo "MODULES_MODULE_TAG=${TAG}" >> $GITHUB_OUTPUT # Check if tag matches vX.Y.Z-rc.N pattern (release candidate) elif echo "$TAG" | grep -P '^v\d+\.\d+\.\d+-rc\.\d+$' > /dev/null; then echo "Release candidate tag detected" diff --git a/.github/workflows/dev_module_build.yml b/.github/workflows/dev_module_build.yml index 5c85d0b76f..2ca650dbe2 100644 --- a/.github/workflows/dev_module_build.yml +++ b/.github/workflows/dev_module_build.yml @@ -217,7 +217,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -285,7 +286,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -306,7 +308,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -587,6 +590,9 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ginkgo working-directory: ./tests/e2e/ diff --git a/.github/workflows/dev_validation.yaml b/.github/workflows/dev_validation.yaml index a85a5456cf..728d88756a 100644 --- a/.github/workflows/dev_validation.yaml +++ b/.github/workflows/dev_validation.yaml @@ -66,7 +66,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -90,7 +91,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -114,7 +116,8 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: actions/checkout@v4 with: @@ -137,7 +140,8 @@ jobs:
- name: Install Task uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - uses: azure/setup-helm@v4.3.0 with: @@ -182,7 +186,8 @@ jobs: if: matrix.components.component != 'vm-route-forge' || env.route_forge_skip != 'true' uses: arduino/setup-task@v2 with: - version: 3.37.2 + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install dependencies if: matrix.components.component != 'vm-route-forge' || env.route_forge_skip != 'true' diff --git a/.github/workflows/nightly_e2e_tests_ceph.yaml b/.github/workflows/nightly_e2e_tests_ceph.yaml index 9d2a10213a..207b005b34 100644 --- a/.github/workflows/nightly_e2e_tests_ceph.yaml +++ b/.github/workflows/nightly_e2e_tests_ceph.yaml @@ -19,12 +19,12 @@ env: STORAGE_CLASS_NAME: ceph-pool-r2-csi-rbd-immediate CI_COMMIT_REF_NAME: ${{ github.ref_name }} GO_VERSION: "1.24.6" - TIMEOUT: "2h" + TIMEOUT: "3h" on: workflow_dispatch: schedule: - - cron: "0 1 * * *" + - cron: "0 0 * * *" defaults: run: @@ -44,12 +44,15 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ginkgo working-directory: ./tests/e2e/ run: | echo "Install ginkgo" - go tool install + go install tool - name: Install Deckhouse-cli run: | diff --git a/.github/workflows/nightly_e2e_tests_replicated.yaml b/.github/workflows/nightly_e2e_tests_replicated.yaml index 52b2e4b4db..e9fbec639c 100644 --- a/.github/workflows/nightly_e2e_tests_replicated.yaml +++ b/.github/workflows/nightly_e2e_tests_replicated.yaml @@ -19,7 +19,7 @@ env: STORAGE_CLASS_NAME: linstor-thin-r1 CI_COMMIT_REF_NAME: ${{ github.ref_name }} GO_VERSION: "1.24.5" - TIMEOUT: "2h" + TIMEOUT: "3h" on: workflow_dispatch: @@ -44,6 +44,9 @@ jobs: - name: Install Task uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ginkgo working-directory: ./tests/e2e/ diff --git a/.github/workflows/nightly_e2e_tests_report.yaml b/.github/workflows/nightly_e2e_tests_report.yaml index 10d6698122..baebc437dd 100644 --- a/.github/workflows/nightly_e2e_tests_report.yaml +++ b/.github/workflows/nightly_e2e_tests_report.yaml @@ -17,12 +17,12 @@ name: Nightly End-to-End tests report env: CI_COMMIT_REF_NAME: ${{ github.ref_name }} GO_VERSION: "1.24.5" - TIMEOUT: "2h" + TIMEOUT: "3h" on: workflow_dispatch: schedule: - - cron: "0 6 * * *" + - cron: "30 6 * * *" defaults: run: @@ -45,7 +45,7 @@ jobs: markdown_table="" header="| CSI | Status | Passed | Failed | Pending | Skipped | Date | Time | Branch|\n" - separator="|---|---|---|---|---|---|---|---|\n" + separator="|---|---|---|---|---|---|---|---|---|\n" markdown_table+="$header" markdown_table+="$separator" diff --git a/CHANGELOG/CHANGELOG-v1.1.0.yml b/CHANGELOG/CHANGELOG-v1.1.0.yml new file mode 100644 index 0000000000..fe792c1c5c --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.1.0.yml @@ -0,0 +1,82 @@ +ci: + features: + - summary: add alias linter for golangci-lint + pull_request: https://github.com/deckhouse/virtualization/pull/1387 + fixes: + - summary: E2E tests no longer fail due to kubectl logs exit codes during teardown. 
+ pull_request: https://github.com/deckhouse/virtualization/pull/1466 + - summary: fix variable name for delve debug + pull_request: https://github.com/deckhouse/virtualization/pull/1442 +core: + features: + - summary: add e2e framework + pull_request: https://github.com/deckhouse/virtualization/pull/1366 +docs: + fixes: + - summary: remove Cyrillic characters from the English documentation (cdi_kubevirt_patching) + pull_request: https://github.com/deckhouse/virtualization/pull/1481 +module: + features: + - summary: >- + Added the `D8VirtualizationDVCRInsufficientCapacityRisk` alert, which warns of the risk of + insufficient free space in the virtual machine image storage (DVCR). + pull_request: https://github.com/deckhouse/virtualization/pull/1461 + - summary: >- + Added the `KubeNodeAwaitingVirtualMachinesEvictionBeforeShutdown` alert, which is triggered + when the node hosting the virtual machines is about to shut down but VM evacuation is not + yet complete. + pull_request: https://github.com/deckhouse/virtualization/pull/1268 +observability: + fixes: + - summary: >- + Fixed the graph on the virtual machine dashboard that displays memory copy statistics during + VM migration. + pull_request: https://github.com/deckhouse/virtualization/pull/1474 +vd: + fixes: + - summary: respect user-specified storage class when restoring from snapshot + pull_request: https://github.com/deckhouse/virtualization/pull/1417 +vm: + features: + - summary: >- + Added the ability to migrate VMs using disks on local storage. Restrictions: + + - The feature is not available in the CE edition. + + - Migration is only possible for running VMs (`phase: Running`). + + - Migration of VMs with local disks connected via `VirtualMachineBlockDeviceAttachment` + (hotplug) is not supported yet. + + + Added the ability to migrate storage for VM disks (change `StorageClass`). Restrictions: + + - The feature is not available in the CE edition. + + - Migration is only possible for running VMs (`phase: Running`). + + - Storage migration for disks connected via `VirtualMachineBlockDeviceAttachment` (hotplug) + is not supported yet. + pull_request: https://github.com/deckhouse/virtualization/pull/1360 +vmclass: + fixes: + - summary: >- + Use qemu64 CPU model for Discovery and Features types to fix nested virtualization on AMD + hosts + pull_request: https://github.com/deckhouse/virtualization/pull/1446 +vmop: + features: + - summary: >- + Added an operation with the `Clone` type to create a clone of a VM from an existing VM + (`VirtualMachineOperation` `.spec.type: Clone`). + pull_request: https://github.com/deckhouse/virtualization/pull/1418 + fixes: + - summary: >- + Fix the problem where a disk in the "Terminating" phase was wrongly added to kvvm's + volumes during a restore operation in Strict mode. + pull_request: https://github.com/deckhouse/virtualization/pull/1493 + - summary: >- + Fixed garbage collector behavior: previously, all VMOP objects were deleted after restarting + the virtualization controller, ignoring cleanup rules. + pull_request: https://github.com/deckhouse/virtualization/pull/1471 + diff --git a/CHANGELOG/CHANGELOG-v1.1.1.yml b/CHANGELOG/CHANGELOG-v1.1.1.yml new file mode 100644 index 0000000000..a9584b0bdd --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.1.1.yml @@ -0,0 +1,66 @@ +api: + features: + - summary: Do not wait for the agent in the label and annotation test.
+ pull_request: https://github.com/deckhouse/virtualization/pull/1500 +ci: + fixes: + - summary: fix build qemu image + pull_request: https://github.com/deckhouse/virtualization/pull/1586 + - summary: fix Nightly End-to-End tests report + pull_request: https://github.com/deckhouse/virtualization/pull/1565 + - summary: Fixed mirrord health probes and certificate paths for local development. + pull_request: https://github.com/deckhouse/virtualization/pull/1547 + - summary: E2E test timeout errors now include the condition that was being waited for. + pull_request: https://github.com/deckhouse/virtualization/pull/1514 +core: + features: + - summary: Add volumesnapshot resources to e2e fail dump. + pull_request: https://github.com/deckhouse/virtualization/pull/1499 + fixes: + - summary: fixing paths to source code + pull_request: https://github.com/deckhouse/virtualization/pull/1551 + - summary: >- + Fixed an issue in containerdv2 where storage providing a PVC with the FileSystem type was + incorrectly attached via `VirtualMachineBlockDeviceAttachment`. + pull_request: https://github.com/deckhouse/virtualization/pull/1548 + - summary: Fix setting LastTransitionTime in condition if reason changed. + pull_request: https://github.com/deckhouse/virtualization/pull/1543 + - summary: >- + Added error reporting in the status of disks and images when the data source (URL) is + unavailable. + pull_request: https://github.com/deckhouse/virtualization/pull/1534 +module: + fixes: + - summary: fix CVE-2025-58058 and CVE-2025-54410 + pull_request: https://github.com/deckhouse/virtualization/pull/1572 + - summary: Bring some fixes for fuzzing tests. + pull_request: https://github.com/deckhouse/virtualization/pull/1261 +vi: + fixes: + - summary: >- + When creating virtual images from virtual disk snapshots, the + `spec.persistentVolumeClaim.storageClassName` parameter is now respected. Previously, it + could be ignored. + pull_request: https://github.com/deckhouse/virtualization/pull/1533 +vm: + fixes: + - summary: >- + Fixed the `NetworkReady` condition output. It no longer shows the `Unknown` state and + appears only when needed. + pull_request: https://github.com/deckhouse/virtualization/pull/1567 + - summary: Prohibit duplicate networks in the virtual machine `.spec.network` specification. + pull_request: https://github.com/deckhouse/virtualization/pull/1545 +vmbda: + fixes: + - summary: >- + Fixed a bug where, when detaching a virtual image through + `VirtualMachineBlockDeviceAttachment`, the resource could get stuck in the Terminating + state. + pull_request: https://github.com/deckhouse/virtualization/pull/1542 +vmip: + fixes: + - summary: >- + Added validation for static IP addresses to avoid creating a `VirtualMachineIPAddress` + resource with an IP already in use in the cluster. + pull_request: https://github.com/deckhouse/virtualization/pull/1530 + diff --git a/CHANGELOG/CHANGELOG-v1.1.md b/CHANGELOG/CHANGELOG-v1.1.md new file mode 100644 index 0000000000..b5d9d7677b --- /dev/null +++ b/CHANGELOG/CHANGELOG-v1.1.md @@ -0,0 +1,40 @@ +# Changelog v1.1 + +## Features + + + - **[module]** Added the `D8VirtualizationDVCRInsufficientCapacityRisk` alert, which warns of the risk of insufficient free space in the virtual machine image storage (DVCR).
[#1461](https://github.com/deckhouse/virtualization/pull/1461) + - **[module]** Added the `KubeNodeAwaitingVirtualMachinesEvictionBeforeShutdown` alert, which is triggered when the node hosting the virtual machines is about to shut down but VM evacuation is not yet complete. [#1268](https://github.com/deckhouse/virtualization/pull/1268) + - **[vm]** Added the ability to migrate VMs using disks on local storage. Restrictions: + - The feature is not available in the CE edition. + - Migration is only possible for running VMs (`phase: Running`). + - Migration of VMs with local disks connected via `VirtualMachineBlockDeviceAttachment` (hotplug) is not supported yet. + + Added the ability to migrate storage for VM disks (change `StorageClass`). Restrictions: + - The feature is not available in the CE edition. + - Migration is only possible for running VMs (`phase: Running`). + - Storage migration for disks connected via `VirtualMachineBlockDeviceAttachment` (hotplug) is not supported yet. [#1360](https://github.com/deckhouse/virtualization/pull/1360) + - **[vmop]** Added an operation with the `Clone` type to create a clone of a VM from an existing VM (`VirtualMachineOperation` `.spec.type: Clone`). [#1418](https://github.com/deckhouse/virtualization/pull/1418) + +## Fixes + + + - **[core]** Fixed an issue in containerdv2 where storage providing a PVC with the FileSystem type was incorrectly attached via `VirtualMachineBlockDeviceAttachment`. [#1548](https://github.com/deckhouse/virtualization/pull/1548) + - **[core]** Added error reporting in the status of disks and images when the data source (URL) is unavailable. [#1534](https://github.com/deckhouse/virtualization/pull/1534) + - **[module]** fix CVE-2025-58058 and CVE-2025-54410 [#1572](https://github.com/deckhouse/virtualization/pull/1572) + - **[observability]** Fixed the graph on the virtual machine dashboard that displays memory copy statistics during VM migration. [#1474](https://github.com/deckhouse/virtualization/pull/1474) + - **[vd]** respect user-specified storage class when restoring from snapshot [#1417](https://github.com/deckhouse/virtualization/pull/1417) + - **[vi]** When creating virtual images from virtual disk snapshots, the `spec.persistentVolumeClaim.storageClassName` parameter is now respected. Previously, it could be ignored. [#1533](https://github.com/deckhouse/virtualization/pull/1533) + - **[vm]** Fixed the `NetworkReady` condition output. It no longer shows the `Unknown` state and appears only when needed. [#1567](https://github.com/deckhouse/virtualization/pull/1567) + - **[vm]** Prohibit duplicate networks in the virtual machine `.spec.network` specification. [#1545](https://github.com/deckhouse/virtualization/pull/1545) + - **[vmbda]** Fixed a bug where, when detaching a virtual image through `VirtualMachineBlockDeviceAttachment`, the resource could get stuck in the Terminating state. [#1542](https://github.com/deckhouse/virtualization/pull/1542) + - **[vmclass]** Use qemu64 CPU model for Discovery and Features types to fix nested virtualization on AMD hosts [#1446](https://github.com/deckhouse/virtualization/pull/1446) + - **[vmip]** Added validation for static IP addresses to avoid creating a `VirtualMachineIPAddress` resource with an IP already in use in the cluster. [#1530](https://github.com/deckhouse/virtualization/pull/1530) + - **[vmop]** Fix the problem where a disk in the "Terminating" phase was wrongly added to kvvm's volumes during a restore operation in Strict mode.
[#1493](https://github.com/deckhouse/virtualization/pull/1493) + - **[vmop]** Fixed garbage collector behavior: previously, all VMOP objects were deleted after restarting the virtualization controller, ignoring cleanup rules. [#1471](https://github.com/deckhouse/virtualization/pull/1471) + +## Chore + + + - **[observability]** Added Prometheus metrics for virtual machine snapshots (`d8_virtualization_virtualmachinesnapshot_info`) and virtual disk snapshots (`d8_virtualization_virtualdisksnapshot_info`), showing which objects they are associated with. [#1555](https://github.com/deckhouse/virtualization/pull/1555) + diff --git a/api/client/kubeclient/config.go b/api/client/kubeclient/config.go index c50413f28d..11738c025e 100644 --- a/api/client/kubeclient/config.go +++ b/api/client/kubeclient/config.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/tools/clientcmd" "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { @@ -56,7 +56,7 @@ func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { func GetClientFromRESTConfig(config *rest.Config) (Client, error) { shallowCopy := *config - shallowCopy.GroupVersion = &virtv2.SchemeGroupVersion + shallowCopy.GroupVersion = &v1alpha2.SchemeGroupVersion shallowCopy.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: Codecs} shallowCopy.APIPath = "/apis" shallowCopy.ContentType = runtime.ContentTypeJSON diff --git a/api/client/kubeclient/vm.go b/api/client/kubeclient/vm.go index d5e0e6df3b..2eb665ab57 100644 --- a/api/client/kubeclient/vm.go +++ b/api/client/kubeclient/vm.go @@ -34,7 +34,7 @@ import ( virtv1 "kubevirt.io/api/core/v1" virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type vm struct { @@ -96,7 +96,7 @@ func (v vm) VNC(name string) (virtualizationv1alpha2.StreamInterface, error) { return asyncSubresourceHelper(v.config, v.resource, v.namespace, name, "vnc", url.Values{}) } -func (v vm) PortForward(name string, opts v1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, error) { +func (v vm) PortForward(name string, opts subv1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, error) { params := url.Values{} if opts.Port > 0 { params.Add("port", strconv.Itoa(opts.Port)) @@ -107,7 +107,7 @@ func (v vm) PortForward(name string, opts v1alpha2.VirtualMachinePortForward) (v return asyncSubresourceHelper(v.config, v.resource, v.namespace, name, "portforward", params) } -func (v vm) Freeze(ctx context.Context, name string, opts v1alpha2.VirtualMachineFreeze) error { +func (v vm) Freeze(ctx context.Context, name string, opts subv1alpha2.VirtualMachineFreeze) error { path := fmt.Sprintf(subresourceURLTpl, v.namespace, v.resource, name, "freeze") unfreezeTimeout := virtv1.FreezeUnfreezeTimeout{ @@ -132,7 +132,7 @@ func (v vm) Unfreeze(ctx context.Context, name string) error { return v.restClient.Put().AbsPath(path).Do(ctx).Error() } -func (v vm) AddVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineAddVolume) error { +func (v vm) AddVolume(ctx context.Context, name string, opts subv1alpha2.VirtualMachineAddVolume) 
error { path := fmt.Sprintf(subresourceURLTpl, v.namespace, v.resource, name, "addvolume") return v.restClient. Put(). @@ -147,7 +147,7 @@ func (v vm) AddVolume(ctx context.Context, name string, opts v1alpha2.VirtualMac Error() } -func (v vm) RemoveVolume(ctx context.Context, name string, opts v1alpha2.VirtualMachineRemoveVolume) error { +func (v vm) RemoveVolume(ctx context.Context, name string, opts subv1alpha2.VirtualMachineRemoveVolume) error { path := fmt.Sprintf(subresourceURLTpl, v.namespace, v.resource, name, "removevolume") return v.restClient. Put(). diff --git a/api/core/v1alpha2/virtual_machine_operation.go b/api/core/v1alpha2/virtual_machine_operation.go index 0b6e0ebfac..40a585ba05 100644 --- a/api/core/v1alpha2/virtual_machine_operation.go +++ b/api/core/v1alpha2/virtual_machine_operation.go @@ -69,22 +69,26 @@ type VirtualMachineOperationRestoreSpec struct { VirtualMachineSnapshotName string `json:"virtualMachineSnapshotName"` } +// +kubebuilder:validation:XValidation:rule="(has(self.customization) && ((has(self.customization.namePrefix) && size(self.customization.namePrefix) > 0) || (has(self.customization.nameSuffix) && size(self.customization.nameSuffix) > 0))) || (has(self.nameReplacement) && size(self.nameReplacement) > 0)",message="At least one of customization.namePrefix, customization.nameSuffix, or nameReplacement must be set" // VirtualMachineOperationCloneSpec defines the clone operation. type VirtualMachineOperationCloneSpec struct { Mode VMOPRestoreMode `json:"mode"` // NameReplacement defines rules for renaming resources during cloning. + // +kubebuilder:validation:XValidation:rule="self.all(nr, has(nr.to) && size(nr.to) >= 1 && size(nr.to) <= 59)",message="Each nameReplacement.to must be between 1 and 59 characters" NameReplacement []NameReplacement `json:"nameReplacement,omitempty"` // Customization defines customization options for cloning. Customization *VirtualMachineOperationCloneCustomization `json:"customization,omitempty"` } +// +kubebuilder:validation:XValidation:rule="!has(self.namePrefix) || (size(self.namePrefix) >= 1 && size(self.namePrefix) <= 59)",message="namePrefix length must be between 1 and 59 characters if set" +// +kubebuilder:validation:XValidation:rule="!has(self.nameSuffix) || (size(self.nameSuffix) >= 1 && size(self.nameSuffix) <= 59)",message="nameSuffix length must be between 1 and 59 characters if set" // VirtualMachineOperationCloneCustomization defines customization options for cloning. type VirtualMachineOperationCloneCustomization struct { // NamePrefix adds a prefix to resource names during cloning. - // Applied to VirtualDisk, VirtualMachineIPAddress, VirtualMachineMACAddress, and Secret resources. + // Applied to VirtualMachine, VirtualDisk, VirtualMachineBlockDeviceAttachment, and Secret resources. NamePrefix string `json:"namePrefix,omitempty"` // NameSuffix adds a suffix to resource names during cloning. - // Applied to VirtualDisk, VirtualMachineIPAddress, VirtualMachineMACAddress, and Secret resources. + // Applied to VirtualMachine, VirtualDisk, VirtualMachineBlockDeviceAttachment, and Secret resources. 
NameSuffix string `json:"nameSuffix,omitempty"` } diff --git a/api/subresources/install/install.go b/api/subresources/install/install.go index 5c782bd9a4..d9b74037ac 100644 --- a/api/subresources/install/install.go +++ b/api/subresources/install/install.go @@ -21,12 +21,12 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "github.com/deckhouse/virtualization/api/subresources" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) // Install registers the API group and adds types to a scheme func Install(scheme *runtime.Scheme) { utilruntime.Must(subresources.AddToScheme(scheme)) - utilruntime.Must(v1alpha2.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1alpha2.SchemeGroupVersion)) + utilruntime.Must(subv1alpha2.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(subv1alpha2.SchemeGroupVersion)) } diff --git a/api/subresources/register.go b/api/subresources/register.go index 8d19e4c34c..9609f96cf7 100644 --- a/api/subresources/register.go +++ b/api/subresources/register.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // GroupName is the group name use in this package @@ -59,8 +59,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VirtualMachineFreeze{}, &VirtualMachineUnfreeze{}, &VirtualMachineCancelEvacuation{}, - &virtv2.VirtualMachine{}, - &virtv2.VirtualMachineList{}, + &v1alpha2.VirtualMachine{}, + &v1alpha2.VirtualMachineList{}, ) return nil } diff --git a/api/subresources/v1alpha2/register.go b/api/subresources/v1alpha2/register.go index 4df9e743ab..63b3970a3b 100644 --- a/api/subresources/v1alpha2/register.go +++ b/api/subresources/v1alpha2/register.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/subresources" ) @@ -59,8 +59,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VirtualMachineFreeze{}, &VirtualMachineUnfreeze{}, &VirtualMachineCancelEvacuation{}, - &virtv2.VirtualMachine{}, - &virtv2.VirtualMachineList{}, + &v1alpha2.VirtualMachine{}, + &v1alpha2.VirtualMachineList{}, ) return nil } diff --git a/build/components/versions.yml b/build/components/versions.yml index 69df6394f3..7e93569e86 100644 --- a/build/components/versions.yml +++ b/build/components/versions.yml @@ -3,8 +3,8 @@ firmware: libvirt: v10.9.0 edk2: stable202411 core: - 3p-kubevirt: v1.3.1-v12n.13 - 3p-containerized-data-importer: v1.60.3-v12n.10 + 3p-kubevirt: v1.3.1-v12n.17 + 3p-containerized-data-importer: v1.60.3-v12n.12 distribution: 2.8.3 package: acl: v2.3.1 @@ -13,7 +13,7 @@ package: dtc: v1.7.2 e2fsprogs: v1.47.1 file: FILE5_45 - gcc: releases/gcc-14.2.0 + gcc: releases/gcc-13.2.0 glib2: 2.84.2 glibc: glibc-2.38 libgmp: 6.3.0 @@ -31,6 +31,9 @@ package: libfuse3: fuse-3.16.2 libffi: v3.5.2 libgcrypt: libgcrypt-1.10.2 + liburing: liburing-2.6 + libuserspace-rcu: v0.14.0 + libunistring: v1.3 libxcrypt: v4.4.36 libgpg-error: libgpg-error-1.55 libibverbs: 1.0.0 @@ -81,9 +84,11 @@ package: readline: readline-8.2 cyrus-sasl2: cyrus-sasl-2.1.28 # libsasl2-3 libseccomp: v2.6.0 - selinux: 3.6 + selinux: 3.8 libslirp: v4.8.0 + libxkbcommon: xkbcommon-1.10.0 snappy: 
1.2.2 # libsnappy + systemd: v255 zlib: v1.3.1 zstd: v1.5.7 krb5: krb5-1.21.3-final diff --git a/crds/virtualmachineoperations.yaml b/crds/virtualmachineoperations.yaml index f941b062f3..c96023b308 100644 --- a/crds/virtualmachineoperations.yaml +++ b/crds/virtualmachineoperations.yaml @@ -81,6 +81,19 @@ spec: Applied to VirtualMachine, VirtualDisk, VirtualMachineBlockDeviceAttachment, and Secret resources. type: string type: object + x-kubernetes-validations: + - message: + namePrefix length must be between 1 and 59 characters + if set + rule: + "!has(self.namePrefix) || (size(self.namePrefix) >= 1 + && size(self.namePrefix) <= 59)" + - message: + nameSuffix length must be between 1 and 59 characters + if set + rule: + "!has(self.nameSuffix) || (size(self.nameSuffix) >= 1 + && size(self.nameSuffix) <= 59)" mode: description: |- VMOPRestoreMode defines the kind of the restore operation. @@ -121,9 +134,23 @@ spec: - to type: object type: array + x-kubernetes-validations: + - message: Each nameReplacement.to must be between 1 and 59 characters + rule: + self.all(nr, has(nr.to) && size(nr.to) >= 1 && size(nr.to) + <= 59) required: - mode type: object + x-kubernetes-validations: + - message: + At least one of customization.namePrefix, customization.nameSuffix, + or nameReplacement must be set + rule: + (has(self.customization) && ((has(self.customization.namePrefix) + && size(self.customization.namePrefix) > 0) || (has(self.customization.nameSuffix) + && size(self.customization.nameSuffix) > 0))) || (has(self.nameReplacement) + && size(self.nameReplacement) > 0) force: description: |- Force execution of an operation. diff --git a/docs/ADMIN_GUIDE.md b/docs/ADMIN_GUIDE.md index f9ba9cd84f..9f7b55a4d3 100644 --- a/docs/ADMIN_GUIDE.md +++ b/docs/ADMIN_GUIDE.md @@ -465,7 +465,7 @@ It is not recommended to set the annotation on the `generic` class, since the an Example output of the class list without a default class: ```console -$ d8 k get vmclass +$ d8 k get vmclass NAME PHASE ISDEFAULT AGE generic Ready 1d @@ -482,7 +482,7 @@ virtualmachineclass.virtualization.deckhouse.io/host-passthrough-custom annotate After assigning the default class, the output will be: ```console -$ d8 k get vmclass +$ d8 k get vmclass NAME PHASE ISDEFAULT AGE generic Ready 1d @@ -854,15 +854,6 @@ spec: ## Reliability mechanisms -### VM Rebalancing - -The platform provides the ability to automate the management of already running virtual machines in the cluster. To activate this feature, you need to enable the `descheduler` module. - -When you enable the module, it automatically monitors the optimal operation of virtual machines in the cluster. The main features it provides are: - -- Load balancing: The system analyses CPU reservation on cluster nodes. When CPU reservations exceed 80% on a node, the system automatically transfers part of the VMs to less loaded nodes. This prevents overload and ensures stable VM operation. -- Appropriate placement: The system checks whether the current node meets the requirements of each VM and whether the placement rules are followed in relation to the node or other VMs in the cluster. For example, if a VM should not be on the same node as another VM, the module transfers it to a more suitable node. - ### Migration and maintenance mode Virtual machine migration is an important feature in virtualized infrastructure management. It allows you to move running virtual machines from one physical node to another without shutting them down. 
Virtual machine migration is required for a number of tasks and scenarios: @@ -871,6 +862,14 @@ Virtual machine migration is an important feature in virtualized infrastructure - Node maintenance: Virtual machines can be moved from nodes that need to be taken out of service to perform routine maintenance or software upgrade. - Upgrading a virtual machine firmware: The migration allows you to upgrade the firmware of virtual machines without interrupting their operation. +{{< alert level="warning" >}} +Live migration has the following limitations: + +- Only one virtual machine can migrate from each node simultaneously. +- The total number of concurrent migrations in the cluster cannot exceed the number of nodes on which virtual machines are allowed to run. +- The bandwidth for a single migration is limited to 5 Gbps. +{{< /alert >}} + #### Start migration of an arbitrary machine The following is an example of migrating a selected virtual machine. @@ -966,6 +965,17 @@ How to perform the operation in the web interface: - Select the desired node from the list and click the "Cordon + Drain" button. - To remove it from maintenance mode, click the "Uncordon" button. +### VM Rebalancing + +The platform allows you to automatically manage the placement of running virtual machines in the cluster. To enable this feature, activate the `descheduler` module. + +Live migration of virtual machines between cluster nodes is used for rebalancing. + +After the module is enabled, the system automatically monitors the distribution of virtual machines and maintains optimal node utilization. The main features of the module are: + +- Load balancing: The system monitors CPU reservation on each node. If more than 80% of CPU resources are reserved on a node, some virtual machines will be automatically migrated to less-loaded nodes. This helps avoid overloads and ensures stable VM operation. +- Correct placement: The system checks whether the current node satisfies the virtual machine's mandatory placement requirements, as well as the rules governing the relative placement of VMs. For example, if the rules prohibit placing certain VMs on the same node, the module will automatically move them to a suitable node. + ### ColdStandby ColdStandby provides a mechanism to recover a virtual machine from a failure on a node it was running on. diff --git a/docs/ADMIN_GUIDE.ru.md b/docs/ADMIN_GUIDE.ru.md index b7adfe5ab1..c75dd09ffa 100644 --- a/docs/ADMIN_GUIDE.ru.md +++ b/docs/ADMIN_GUIDE.ru.md @@ -473,7 +473,7 @@ VirtualMachineClass по умолчанию задаётся с помощью Пример вывода списка классов без класса по умолчанию: ```console -$ d8 k get vmclass +$ d8 k get vmclass NAME PHASE ISDEFAULT AGE generic Ready 1d @@ -490,7 +490,7 @@ virtualmachineclass.virtualization.deckhouse.io/host-passthrough-custom annotate После назначения класса по умолчанию вывод будет таким: ```console -$ d8 k get vmclass +$ d8 k get vmclass NAME PHASE ISDEFAULT AGE generic Ready 1d @@ -866,15 +866,6 @@ spec: ## Механизмы обеспечения надежности -### Перебалансировка ВМ - -Платформа предоставляет возможность автоматизировать управление размещением уже запущенных виртуальных машин в кластере. Для активации этой функции необходимо включить модуль `descheduler`. - -После включения модуля система самостоятельно следит за оптимальной работой виртуальных машин в кластере. Основные возможности модуля: - -- Балансировка нагрузки — система анализирует резервирование процессора на узлах кластера.
Если на узле зарезервировано более 80% процессора, система автоматически переносит часть ВМ на менее загруженные узлы. Это предотвращает перегрузку и обеспечивает стабильную работу ВМ. -- Подходящее размещение — система проверяет, соответствует ли текущий узел требованиям каждой ВМ, соблюдены ли правила размещения по отношению к узлу или другим ВМ кластера. Например, если ВМ не должна находиться на одном узле с другой ВМ, модуль переносит её на более подходящий узел. - ### Миграция и режим обслуживания Миграция виртуальных машин является важной функцией в управлении виртуализированной инфраструктурой. Она позволяет перемещать работающие виртуальные машины с одного физического узла на другой без их отключения. Миграция виртуальных машин необходима для ряда задач и сценариев: @@ -883,6 +874,14 @@ spec: - Перевод узла в режим обслуживания — виртуальные машины могут быть перемещены с узлов, которые нужно вывести из эксплуатации для выполнения планового обслуживания или обновления программного обеспечения. - Обновление «прошивки» виртуальных машин — миграция позволяет обновить «прошивку» виртуальных машин, не прерывая их работу. +{{< alert level="warning" >}} +При живой миграции действуют следующие ограничения: + +- С каждого узла одновременно может мигрировать только одна виртуальная машина. +- Одновременно в кластере может выполняться количество миграций, не превышающее число узлов, на которых разрешён запуск виртуальных машин. +- Пропускная способность для одной миграции ограничена 5 Гбит/с. +{{< /alert >}} + #### Запуск миграции произвольной машины Далее будет рассмотрен пример миграции выбранной виртуальной машины. @@ -978,6 +977,17 @@ d8 k uncordon - Из списка выберите нужный узел и нажмите кнопку «Сделать Cordon + Drain». - Чтобы вывести его из режима обслуживания, нажмите кнопку «Uncordon». +### Перебалансировка ВМ + +Платформа позволяет автоматически управлять размещением работающих виртуальных машин в кластере. Чтобы включить эту функцию, активируйте модуль `descheduler`. + +Для перебалансировки используется механизм живой миграции виртуальных машин между узлами кластера. + +После активации модуля система самостоятельно следит за распределением виртуальных машин и поддерживает оптимальную загрузку узлов. Основные возможности модуля: + +- Балансировка нагрузки — система отслеживает, сколько процессора зарезервировано на каждом узле. Если на каком-либо узле резервируется более 80% процессорных ресурсов, часть виртуальных машин будет автоматически перенесена на менее загруженные узлы. Это помогает избежать перегрузки и обеспечивает стабильную работу ВМ. +- Корректное размещение — система контролирует, соответствует ли текущий узел обязательным требованиям запросов виртуальной машины, а также правилам по их относительному расположению. Например, если правила не допускают размещения определённых ВМ на одном узле, модуль автоматически перенесёт их на подходящий сервер. + ### ColdStandby ColdStandby обеспечивает механизм восстановления работы виртуальной машины после сбоя на узле, на котором она была запущена. diff --git a/docs/RELEASE_NOTES.md b/docs/RELEASE_NOTES.md index dfdcbbbf01..2498c73e59 100644 --- a/docs/RELEASE_NOTES.md +++ b/docs/RELEASE_NOTES.md @@ -2,25 +2,72 @@ title: "Release Notes" weight: 70 --- -# v1.0.0 + +# v1.1.1 + +## Fixes + +- [core] Fixed an issue in containerd v2 where storage providing a PVC with the FileSystem type was incorrectly attached via `VirtualMachineBlockDeviceAttachment`.
+- [core] Added error reporting in the status of disks and images when the data source (URL) is unavailable. +- [vi] When creating virtual images from virtual disk snapshots, the `spec.persistentVolumeClaim.storageClassName` parameter is now respected. Previously, it could be ignored. +- [vm] Fixed the `NetworkReady` condition output: it no longer shows the `Unknown` state and appears only when needed. +- [vm] Prohibit duplicate networks in the virtual machine `.spec.network` specification. +- [vmip] Added validation for static IP addresses to avoid creating a `VirtualMachineIPAddress` resource with an IP already in use in the cluster. +- [vmbda] Fixed a bug where, when detaching a virtual image through `VirtualMachineBlockDeviceAttachment`, the resource could get stuck in the Terminating state. + +## Other + +- [observability] Added Prometheus metrics for virtual machine snapshots (`d8_virtualization_virtualmachinesnapshot_info`) and virtual disk snapshots (`d8_virtualization_virtualdisksnapshot_info`), showing which objects they are associated with. + +## Security + +- [module] Fixed vulnerabilities CVE-2025-58058 and CVE-2025-54410. + +# v1.1.0 ## New features -* [vm] Added protection to prevent a cloud image (`VirtualImage` \ `ClusterVirtualImage`) from being connected as the first disk. Previously, this caused the VM to fail to start with the "No bootable device" error. -* [vmop] Added `Restore` operation to restore a VM from a previously created snapshot. +- [vm] Added the ability to migrate VMs using disks on local storage. Restrictions: + - The feature is not available in the CE edition. + - Migration is only possible for running VMs (`phase: Running`). + - Migration of VMs with local disks connected via `VirtualMachineBlockDeviceAttachment` (hotplug) is not supported yet. +- [vd] Added the ability to migrate storage for VM disks (change `StorageClass`). Restrictions: + - The feature is not available in the CE edition. + - Migration is only possible for running VMs (`phase: Running`). + - Storage migration for disks connected via `VirtualMachineBlockDeviceAttachment` (hotplug) is not supported yet. +- [vmop] Added an operation with the `Clone` type to create a clone of a VM from an existing VM (`VirtualMachineOperation` `.spec.type: Clone`). +- [observability] Added the `KubeNodeAwaitingVirtualMachinesEvictionBeforeShutdown` alert, which is triggered when the node hosting the virtual machines is about to shut down but VM evacuation is not yet complete. +- [observability] Added the `D8VirtualizationDVCRInsufficientCapacityRisk` alert, which warns of the risk of insufficient free space in the virtual machine image storage (DVCR). ## Fixes -* [vmsnapshot] When restoring a virtual machine from a snapshot, all annotations and labels that were present on the resources at the time of the snapshot are now restored correctly. -* [module] Fixed an issue with queue blocking when the `settings.modules.publicClusterDomain` parameter was empty in the global ModuleConfig resource. -* [module] Optimized hook performance during module installation. -* [vmclass] Fixed core/coreFraction validation in the `VirtualMachineClass` resource. -* [module] When the SDN module is disabled, the configuration of additional networks in the VM is not available. +- [vmclass] Fixed an issue in `VirtualMachineClass` types Features and Discovery that caused nested virtualization not to work on nodes with AMD processors. 
+- [vmop/restore] Fixed a bug where the controller sometimes started a restored VM before its disks were fully restored, resulting in the VM starting with old (unrestored) disks. +- [vmsnapshot] Fixed behavior when creating a VM snapshot with uncommitted changes: the snapshot now instantly captures the current state of the virtual machine, including all current changes. +- [module] Fixed an issue with installing the module on RedOS 8.X. +- [module] Improved validation to prevent adding empty values for parameters that define storage classes for disks and images. +- [vmop] Fixed garbage collector behavior: previously, all VMOP objects were deleted after restarting the virtualization controller, ignoring cleanup rules. +- [observability] The virtual machine dashboard now displays statistics for all networks (including additional ones) connected to the VM. +- [observability] Fixed the graph on the virtual machine dashboard that displays memory copy statistics during VM migration. + +# v1.0.0 + +## New features + +- [vm] Added protection to prevent a cloud image (`VirtualImage` \ `ClusterVirtualImage`) from being connected as the first disk. Previously, this caused the VM to fail to start with the "No bootable device" error. +- [vmop] Added `Restore` operation to restore a VM from a previously created snapshot. ## Fixes -* [vmsnapshot] When restoring a virtual machine from a snapshot, all annotations and labels that were present on the resources at the time of the snapshot are now restored correctly. -* [module] Fixed an issue with queue blocking when the `settings.modules.publicClusterDomain` parameter was empty in the global ModuleConfig resource. -* [module] Optimized hook performance during module installation. -* [vmclass] Fixed core/coreFraction validation in the `VirtualMachineClass` resource. -* [module] When the SDN module is disabled, the configuration of additional networks in the VM is not available. +- [vmsnapshot] When restoring a virtual machine from a snapshot, all annotations and labels that were present on the resources at the time of the snapshot are now restored correctly. +- [module] Fixed an issue with queue blocking when the `settings.modules.publicClusterDomain` parameter was empty in the global ModuleConfig resource. +- [module] Optimized hook performance during module installation. +- [vmclass] Fixed core/coreFraction validation in the `VirtualMachineClass` resource. +- [module] When the SDN module is disabled, the configuration of additional networks in the VM is not available. ## Security -* Fixed CVE-2025-47907 +- Fixed CVE-2025-47907 # v0.25.0 @@ -36,8 +83,7 @@ After upgrading CRI from containerd v1 to containerd v2, it is necessary to recr - [vm] Added the ability to attach additional network interfaces to a virtual machine for networks provided by the `SDN` module. For this, the `SDN` module must be enabled in the cluster. - [vmclass] An annotation has been added to set the default `VirtualMachineClass`. You can designate a `VirtualMachineClass` as the default by adding the annotation `virtualmachineclass.virtualization.deckhouse.io/is-default-class=true`. -This allows creating VMs with an empty `spec.virtualMachineClassName` field, which will be automatically filled with the default class. - + This allows creating VMs with an empty `spec.virtualMachineClassName` field, which will be automatically filled with the default class. ## Fixes @@ -48,7 +94,7 @@ This allows creating VMs with an empty `spec.virtualMachineClassName` field, whi ## Other -- [vm] Improved the garbage collector (GC) for completed virtual machine operations: +- [vmop] Improved the garbage collector (GC) for completed virtual machine operations: - Runs daily at 00:00. - Removes successfully completed operations (`Completed` / `Failed`) after their TTL (24 hours) expires. - Retains only the last 10 completed operations.
diff --git a/docs/RELEASE_NOTES.ru.md b/docs/RELEASE_NOTES.ru.md index c9fa0d2af9..b5226759a0 100644 --- a/docs/RELEASE_NOTES.ru.md +++ b/docs/RELEASE_NOTES.ru.md @@ -3,24 +3,74 @@ title: "Релизы" weight: 70 --- +# v1.1.1 + +## Исправления + +- [core] Исправлена проблема в containerd v2, из-за которой хранилище, предоставляющее PVC с типом `FileSystem`, некорректно подключалось через `VirtualMachineBlockDeviceAttachment`. +- [core] Добавлено отображение ошибок в статусе дисков и образов при недоступности источника данных (URL). +- [vi] Теперь при создании виртуальных образов из снимков виртуальных дисков учитывается параметр `spec.persistentVolumeClaim.storageClassName`. Ранее он мог игнорироваться. +- [vm] Исправлен вывод условия `NetworkReady`: оно больше не отображается в состоянии `Unknown` и показывается только при необходимости. +- [vm] Добавлена валидация, предотвращающая указание одной и той же сети в спецификации виртуальной машины `.spec.network` более одного раза. +- [vmip] Добавлена валидация для статических IP-адресов, предотвращающая создание ресурсов `VirtualMachineIPAddress` с уже используемым в кластере адресом. +- [vmbda] Исправлена ошибка, из-за которой при отключении виртуального образа через `VirtualMachineBlockDeviceAttachment` ресурс мог зависать в состоянии `Terminating`. + +## Прочее + +- [observability] Добавлены метрики Prometheus для снимков виртуальных машин (`d8_virtualization_virtualmachinesnapshot_info`) и дисков (`d8_virtualization_virtualdisksnapshot_info`), показывающие, к каким объектам они относятся. + +## Безопасность + +- [module] Исправлены уязвимости CVE-2025-58058 и CVE-2025-54410. + +# v1.1.0 + +## Новые возможности + +- [vm] Добавлена возможность миграции ВМ, использующей диски на локальных хранилищах. Ограничения: + - Функция недоступна в CE-редакции. + - Миграция возможна только для запущенной ВМ (`phase: Running`). + - Миграция ВМ с локальными дисками, подключенными через `VirtualMachineBlockDeviceAttachment` (hotplug), пока недоступна. +- [vd] Добавлена возможность миграции хранилища для дисков ВМ (изменение `StorageClass`). Ограничения: + - Функция недоступна в CE-редакции. + - Миграция возможна только для запущенной ВМ (`phase: Running`). + - Миграция хранилища для дисков, подключенных через `VirtualMachineBlockDeviceAttachment` (hotplug), пока недоступна. +- [vmop] Добавлена операция с типом `Clone` для создания клона ВМ из существующей ВМ (`VirtualMachineOperation` `.spec.type: Clone`). +- [observability] Добавлен алерт `KubeNodeAwaitingVirtualMachinesEvictionBeforeShutdown`, срабатывающий при получении узлом, на котором размещены виртуальные машины, команды на завершение работы — до завершения эвакуации ВМ. +- [observability] Добавлен алерт `D8VirtualizationDVCRInsufficientCapacityRisk`, предупреждающий о риске нехватки свободного места в хранилище образов виртуальных машин (DVCR). + +## Исправления + +- [vmclass] Исправлена ошибка в `VirtualMachineClass` типах `Features` и `Discovery`, из-за которой на узлах с процессорами AMD не работала вложенная виртуализация. +- [vmop/restore] Исправлена ошибка, при которой контроллер иногда запускал восстановленную ВМ до завершения восстановления её дисков, в результате чего ВМ стартовала со старыми (не восстановленными) дисками. +- [vmsnapshot] Исправлено поведение при создании снимка ВМ при наличии неприменённых изменений: снимок теперь мгновенно фиксирует актуальное состояние виртуальной машины, включая все текущие изменения. +- [module] Исправлена проблема установки модуля на ОС RedOS 8.X.
+- [module] Улучшена валидация, предотвращающая добавление пустых значений для параметров, определяющих классы хранения для дисков и образов. +- [vmop] Исправлена работа сборщика мусора: ранее при перезапуске virtualization-controller все объекты VMOP удалялись без учёта правил очистки. +- [observability] Дашборд виртуальной машины теперь отображает статистику по всем сетям (в том числе и дополнительным), подключённым к ВМ. +- [observability] На дашборде виртуальной машины исправлен график, отображающий статистику копирования памяти во время миграции ВМ. + # v1.0.0 ## Новые возможности -* [vm] Добавлена защита от подключения cloud-образа (`VirtualImage` \ `ClusterVirtualImage`) в качестве первого диска. Ранее это приводило к невозможности запуска ВМ с ошибкой "No bootable device". -* [vmop] Добавлена операция с типом `Restore` для восстановления ВМ из ранее созданного снимка. +- [vm] Добавлена защита от подключения cloud-образа (`VirtualImage` \ `ClusterVirtualImage`) в качестве первого диска. Ранее это приводило к невозможности запуска ВМ с ошибкой "No bootable device". +- [vmop] Добавлена операция с типом `Restore` для восстановления ВМ из ранее созданного снимка. ## Исправления -* [vmsnapshot] Теперь при восстановлении виртуальной машины из снимка корректно восстанавливаются все аннотации и лейблы, которые были у ресурсов в момент снимка. -* [module] Исправлена проблема с блокировкой очереди, когда параметр `settings.modules.publicClusterDomain` был пустым в глобальном ресурсе ModuleConfig. -* [module] Оптимизирована производительность хука во время установки модуля. -* [vmclass] Исправлена валидация core/coreFraction в ресурсе VirtualMachineClass. -* [module] При выключенном модуле SDN конфигурация дополнительных сетей в ВМ недоступна. +- [vmsnapshot] Теперь при восстановлении виртуальной машины из снимка корректно восстанавливаются все аннотации и лейблы, которые были у ресурсов в момент снимка. +- [module] Исправлена проблема с блокировкой очереди, когда параметр `settings.modules.publicClusterDomain` был пустым в глобальном ресурсе ModuleConfig. +- [module] Оптимизирована производительность хука во время установки модуля. +- [vmclass] Исправлена валидация core/coreFraction в ресурсе VirtualMachineClass. +- [module] При выключенном модуле SDN конфигурация дополнительных сетей в ВМ недоступна. ## Безопасность -* Устранено CVE-2025-47907 +- Устранено CVE-2025-47907 # v0.25.0 @@ -31,21 +81,21 @@ weight: 70 ## Новые возможности -* [vm] Добавлена возможность подключения к виртуальной машине дополнительных сетевых интерфейсов к сетям, предоставляемым модулем `SDN`.
Для этого модуль `SDN` должен быть включен в кластере. +- [vmmac] Для дополнительных сетевых интерфейсов добавлено управление MAC-адресами с использованием ресурсов `VirtualMachineMACAddress` и `VirtualMachineMACAddressLease`. +- [vmclass] Добавлена аннотация для установки класса виртуальной машины по умолчанию. Чтобы назначить `VirtualMachineClass` по умолчанию, необходимо добавить на него аннотацию `virtualmachineclass.virtualization.deckhouse.io/is-default-class=true`. Это позволяет создавать ВМ с пустым полем `spec.virtualMachineClassName`, автоматически заполняя его классом по умолчанию. +- [observability] Добавлены новые метрики Prometheus для отслеживания фазы ресурсов, таких как `VirtualMachineSnapshot`, `VirtualDiskSnapshot`, `VirtualImage` и `ClusterVirtualImage`. ## Исправления -* [vm] Исправили проблему: при изменении типа операционной системы машина уходила в циклическую перезагрузку. -* [vm] Исправили зависание виртуальной машины в фазе Starting при нехватке квот проекта. Сообщение о нехватке квот будет отображаться в статусе виртуальной машины. Чтобы машина продолжила запуск, необходимо будет увеличить квоты проекта. -* [vi] Для создания виртуального образа на `PersistentVolumeClaim` должно быть использовано хранилище в режиме `RWX` и `Block`, в противном случае будет отображено предупреждение об ошибке. -* [module] Добавили валидацию, проверяющую, что подсети виртуальных машин не пересекаются с системными подсетями (`podSubnetCIDR` и `serviceSubnetCIDR`). +- [vm] Исправили проблему: при изменении типа операционной системы машина уходила в циклическую перезагрузку. +- [vm] Исправили зависание виртуальной машины в фазе Starting при нехватке квот проекта. Сообщение о нехватке квот будет отображаться в статусе виртуальной машины. Чтобы машина продолжила запуск, необходимо будет увеличить квоты проекта. +- [vi] Для создания виртуального образа на `PersistentVolumeClaim` должно быть использовано хранилище в режиме `RWX` и `Block`, в противном случае будет отображено предупреждение об ошибке. +- [module] Добавили валидацию, проверяющую, что подсети виртуальных машин не пересекаются с системными подсетями (`podSubnetCIDR` и `serviceSubnetCIDR`). ## Прочее -- [vm] Улучшили сборщик мусора (GC) для отработавших операций виртуальной машины: +- [vmop] Улучшили сборщик мусора (GC) для отработавших операций виртуальной машины: - GC запускается каждый день в 00:00; - GC будет удалять успешно завершённые операции (`Completed` \ `Failed`), если истёк их TTL (24 часа); - GC подчищает все завершённые операции (`Completed` \ `Failed`), оставляя только 10 последних. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 9237ccf099..393cd0e473 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -223,7 +223,11 @@ Example of creating a virtual machine with Ubuntu 22.04. ## Images -The `VirtualImage` resource is designed to load virtual machine images and then use them to create virtual machine disks. This resource is available only in the namespace or project in which it was created. +The `VirtualImage` resource is designed for uploading virtual machine images and subsequently using them to create virtual machine disks. + +{{< alert level="warning">}} +Please note that `VirtualImage` is a project resource, which means it is only available within the project or namespace where it was created. To use images at the cluster level, a separate resource is provided — [`ClusterVirtualImage`](./ADMIN_GUIDE.md#images). 
+{{< /alert >}} When connected to a virtual machine, the image is accessed in read-only mode. @@ -607,18 +611,18 @@ Depending on the storage properties, the behavior of disks during creation of vi VolumeBindingMode property: -`Immediate` - The disk is created immediately after the resource is created (the disk is assumed to be available for connection to a virtual machine on any node in the cluster). +`Immediate`: The disk is created immediately after the resource is created (the disk is assumed to be available for connection to a virtual machine on any node in the cluster). ![vd-immediate](images/vd-immediate.png) -`WaitForFirstConsumer` - The disk is created only after it is connected to the virtual machine and is created on the node on which the virtual machine will be running. +`WaitForFirstConsumer`: The disk is created only after it is connected to the virtual machine and is created on the node on which the virtual machine will be running. ![vd-wffc](images/vd-wffc.png) AccessMode: -- `ReadWriteOnce (RWO)` - only one instance of the virtual machine is granted access to the disk. -- `ReadWriteMany (RWX)` - multiple disk access. Live migration of virtual machines with such disks is possible. +- `ReadWriteMany (RWX)`: Multiple disk access. Live migration of virtual machines with such disks is possible. +- `ReadWriteOnce (RWO)`: Only one instance of the virtual machine can access the disk. Live migration of virtual machines with such disks is supported only in DVP commercial editions. Live migration is only available if all disks are connected statically via `.spec.blockDeviceRefs`. Disks connected dynamically via `VirtualMachineBlockDeviceAttachments` must be reattached statically by specifying them in `.spec.blockDeviceRefs`. When creating a disk, the controller will independently determine the most optimal parameters supported by the storage. @@ -649,7 +653,7 @@ How to find out the available storage options in the DVP web interface: - Go to the "System" tab, then to the "Storage" section → "Storage Classes". -## Create an empty disk +### Create an empty disk Empty disks are usually used to install an OS on them, or to store some data. @@ -862,6 +866,35 @@ Method #2: - Click on the "Save" button that appears. - The disk status is displayed at the top left, under its name. +### Changing the disk StorageClass + +In the DVP commercial editions, it is possible to change the StorageClass for existing disks. Currently, this is only supported for running VMs (`phase: Running`). + +{{< alert level="warning">}} +Storage class migration is only available for disks connected statically via `.spec.blockDeviceRefs`. + +To migrate the storage class of disks attached via `VirtualMachineBlockDeviceAttachments`, they must be reattached statically by specifying disk names in `.spec.blockDeviceRefs`. +{{< /alert >}} + +Example: + +```bash +d8 k patch vd disk --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}' +``` + +After the disk configuration is updated, a live migration of the VM is triggered, during which the disk is migrated to the new storage.
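+You can watch the migration progress while it runs. A minimal sketch, assuming the disk is named `disk` as in the example above and the `d8` CLI is configured for the cluster:
+
+```bash
+# Watch the disk resource; its status updates as the migration proceeds
+d8 k get vd disk -w
+```
+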
+
+If a VM has multiple disks attached and you need to change the storage class for several of them, this operation must be performed sequentially:
+
+```bash
+d8 k patch vd disk1 --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}'
+d8 k patch vd disk2 --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}'
+```
+
+If migration fails, it is retried with increasing delays (exponential backoff): 5 seconds before the 1st retry, 10 seconds before the 2nd, then doubling each time until the maximum delay of 300 seconds (5 minutes) is reached at the 7th and subsequent retries. The initial attempt is performed without delay.
+
+To cancel the migration, return the storage class in the disk specification to the original one.
+
 ## Virtual machines
 
 The `VirtualMachine` resource is used to create a virtual machine; its parameters allow you to configure:
@@ -1277,6 +1310,54 @@ Starting the agent service:
 sudo systemctl enable --now qemu-guest-agent
 ```
 
+You can automate the installation of the agent for Linux OS using a cloud-init initialization script. Below is an example snippet of such a script to install qemu-guest-agent:
+
+```yaml
+#cloud-config
+package_update: true
+packages:
+  - qemu-guest-agent
+runcmd:
+  - systemctl enable --now qemu-guest-agent.service
+```
+
+### User Configuration for Cloud Images
+
+When using cloud images (with cloud-init support), you must specify an SSH key or a password for the pre-installed user, or create a new user with a password or SSH key via cloud-init. Otherwise, it will be impossible to log in to the virtual machine!
+
+Examples:
+
+1. Configuring access for an existing user (for example, `ubuntu`, which is often present in official cloud images):
+
+   In many cloud images, the default user is already predefined (e.g., `ubuntu` in Ubuntu Cloud Images), and its name cannot always be overridden via the `cloud-init` `users` block. In such cases, it is recommended to use dedicated cloud-init parameters for managing the default user.
+
+   In a cloud image, you can add a public SSH key for the default user using the `ssh_authorized_keys` parameter at the root level of cloud-init:
+
+   ```yaml
+   #cloud-config
+   ssh_authorized_keys:
+     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD... your-public-key ...
+   ```
+
+2. Creating a new user with a password and SSH key:
+
+   ```yaml
+   #cloud-config
+   users:
+   - name: cloud
+     passwd: "$6$rounds=4096$QktreHgVzeZy70h3$C8c4gjzYMY75.C7IjN1.GgrjMSdeyG79W.hZgsTNnlrJIzuB48qzCui8KP1par.OvCEV3Xi8FzRiqqZ74LOK6."
+     lock_passwd: false
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD... your-public-key ...
+   ssh_pwauth: True
+   ```
+
+{{< alert level="info" >}}
+The value of the `passwd` field is a hashed password (for example, you can generate it using `mkpasswd --method=SHA-512 --rounds=4096`).
+{{< /alert >}}
+
 ### Connecting to a virtual machine
 
 The following methods are available for connecting to the virtual machine:
@@ -1329,7 +1410,7 @@ The virtual machine startup policy is intended for automated virtual machine sta
 
 - `AlwaysOnUnlessStoppedManually` - (default) after creation, the VM is always in a running state. In case of failures the VM operation is restored automatically. It is possible to stop the VM only by calling the `d8 v stop` command or creating a corresponding operation.
- `AlwaysOn` - after creation the VM is always in a running state, even in case of its shutdown by OS means. In case of failures the VM operation is restored automatically.
-- `Manual` - after creation, the state of the VM is controlled manually by the user using commands or operations.
+- `Manual` - after creation, the state of the VM is controlled manually by the user using commands or operations. The VM is powered off immediately after creation. To power it on, the `d8 v start` command must be executed.
 - `AlwaysOff` - after creation the VM is always in the off state. There is no possibility to turn on the VM through commands/operations.
 
 How to select a VM startup policy in the web interface:
@@ -1777,9 +1858,9 @@ Block devices and their features are shown in the table below:
 | `ClusterVirtualImage` | connected in read-only mode, or as a cdrom for iso images |
 | `VirtualDisk` | connects in read/write mode |
 
-#### Static block devices
+#### Boot Block Devices
 
-Static block devices are defined in the virtual machine specification in the `.spec.blockDeviceRefs` block as a list. The order of the devices in this list determines the sequence in which they are loaded. Thus, if a disk or image is specified first, the loader will first try to boot from it. If it fails, the system will go to the next device in the list and try to boot from it. And so on until the first boot loader is detected.
+Boot block devices are defined in the virtual machine specification in the `.spec.blockDeviceRefs` block as a list. The order of the devices in this list determines the sequence in which they are loaded. Thus, if a disk or image is specified first, the loader will first try to boot from it. If it fails, the system will go to the next device in the list and try to boot from it. And so on until the first boot loader is detected.
 
 Changing the composition and order of devices in the `.spec.blockDeviceRefs` block is possible only with a reboot of the virtual machine.
 
@@ -1794,19 +1875,19 @@ spec:
     name: 
 ```
 
-How to work with static block devices in the web interface:
+How to work with bootable block devices in the web interface:
 
 - Go to the "Projects" tab and select the desired project.
 - Go to the "Virtualization" → "Virtual Machines" section.
 - Select the required VM from the list and click on its name.
 - On the "Configuration" tab, scroll down to the "Disks and Images" section.
-- You can add, extract, delete, resize, and reorder static block devices in the "Boot Disks" section.
+- You can add, extract, delete, resize, and reorder bootable block devices in the "Boot Disks" section.
 
-#### Dynamic Block Devices
+#### Additional Block Devices
 
-Dynamic block devices can be connected and disconnected from a virtual machine that is in a running state without having to reboot it.
+Additional block devices can be connected to and disconnected from a virtual machine that is in a running state, without having to reboot it.
 
-The `VirtualMachineBlockDeviceAttachment` (`vmbda`) resource is used to connect dynamic block devices.
+The `VirtualMachineBlockDeviceAttachment` (`vmbda`) resource is used to connect additional block devices.
 
 As an example, create the following resource that connects an empty disk named blank-disk to the linux-vm virtual machine:
 
@@ -1885,13 +1966,13 @@ spec:
 EOF
 ```
 
-How to work with dynamic block devices in the web interface:
+How to work with additional block devices in the web interface:
 
 - Go to the "Projects" tab and select the desired project.
 - Go to the "Virtualization" → "Virtual Machines" section.
- Select the required VM from the list and click on its name. - On the "Configuration" tab, scroll down to the "Disks and Images" section. -- You can add, extract, delete, and resize dynamic block devices in the "Additional Disks" section. +- You can add, extract, delete, and resize additional block devices in the "Additional Disks" section. ### Organizing interaction with virtual machines @@ -2141,8 +2222,19 @@ The live migration process involves several steps: ![](./images/migration.png) +{{< alert level="warning" >}} +For successful live migration, all disks attached to the VM must be accessible on the target nodes to which the migration is planned. + +If a disk uses storage with local disks, such storage must be available to create a new local volume on the target node. + +Otherwise, migration will not be possible. +{{< /alert >}} + + {{< alert level="warning">}} Network speed plays an important role. If bandwidth is low, there are more iterations and VM downtime can increase. In the worst case, the migration may not complete at all. + +To manage the migration process, configure the live migration policy using [`.spec.liveMigrationPolicy`](#configuring-migration-policy) in the VM settings. {{< /alert >}} #### AutoConverge mechanism @@ -2187,12 +2279,13 @@ The trigger for live migration is the appearance of the `VirtualMachineOperation The table shows the `VirtualMachineOperations` resource name prefixes with the `Evict` type that are created for live migrations caused by system events: -| Type of system event | Resource name prefix | -|----------------------------------|------------------------| -| Firmware-update-* | firmware-update-* | -| Load shifting | evacuation-* | -| Drain node | evacuation-* | -| Modify placement parameters | nodeplacement-update-* | +| Type of system event | Resource name prefix | +|---------------------------------|------------------------| +| Firmware update | firmware-update-* | +| Load shifting | evacuation-* | +| Drain node | evacuation-* | +| Modify placement parameters | nodeplacement-update-* | +| Disk storage migration | volume-migration-* | This resource can be in the following states: @@ -2463,26 +2556,12 @@ spec: name: user-net # Network name ``` -It is allowed to connect a VM to the same network multiple times. Example: - -```yaml -spec: - networks: - - type: Main # Must always be specified first - - type: Network - name: user-net # Network name - - type: Network - name: user-net # Network name -``` - Example of connecting to the cluster network `corp-net`: ```yaml spec: networks: - type: Main # Must always be specified first - - type: Network - name: user-net - type: Network name: user-net - type: ClusterNetwork @@ -2498,12 +2577,9 @@ status: - type: Network name: user-net macAddress: aa:bb:cc:dd:ee:01 - - type: Network - name: user-net - macAddress: aa:bb:cc:dd:ee:02 - type: ClusterNetwork name: corp-net - macAddress: aa:bb:cc:dd:ee:03 + macAddress: aa:bb:cc:dd:ee:02 ``` For each additional network interface, a unique MAC address is automatically generated and reserved to avoid collisions. The following resources are used for this: `VirtualMachineMACAddress` (`vmmac`) and `VirtualMachineMACAddressLease` (`vmmacl`). 
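+
+For example, you can inspect the reserved addresses via these resources (a minimal sketch; `vmmac` and `vmmacl` are the short names given above, and the exact output columns depend on the DVP version):
+
+```bash
+# List the MAC addresses reserved for additional network interfaces.
+d8 k get vmmac
+# List the corresponding leases.
+d8 k get vmmacl
+```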
@@ -2696,9 +2772,12 @@ There is a risk of data loss or integrity violation when restoring from such a s
 
 Creating a virtual machine snapshot will fail if at least one of the following conditions is met:
 
 - not all dependencies of the virtual machine are ready;
-- there are changes pending restart of the virtual machine;
 - there is a disk in the process of resizing among the dependent devices.
 
+{{< alert level="warning" >}}
+If there are pending VM changes awaiting a restart when the snapshot is created, the snapshot will include the updated VM configuration.
+{{< /alert >}}
+
 When a snapshot is created, the dynamic IP address of the VM is automatically converted to a static IP address and saved for recovery.
 
 If you do not want to convert and use the old IP address of the virtual machine, you can set the corresponding policy to `Never`. In this case, the address type without conversion (`Auto` or `Static`) will be used.
@@ -2802,6 +2881,68 @@ d8 k get vmop -o json | jq “.status.resources”
 
 It is not recommended to cancel the restore operation (delete the `VirtualMachineOperation` resource in the `InProgress` phase) from a snapshot, which can result in an inconsistent state of the restored virtual machine.
 {{< /alert >}}
+
+## Creating a VM clone
+
+VM cloning is performed using the `VirtualMachineOperation` resource with the `clone` operation type.
+
+{{< alert level="warning">}}
+Before cloning, the source VM must be [powered off](#vm-start-and-state-management-policy).
+It is recommended to set the `.spec.runPolicy: AlwaysOff` parameter in the configuration of the VM being cloned if you want to prevent the VM clone from starting automatically. This is because the clone inherits the behavior of the parent VM.
+{{< /alert >}}
+
+```yaml
+apiVersion: virtualization.deckhouse.io/v1alpha2
+kind: VirtualMachineOperation
+metadata:
+  name: 
+spec:
+  type: Clone
+  virtualMachineName: 
+  clone:
+    mode: DryRun | Strict | BestEffort
+    nameReplacements: []
+    customization: {}
+```
+
+{{< alert level="warning">}}
+The cloned VM will be assigned a new IP address for the cluster network and MAC addresses for additional network interfaces (if any), so you will need to reconfigure the network settings of the guest OS after cloning.
+{{< /alert >}}
+
+Cloning creates a copy of an existing VM, so the resources of the new VM must have unique names. To do this, use the `.spec.clone.nameReplacements` and/or `.spec.clone.customization` parameters.
+
+- `.spec.clone.nameReplacements`: Allows you to replace the names of existing resources with new ones to avoid conflicts.
+- `.spec.clone.customization`: Sets a prefix or suffix for the names of all cloned VM resources (disks, IP addresses, etc.).
+
+Configuration example:
+
+```yaml
+spec:
+  clone:
+    nameReplacements:
+      - from:
+          kind: 
+          name: 
+      - to:
+          name: 
+    customization:
+      namePrefix: 
+      nameSuffix: 
+```
+
+As a result, a VM named <prefix><new name><suffix> will be created.
+
+One of three modes can be used for the cloning operation:
+- `DryRun`: A test run to check for possible conflicts. The results are displayed in the `status.resources` field of the VirtualMachineOperation resource.
+- `Strict`: Strict mode, requiring all resources with new names and their dependencies (e.g., images) to be present in the cloned VM.
+- `BestEffort`: Mode in which missing external dependencies (e.g., ClusterVirtualImage, VirtualImage) are automatically removed from the configuration of the cloned VM.
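+
+For illustration, here is a hypothetical operation that performs a test clone of the VM `linux-vm` and prefixes the names of all cloned resources with `clone-` (the operation name, VM name, and prefix are example values, not defaults):
+
+```yaml
+apiVersion: virtualization.deckhouse.io/v1alpha2
+kind: VirtualMachineOperation
+metadata:
+  name: clone-linux-vm
+spec:
+  type: Clone
+  virtualMachineName: linux-vm
+  clone:
+    # DryRun only reports the resulting names and possible conflicts;
+    # rerun with Strict or BestEffort to perform the actual clone.
+    mode: DryRun
+    customization:
+      namePrefix: clone-
+```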
+ +Information about conflicts that arose during cloning can be viewed in the resource status: + +```bash +d8 k get vmop -o json | jq '.status.resources' +``` + ## Data export DVP allows you to export virtual machine disks and disk images using the `d8` utility (version 1.17 and above). diff --git a/docs/USER_GUIDE.ru.md b/docs/USER_GUIDE.ru.md index 263bdf5d20..66bc4a1e4f 100644 --- a/docs/USER_GUIDE.ru.md +++ b/docs/USER_GUIDE.ru.md @@ -224,7 +224,11 @@ weight: 50 ## Образы -Ресурс `VirtualImage` предназначен для загрузки образов виртуальных машин и их последующего использования для создания дисков виртуальных машин. Данный ресурс доступен только в неймспейсе или проекте в котором он был создан. +Ресурс `VirtualImage` предназначен для загрузки образов виртуальных машин и их последующего использования для создания дисков виртуальных машин. + +{{< alert level="warning">}} +Обратите внимание, что `VirtualImage` — это проектный ресурс, то есть он доступен только в том проекте или пространстве имен, в котором был создан. Для использования образов на уровне всего кластера предназначен отдельный ресурс — [`ClusterVirtualImage`](./ADMIN_GUIDE.ru.md#образы). +{{< /alert >}} При подключении к виртуальной машине доступ к образу предоставляется в режиме «только чтение». @@ -609,24 +613,25 @@ EOF ## Диски -Диски в виртуальных машинах необходимы для записи и хранения данных, они обеспечивают полноценное функционирование приложений и операционных систем. DVP предоставляет хранилище для этих дисков. +Диски в виртуальных машинах используются для записи и хранения данных, что необходимо для корректной работы приложений и операционных систем. Для этих целей в DVP можно использовать различные типы хранилищ. -В зависимости от свойств хранилища, поведение дисков при создании виртуальных машин в процессе эксплуатации может отличаться: +В зависимости от выбранного типа хранилища, поведение дисков при создании виртуальных машин и в процессе эксплуатации может отличаться. -Свойство VolumeBindingMode: +Параметр VolumeBindingMode: `Immediate` - Диск создается сразу после создания ресурса (предполагается, что диск будет доступен для подключения к виртуальной машине на любом узле кластера). ![](images/vd-immediate.ru.png) -`WaitForFirstConsumer` - Диск создается только после того как будет подключен к виртуальной машине и будет создан на том узле, на котором будет запущена виртуальная машина. +`WaitForFirstConsumer` - Диск создается только после того, как будет подключен к виртуальной машине и будет создан на том узле, на котором будет запущена виртуальная машина. ![](images/vd-wffc.ru.png) Режим доступа AccessMode: -- `ReadWriteOnce (RWO)` - доступ к диску предоставляется только одному экземпляру виртуальной машины. - `ReadWriteMany (RWX)` - множественный доступ к диску. Живая миграция виртуальных машин с такими дисками возможна. +- `ReadWriteOnce (RWO)` - доступ к диску предоставляется только одному экземпляру виртуальной машины. Живая миграция виртуальных машин с такими дисками поддерживается только для платных редакций DVP. Живая миграция доступна только если все диски подключены статически через `.spec.blockDeviceRefs`. Диски, подключенные динамически через `VirtualMachineBlockDeviceAttachments`, необходимо статически переподключить, указав их в `.spec.blockDeviceRefs`. + При создании диска контроллер самостоятельно определит наиболее оптимальные параметры поддерживаемые хранилищем. @@ -870,6 +875,35 @@ linux-vm-root Ready 11Gi 12m - Нажмите на появившуюся кнопку «Сохранить». 
- Статус диска отображается слева вверху, под его именем.
 
+### Изменение класса хранения диска
+
+В платных редакциях DVP можно изменить класс хранения для существующих дисков. Сейчас это поддерживается только для работающих ВМ (`Phase` должна быть `Running`).
+
+{{< alert level="warning">}}
+Миграция класса хранения поддерживается только для дисков, статически подключенных через параметр `.spec.blockDeviceRefs` в конфигурации виртуальной машины.
+
+Для миграции класса хранения дисков, подключенных через `VirtualMachineBlockDeviceAttachments`, необходимо переподключить их статически, указав имена дисков в `.spec.blockDeviceRefs`.
+{{< /alert >}}
+
+Пример:
+
+```bash
+d8 k patch vd disk --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}'
+```
+
+После изменения конфигурации диска запустится живая миграция ВМ, в процессе которой диск ВМ будет мигрирован на новое хранилище.
+
+Если к виртуальной машине подключено несколько дисков и класс хранения требуется изменить для нескольких из них, эту операцию необходимо выполнять последовательно:
+
+```bash
+d8 k patch vd disk1 --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}'
+d8 k patch vd disk2 --type=merge --patch '{"spec":{"persistentVolumeClaim":{"storageClassName":"new-storage-class-name"}}}'
+```
+
+При неуспешной миграции повторные попытки выполняются с увеличивающимися задержками (алгоритм экспоненциального backoff): 5 секунд перед 1-й повторной попыткой, 10 секунд перед 2-й, далее задержка удваивается, достигая максимума в 300 секунд (5 минут) начиная с 7-й повторной попытки. Первоначальная попытка выполняется без задержки.
+
+Для отмены миграции пользователь должен вернуть класс хранения в спецификации на исходный.
+
 ## Виртуальные машины
 
 Для создания виртуальной машины используется ресурс `VirtualMachine`. Его параметры позволяют сконфигурировать:
@@ -1291,6 +1325,54 @@ sudo yum install qemu-guest-agent
 sudo systemctl enable --now qemu-guest-agent
 ```
 
+Установку агента для Linux ОС можно автоматизировать с помощью сценария первичной инициализации cloud-init. Ниже приведён пример фрагмента такого сценария для установки qemu-guest-agent:
+
+```yaml
+#cloud-config
+package_update: true
+packages:
+  - qemu-guest-agent
+runcmd:
+  - systemctl enable --now qemu-guest-agent.service
+```
+
+### Настройка пользователя для cloud-образов
+
+При использовании cloud-образов (с поддержкой cloud-init) обязательно задайте SSH-ключ или пароль для предустановленного пользователя, либо создайте нового пользователя с заданным паролем или SSH-ключом через cloud-init. В противном случае войти в виртуальную машину будет невозможно!
+
+Примеры:
+
+1. Настройка доступа для существующего пользователя (например, `ubuntu`, который часто присутствует в официальных cloud-образах):
+
+   Во многих cloud-образах пользователь по умолчанию уже предопределён (например, `ubuntu` в Ubuntu Cloud Images) и его имя не всегда можно переопределить через `cloud-init` с помощью блока `users`. В таких случаях рекомендуется использовать специализированные параметры cloud-init для управления дефолтным пользователем.
+
+   При использовании облачного образа можно добавить публичный SSH-ключ для пользователя по умолчанию с помощью параметра `ssh_authorized_keys` на корневом уровне cloud-init:
+
+   ```yaml
+   #cloud-config
+   ssh_authorized_keys:
+     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD... your-public-key ...
+   ```
+
+2.
Создание нового пользователя с паролем и SSH-ключом:
+
+   ```yaml
+   #cloud-config
+   users:
+   - name: cloud
+     passwd: "$6$rounds=4096$QktreHgVzeZy70h3$C8c4gjzYMY75.C7IjN1.GgrjMSdeyG79W.hZgsTNnlrJIzuB48qzCui8KP1par.OvCEV3Xi8FzRiqqZ74LOK6."
+     lock_passwd: false
+     sudo: ALL=(ALL) NOPASSWD:ALL
+     shell: /bin/bash
+     ssh_authorized_keys:
+     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD... your-public-key ...
+   ssh_pwauth: True
+   ```
+
+{{< alert level="info" >}}
+Значение поля `passwd` — это захешированный пароль (например, можно получить командой `mkpasswd --method=SHA-512 --rounds=4096`).
+{{< /alert >}}
+
 ### Подключение к виртуальной машине
 
 Для подключения к виртуальной машине доступны следующие способы:
@@ -1342,7 +1424,7 @@ d8 v ssh cloud@linux-vm --local-ssh
 
 - `AlwaysOnUnlessStoppedManually` - (по умолчанию) после создания ВМ всегда находится в запущенном состоянии. В случае сбоев работа ВМ восстанавливается автоматически. Остановка ВМ возможна только путем вызова команды `d8 v stop` или создания соответствующей операции.
 - `AlwaysOn` - после создания ВМ всегда находится в работающем состоянии, даже в случае ее выключения средствами ОС. В случае сбоев работа ВМ восстанавливается автоматически.
-- `Manual` - после создания состоянием ВМ управляет пользователь вручную с использованием команд или операций.
+- `Manual` - после создания состоянием ВМ управляет пользователь вручную с использованием команд или операций. ВМ сразу после создания находится в выключенном состоянии. Для включения необходимо выполнить команду `d8 v start`.
 - `AlwaysOff` - после создания ВМ всегда находится в выключенном состоянии. Возможность включения ВМ через команды\операции - отсутствует.
 
 Как выбрать политику запуска ВМ в веб-интерфейсе:
@@ -1796,9 +1878,9 @@ spec:
 | `ClusterVirtualImage` | подключается в режиме для чтения, или как cdrom для iso-образов |
 | `VirtualDisk` | подключается в режиме для чтения и записи |
 
-#### Статические блочные устройства
+#### Загрузочные блочные устройства
 
-Статические блочные устройства указываются в спецификации виртуальной машины в блоке `.spec.blockDeviceRefs` в виде списка. Порядок устройств в этом списке определяет последовательность их загрузки. Таким образом, если диск или образ указан первым, загрузчик сначала попробует загрузиться с него. Если это не удастся, система перейдет к следующему устройству в списке и попытается загрузиться с него. И так далее до момента обнаружения первого загрузчика.
+Загрузочные блочные устройства указываются в спецификации виртуальной машины в блоке `.spec.blockDeviceRefs` в виде списка. Порядок устройств в этом списке определяет последовательность их загрузки. Таким образом, если диск или образ указан первым, загрузчик сначала попробует загрузиться с него. Если это не удастся, система перейдет к следующему устройству в списке и попытается загрузиться с него. И так далее до момента обнаружения первого загрузчика.
 
 Изменение состава и порядка устройств в блоке `.spec.blockDeviceRefs` возможно только с перезагрузкой виртуальной машины.
 
@@ -1813,19 +1895,19 @@ spec:
     name: 
 ```
 
-Как работать со статическими блочными устройствами в веб-интерфейсе:
+Как работать с загрузочными блочными устройствами в веб-интерфейсе:
 
 - Перейдите на вкладку «Проекты» и выберите нужный проект.
 - Перейдите в раздел «Виртуализация» → «Виртуальные машины».
 - Из списка выберите необходимую ВМ и нажмите на её имя.
 - На вкладке «Конфигурация» прокрутите страницу до раздела «Диски и образы».
-- Вы можете добавлять, извлекать, удалять, изменять размер, менять порядок статических блочных устройств в секции «Загрузочные диски».
+- Вы можете добавлять, извлекать, удалять, изменять размер, менять порядок загрузочных блочных устройств в секции «Загрузочные диски».
 
-#### Динамические блочные устройства
+#### Дополнительные блочные устройства
 
-Динамические блочные устройства можно подключать и отключать от виртуальной машины, находящейся в запущенном состоянии, без необходимости её перезагрузки.
+Дополнительные блочные устройства можно подключать и отключать от виртуальной машины, находящейся в запущенном состоянии, без необходимости её перезагрузки.
 
-Для подключения динамических блочных устройств используется ресурс `VirtualMachineBlockDeviceAttachment` (`vmbda`).
+Для подключения дополнительных блочных устройств используется ресурс `VirtualMachineBlockDeviceAttachment` (`vmbda`).
 
 Создайте ресурс, который подключит пустой диск blank-disk к виртуальной машине linux-vm:
 
@@ -1904,13 +1986,13 @@ spec:
 EOF
 ```
 
-Как работать с динамическими блочными устройствами в веб-интерфейсе:
+Как работать с дополнительными блочными устройствами в веб-интерфейсе:
 
 - Перейдите на вкладку «Проекты» и выберите нужный проект.
 - Перейдите в раздел «Виртуализация» → «Виртуальные машины».
 - Из списка выберите необходимую ВМ и нажмите на её имя.
 - На вкладке «Конфигурация» прокрутите страницу до раздела «Диски и образы».
-- Вы можете добавлять, извлекать, удалять, изменять размер динамических блочных устройств в секции «Дополнительные диски».
+- Вы можете добавлять, извлекать, удалять, изменять размер дополнительных блочных устройств в секции «Дополнительные диски».
 
 ### Организация взаимодействия с ВМ
 
@@ -2162,8 +2244,18 @@ EOF
 
 ![](./images/migration.ru.png)
 
+{{< alert level="warning" >}}
+Для успешной живой миграции необходимо, чтобы все подключённые к ВМ диски были доступны на целевых узлах, куда планируется миграция.
+
+Если диск использует хранилище с локальными дисками, такое хранилище должно быть доступно для создания нового локального тома на целевом узле.
+
+В противном случае миграция будет невозможна.
+{{< /alert >}}
+
 {{< alert level="warning" >}}
 Скорость сети играет важную роль. Если пропускная способность низкая, итераций становится больше, а время простоя ВМ может увеличиться. В худшем случае миграция может вообще не завершиться.
+
+Для управления процессом миграции настройте политику живой миграции [`.spec.liveMigrationPolicy`](#настройка-политики-миграции) в настройках ВМ.
 {{< /alert >}}
 
 #### Механизм AutoConverge
@@ -2214,6 +2306,7 @@ AutoConverge — это своего рода «страховка», котор
 | Перераспределение нагрузки | evacuation-\* |
 | Drain узла | evacuation-\* |
 | Изменение параметров размещения | nodeplacement-update-\* |
+| Миграция хранилища дисков | volume-migration-\* |
 
 Данный ресурс может находиться в следующих состояниях:
 
@@ -2494,26 +2587,12 @@ spec:
     name: user-net # Название сети
 ```
 
-Допускается подключать одну ВМ к одной и той же сети несколько раз.
Пример: - -```yaml -spec: - networks: - - type: Main # Обязательно указывать первым - - type: Network - name: user-net # Название сети - - type: Network - name: user-net # Название сети -``` - Пример подключения кластерной сети `corp-net`: ```yaml spec: networks: - type: Main # Обязательно указывать первым - - type: Network - name: user-net - type: Network name: user-net - type: ClusterNetwork @@ -2529,12 +2608,9 @@ status: - type: Network name: user-net macAddress: aa:bb:cc:dd:ee:01 - - type: Network - name: user-net - macAddress: aa:bb:cc:dd:ee:02 - type: ClusterNetwork name: corp-net - macAddress: aa:bb:cc:dd:ee:03 + macAddress: aa:bb:cc:dd:ee:02 ``` Для каждого дополнительного сетевого интерфейса автоматически создается и резервируется уникальный MAC-адрес, что обеспечивает отсутствие коллизий MAC-адресов. Для этих целей используются ресурсы: `VirtualMachineMACAddress` (`vmmac`) и `VirtualMachineMACAddressLease` (`vmmacl`). @@ -2732,9 +2808,12 @@ EOF Создание снимка виртуальной машины будет неудачным, если выполнится хотя бы одно из следующих условий: - не все зависимые устройства виртуальной машины готовы; -- есть изменения, ожидающие перезапуска виртуальной машины; - среди зависимых устройств есть диск, находящийся в процессе изменения размера. +{{< alert level="warning" >}} +Если на момент создания снимка в виртуальной машине есть изменения, ожидающие перезапуска, в снимок попадёт обновлённая конфигурация. +{{< /alert >}} + При создании снимка динамический IP-адрес ВМ автоматически преобразуется в статический и сохраняется для восстановления. Если не требуется преобразование и использование старого IP-адреса виртуальной машины, можно установить соответствующую политику в значение `Never`. В этом случае будет использован тип адреса без преобразования (`Auto` или `Static`). @@ -2804,10 +2883,10 @@ metadata: name: spec: type: Restore - virtualMachineName: <название ВМ, которую требуется восстановить> + virtualMachineName: restore: mode: DryRun | Strict | BestEffort - virtualMachineSnapshotName: <название снимка ВМ из которого требуется восстановить> + virtualMachineSnapshotName: ``` Для данной операции возможно использовать один из трех режимов: @@ -2837,6 +2916,70 @@ d8 k get vmop -o json | jq '.status.resources' Не рекомендуется отменять операцию восстановления (удалять ресурс `VirtualMachineOperation` в фазе `InProgress`) из снимка, так как это может привести к неконсистентному состоянию восстанавливаемой виртуальной машины. {{< /alert >}} + +## Создание клона ВМ + +Клонирование ВМ выполняется с использованием ресурса `VirtualMachineOperation` с типом операции `clone`. + +{{< alert level="warning">}} +Перед клонированием ВМ должна быть [выключена](#политика-запуска-и-управление-состоянием-вм). + +Рекомендуется задавать параметр `.spec.runPolicy: AlwaysOff` в конфигурации клонируемой ВМ, чтобы предотвратить автоматический запуск клона ВМ. Это связано с тем, что клон наследует поведение родительской ВМ. +{{< /alert >}} + +```yaml +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineOperation +metadata: + name: +spec: + type: Clone + virtualMachineName: + clone: + mode: DryRun | Strict | BestEffort + nameReplacements: [] + customization: {} +``` + +{{< alert level="warning">}} +Клонируемой ВМ будет назначен новый IP-адрес для кластерной сети и MAC-адреса для дополнительных сетевых интерфейсов (если они есть), поэтому после клонирования потребуется перенастроить сетевые параметры гостевой ОС. 
+{{< /alert >}}
+
+Клонирование создает копию существующей ВМ, поэтому ресурсы новой ВМ должны иметь уникальные имена. Для этого используются параметры `.spec.clone.nameReplacements` и/или `.spec.clone.customization`.
+
+- `.spec.clone.nameReplacements` — позволяет заменить имена существующих ресурсов на новые, чтобы избежать конфликтов.
+- `.spec.clone.customization` — задает префикс или суффикс для имен всех клонируемых ресурсов ВМ (дисков, IP-адресов и т. д.).
+
+Пример конфигурации:
+
+```yaml
+spec:
+  clone:
+    nameReplacements:
+      - from:
+          kind: 
+          name: 
+      - to:
+          name: 
+    customization:
+      namePrefix: 
+      nameSuffix: 
+```
+
+В результате будет создана ВМ с именем <префикс><новое имя><суффикс>.
+
+Для операции клонирования возможно использовать один из трех режимов:
+
+- `DryRun` — тестовый запуск для проверки возможных конфликтов. Результаты отображаются в поле `status.resources` ресурса VirtualMachineOperation.
+- `Strict` — строгий режим, требующий наличия всех ресурсов с новыми именами и их зависимостей (например, образов) в клонируемой ВМ.
+- `BestEffort` — режим, при котором отсутствующие внешние зависимости (например, ClusterVirtualImage, VirtualImage) автоматически удаляются из конфигурации клонируемой ВМ.
+
+Информацию о конфликтах, возникших при клонировании, можно просмотреть в статусе ресурса:
+
+```bash
+d8 k get vmop -o json | jq '.status.resources'
+```
+
 ## Экспорт данных
 
 DVP позволяет экспортировать диски и снимки дисков виртуальных машин с использованием утилиты `d8` (версия 1.17 и выше).
diff --git a/docs/internal/cpu_model_type_discovery.md b/docs/internal/cpu_model_type_discovery.md
index 2c5a72ad70..187000dabc 100644
--- a/docs/internal/cpu_model_type_discovery.md
+++ b/docs/internal/cpu_model_type_discovery.md
@@ -11,10 +11,10 @@ Other combinations lead to migration problems. These combinations are unpredicta
 
 The error might be a bug in libvirt when it compares features after resolving the target CPU model (still need to investigate).
 
-The current approach is to use kvm64 model for Discovery and Features types. This model contains a small
-set of features and migration works well.
+The current approach is to use qemu64 model for Discovery and Features types. This model contains a small
+set of features and migration works well. (We chose qemu64 over kvm64 because of its better compatibility with AMD CPUs.)
 
 ## Solution
 
-1. Use kvm64 model for Discovery and Features vmclass types.
-2. Add patch for kubevirt to prevent adding nodeSelector for cpu model "kvm64".
+1. Use qemu64 model for Discovery and Features vmclass types.
+2. Add patch for kubevirt to prevent adding nodeSelector for cpu model "qemu64".
diff --git a/images/bounder/werf.inc.yaml b/images/bounder/werf.inc.yaml
index c8754aa397..312b449717 100644
--- a/images/bounder/werf.inc.yaml
+++ b/images/bounder/werf.inc.yaml
@@ -12,7 +12,7 @@ imageSpec:
 ---
 image: {{ .ModuleNamePrefix }}{{ .ImageName }}-cbuilder
 final: false
-fromImage: builder/golang-bookworm-1.23
+fromImage: builder/golang-bookworm-1.24
 git:
 - add: {{ .ModuleDir }}/images/{{ .ImageName }}/static_binaries
   to: /static_binaries
diff --git a/images/cdi-artifact/werf.inc.yaml b/images/cdi-artifact/werf.inc.yaml
index 84352a4756..bf2f44d8a7 100644
--- a/images/cdi-artifact/werf.inc.yaml
+++ b/images/cdi-artifact/werf.inc.yaml
@@ -13,6 +13,7 @@ packages:
 - libxml2
 - xz
 - libtasn1
+- libunistring
 {{- end -}}
 
 {{ $builderDependencies := include "$name" . | fromYaml }}
@@ -34,8 +35,8 @@ secrets:
 shell:
   install:
   - |
-    echo "Git clone CDI repository..."
- git clone --depth 1 --branch {{ $version }} $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} /src/containerized-data-importer + echo "Git clone {{ $gitRepoName }} repository..." + git clone --depth=1 --branch {{ $version }} $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} /src/containerized-data-importer rm -rf /src/containerized-data-importer/.git @@ -80,19 +81,8 @@ shell: cd /containerized-data-importer go mod download - echo Update modules to mitigate CVEs... - - # CVE-2024-45337,CVE-2025-22869 - go get golang.org/x/crypto@v0.38.0 - # CVE-2025-22870, CVE-2025-22872 - go get golang.org/x/net@v0.40.0 - # CVE-2025-27144 - go get github.com/go-jose/go-jose/v3@v3.0.4 - # CVE-2025-22868 - go get golang.org/x/oauth2@v0.27.0 - - go mod tidy go mod vendor + # Apply patch for json-patch from 3p-cdi repo git apply --ignore-space-change --ignore-whitespace patches/replace-op-for-evanphx-json-patch-v5-lib.patch @@ -153,7 +143,7 @@ shell: --- image: {{ .ModuleNamePrefix }}{{ .ImageName }}-cbuilder final: false -fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.23" "builder/alt-go-svace" }} +fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.24" "builder/alt-go-svace" }} git: - add: {{ .ModuleDir }}/images/{{ .ImageName }} to: / diff --git a/images/cdi-cloner/werf.inc.yaml b/images/cdi-cloner/werf.inc.yaml index f08ea278ed..ff238813b9 100644 --- a/images/cdi-cloner/werf.inc.yaml +++ b/images/cdi-cloner/werf.inc.yaml @@ -53,7 +53,7 @@ shell: --- image: {{ .ModuleNamePrefix }}{{ .ImageName }}-gobuild final: false -fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.23" "builder/alt-go-svace" }} +fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.24" "builder/alt-go-svace" }} git: - add: {{ .ModuleDir }}/images/{{ .ImageName }}/cloner-startup to: /app diff --git a/images/dvcr-artifact/.golangci.yaml b/images/dvcr-artifact/.golangci.yaml index 0867b18310..1be21e2a37 100644 --- a/images/dvcr-artifact/.golangci.yaml +++ b/images/dvcr-artifact/.golangci.yaml @@ -39,6 +39,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: subv1alpha2 + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -77,3 +105,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. 
+ - importas # checks import aliases against the configured convention diff --git a/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/delete.go b/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/delete.go index 1026297f41..b8b6a0d787 100644 --- a/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/delete.go +++ b/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/delete.go @@ -23,7 +23,7 @@ import ( "github.com/spf13/cobra" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var DeleteCmd = &cobra.Command{ @@ -41,7 +41,7 @@ var deleteViCmd = &cobra.Command{ ), RunE: func(cmd *cobra.Command, args []string) error { imgsDir := fmt.Sprintf("%s/vi/%s", RepoDir, NamespaceFlag) - err := DeleteImage(virtv2.VirtualImageKind, imgsDir, cmd, args) + err := DeleteImage(v1alpha2.VirtualImageKind, imgsDir, cmd, args) if err != nil { return err } @@ -61,7 +61,7 @@ var deleteCviCmd = &cobra.Command{ ), RunE: func(cmd *cobra.Command, args []string) error { imgsDir := fmt.Sprintf("%s/cvi", RepoDir) - err := DeleteImage(virtv2.ClusterVirtualImageKind, imgsDir, cmd, args) + err := DeleteImage(v1alpha2.ClusterVirtualImageKind, imgsDir, cmd, args) if err != nil { return err } @@ -131,9 +131,9 @@ func removeImageDir(imgType, imgsDir, imgName string) error { err := os.RemoveAll(imgDir) if err != nil { switch imgType { - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: return fmt.Errorf("cannot delete `%s` %q in %q namespace: %w", imgType, imgName, NamespaceFlag, err) - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: return fmt.Errorf("cannot delete `%s` %q: %w", imgType, imgName, err) default: return fmt.Errorf("unknown image type: %s", imgType) diff --git a/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/ls.go b/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/ls.go index 200f7f869d..27fc844aaf 100644 --- a/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/ls.go +++ b/images/dvcr-artifact/cmd/dvcr-cleaner/cmd/ls.go @@ -22,7 +22,7 @@ import ( "os" "text/tabwriter" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/spf13/cobra" ) @@ -49,7 +49,7 @@ var lsViCmd = &cobra.Command{ ), RunE: func(cmd *cobra.Command, args []string) error { imgsDir := fmt.Sprintf("%s/vi", RepoDir) - err := ListImage(virtv2.VirtualImageKind, imgsDir, cmd, args) + err := ListImage(v1alpha2.VirtualImageKind, imgsDir, cmd, args) if err != nil { return err } @@ -69,7 +69,7 @@ var lsCviCmd = &cobra.Command{ ), RunE: func(cmd *cobra.Command, args []string) error { imgsDir := fmt.Sprintf("%s/cvi", RepoDir) - err := ListImage(virtv2.ClusterVirtualImageKind, imgsDir, cmd, args) + err := ListImage(v1alpha2.ClusterVirtualImageKind, imgsDir, cmd, args) if err != nil { return err } @@ -92,7 +92,7 @@ func ListImage(imgType, imgsDir string, cmd *cobra.Command, args []string) error ) imgName := args[0] switch imgType { - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: path := fmt.Sprintf("%s/%s/%s", imgsDir, NamespaceFlag, imgName) fileInfo, err = os.Stat(path) if err != nil { @@ -101,7 +101,7 @@ func ListImage(imgType, imgsDir string, cmd *cobra.Command, args []string) error } return fmt.Errorf("cannot get the `%s` %q in the %q namespace: %w", imgType, imgName, NamespaceFlag, err) } - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: path := fmt.Sprintf("%s/%s", imgsDir, imgName) fileInfo, err = os.Stat(path) if err != nil { @@ -122,7 +122,7 @@ func ListImage(imgType, 
imgsDir string, cmd *cobra.Command, args []string) error if AllImagesFlag { switch imgType { - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: imgs, err := listAllVirtualImages(imgsDir, NamespaceFlag) if err != nil { return err @@ -133,7 +133,7 @@ func ListImage(imgType, imgsDir string, cmd *cobra.Command, args []string) error fmt.Fprintf(w, "%s\t\n", img.Name) } w.Flush() - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: imgs, err := os.ReadDir(imgsDir) if err != nil { return fmt.Errorf("cannot get the list of all `ClusterVirtualImages`: %w", err) @@ -150,7 +150,7 @@ func ListImage(imgType, imgsDir string, cmd *cobra.Command, args []string) error return nil } - if imgType == virtv2.VirtualImageKind && AllNamespacesFlag { + if imgType == v1alpha2.VirtualImageKind && AllNamespacesFlag { namespaces, err := os.ReadDir(imgsDir) if err != nil { return fmt.Errorf("cannot get the list of all namespaces: %w", err) diff --git a/images/dvcr-artifact/docker-fuzz.sh b/images/dvcr-artifact/docker-fuzz.sh index b0378b4e44..4faa7ab9e9 100755 --- a/images/dvcr-artifact/docker-fuzz.sh +++ b/images/dvcr-artifact/docker-fuzz.sh @@ -62,6 +62,8 @@ echo docker run --rm \ --platform linux/amd64 \ + -v $(pwd)/fuzzartifact/.cache/go-build:/root/.cache/go-build \ + -v $(pwd)/fuzzartifact/logs:/tmp/fuzz \ $(docker build --platform linux/amd64 -q -f "$DOCKERFILE" .) echo diff --git a/images/dvcr-artifact/fuzz.Dockerfile b/images/dvcr-artifact/fuzz.Dockerfile index d1e168293f..a0a24af452 100644 --- a/images/dvcr-artifact/fuzz.Dockerfile +++ b/images/dvcr-artifact/fuzz.Dockerfile @@ -1,8 +1,9 @@ -FROM golang:1.23-bookworm +FROM golang:1.24-bookworm RUN apt update -y && apt install -y \ build-essential \ - libnbd-dev + libnbd-dev \ + qemu-utils WORKDIR /app diff --git a/images/dvcr-artifact/fuzz.sh b/images/dvcr-artifact/fuzz.sh index 50656a3e1b..db90a9315c 100755 --- a/images/dvcr-artifact/fuzz.sh +++ b/images/dvcr-artifact/fuzz.sh @@ -32,10 +32,10 @@ cleanup() { done # kill workers if they are still running - pids=$(ps aux | grep 'fuzzworker' | awk '{print $2}') + pids=$(pgrep -f fuzzworker) if [[ ! -z "$pids" ]]; then - echo "$pids" | xargs kill 2>/dev/null || true - sleep 1 # wait a moment for them to terminate + echo "$pids" | xargs kill -2 2>/dev/null || true + sleep 10 # wait a moment for them to terminate echo "$pids" | xargs kill -9 2>/dev/null || true fi @@ -85,10 +85,10 @@ for file in ${files}; do wait "$fuzz_pid" 2>/dev/null || true # kill workers if they are still running - pids=$(ps aux | grep 'fuzzworker' | awk '{print $2}') + pids=$(pgrep -f fuzzworker) if [[ ! 
-z "$pids" ]]; then - echo "$pids" | xargs kill 2>/dev/null || true - sleep 1 # wait a moment for them to terminate + echo "$pids" | xargs kill -2 2>/dev/null || true + sleep 10 # wait a moment for them to terminate echo "$pids" | xargs kill -9 2>/dev/null || true fi diff --git a/images/dvcr-artifact/go.mod b/images/dvcr-artifact/go.mod index 03a97ead3e..ce1b1dfb34 100644 --- a/images/dvcr-artifact/go.mod +++ b/images/dvcr-artifact/go.mod @@ -40,7 +40,7 @@ require ( github.com/containers/storage v1.55.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.1.1+incompatible // indirect + github.com/docker/docker v28.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -97,7 +97,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vmware/govmomi v0.23.1 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/images/dvcr-artifact/go.sum b/images/dvcr-artifact/go.sum index fb9cfa07a9..a7bb733a12 100644 --- a/images/dvcr-artifact/go.sum +++ b/images/dvcr-artifact/go.sum @@ -65,8 +65,8 @@ github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2 github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM= +github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -390,8 +390,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod 
h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vmware/govmomi v0.23.1 h1:vU09hxnNR/I7e+4zCJvW+5vHu5dO64Aoe2Lw7Yi/KRg= diff --git a/images/dvcr-artifact/pkg/fuzz/http.go b/images/dvcr-artifact/pkg/fuzz/http.go index 3cfed46a04..470b690fe7 100644 --- a/images/dvcr-artifact/pkg/fuzz/http.go +++ b/images/dvcr-artifact/pkg/fuzz/http.go @@ -20,7 +20,6 @@ import ( "bytes" "fmt" "net/http" - "os" "regexp" "strconv" "strings" @@ -31,19 +30,19 @@ import ( "github.com/hashicorp/go-cleanhttp" ) -func ProcessRequests(t *testing.T, data []byte, addr string, methods ...string) { - t.Helper() +func ProcessRequests(tb *testing.T, data []byte, addr string, methods ...string) { + tb.Helper() if len(methods) == 0 { - t.Fatalf("no methods specified") + tb.Fatalf("no methods specified") } for _, method := range methods { - ProcessRequest(t, data, addr, method) + ProcessRequest(tb, data, addr, method) } } -func ProcessRequest(t testing.TB, data []byte, addr, method string) { - t.Helper() +func ProcessRequest(tb testing.TB, data []byte, addr, method string) { + tb.Helper() switch method { case @@ -57,26 +56,26 @@ func ProcessRequest(t testing.TB, data []byte, addr, method string) { http.MethodOptions, http.MethodTrace: - req := newFuzzRequest().Fuzz(t, data, method, addr) + req := newFuzzRequest().Fuzz(tb, data, method, addr) defer req.Body.Close() - resp := fuzzHTTPRequest(t, req) + resp := fuzzHTTPRequest(tb, req) if resp != nil { if resp.StatusCode > 500 { - t.Errorf("resp: %v", resp) + tb.Errorf("resp: %v", resp) } defer resp.Body.Close() } default: - t.Errorf("Unsupported HTTP method: %s", method) + tb.Errorf("Unsupported HTTP method: %s", method) } } -func fuzzHTTPRequest(t testing.TB, fuzzReq *http.Request) *http.Response { - t.Helper() +func fuzzHTTPRequest(tb testing.TB, fuzzReq *http.Request) *http.Response { + tb.Helper() if fuzzReq == nil { - t.Skip("Skipping test because fuzzReq is nil") + tb.Skip("Skipping test because fuzzReq is nil") } client := cleanhttp.DefaultClient() client.Timeout = time.Second @@ -98,11 +97,11 @@ func fuzzHTTPRequest(t testing.TB, fuzzReq *http.Request) *http.Response { return nil } - t.Logf("fuzzing request, %s, %s", fuzzReq.Method, fuzzReq.URL) + tb.Logf("fuzzing request, %s, %s", fuzzReq.Method, fuzzReq.URL) resp, err := client.Do(fuzzReq) if err != nil && !strings.Contains(err.Error(), "checkRedirect disabled for test") { - t.Logf("err: %s", err) + tb.Logf("err: %s", err) } return resp @@ -114,14 +113,14 @@ func newFuzzRequest() *fuzzRequest { return &fuzzRequest{} } -func (s *fuzzRequest) Fuzz(t testing.TB, data []byte, method, addr string) *http.Request { - t.Helper() +func (s *fuzzRequest) Fuzz(tb testing.TB, data []byte, method, addr string) *http.Request { + tb.Helper() bodyReader := bytes.NewBuffer(data) req, err := http.NewRequest(method, addr, bodyReader) if err != nil { - t.Skipf("Skipping test: not enough data for fuzzing: %s", err.Error()) + tb.Skipf("Skipping test: not enough data for fuzzing: %s", err.Error()) } // Get the address of the local listener in order to attach it to an Origin header. 
@@ -130,10 +129,13 @@ func (s *fuzzRequest) Fuzz(t testing.TB, data []byte, method, addr string) *http fuzzConsumer := fuzz.NewConsumer(data) var headersMap map[string]string - fuzzConsumer.FuzzMap(&headersMap) + err = fuzzConsumer.FuzzMap(&headersMap) + if err != nil { + tb.Skipf("Skipping test: not enough data for fuzzing: %s", err.Error()) + } for k, v := range headersMap { - for i := 0; i < len(v); i++ { + for range len(v) { req.Header.Add(k, v) } } @@ -144,16 +146,3 @@ func (s *fuzzRequest) Fuzz(t testing.TB, data []byte, method, addr string) *http return req } - -func GetPortFromEnv(env string, defaultPort int) (port int, err error) { - portEnv := os.Getenv(env) - if portEnv == "" { - return defaultPort, nil - } - - port, err = strconv.Atoi(portEnv) - if err != nil { - return 0, fmt.Errorf("failed to parse port env var %s: %w", env, err) - } - return port, nil -} diff --git a/images/dvcr-artifact/pkg/retry/backoff.go b/images/dvcr-artifact/pkg/retry/backoff.go index 806588499f..1622252f4c 100644 --- a/images/dvcr-artifact/pkg/retry/backoff.go +++ b/images/dvcr-artifact/pkg/retry/backoff.go @@ -106,10 +106,11 @@ func (b *Backoff) Step() time.Duration { // In all other cases, ErrWaitTimeout is returned. func ExponentialBackoff(ctx context.Context, f Fn, backoff Backoff) error { const ( - dvcrNoSpaceError = "no space left on device" - dvcrInternalErrorPattern = "UNKNOWN: unknown error;" - dvcrNoSpaceErrMessage = "DVCR is overloaded" - internalDvcrErrMessage = "Internal DVCR error (could it be overloaded?)" + dvcrNoSpaceError = "no space left on device" + dvcrInternalErrorPattern = "UNKNOWN: unknown error;" + dvcrNoSpaceErrMessage = "DVCR is overloaded" + internalDvcrErrMessage = "Internal DVCR error (could it be overloaded?)" + datasourceCreatingErrMessage = "error creating data source" ) var err error @@ -124,6 +125,8 @@ func ExponentialBackoff(ctx context.Context, f Fn, backoff Backoff) error { return fmt.Errorf("%s: %w", dvcrNoSpaceErrMessage, err) case strings.Contains(err.Error(), dvcrInternalErrorPattern): return fmt.Errorf("%s: %w", internalDvcrErrMessage, err) + case strings.Contains(err.Error(), datasourceCreatingErrMessage): + return err } if backoff.Steps == 1 { diff --git a/images/dvcr-artifact/pkg/uploader/uploader.go b/images/dvcr-artifact/pkg/uploader/uploader.go index 6fbaec2ad2..e3fb281761 100644 --- a/images/dvcr-artifact/pkg/uploader/uploader.go +++ b/images/dvcr-artifact/pkg/uploader/uploader.go @@ -345,11 +345,11 @@ func (app *uploadServerApp) processUpload(irc imageReadCloser, w http.ResponseWr w.WriteHeader(http.StatusBadRequest) } - err = app.upload(readCloser, cdiContentType, dvContentType, parseHTTPHeader(r)) - app.mutex.Lock() defer app.mutex.Unlock() + err = app.upload(readCloser, cdiContentType, dvContentType, parseHTTPHeader(r)) + if err != nil { klog.Errorf("Saving stream failed: %s", err) w.WriteHeader(http.StatusInternalServerError) diff --git a/images/dvcr/werf.inc.yaml b/images/dvcr/werf.inc.yaml index b1a24c19a6..5fb79e3816 100644 --- a/images/dvcr/werf.inc.yaml +++ b/images/dvcr/werf.inc.yaml @@ -14,7 +14,9 @@ shell: - | mkdir -p ~/.ssh && echo "StrictHostKeyChecking accept-new" > ~/.ssh/config echo "Git clone CDI repository..." 
- git clone --depth 1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch v{{ $version }} /distribution + mkdir -p /src + git clone --depth 1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch v{{ $version }} /src/distribution + --- image: {{ .ModuleNamePrefix }}{{ .ImageName }} @@ -39,13 +41,13 @@ imageSpec: --- image: {{ .ModuleNamePrefix }}{{ .ImageName }}-builder final: false -fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.23" "builder/alt-go-svace" }} +fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.24" "builder/alt-go-svace" }} mount: - fromPath: ~/go-pkg-cache to: /go/pkg import: - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact - add: /distribution + add: /src/distribution to: /distribution before: install secrets: diff --git a/images/hooks/werf.inc.yaml b/images/hooks/werf.inc.yaml index 7749a8da83..6091dab834 100644 --- a/images/hooks/werf.inc.yaml +++ b/images/hooks/werf.inc.yaml @@ -4,36 +4,30 @@ final: false fromImage: builder/src git: - add: {{ .ModuleDir }}/images/{{ .ImageName }} - to: /app/images/hooks + to: /src/images/hooks stageDependencies: install: - - go.mod - - go.sum - setup: - - "**/*.go" + - "**/*" - add: {{ .ModuleDir }}/images/virtualization-artifact - to: /app/images/virtualization-artifact + to: /src/images/virtualization-artifact stageDependencies: install: - - go.mod - - go.sum - setup: - - "**/*.go" + - "**/*" - add: {{ .ModuleDir }}/api - to: /app/api + to: /src/api stageDependencies: install: - - go.mod - - go.sum - setup: - - "**/*.go" + - "**/*" +shell: + install: + - cd /src --- image: {{ .ModuleNamePrefix }}go-hooks-artifact final: false fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.24" "builder/alt-go-svace" }} import: - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact - add: /app + add: /src to: /app before: install mount: diff --git a/images/kube-api-rewriter/local/test-controller/main.go b/images/kube-api-rewriter/local/test-controller/main.go index 289d28553b..f602da2ed5 100644 --- a/images/kube-api-rewriter/local/test-controller/main.go +++ b/images/kube-api-rewriter/local/test-controller/main.go @@ -24,7 +24,7 @@ import ( "runtime" "strconv" - virtv1alpha2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/go-logr/logr" "go.uber.org/zap/zapcore" corev1 "k8s.io/api/core/v1" @@ -32,7 +32,7 @@ import ( apiruntime "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -55,8 +55,8 @@ var ( resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{ clientgoscheme.AddToScheme, extv1.AddToScheme, - kvv1.AddToScheme, - virtv1alpha2.AddToScheme, + virtv1.AddToScheme, + v1alpha2.AddToScheme, } ) @@ -178,7 +178,7 @@ func (i *InitialLister) Start(ctx context.Context) error { cl := i.client // List VMs, Pods, CRDs before starting manager. - vms := virtv1alpha2.VirtualMachineList{} + vms := v1alpha2.VirtualMachineList{} err := cl.List(ctx, &vms) if err != nil { i.log.Error(err, "list VMs") @@ -251,8 +251,8 @@ func NewController( // SetupWatches subscripts controller to Pods, CRDs and DVP VMs. 
diff --git a/images/kube-api-rewriter/local/test-controller/main.go b/images/kube-api-rewriter/local/test-controller/main.go
index 289d28553b..f602da2ed5 100644
--- a/images/kube-api-rewriter/local/test-controller/main.go
+++ b/images/kube-api-rewriter/local/test-controller/main.go
@@ -24,7 +24,7 @@ import (
 	"runtime"
 	"strconv"

-	virtv1alpha2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/go-logr/logr"
 	"go.uber.org/zap/zapcore"
 	corev1 "k8s.io/api/core/v1"
@@ -32,7 +32,7 @@ import (
 	apiruntime "k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
-	kvv1 "kubevirt.io/api/core/v1"
+	virtv1 "kubevirt.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -55,8 +55,8 @@ var (
 	resourcesSchemeFuncs = []func(*apiruntime.Scheme) error{
 		clientgoscheme.AddToScheme,
 		extv1.AddToScheme,
-		kvv1.AddToScheme,
-		virtv1alpha2.AddToScheme,
+		virtv1.AddToScheme,
+		v1alpha2.AddToScheme,
 	}
 )
@@ -178,7 +178,7 @@ func (i *InitialLister) Start(ctx context.Context) error {
 	cl := i.client

 	// List VMs, Pods, CRDs before starting manager.
-	vms := virtv1alpha2.VirtualMachineList{}
+	vms := v1alpha2.VirtualMachineList{}
 	err := cl.List(ctx, &vms)
 	if err != nil {
 		i.log.Error(err, "list VMs")
@@ -251,8 +251,8 @@ func NewController(

 // SetupWatches subscripts controller to Pods, CRDs and DVP VMs.
 func SetupWatches(ctx context.Context, mgr manager.Manager, ctr controller.Controller, log logr.Logger) error {
-	if err := ctr.Watch(source.Kind(mgr.GetCache(), &virtv1alpha2.VirtualMachine{}), &handler.EnqueueRequestForObject{},
-	//if err := ctr.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), &handler.EnqueueRequestForObject{},
+	if err := ctr.Watch(source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}), &handler.EnqueueRequestForObject{},
+	// if err := ctr.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}), &handler.EnqueueRequestForObject{},
 		predicate.Funcs{
 			CreateFunc: func(e event.CreateEvent) bool {
 				log.Info(fmt.Sprintf("Got CREATE event for VM %s/%s gvk %v", e.Object.GetNamespace(), e.Object.GetName(), e.Object.GetObjectKind().GroupVersionKind()))
@@ -314,7 +314,7 @@ func SetupWatches(ctx context.Context, mgr manager.Manager, ctr controller.Contr
 func SetupWebhooks(ctx context.Context, mgr manager.Manager, validator admission.CustomValidator) error {
 	return builder.WebhookManagedBy(mgr).
-		For(&kvv1.VirtualMachine{}).
+		For(&virtv1.VirtualMachine{}).
 		WithValidator(validator).
 		Complete()
 }
@@ -333,7 +333,7 @@ func (r *VMReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re
 }

 func (r *VMReconciler) ValidateCreate(ctx context.Context, obj apiruntime.Object) (admission.Warnings, error) {
-	vm, ok := obj.(*kvv1.VirtualMachine)
+	vm, ok := obj.(*virtv1.VirtualMachine)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", obj)
 	}
@@ -345,7 +345,7 @@
 }

 func (r *VMReconciler) ValidateUpdate(ctx context.Context, _, newObj apiruntime.Object) (admission.Warnings, error) {
-	vm, ok := newObj.(*kvv1.VirtualMachine)
+	vm, ok := newObj.(*virtv1.VirtualMachine)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", newObj)
 	}
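The Validate* hunks around this point all follow the usual controller-runtime `admission.CustomValidator` shape: narrow the incoming `runtime.Object` to the expected concrete type, fail loudly on a mismatch, then validate. A condensed, self-contained sketch of that shape (the field check is illustrative, not the controller's actual logic):

```go
package main

import (
	"fmt"

	apiruntime "k8s.io/apimachinery/pkg/runtime"
	virtv1 "kubevirt.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// validateVM condenses the pattern shared by the three Validate* hooks:
// assert the concrete type first, then run the actual checks.
func validateVM(obj apiruntime.Object) (admission.Warnings, error) {
	vm, ok := obj.(*virtv1.VirtualMachine)
	if !ok {
		return nil, fmt.Errorf("expected a VirtualMachine but got a %T", obj)
	}
	if vm.Name == "" { // illustrative check only
		return nil, fmt.Errorf("VirtualMachine must have a name")
	}
	return nil, nil
}

func main() {
	warnings, err := validateVM(&virtv1.VirtualMachine{})
	fmt.Println(warnings, err)
}
```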
@@ -357,7 +357,7 @@
 }

 func (v *VMReconciler) ValidateDelete(_ context.Context, obj apiruntime.Object) (admission.Warnings, error) {
-	vm, ok := obj.(*kvv1.VirtualMachine)
+	vm, ok := obj.(*virtv1.VirtualMachine)
 	if !ok {
 		return nil, fmt.Errorf("expected a deleted VirtualMachine but got a %T", obj)
 	}
diff --git a/images/libvirt/werf.inc.yaml b/images/libvirt/werf.inc.yaml
index 07e87be4fb..677d24d730 100644
--- a/images/libvirt/werf.inc.yaml
+++ b/images/libvirt/werf.inc.yaml
@@ -8,7 +8,7 @@ final: false
 fromImage: builder/src
 git:
 - add: {{ .ModuleDir }}/images/{{ .ImageName }}
-  to: /
+  to: /src
   includePaths:
   - install-libvirt.sh
   - patches
@@ -23,9 +23,9 @@ secrets:
 shell:
   install:
   - |
-    git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /{{ $gitRepoName }}-{{ $version }}
+    git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src/{{ $gitRepoName }}-{{ $version }}

-    cd /{{ $gitRepoName }}-{{ $version }}
+    cd /src/{{ $gitRepoName }}-{{ $version }}

     if [[ "$(cat /run/secrets/SOURCE_REPO)" =~ "github.com" ]] ; then
       echo "Checkout submodules"
@@ -72,11 +72,9 @@ altLibraries:
 - libdevmapper-devel
 - ceph-devel
 - libiscsi-devel libglusterfs-devel
-- libgnutls-devel
 - libsystemd-devel
 - systemtap-sdt-devel
-- libacl-devel glib2-devel glibc-utils
-- libgio-devel libxml2-devel
+- glibc-utils
 - wireshark-devel
 - libclocale
 - libslirp-devel
@@ -92,6 +90,8 @@ packages:
 - selinux
 - cyrus-sasl2
 - libtasn1 libtirpc
+- glib2 acl libunistring libxml2
+- gnutls
 {{- end -}}

 {{ $builderDependencies := include "$name" . | fromYaml }}
@@ -100,15 +100,15 @@
 final: false
 fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/alt" "builder/alt-go-svace" }}
 import:
 - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact
-  add: /{{ $gitRepoName }}-{{ $version }}
+  add: /src/{{ $gitRepoName }}-{{ $version }}
   to: /{{ $gitRepoName }}-{{ $version }}
   before: install
 - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact
-  add: /patches
+  add: /src/patches
   to: /patches
   before: install
 - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact
-  add: /install-libvirt.sh
+  add: /src/install-libvirt.sh
   to: /install-libvirt.sh
   before: install
 {{- include "importPackageImages" (list . $builderDependencies.packages "install") -}}
diff --git a/images/packages/e2fsprogs/werf.inc.yaml b/images/packages/e2fsprogs/werf.inc.yaml
index 2edcbd116b..a3e0096056 100644
--- a/images/packages/e2fsprogs/werf.inc.yaml
+++ b/images/packages/e2fsprogs/werf.inc.yaml
@@ -27,10 +27,9 @@ shell:
 altPackages:
 - gcc git make libtool gettext-tools
 - libuuid-devel libarchive-devel
-- glib2-devel
 - tree
 packages:
-- util-linux acl
+- glib2 util-linux acl
 {{- end -}}

 {{ $builderDependencies := include "$name" . | fromYaml }}
@@ -59,7 +58,6 @@ shell:
   install:
   - |
     # Install packages
-    echo "Install packages"
     PKGS="{{ $builderDependencies.packages | join " " }}"
     for pkg in $PKGS; do
       cp -a /$pkg/.
/ diff --git a/images/packages/gcc/README.md b/images/packages/gcc/README.md new file mode 100644 index 0000000000..79f5e26b2e --- /dev/null +++ b/images/packages/gcc/README.md @@ -0,0 +1,1649 @@ +# gcc +``` +└── [drwxr-xr-x 6] usr + ├── [drwxr-xr-x 20] bin + │ ├── [-rwxr-xr-x 1.6M] c++-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] cpp-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] g++-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] gcc-13.2.0 + │ ├── [-rwxr-xr-x 35K] gcc-ar-13.2.0 + │ ├── [-rwxr-xr-x 35K] gcc-nm-13.2.0 + │ ├── [-rwxr-xr-x 35K] gcc-ranlib-13.2.0 + │ ├── [-rwxr-xr-x 1.0M] gcov-13.2.0 + │ ├── [-rwxr-xr-x 976K] gcov-dump-13.2.0 + │ ├── [-rwxr-xr-x 936K] gcov-tool-13.2.0 + │ ├── [-rwxr-xr-x 31M] lto-dump-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] x86_64-linux-gnu-c++-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] x86_64-linux-gnu-g++-13.2.0 + │ ├── [-rwxr-xr-x 1.6M] x86_64-linux-gnu-gcc-13.2.0 + │ ├── [-rwxr-xr-x 35K] x86_64-linux-gnu-gcc-ar-13.2.0 + │ ├── [-rwxr-xr-x 35K] x86_64-linux-gnu-gcc-nm-13.2.0 + │ ├── [-rwxr-xr-x 35K] x86_64-linux-gnu-gcc-ranlib-13.2.0 + │ └── [-rwxr-xr-x 1.6M] x86_64-linux-gnu-gcc-tmp + ├── [drwxr-xr-x 3] include + │ └── [drwxr-xr-x 3] c++ + │ └── [drwxr-xr-x 126] 13.2.0 + │ ├── [-rw-r--r-- 3.0K] algorithm + │ ├── [-rw-r--r-- 18K] any + │ ├── [-rw-r--r-- 14K] array + │ ├── [-rw-r--r-- 50K] atomic + │ ├── [drwxr-xr-x 10] backward + │ │ ├── [-rw-r--r-- 11K] auto_ptr.h + │ │ ├── [-rw-r--r-- 2.4K] backward_warning.h + │ │ ├── [-rw-r--r-- 7.1K] binders.h + │ │ ├── [-rw-r--r-- 4.1K] hash_fun.h + │ │ ├── [-rw-r--r-- 17K] hash_map + │ │ ├── [-rw-r--r-- 17K] hash_set + │ │ ├── [-rw-r--r-- 33K] hashtable.h + │ │ └── [-rw-r--r-- 7.3K] strstream + │ ├── [-rw-r--r-- 7.9K] barrier + │ ├── [-rw-r--r-- 14K] bit + │ ├── [drwxr-xr-x 157] bits + │ │ ├── [-rw-r--r-- 24K] algorithmfwd.h + │ │ ├── [-rw-r--r-- 3.6K] align.h + │ │ ├── [-rw-r--r-- 30K] alloc_traits.h + │ │ ├── [-rw-r--r-- 3.3K] allocated_ptr.h + │ │ ├── [-rw-r--r-- 8.6K] allocator.h + │ │ ├── [-rw-r--r-- 59K] atomic_base.h + │ │ ├── [-rw-r--r-- 12K] atomic_futex.h + │ │ ├── [-rw-r--r-- 2.3K] atomic_lockfree_defines.h + │ │ ├── [-rw-r--r-- 13K] atomic_timed_wait.h + │ │ ├── [-rw-r--r-- 12K] atomic_wait.h + │ │ ├── [-rw-r--r-- 16K] basic_ios.h + │ │ ├── [-rw-r--r-- 5.7K] basic_ios.tcc + │ │ ├── [-rw-r--r-- 156K] basic_string.h + │ │ ├── [-rw-r--r-- 30K] basic_string.tcc + │ │ ├── [-rw-r--r-- 29K] boost_concept_check.h + │ │ ├── [-rw-r--r-- 1.4K] c++0x_warning.h + │ │ ├── [-rw-r--r-- 29K] char_traits.h + │ │ ├── [-rw-r--r-- 3.6K] charconv.h + │ │ ├── [-rw-r--r-- 47K] chrono.h + │ │ ├── [-rw-r--r-- 74K] chrono_io.h + │ │ ├── [-rw-r--r-- 25K] codecvt.h + │ │ ├── [-rw-r--r-- 3.3K] concept_check.h + │ │ ├── [-rw-r--r-- 131K] cow_string.h + │ │ ├── [-rw-r--r-- 15K] cpp_type_traits.h + │ │ ├── [-rw-r--r-- 1.8K] cxxabi_forced.h + │ │ ├── [-rw-r--r-- 2.2K] cxxabi_init_exception.h + │ │ ├── [-rw-r--r-- 41K] deque.tcc + │ │ ├── [-rw-r--r-- 12K] enable_special_members.h + │ │ ├── [-rw-r--r-- 2.1K] erase_if.h + │ │ ├── [-rw-r--r-- 2.4K] exception.h + │ │ ├── [-rw-r--r-- 1.6K] exception_defines.h + │ │ ├── [-rw-r--r-- 8.1K] exception_ptr.h + │ │ ├── [-rw-r--r-- 50K] forward_list.h + │ │ ├── [-rw-r--r-- 14K] forward_list.tcc + │ │ ├── [-rw-r--r-- 17K] fs_dir.h + │ │ ├── [-rw-r--r-- 11K] fs_fwd.h + │ │ ├── [-rw-r--r-- 10K] fs_ops.h + │ │ ├── [-rw-r--r-- 41K] fs_path.h + │ │ ├── [-rw-r--r-- 33K] fstream.tcc + │ │ ├── [-rw-r--r-- 4.0K] functexcept.h + │ │ ├── [-rw-r--r-- 8.8K] functional_hash.h + │ │ ├── [-rw-r--r-- 5.4K] gslice.h + │ │ ├── [-rw-r--r-- 7.7K] gslice_array.h + │ │ ├── 
[-rw-r--r-- 2.1K] hash_bytes.h + │ │ ├── [-rw-r--r-- 88K] hashtable.h + │ │ ├── [-rw-r--r-- 64K] hashtable_policy.h + │ │ ├── [-rw-r--r-- 7.7K] indirect_array.h + │ │ ├── [-rw-r--r-- 6.1K] invoke.h + │ │ ├── [-rw-r--r-- 32K] ios_base.h + │ │ ├── [-rw-r--r-- 32K] istream.tcc + │ │ ├── [-rw-r--r-- 34K] iterator_concepts.h + │ │ ├── [-rw-r--r-- 18K] list.tcc + │ │ ├── [-rw-r--r-- 25K] locale_classes.h + │ │ ├── [-rw-r--r-- 11K] locale_classes.tcc + │ │ ├── [-rw-r--r-- 19K] locale_conv.h + │ │ ├── [-rw-r--r-- 92K] locale_facets.h + │ │ ├── [-rw-r--r-- 40K] locale_facets.tcc + │ │ ├── [-rw-r--r-- 69K] locale_facets_nonio.h + │ │ ├── [-rw-r--r-- 56K] locale_facets_nonio.tcc + │ │ ├── [-rw-r--r-- 5.8K] localefwd.h + │ │ ├── [-rw-r--r-- 7.8K] mask_array.h + │ │ ├── [-rw-r--r-- 22K] max_size_type.h + │ │ ├── [-rw-r--r-- 16K] memory_resource.h + │ │ ├── [-rw-r--r-- 2.5K] memoryfwd.h + │ │ ├── [-rw-r--r-- 7.5K] mofunc_impl.h + │ │ ├── [-rw-r--r-- 6.5K] move.h + │ │ ├── [-rw-r--r-- 6.1K] move_only_function.h + │ │ ├── [-rw-r--r-- 7.5K] nested_exception.h + │ │ ├── [-rw-r--r-- 7.2K] new_allocator.h + │ │ ├── [-rw-r--r-- 11K] node_handle.h + │ │ ├── [-rw-r--r-- 12K] ostream.tcc + │ │ ├── [-rw-r--r-- 4.0K] ostream_insert.h + │ │ ├── [-rw-r--r-- 7.8K] parse_numbers.h + │ │ ├── [-rw-r--r-- 7.3K] postypes.h + │ │ ├── [-rw-r--r-- 9.9K] predefined_ops.h + │ │ ├── [-rw-r--r-- 8.3K] ptr_traits.h + │ │ ├── [-rw-r--r-- 5.0K] quoted_string.h + │ │ ├── [-rw-r--r-- 179K] random.h + │ │ ├── [-rw-r--r-- 103K] random.tcc + │ │ ├── [-rw-r--r-- 12K] range_access.h + │ │ ├── [-rw-r--r-- 129K] ranges_algo.h + │ │ ├── [-rw-r--r-- 18K] ranges_algobase.h + │ │ ├── [-rw-r--r-- 29K] ranges_base.h + │ │ ├── [-rw-r--r-- 5.9K] ranges_cmp.h + │ │ ├── [-rw-r--r-- 18K] ranges_uninitialized.h + │ │ ├── [-rw-r--r-- 25K] ranges_util.h + │ │ ├── [-rw-r--r-- 13K] refwrap.h + │ │ ├── [-rw-r--r-- 104K] regex.h + │ │ ├── [-rw-r--r-- 16K] regex.tcc + │ │ ├── [-rw-r--r-- 11K] regex_automaton.h + │ │ ├── [-rw-r--r-- 7.6K] regex_automaton.tcc + │ │ ├── [-rw-r--r-- 16K] regex_compiler.h + │ │ ├── [-rw-r--r-- 18K] regex_compiler.tcc + │ │ ├── [-rw-r--r-- 15K] regex_constants.h + │ │ ├── [-rw-r--r-- 5.4K] regex_error.h + │ │ ├── [-rw-r--r-- 8.8K] regex_executor.h + │ │ ├── [-rw-r--r-- 18K] regex_executor.tcc + │ │ ├── [-rw-r--r-- 6.9K] regex_scanner.h + │ │ ├── [-rw-r--r-- 15K] regex_scanner.tcc + │ │ ├── [-rw-r--r-- 1.4K] requires_hosted.h + │ │ ├── [-rw-r--r-- 7.7K] semaphore_base.h + │ │ ├── [-rw-r--r-- 38K] shared_ptr.h + │ │ ├── [-rw-r--r-- 23K] shared_ptr_atomic.h + │ │ ├── [-rw-r--r-- 65K] shared_ptr_base.h + │ │ ├── [-rw-r--r-- 9.4K] slice_array.h + │ │ ├── [-rw-r--r-- 46K] specfun.h + │ │ ├── [-rw-r--r-- 9.9K] sstream.tcc + │ │ ├── [-rw-r--r-- 4.6K] std_abs.h + │ │ ├── [-rw-r--r-- 23K] std_function.h + │ │ ├── [-rw-r--r-- 6.7K] std_mutex.h + │ │ ├── [-rw-r--r-- 9.8K] std_thread.h + │ │ ├── [-rw-r--r-- 212K] stl_algo.h + │ │ ├── [-rw-r--r-- 75K] stl_algobase.h + │ │ ├── [-rw-r--r-- 41K] stl_bvector.h + │ │ ├── [-rw-r--r-- 8.5K] stl_construct.h + │ │ ├── [-rw-r--r-- 76K] stl_deque.h + │ │ ├── [-rw-r--r-- 44K] stl_function.h + │ │ ├── [-rw-r--r-- 20K] stl_heap.h + │ │ ├── [-rw-r--r-- 93K] stl_iterator.h + │ │ ├── [-rw-r--r-- 8.7K] stl_iterator_base_funcs.h + │ │ ├── [-rw-r--r-- 9.5K] stl_iterator_base_types.h + │ │ ├── [-rw-r--r-- 71K] stl_list.h + │ │ ├── [-rw-r--r-- 55K] stl_map.h + │ │ ├── [-rw-r--r-- 43K] stl_multimap.h + │ │ ├── [-rw-r--r-- 37K] stl_multiset.h + │ │ ├── [-rw-r--r-- 14K] stl_numeric.h + │ │ ├── [-rw-r--r-- 36K] stl_pair.h 
+ │ │ ├── [-rw-r--r-- 28K] stl_queue.h + │ │ ├── [-rw-r--r-- 3.9K] stl_raw_storage_iter.h + │ │ ├── [-rw-r--r-- 4.5K] stl_relops.h + │ │ ├── [-rw-r--r-- 37K] stl_set.h + │ │ ├── [-rw-r--r-- 14K] stl_stack.h + │ │ ├── [-rw-r--r-- 8.7K] stl_tempbuf.h + │ │ ├── [-rw-r--r-- 72K] stl_tree.h + │ │ ├── [-rw-r--r-- 36K] stl_uninitialized.h + │ │ ├── [-rw-r--r-- 69K] stl_vector.h + │ │ ├── [-rw-r--r-- 8.2K] stream_iterator.h + │ │ ├── [-rw-r--r-- 4.6K] streambuf.tcc + │ │ ├── [-rw-r--r-- 16K] streambuf_iterator.h + │ │ ├── [-rw-r--r-- 7.0K] string_view.tcc + │ │ ├── [-rw-r--r-- 2.6K] stringfwd.h + │ │ ├── [-rw-r--r-- 3.2K] this_thread_sleep.h + │ │ ├── [-rw-r--r-- 13K] uniform_int_dist.h + │ │ ├── [-rw-r--r-- 6.2K] unique_lock.h + │ │ ├── [-rw-r--r-- 36K] unique_ptr.h + │ │ ├── [-rw-r--r-- 75K] unordered_map.h + │ │ ├── [-rw-r--r-- 62K] unordered_set.h + │ │ ├── [-rw-r--r-- 6.9K] uses_allocator.h + │ │ ├── [-rw-r--r-- 8.5K] uses_allocator_args.h + │ │ ├── [-rw-r--r-- 8.2K] utility.h + │ │ ├── [-rw-r--r-- 23K] valarray_after.h + │ │ ├── [-rw-r--r-- 21K] valarray_array.h + │ │ ├── [-rw-r--r-- 7.1K] valarray_array.tcc + │ │ ├── [-rw-r--r-- 19K] valarray_before.h + │ │ └── [-rw-r--r-- 31K] vector.tcc + │ ├── [-rw-r--r-- 49K] bitset + │ ├── [-rw-r--r-- 1.6K] cassert + │ ├── [-rw-r--r-- 1.3K] ccomplex + │ ├── [-rw-r--r-- 2.4K] cctype + │ ├── [-rw-r--r-- 1.7K] cerrno + │ ├── [-rw-r--r-- 2.0K] cfenv + │ ├── [-rw-r--r-- 1.8K] cfloat + │ ├── [-rw-r--r-- 29K] charconv + │ ├── [-rw-r--r-- 93K] chrono + │ ├── [-rw-r--r-- 2.1K] cinttypes + │ ├── [-rw-r--r-- 1.4K] ciso646 + │ ├── [-rw-r--r-- 1.9K] climits + │ ├── [-rw-r--r-- 1.9K] clocale + │ ├── [-rw-r--r-- 92K] cmath + │ ├── [-rw-r--r-- 5.2K] codecvt + │ ├── [-rw-r--r-- 37K] compare + │ ├── [-rw-r--r-- 74K] complex + │ ├── [-rw-r--r-- 1.6K] complex.h + │ ├── [-rw-r--r-- 13K] concepts + │ ├── [-rw-r--r-- 13K] condition_variable + │ ├── [-rw-r--r-- 9.4K] coroutine + │ ├── [-rw-r--r-- 1.9K] csetjmp + │ ├── [-rw-r--r-- 1.8K] csignal + │ ├── [-rw-r--r-- 1.4K] cstdalign + │ ├── [-rw-r--r-- 1.8K] cstdarg + │ ├── [-rw-r--r-- 1.4K] cstdbool + │ ├── [-rw-r--r-- 6.5K] cstddef + │ ├── [-rw-r--r-- 3.8K] cstdint + │ ├── [-rw-r--r-- 4.3K] cstdio + │ ├── [-rw-r--r-- 6.4K] cstdlib + │ ├── [-rw-r--r-- 3.1K] cstring + │ ├── [-rw-r--r-- 1.3K] ctgmath + │ ├── [-rw-r--r-- 2.2K] ctime + │ ├── [-rw-r--r-- 2.8K] cuchar + │ ├── [-rw-r--r-- 6.4K] cwchar + │ ├── [-rw-r--r-- 2.7K] cwctype + │ ├── [-rw-r--r-- 22K] cxxabi.h + │ ├── [drwxr-xr-x 34] debug + │ │ ├── [-rw-r--r-- 2.3K] assertions.h + │ │ ├── [-rw-r--r-- 13K] bitset + │ │ ├── [-rw-r--r-- 6.0K] debug.h + │ │ ├── [-rw-r--r-- 18K] deque + │ │ ├── [-rw-r--r-- 18K] formatter.h + │ │ ├── [-rw-r--r-- 28K] forward_list + │ │ ├── [-rw-r--r-- 15K] functions.h + │ ��� ├── [-rw-r--r-- 10K] helper_functions.h + │ │ ├── [-rw-r--r-- 27K] list + │ │ ├── [-rw-r--r-- 20K] macros.h + │ │ ├── [-rw-r--r-- 1.6K] map + │ │ ├── [-rw-r--r-- 23K] map.h + │ │ ├── [-rw-r--r-- 20K] multimap.h + │ │ ├── [-rw-r--r-- 19K] multiset.h + │ │ ├── [-rw-r--r-- 9.1K] safe_base.h + │ │ ├── [-rw-r--r-- 3.8K] safe_container.h + │ │ ├── [-rw-r--r-- 31K] safe_iterator.h + │ │ ├── [-rw-r--r-- 19K] safe_iterator.tcc + │ │ ├── [-rw-r--r-- 13K] safe_local_iterator.h + │ │ ├── [-rw-r--r-- 2.8K] safe_local_iterator.tcc + │ │ ├── [-rw-r--r-- 5.0K] safe_sequence.h + │ │ ├── [-rw-r--r-- 4.9K] safe_sequence.tcc + │ │ ├── [-rw-r--r-- 6.7K] safe_unordered_base.h + │ │ ├── [-rw-r--r-- 5.9K] safe_unordered_container.h + │ │ ├── [-rw-r--r-- 3.2K] safe_unordered_container.tcc + │ │ ├── 
[-rw-r--r-- 1.6K] set + │ │ ├── [-rw-r--r-- 19K] set.h + │ │ ├── [-rw-r--r-- 5.4K] stl_iterator.h + │ │ ├── [-rw-r--r-- 36K] string + │ │ ├── [-rw-r--r-- 46K] unordered_map + │ │ ├── [-rw-r--r-- 41K] unordered_set + │ │ └── [-rw-r--r-- 24K] vector + │ ├── [drwxr-xr-x 4] decimal + │ │ ├── [-rw-r--r-- 17K] decimal + │ │ └── [-rw-r--r-- 17K] decimal.h + │ ├── [-rw-r--r-- 4.4K] deque + │ ├── [-rw-r--r-- 5.3K] exception + │ ├── [-rw-r--r-- 1.8K] execution + │ ├── [-rw-r--r-- 48K] expected + │ ├── [drwxr-xr-x 45] experimental + │ │ ├── [-rw-r--r-- 3.7K] algorithm + │ │ ├── [-rw-r--r-- 16K] any + │ │ ├── [-rw-r--r-- 3.4K] array + │ │ ├── [drwxr-xr-x 22] bits + │ │ │ ├── [-rw-r--r-- 11K] fs_dir.h + │ │ │ ├── [-rw-r--r-- 8.8K] fs_fwd.h + │ │ │ ├── [-rw-r--r-- 10K] fs_ops.h + │ │ │ ├── [-rw-r--r-- 38K] fs_path.h + │ │ │ ├── [-rw-r--r-- 2.2K] lfts_config.h + │ │ │ ├── [-rw-r--r-- 10K] net.h + │ │ │ ├── [-rw-r--r-- 16K] numeric_traits.h + │ │ │ ├── [-rw-r--r-- 20K] shared_ptr.h + │ │ │ ├── [-rw-r--r-- 173K] simd.h + │ │ │ ├── [-rw-r--r-- 108K] simd_builtin.h + │ │ │ ├── [-rw-r--r-- 13K] simd_converter.h + │ │ │ ├── [-rw-r--r-- 9.9K] simd_detail.h + │ │ │ ├── [-rw-r--r-- 71K] simd_fixed_size.h + │ │ │ ├── [-rw-r--r-- 57K] simd_math.h + │ │ │ ├── [-rw-r--r-- 16K] simd_neon.h + │ │ │ ├── [-rw-r--r-- 4.8K] simd_ppc.h + │ │ │ ├── [-rw-r--r-- 23K] simd_scalar.h + │ │ │ ├── [-rw-r--r-- 195K] simd_x86.h + │ │ │ ├── [-rw-r--r-- 81K] simd_x86_conversions.h + │ │ │ └── [-rw-r--r-- 6.7K] string_view.tcc + │ │ ├── [-rw-r--r-- 28K] buffer + │ │ ├── [-rw-r--r-- 2.0K] chrono + │ │ ├── [-rw-r--r-- 2.5K] contract + │ │ ├── [-rw-r--r-- 2.3K] deque + │ │ ├── [-rw-r--r-- 55K] executor + │ │ ├── [-rw-r--r-- 1.6K] filesystem + │ │ ├── [-rw-r--r-- 2.4K] forward_list + │ │ ├── [-rw-r--r-- 12K] functional + │ │ ├── [-rw-r--r-- 68K] internet + │ │ ├── [-rw-r--r-- 22K] io_context + │ │ ├── [-rw-r--r-- 3.5K] iterator + │ │ ├── [-rw-r--r-- 2.3K] list + │ │ ├── [-rw-r--r-- 2.8K] map + │ │ ├── [-rw-r--r-- 6.0K] memory + │ │ ├── [-rw-r--r-- 18K] memory_resource + │ │ ├── [-rw-r--r-- 1.6K] net + │ │ ├── [-rw-r--r-- 3.7K] netfwd + │ │ ├── [-rw-r--r-- 3.4K] numeric + │ │ ├── [-rw-r--r-- 26K] optional + │ │ ├── [-rw-r--r-- 17K] propagate_const + │ │ ├── [-rw-r--r-- 2.6K] random + │ │ ├── [-rw-r--r-- 2.5K] ratio + │ │ ├── [-rw-r--r-- 2.1K] regex + │ │ ├── [-rw-r--r-- 14K] scope + │ │ ├── [-rw-r--r-- 2.7K] set + │ │ ├── [-rw-r--r-- 2.8K] simd + │ │ ├── [-rw-r--r-- 76K] socket + │ │ ├── [-rw-r--r-- 2.8K] source_location + │ │ ├── [-rw-r--r-- 2.9K] string + │ │ ├── [-rw-r--r-- 22K] string_view + │ │ ├── [-rw-r--r-- 3.3K] synchronized_value + │ │ ├── [-rw-r--r-- 2.1K] system_error + │ │ ├── [-rw-r--r-- 5.7K] timer + │ │ ├── [-rw-r--r-- 2.5K] tuple + │ │ ├── [-rw-r--r-- 12K] type_traits + │ │ ├── [-rw-r--r-- 3.1K] unordered_map + │ │ ├── [-rw-r--r-- 2.9K] unordered_set + │ │ ├── [-rw-r--r-- 1.8K] utility + │ │ └── [-rw-r--r-- 2.4K] vector + │ ├── [drwxr-xr-x 46] ext + │ │ ├── [-rw-r--r-- 19K] algorithm + │ │ ├── [-rw-r--r-- 4.0K] aligned_buffer.h + │ │ ├── [-rw-r--r-- 6.5K] alloc_traits.h + │ │ ├── [-rw-r--r-- 3.7K] atomicity.h + │ │ ├── [-rw-r--r-- 31K] bitmap_allocator.h + │ │ ├── [-rw-r--r-- 4.3K] cast.h + │ │ ├── [-rw-r--r-- 6.5K] cmath + │ │ ├── [-rw-r--r-- 16K] codecvt_specializations.h + │ │ ├── [-rw-r--r-- 7.4K] concurrence.h + │ │ ├── [-rw-r--r-- 5.8K] debug_allocator.h + │ │ ├── [-rw-r--r-- 2.3K] enc_filebuf.h + │ │ ├── [-rw-r--r-- 6.3K] extptr_allocator.h + │ │ ├── [-rw-r--r-- 14K] functional + │ │ ├── [-rw-r--r-- 17K] hash_map + │ 
│ ├── [-rw-r--r-- 17K] hash_set + │ │ ├── [-rw-r--r-- 3.9K] iterator + │ │ ├── [-rw-r--r-- 6.0K] malloc_allocator.h + │ │ ├── [-rw-r--r-- 7.1K] memory + │ │ ├── [-rw-r--r-- 23K] mt_allocator.h + │ │ ├── [-rw-r--r-- 2.5K] new_allocator.h + │ │ ├── [-rw-r--r-- 4.7K] numeric + │ │ ├── [-rw-r--r-- 8.0K] numeric_traits.h + │ │ ├── [drwxr-xr-x 11] pb_ds + │ │ │ ├── [-rw-r--r-- 29K] assoc_container.hpp + │ │ │ ├── [drwxr-xr-x 33] detail + │ │ │ │ ├── [drwxr-xr-x 17] bin_search_tree_ + │ │ │ │ │ ├── [-rw-r--r-- 12K] bin_search_tree_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.4K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.9K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.6K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.4K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.8K] node_iterators.hpp + │ │ │ │ │ ├── [-rw-r--r-- 8.8K] point_iterators.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] r_erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.1K] rotate_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.9K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 6.2K] traits.hpp + │ │ │ │ ├── [drwxr-xr-x 18] binary_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 8.8K] binary_heap_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.2K] const_iterator.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.1K] ↵ + + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] entry_cmp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] entry_pred.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.4K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.9K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.3K] point_const_iterator.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.0K] resize_policy.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.8K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.4K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 5] binomial_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 3.8K] binomial_heap_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 1.9K] debug_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 9] binomial_heap_base_ + │ │ │ │ │ ├── [-rw-r--r-- 6.0K] binomial_heap_base_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.4K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.4K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.2K] insert_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 5.3K] split_join_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 5] branch_policy + │ │ │ │ │ ├── [-rw-r--r-- 3.9K] branch_policy.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] null_node_metadata.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.2K] traits.hpp + │ │ │ │ ├── [drwxr-xr-x 28] cc_hash_table_map_ + │ │ │ │ │ ├── [-rw-r--r-- 20K] cc_ht_map_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] cmp_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] cond_key_dtor_entry_dealtor.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.7K] constructor_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] ↵ +constructor_destructor_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] ↵ +constructor_destructor_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] debug_fn_imps.hpp + │ │ │ │ │ ├── 
[-rw-r--r-- 2.0K] debug_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] debug_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] entry_list_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] erase_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] erase_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.7K] find_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] insert_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] insert_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.0K] resize_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] resize_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] resize_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] size_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.3K] trace_fn_imps.hpp + │ │ │ │ ├── [-rw-r--r-- 2.7K] cond_dealtor.hpp + │ │ │ │ ├── [-rw-r--r-- 13K] container_base_dispatch.hpp + │ │ │ │ ├── [-rw-r--r-- 8.5K] debug_map_base.hpp + │ │ │ │ ├── [drwxr-xr-x 4] eq_fn + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] eq_by_less.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.7K] hash_eq_fn.hpp + │ │ │ │ ├── [drwxr-xr-x 25] gp_hash_table_map_ + │ │ │ │ │ ├── [-rw-r--r-- 6.5K] constructor_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] ↵ + +constructor_destructor_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] ↵ + +constructor_destructor_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] debug_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] debug_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] erase_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] erase_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] find_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.7K] find_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 20K] gp_ht_map_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.7K] insert_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.0K] insert_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] iterator_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.1K] resize_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] resize_no_store_hash_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] resize_store_hash_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.4K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 15] hash_fn + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] direct_mask_range_hashing_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] direct_mod_range_hashing_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] linear_probe_fn_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] mask_based_range_hashing.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] mod_based_range_hashing.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.0K] probe_fn_base.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] quadratic_probe_fn_imp.hpp + │ │�� │ │ │ ├── [-rw-r--r-- 10K] ranged_hash_fn.hpp + │ │ │ │ │ ├── [-rw-r--r-- 10K] ranged_probe_fn.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] sample_probe_fn.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.4K] sample_range_hashing.hpp + │ │ 
│ │ │ ├── [-rw-r--r-- 2.4K] sample_ranged_hash_fn.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.5K] sample_ranged_probe_fn.hpp + │ │ │ │ ├── [drwxr-xr-x 14] left_child_next_sibling_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 4.8K] const_iterator.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.0K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.0K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.1K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 8.0K] left_child_next_sibling_heap_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] node.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.3K] point_const_iterator.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] policy_access_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.8K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 12] list_update_map_ + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] constructor_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] entry_metadata_base.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.4K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 10K] lu_map_.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.0K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 4] list_update_policy + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] lu_counter_metadata.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.6K] sample_update_policy.hpp + │ │ │ │ ├── [drwxr-xr-x 13] ov_tree_map_ + │ │ │ │ │ ├── [-rw-r--r-- 6.8K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.3K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 8.4K] node_iterators.hpp + │ │ │ │ │ ├── [-rw-r--r-- 15K] ov_tree_map_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.7K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 4.5K] traits.hpp + │ │ │ │ ├── [drwxr-xr-x 9] pairing_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 2.5K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.0K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.1K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.4K] pairing_heap_.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.7K] split_join_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 19] pat_trie_ + │ │ │ │ │ ├── [-rw-r--r-- 5.6K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.7K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.8K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.7K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.1K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 14K] insert_join_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.4K] iterators_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 16K] pat_trie_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 36K] pat_trie_base.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.2K] policy_access_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] r_erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.3K] rotate_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.6K] split_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.6K] synth_access_traits.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.4K] trace_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 
6.2K] traits.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.0K] update_fn_imps.hpp + │ │ │ │ ├── [-rw-r--r-- 4.1K] priority_queue_base_dispatch.hpp + │ │ │ │ ├── [drwxr-xr-x 12] rb_tree_map_ + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.7K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.8K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.8K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.6K] node.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.8K] rb_tree_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.7K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.2K] traits.hpp + │ │ │ │ ├── [drwxr-xr-x 10] rc_binomial_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 2.4K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.2K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.1K] rc.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.2K] rc_binomial_heap_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.4K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 1.9K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 11] resize_policy + │ │ │ │ │ ├── [-rw-r--r-- 4.8K] ↵ +cc_hash_max_collision_check_resize_trigger_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.7K] hash_exponential_size_policy_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.6K] hash_load_check_resize_trigger_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] ↵ + +hash_load_check_resize_trigger_size_base.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.3K] hash_prime_size_policy_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 6.2K] hash_standard_resize_policy_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] sample_resize_policy.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.8K] sample_resize_trigger.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.4K] sample_size_policy.hpp + │ │ │ │ ├── [drwxr-xr-x 13] splay_tree_ + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.4K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.2K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.3K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.6K] info_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.3K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.5K] node.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.9K] splay_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 9.1K] splay_tree_.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.6K] split_join_fn_imps.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.3K] traits.hpp + │ │ │ │ ├── [-rw-r--r-- 4.9K] standard_policies.hpp + │ │ │ │ ├── [drwxr-xr-x 10] thin_heap_ + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] constructors_destructor_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.8K] debug_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.9K] erase_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 1.9K] find_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 7.3K] insert_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.1K] split_join_fn_imps.hpp + │ │ │ │ │ ├── [-rw-r--r-- 8.2K] thin_heap_.hpp + │ │ │ │ │ └── [-rw-r--r-- 1.9K] trace_fn_imps.hpp + │ │ │ │ ├── [drwxr-xr-x 5] tree_policy + │ │ │ │ │ ├── [-rw-r--r-- 3.3K] node_metadata_selector.hpp + │ │ │ │ │ ├── [-rw-r--r-- 3.7K] order_statistics_imp.hpp + │ │ │ │ │ └── [-rw-r--r-- 2.3K] sample_tree_node_update.hpp + │ │ │ │ ├── [-rw-r--r-- 5.0K] tree_trace_base.hpp + │ │ │ │ ├── [drwxr-xr-x 9] trie_policy + │ │ │ │ │ ├── [-rw-r--r-- 3.3K] node_metadata_selector.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.6K] order_statistics_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 4.4K] prefix_search_node_update_imp.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] 
sample_trie_access_traits.hpp + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] sample_trie_node_update.hpp + │ │ │ │ │ ├── [-rw-r--r-- 5.7K] trie_policy_base.hpp + │ │ │ │ │ └── [-rw-r--r-- 3.0K] trie_string_access_traits_imp.hpp + │ │ │ │ ├── [-rw-r--r-- 4.3K] type_utils.hpp + │ │ │ │ ├── [-rw-r--r-- 6.3K] types_traits.hpp + │ │ │ │ └── [drwxr-xr-x 6] unordered_iterator + │ │ │ │ ├── [-rw-r--r-- 3.4K] const_iterator.hpp + │ │ │ │ ├── [-rw-r--r-- 3.9K] iterator.hpp + │ │ │ │ ├── [-rw-r--r-- 3.9K] point_const_iterator.hpp + │ │ │ │ └── [-rw-r--r-- 3.7K] point_iterator.hpp + │ │ │ ├── [-rw-r--r-- 2.9K] exception.hpp + │ │ │ ├── [-rw-r--r-- 16K] hash_policy.hpp + │ │ │ ├── [-rw-r--r-- 4.2K] list_update_policy.hpp + │ │ │ ├── [-rw-r--r-- 5.3K] priority_queue.hpp + │ │ │ ├── [-rw-r--r-- 12K] tag_and_trait.hpp + │ │ │ ├── [-rw-r--r-- 5.4K] tree_policy.hpp + │ │ │ └── [-rw-r--r-- 12K] trie_policy.hpp + │ │ ├── [-rw-r--r-- 5.5K] pod_char_traits.h + │ │ ├── [-rw-r--r-- 20K] pointer.h + │ │ ├── [-rw-r--r-- 8.8K] pool_allocator.h + │ │ ├── [-rw-r--r-- 112K] random + │ │ ├── [-rw-r--r-- 59K] random.tcc + │ │ ├── [-rw-r--r-- 3.3K] rb_tree + │ │ ├── [-rw-r--r-- 23K] rc_string_base.h + │ │ ├── [-rw-r--r-- 88K] rope + │ │ ├── [-rw-r--r-- 48K] ropeimpl.h + │ │ ├── [-rw-r--r-- 29K] slist + │ │ ├── [-rw-r--r-- 16K] sso_string_base.h + │ │ ├── [-rw-r--r-- 5.6K] stdio_filebuf.h + │ │ ├── [-rw-r--r-- 8.6K] stdio_sync_filebuf.h + │ │ ├── [-rw-r--r-- 3.6K] string_conversions.h + │ │ ├── [-rw-r--r-- 25K] throw_allocator.h + │ │ ├── [-rw-r--r-- 7.1K] type_traits.h + │ │ ├── [-rw-r--r-- 16K] typelist.h + │ │ ├── [-rw-r--r-- 108K] vstring.h + │ │ ├── [-rw-r--r-- 23K] vstring.tcc + │ │ ├── [-rw-r--r-- 3.1K] vstring_fwd.h + │ │ └── [-rw-r--r-- 5.8K] vstring_util.h + │ ├── [-rw-r--r-- 2.0K] fenv.h + │ ├── [-rw-r--r-- 1.7K] filesystem + │ ├── [-rw-r--r-- 111K] format + │ ├── [-rw-r--r-- 2.7K] forward_list + │ ├── [-rw-r--r-- 40K] fstream + │ ├── [-rw-r--r-- 45K] functional + │ ├── [-rw-r--r-- 52K] future + │ ├── [-rw-r--r-- 2.9K] initializer_list + │ ├── [-rw-r--r-- 16K] iomanip + │ ├── [-rw-r--r-- 1.6K] ios + │ ├── [-rw-r--r-- 8.2K] iosfwd + │ ├── [-rw-r--r-- 3.0K] iostream + │ ├── [-rw-r--r-- 35K] istream + │ ├── [-rw-r--r-- 2.8K] iterator + │ ├── [-rw-r--r-- 2.7K] latch + │ ├── [-rw-r--r-- 76K] limits + │ ├── [-rw-r--r-- 3.6K] list + │ ├── [-rw-r--r-- 1.5K] locale + │ ├── [-rw-r--r-- 4.0K] map + │ ├── [-rw-r--r-- 4.5K] math.h + │ ├── [-rw-r--r-- 4.8K] memory + │ ├── [-rw-r--r-- 14K] memory_resource + │ ├── [-rw-r--r-- 27K] mutex + │ ├── [-rw-r--r-- 8.4K] new + │ ├── [-rw-r--r-- 6.8K] numbers + │ ├── [-rw-r--r-- 25K] numeric + │ ├── [-rw-r--r-- 43K] optional + │ ├── [-rw-r--r-- 27K] ostream + │ ├── [drwxr-xr-x 45] parallel + │ │ ├── [-rw-r--r-- 79K] algo.h + │ │ ├── [-rw-r--r-- 18K] algobase.h + │ │ ├── [-rw-r--r-- 1.3K] algorithm + │ │ ├── [-rw-r--r-- 32K] algorithmfwd.h + │ │ ├── [-rw-r--r-- 17K] balanced_quicksort.h + │ │ ├── [-rw-r--r-- 12K] base.h + │ │ ├── [-rw-r--r-- 1.5K] basic_iterator.h + │ │ ├── [-rw-r--r-- 2.2K] checkers.h + │ │ ├── [-rw-r--r-- 3.7K] compatibility.h + │ │ ├── [-rw-r--r-- 2.8K] compiletime_settings.h + │ │ ├── [-rw-r--r-- 3.3K] equally_split.h + │ │ ├── [-rw-r--r-- 3.5K] features.h + │ │ ├── [-rw-r--r-- 13K] find.h + │ │ ├── [-rw-r--r-- 6.8K] find_selectors.h + │ │ ├── [-rw-r--r-- 3.9K] for_each.h + │ │ ├── [-rw-r--r-- 10K] for_each_selectors.h + │ │ ├── [-rw-r--r-- 5.5K] iterator.h + │ │ ├── [-rw-r--r-- 6.4K] list_partition.h + │ │ ├── [-rw-r--r-- 28K] losertree.h + │ │ ├── [-rw-r--r-- 9.4K] merge.h + │ │ 
├── [-rw-r--r-- 22K] multiseq_selection.h + │ │ ├── [-rw-r--r-- 69K] multiway_merge.h + │ │ ├── [-rw-r--r-- 15K] multiway_mergesort.h + │ │ ├── [-rw-r--r-- 20K] numeric + │ │ ├── [-rw-r--r-- 7.3K] numericfwd.h + │ │ ├── [-rw-r--r-- 3.9K] omp_loop.h + │ │ ├── [-rw-r--r-- 4.0K] omp_loop_static.h + │ │ ├── [-rw-r--r-- 4.4K] par_loop.h + │ │�� ├── [-rw-r--r-- 1.5K] parallel.h + │ │ ├── [-rw-r--r-- 7.3K] partial_sum.h + │ │ ├── [-rw-r--r-- 15K] partition.h + │ │ ├── [-rw-r--r-- 5.4K] queue.h + │ │ ├── [-rw-r--r-- 6.0K] quicksort.h + │ │ ├── [-rw-r--r-- 4.1K] random_number.h + │ │ ├── [-rw-r--r-- 18K] random_shuffle.h + │ │ ├── [-rw-r--r-- 5.3K] search.h + │ │ ├── [-rw-r--r-- 14K] set_operations.h + │ │ ├── [-rw-r--r-- 12K] settings.h + │ │ ├── [-rw-r--r-- 7.5K] sort.h + │ │ ├── [-rw-r--r-- 5.8K] tags.h + │ │ ├── [-rw-r--r-- 3.6K] types.h + │ │ ├── [-rw-r--r-- 6.0K] unique_copy.h + │ │ └── [-rw-r--r-- 9.4K] workstealing.h + │ ├── [drwxr-xr-x 24] pstl + │ │ ├── [-rw-r--r-- 67K] algorithm_fwd.h + │ │ ├── [-rw-r--r-- 170K] algorithm_impl.h + │ │ ├── [-rw-r--r-- 3.7K] execution_defs.h + │ │ ├── [-rw-r--r-- 4.7K] execution_impl.h + │ │ ├── [-rw-r--r-- 32K] glue_algorithm_defs.h + │ │ ├── [-rw-r--r-- 63K] glue_algorithm_impl.h + │ │ ├── [-rw-r--r-- 1.5K] glue_execution_defs.h + │ │ ├── [-rw-r--r-- 3.8K] glue_memory_defs.h + │ │ ├── [-rw-r--r-- 19K] glue_memory_impl.h + │ │ ├── [-rw-r--r-- 6.5K] glue_numeric_defs.h + │ │ ├── [-rw-r--r-- 12K] glue_numeric_impl.h + │ │ ├── [-rw-r--r-- 4.0K] memory_impl.h + │ │ ├── [-rw-r--r-- 7.7K] numeric_fwd.h + │ │ ├── [-rw-r--r-- 18K] numeric_impl.h + │ │ ├── [-rw-r--r-- 845] parallel_backend.h + │ │ ├── [-rw-r--r-- 3.9K] parallel_backend_serial.h + │ │ ├── [-rw-r--r-- 43K] parallel_backend_tbb.h + │ │ ├── [-rw-r--r-- 8.9K] parallel_backend_utils.h + │ │ ├── [-rw-r--r-- 4.0K] parallel_impl.h + │ │ ├── [-rw-r--r-- 7.2K] pstl_config.h + │ │ ├── [-rw-r--r-- 29K] unseq_backend_simd.h + │ │ └── [-rw-r--r-- 4.5K] utils.h + │ ├── [-rw-r--r-- 2.5K] queue + │ ├── [-rw-r--r-- 1.7K] random + │ ├── [-rw-r--r-- 254K] ranges + │ ├── [-rw-r--r-- 20K] ratio + │ ├── [-rw-r--r-- 3.1K] regex + │ ├── [-rw-r--r-- 17K] scoped_allocator + │ ├── [-rw-r--r-- 3.0K] semaphore + │ ├── [-rw-r--r-- 3.9K] set + │ ├── [-rw-r--r-- 25K] shared_mutex + │ ├── [-rw-r--r-- 2.7K] source_location + │ ├── [-rw-r--r-- 13K] span + │ ├── [-rw-r--r-- 12K] spanstream + │ ├── [-rw-r--r-- 39K] sstream + │ ├── [-rw-r--r-- 2.4K] stack + │ ├── [-rw-r--r-- 21K] stacktrace + │ ├── [-rw-r--r-- 4.0K] stdatomic.h + │ ├── [-rw-r--r-- 9.6K] stdexcept + │ ├── [-rw-r--r-- 1.7K] stdfloat + │ ├── [-rw-r--r-- 2.3K] stdlib.h + │ ├── [-rw-r--r-- 16K] stop_token + │ ├── [-rw-r--r-- 29K] streambuf + │ ├── [-rw-r--r-- 3.9K] string + │ ├── [-rw-r--r-- 27K] string_view + │ ├── [-rw-r--r-- 8.2K] syncstream + │ ├── [-rw-r--r-- 18K] system_error + │ ├── [-rw-r--r-- 1.3K] tgmath.h + │ ├── [-rw-r--r-- 7.3K] thread + │ ├── [drwxr-xr-x 64] tr1 + │ │ ├── [-rw-r--r-- 6.9K] array + │ │ ├── [-rw-r--r-- 22K] bessel_function.tcc + │ │ ├── [-rw-r--r-- 5.9K] beta_function.tcc + │ │ ├── [-rw-r--r-- 1.3K] ccomplex + │ │ ├── [-rw-r--r-- 1.5K] cctype + │ │ ├── [-rw-r--r-- 2.1K] cfenv + │ │ ├── [-rw-r--r-- 1.4K] cfloat + │ │ ├── [-rw-r--r-- 2.3K] cinttypes + │ │ ├── [-rw-r--r-- 1.5K] climits + │ │ ├── [-rw-r--r-- 43K] cmath + │ │ ├── [-rw-r--r-- 12K] complex + │ │ ├── [-rw-r--r-- 1.3K] complex.h + │ │ ├── [-rw-r--r-- 1.3K] cstdarg + │ │ ├── [-rw-r--r-- 1.4K] cstdbool + │ │ ├── [-rw-r--r-- 2.7K] cstdint + │ │ ├── [-rw-r--r-- 1.6K] cstdio + │ │ ├── 
[-rw-r--r-- 1.9K] cstdlib + │ │ ├── [-rw-r--r-- 1.3K] ctgmath + │ │ ├── [-rw-r--r-- 1.2K] ctime + │ │ ├── [-rw-r--r-- 1.2K] ctype.h + │ │ ├── [-rw-r--r-- 1.8K] cwchar + │ │ ├── [-rw-r--r-- 1.5K] cwctype + │ │ ├── [-rw-r--r-- 26K] ell_integral.tcc + │ │ ├── [-rw-r--r-- 16K] exp_integral.tcc + │ │ ├── [-rw-r--r-- 1.2K] fenv.h + │ │ ├── [-rw-r--r-- 1.2K] float.h + │ │ ├── [-rw-r--r-- 69K] functional + │ │ ├── [-rw-r--r-- 6.1K] functional_hash.h + │ │ ├── [-rw-r--r-- 14K] gamma.tcc + │ │ ├── [-rw-r--r-- 41K] hashtable.h + │ │ ├── [-rw-r--r-- 24K] hashtable_policy.h + │ │ ├── [-rw-r--r-- 27K] hypergeometric.tcc + │ │ ├── [-rw-r--r-- 1.3K] inttypes.h + │ │ ├── [-rw-r--r-- 10K] legendre_function.tcc + │ │ ├── [-rw-r--r-- 1.2K] limits.h + │ │ ├── [-rw-r--r-- 4.5K] math.h + │ │ ├── [-rw-r--r-- 1.8K] memory + │ │ ├── [-rw-r--r-- 16K] modified_bessel_func.tcc + │ │ ├── [-rw-r--r-- 3.8K] poly_hermite.tcc + │ │ ├── [-rw-r--r-- 11K] poly_laguerre.tcc + │ │ ├── [-rw-r--r-- 1.6K] random + │ │ ├── [-rw-r--r-- 70K] random.h + │ │ ├── [-rw-r--r-- 53K] random.tcc + │ │ ├── [-rw-r--r-- 91K] regex + │ │ ├── [-rw-r--r-- 14K] riemann_zeta.tcc + │ │ ├── [-rw-r--r-- 32K] shared_ptr.h + │ │ ├── [-rw-r--r-- 4.9K] special_function_util.h + │ │ ├── [-rw-r--r-- 1.2K] stdarg.h + │ │ ├── [-rw-r--r-- 1.2K] stdbool.h + │ │ ├── [-rw-r--r-- 1.2K] stdint.h + │ │ ├── [-rw-r--r-- 1.2K] stdio.h + │ │ ├── [-rw-r--r-- 1.5K] stdlib.h + │ │ ├── [-rw-r--r-- 1.3K] tgmath.h + │ │ ├── [-rw-r--r-- 12K] tuple + │ │ ├── [-rw-r--r-- 19K] type_traits + │ │ ├── [-rw-r--r-- 1.6K] unordered_map + │ │ ├── [-rw-r--r-- 10.0K] unordered_map.h + │ │ ├── [-rw-r--r-- 1.6K] unordered_set + │ │ ├── [-rw-r--r-- 9.3K] unordered_set.h + │ │ ├── [-rw-r--r-- 3.2K] utility + │ │ ├── [-rw-r--r-- 1.3K] wchar.h + │ │ └── [-rw-r--r-- 1.3K] wctype.h + │ ├── [drwxr-xr-x 8] tr2 + │ │ ├── [-rw-r--r-- 7.2K] bool_set + │ │ ├── [-rw-r--r-- 8.1K] bool_set.tcc + │ │ ├── [-rw-r--r-- 34K] dynamic_bitset + │ │ ├── [-rw-r--r-- 8.7K] dynamic_bitset.tcc + │ │ ├── [-rw-r--r-- 2.1K] ratio + │ │ └── [-rw-r--r-- 2.6K] type_traits + │ ├── [-rw-r--r-- 76K] tuple + │ ├── [-rw-r--r-- 112K] type_traits + │ ├── [-rw-r--r-- 3.4K] typeindex + │ ├── [-rw-r--r-- 8.1K] typeinfo + │ ├── [-rw-r--r-- 3.4K] unordered_map + │ ├── [-rw-r--r-- 3.2K] unordered_set + │ ├── [-rw-r--r-- 7.0K] utility + │ ├── [-rw-r--r-- 40K] valarray + │ ├── [-rw-r--r-- 64K] variant + │ ├── [-rw-r--r-- 4.7K] vector + │ ├── [-rw-r--r-- 12K] version + │ └── [drwxr-xr-x 4] x86_64-linux-gnu + │ ├── [drwxr-xr-x 24] bits + │ │ ├── [-rw-r--r-- 1.5K] atomic_word.h + │ │ ├── [-rw-r--r-- 3.5K] basic_file.h + │ │ ├── [-rw-r--r-- 2.0K] c++allocator.h + │ │ ├── [-rw-r--r-- 67K] c++config.h + │ │ ├── [-rw-r--r-- 1.9K] c++io.h + │ │ ├── [-rw-r--r-- 3.5K] c++locale.h + │ │ ├── [-rw-r--r-- 1.3K] cpu_defines.h + │ │ ├── [-rw-r--r-- 2.3K] ctype_base.h + │ │ ├── [-rw-r--r-- 2.2K] ctype_inline.h + │ │ ├── [-rw-r--r-- 2.0K] cxxabi_tweaks.h + │ │ ├── [-rw-r--r-- 4.9K] error_constants.h + │ │ ├── [-rw-r--r-- 2.6K] extc++.h + │ │ ├── [-rw-r--r-- 24K] gthr-default.h + │ │ ├── [-rw-r--r-- 24K] gthr-posix.h + │ │ ├── [-rw-r--r-- 6.6K] gthr-single.h + │ │ ├── [-rw-r--r-- 5.5K] gthr.h + │ │ ├── [-rw-r--r-- 4.4K] messages_members.h + │ │ ├── [-rw-r--r-- 6.0K] opt_random.h + │ │ ├── [-rw-r--r-- 3.2K] os_defines.h + │ │ ├── [-rw-r--r-- 4.6K] stdc++.h + │ │ ├── [-rw-r--r-- 1.7K] stdtr1c++.h + │ │ └── [-rw-r--r-- 2.9K] time_members.h + │ └── [drwxr-xr-x 3] ext + │ └── [-rw-r--r-- 4.6K] opt_random.h + ├── [drwxr-xr-x 80] lib64 + │ ├── [drwxr-xr-x 3] gcc + 
│ │ └── [drwxr-xr-x 3] x86_64-linux-gnu + │ │ └── [drwxr-xr-x 18] 13.2.0 + │ │ ├── [-rw-r--r-- 2.4K] crtbegin.o + │ │ ├── [-rw-r--r-- 2.7K] crtbeginS.o + │ │ ├── [-rw-r--r-- 2.9K] crtbeginT.o + │ │ ├── [-rw-r--r-- 1.2K] crtend.o + │ │ ├── [-rw-r--r-- 1.2K] crtendS.o + │ │ ├── [-rw-r--r-- 3.7K] crtfastmath.o + │ │ ├── [-rw-r--r-- 3.4K] crtprec32.o + │ │ ├── [-rw-r--r-- 3.4K] crtprec64.o + │ │ ├── [-rw-r--r-- 3.4K] crtprec80.o + │ │ ├── [drwxr-xr-x 129] include + │ │ │ ├── [-rw-r--r-- 7.3K] acc_prof.h + │ │ │ ├── [-rw-r--r-- 2.8K] adxintrin.h + │ │ │ ├── [-rw-r--r-- 3.1K] ammintrin.h + │ │ │ ├── [-rw-r--r-- 1.8K] amxbf16intrin.h + │ │ │ ├── [-rw-r--r-- 2.1K] amxcomplexintrin.h + │ │ │ ├── [-rw-r--r-- 1.6K] amxfp16intrin.h + │ │ │ ├── [-rw-r--r-- 2.0K] amxint8intrin.h + │ │ │ ├── [-rw-r--r-- 3.1K] amxtileintrin.h + │ │ │ ├── [-rw-r--r-- 57K] avx2intrin.h + │ │ │ ├── [-rw-r--r-- 6.4K] avx5124fmapsintrin.h + │ │ │ ├── [-rw-r--r-- 4.2K] avx5124vnniwintrin.h + │ │ │ ├── [-rw-r--r-- 4.9K] avx512bf16intrin.h + │ │ │ ├── [-rw-r--r-- 7.8K] avx512bf16vlintrin.h + │ │ │ ├── [-rw-r--r-- 8.6K] avx512bitalgintrin.h + │ │ │ ├── [-rw-r--r-- 100K] avx512bwintrin.h + │ │ │ ├── [-rw-r--r-- 5.7K] avx512cdintrin.h + │ │ │ ├── [-rw-r--r-- 91K] avx512dqintrin.h + │ │ │ ├── [-rw-r--r-- 17K] avx512erintrin.h + │ │ │ ├── [-rw-r--r-- 514K] avx512fintrin.h + │ │ │ ├── [-rw-r--r-- 210K] avx512fp16intrin.h + │ │ │ ├── [-rw-r--r-- 94K] avx512fp16vlintrin.h + │ │ │ ├── [-rw-r--r-- 3.3K] avx512ifmaintrin.h + │ │ │ ├── [-rw-r--r-- 4.7K] avx512ifmavlintrin.h + │ │ │ ├── [-rw-r--r-- 10K] avx512pfintrin.h + │ │ │ ├── [-rw-r--r-- 19K] avx512vbmi2intrin.h + │ │ │ ├── [-rw-r--r-- 36K] avx512vbmi2vlintrin.h + │ │ │ ├── [-rw-r--r-- 4.8K] avx512vbmiintrin.h + │ │ │ ├── [-rw-r--r-- 8.2K] avx512vbmivlintrin.h + │ │ │ ├── [-rw-r--r-- 142K] avx512vlbwintrin.h + │ │ │ ├── [-rw-r--r-- 60K] avx512vldqintrin.h + │ │ │ ├── [-rw-r--r-- 420K] avx512vlintrin.h + │ │ │ ├── [-rw-r--r-- 4.9K] avx512vnniintrin.h + │ │ │ ├── [-rw-r--r-- 7.2K] avx512vnnivlintrin.h + │ │ │ ├── [-rw-r--r-- 2.1K] avx512vp2intersectintrin.h + │ │ │ ├── [-rw-r--r-- 2.6K] avx512vp2intersectvlintrin.h + │ │ │ ├── [-rw-r--r-- 3.0K] avx512vpopcntdqintrin.h + │ │ │ ├── [-rw-r--r-- 4.6K] avx512vpopcntdqvlintrin.h + │ │ │ ├── [-rw-r--r-- 2.5K] avxifmaintrin.h + │ │ │ ├── [-rw-r--r-- 52K] avxintrin.h + │ │ │ ├── [-rw-r--r-- 4.3K] avxneconvertintrin.h + │ │ │ ├── [-rw-r--r-- 4.4K] avxvnniint8intrin.h + │ │ │ ├── [-rw-r--r-- 3.5K] avxvnniintrin.h + │ │ │ ├── [-rw-r--r-- 3.3K] bmi2intrin.h + │ │ │ ├── [-rw-r--r-- 6.0K] bmiintrin.h + │ │ │ ├── [-rw-r--r-- 1.1K] bmmintrin.h + │ │ │ ├── [-rw-r--r-- 2.6K] cet.h + │ │ │ ├── [-rw-r--r-- 3.3K] cetintrin.h + │ │ │ ├── [-rw-r--r-- 1.6K] cldemoteintrin.h + │ │ │ ├── [-rw-r--r-- 1.6K] clflushoptintrin.h + │ │ │ ├── [-rw-r--r-- 1.5K] clwbintrin.h + │ │ │ ├── [-rw-r--r-- 1.5K] clzerointrin.h + │ │ │ ├── [-rw-r--r-- 2.8K] cmpccxaddintrin.h + │ │ │ ├── [-rw-r--r-- 9.9K] cpuid.h + │ │ │ ├── [-rw-r--r-- 2.5K] cross-stdarg.h + │ │ │ ├── [-rw-r--r-- 51K] emmintrin.h + │ │ │ ├── [-rw-r--r-- 1.8K] enqcmdintrin.h + │ │ │ ├── [-rw-r--r-- 3.3K] f16cintrin.h + │ │ │ ├── [-rw-r--r-- 20K] float.h + │ │ │ ├── [-rw-r--r-- 8.9K] fma4intrin.h + │ │ │ ├── [-rw-r--r-- 9.9K] fmaintrin.h + │ │ │ ├── [-rw-r--r-- 2.0K] fxsrintrin.h + │ │ │ ├── [-rw-r--r-- 2.9K] gcov.h + │ │ │ ├── [-rw-r--r-- 15K] gfniintrin.h + │ │ │ ├── [-rw-r--r-- 1.6K] hresetintrin.h + │ │ │ ├── [-rw-r--r-- 7.7K] ia32intrin.h + │ │ │ ├── [-rw-r--r-- 2.7K] immintrin.h + │ │ │ ├── [-rw-r--r-- 1.2K] 
iso646.h + │ │ │ ├── [-rw-r--r-- 4.3K] keylockerintrin.h + │ │ │ ├── [-rw-r--r-- 6.2K] limits.h + │ │ │ ├── [-rw-r--r-- 3.3K] lwpintrin.h + │ │ │ ├── [-rw-r--r-- 2.3K] lzcntintrin.h + │ │ │ ├── [-rw-r--r-- 6.9K] mm3dnow.h + │ │ │ ├── [-rw-r--r-- 1.7K] mm_malloc.h + │ │ │ ├── [-rw-r--r-- 31K] mmintrin.h + │ │ │ ├── [-rw-r--r-- 2.3K] movdirintrin.h + │ │ │ ├── [-rw-r--r-- 1.7K] mwaitintrin.h + │ │ │ ├── [-rw-r--r-- 1.7K] mwaitxintrin.h + │ │ │ ├── [-rw-r--r-- 1.3K] nmmintrin.h + │ │ │ ├── [-rw-r--r-- 12K] omp.h + │ │ │ ├── [-rw-r--r-- 6.3K] openacc.h + │ │ │ ├── [-rw-r--r-- 2.3K] pconfigintrin.h + │ │ │ ├── [-rw-r--r-- 1.7K] pkuintrin.h + │ │ │ ├── [-rw-r--r-- 3.9K] pmmintrin.h + │ │ │ ├── [-rw-r--r-- 1.7K] popcntintrin.h + │ │ │ ├── [-rw-r--r-- 1.8K] prfchiintrin.h + │ │ │ ├── [-rw-r--r-- 1.4K] prfchwintrin.h + │ │ │ ├── [-rw-r--r-- 9.1K] quadmath.h + │ │ │ ├── [-rw-r--r-- 3.1K] quadmath_weak.h + │ │ │ ├── [-rw-r--r-- 2.9K] raointintrin.h + │ │ │ ├── [-rw-r--r-- 2.0K] rdseedintrin.h + │ │ │ ├── [-rw-r--r-- 2.7K] rtmintrin.h + │ │ │ ├── [drwxr-xr-x 7] sanitizer + │ │ │ │ ├── [-rw-r--r-- 12K] asan_interface.h + │ │ │ │ ├── [-rw-r--r-- 16K] common_interface_defs.h + │ │ │ │ ├── [-rw-r--r-- 4.2K] hwasan_interface.h + │ │ │ │ ├── [-rw-r--r-- 3.8K] lsan_interface.h + │ │ │ │ └── [-rw-r--r-- 7.6K] tsan_interface.h + │ │ │ ├── [-rw-r--r-- 1.6K] serializeintrin.h + │ │ │ ├── [-rw-r--r-- 6.9K] sgxintrin.h + │ │ │ ├── [-rw-r--r-- 3.1K] shaintrin.h + │ │ │ ├── [-rw-r--r-- 28K] smmintrin.h + │ │ │ ├── [drwxr-xr-x 6] ssp + │ │ │ │ ├── [-rw-r--r-- 2.3K] ssp.h + │ │ │ │ ├── [-rw-r--r-- 3.4K] stdio.h + │ │ │ │ ├── [-rw-r--r-- 5.6K] string.h + │ │ │ │ └── [-rw-r--r-- 2.7K] unistd.h + │ │ │ ├── [-rw-r--r-- 1.3K] stdalign.h + │ │ │ ├── [-rw-r--r-- 4.2K] stdarg.h + │ │ │ ├── [-rw-r--r-- 9.5K] stdatomic.h + │ │ │ ├── [-rw-r--r-- 1.5K] stdbool.h + │ │ │ ├── [-rw-r--r-- 13K] stddef.h + │ │ │ ├── [-rw-r--r-- 5.9K] stdfix.h + │ │ │ ├── [-rw-r--r-- 9.4K] stdint-gcc.h + │ │ │ ├── [-rw-r--r-- 328] stdint.h + │ │ │ ├── [-rw-r--r-- 1.1K] stdnoreturn.h + │ │ │ ├── [-rw-r--r-- 330] syslimits.h + │ │ │ ├── [-rw-r--r-- 5.1K] tbmintrin.h + │ │ │ ├── [-rw-r--r-- 8.1K] tmmintrin.h + │ │ │ ├── [-rw-r--r-- 1.7K] tsxldtrkintrin.h + │ │ │ ├── [-rw-r--r-- 2.3K] uintrintrin.h + │ │ │ ├── [-rw-r--r-- 11K] unwind.h + │ │ │ ├── [-rw-r--r-- 3.4K] vaesintrin.h + │ │ │ ├── [-rw-r--r-- 139] varargs.h + │ │ │ ├── [-rw-r--r-- 2.7K] vpclmulqdqintrin.h + │ │ │ ├── [-rw-r--r-- 2.0K] waitpkgintrin.h + │ │ │ ├── [-rw-r--r-- 1.6K] wbnoinvdintrin.h + │ │ │ ├── [-rw-r--r-- 4.5K] wmmintrin.h + │ │ │ ├── [-rw-r--r-- 6.0K] x86gprintrin.h + │ │ │ ├── [-rw-r--r-- 1.3K] x86intrin.h + │ │ │ ├── [-rw-r--r-- 44K] xmmintrin.h + │ │ │ ├── [-rw-r--r-- 28K] xopintrin.h + │ │ │ ├── [-rw-r--r-- 1.8K] xsavecintrin.h + │ │ │ ├── [-rw-r--r-- 2.4K] xsaveintrin.h + │ │ │ ├── [-rw-r--r-- 1.8K] xsaveoptintrin.h + │ │ │ ├── [-rw-r--r-- 2.1K] xsavesintrin.h + │ │ │ └── [-rw-r--r-- 1.7K] xtestintrin.h + │ │ ├── [drwxr-xr-x 6] include-fixed + │ │ │ ├── [-rw-r--r-- 750] README + │ │ │ ├── [drwxr-xr-x 3] c++ + │ │ │ │ └── [drwxr-xr-x 2] 13 + │ │ │ ├── [drwxr-xr-x 2] linux-default + │ │ │ └── [-rw-r--r-- 47K] pthread.h + │ │ ├── [drwxr-xr-x 7] install-tools + │ │ │ ├── [-rw-r--r-- 2] fixinc_list + │ │ │ ├── [-rw-r--r-- 330] gsyslimits.h + │ │ │ ├── [drwxr-xr-x 4] include + │ │ │ │ ├── [-rw-r--r-- 750] README + │ │ │ │ └── [-rw-r--r-- 6.2K] limits.h + │ │ │ ├── [-rw-r--r-- 11] macro_list + │ │ │ └── [-rw-r--r-- 85] mkheaders.conf + │ │ ├── [-rw-r--r-- 5.6M] libgcc.a + │ │ ├── 
[-rw-r--r-- 316K] libgcc_eh.a + │ │ ├── [-rw-r--r-- 283K] libgcov.a + │ │ └── [drwxr-xr-x 12] plugin + │ │ ├── [-rw-r--r-- 1.3M] gtype.state + │ │ ├── [drwxr-xr-x 441] include + │ │ │ ├── [drwxr-xr-x 3] ada + │ │ │ │ └── [drwxr-xr-x 3] gcc-interface + │ │ │ │ └── [-rw-r--r-- 4.9K] ada-tree.def + │ │ │ ├── [-rw-r--r-- 2.9K] addresses.h + │ │ │ ├── [-rw-r--r-- 2.1K] alias.h + │ │ │ ├── [-rw-r--r-- 2.4K] align.h + │ │ │ ├── [-rw-r--r-- 224] all-tree.def + │ │ │ ├── [-rw-r--r-- 15K] alloc-pool.h + │ │ │ ├── [-rw-r--r-- 12K] ansidecl.h + │ │ │ ├── [-rw-r--r-- 1.5K] array-traits.h + │ │ │ ├── [-rw-r--r-- 8.6K] asan.h + │ │ │ ├── [-rw-r--r-- 8.5K] attr-fnspec.h + │ │ │ ├── [-rw-r--r-- 14K] attribs.h + │ │ │ ├── [-rw-r--r-- 58K] auto-host.h + │ │ │ ├── [-rw-r--r-- 1.1K] auto-profile.h + │ │ │ ├── [-rw-r--r-- 40K] b-header-vars + │ │ │ ├── [-rw-r--r-- 1.0K] backend.h + │ │ │ ├── [-rw-r--r-- 18K] basic-block.h + │ │ │ ├── [-rw-r--r-- 1.2K] bb-reorder.h + │ │ │ ├── [-rw-r--r-- 37K] bitmap.h + │ │ │ ├── [-rw-r--r-- 20K] builtin-attrs.def + │ │ │ ├── [-rw-r--r-- 52K] builtin-types.def + │ │ │ ├── [-rw-r--r-- 94K] builtins.def + │ │ │ ├── [-rw-r--r-- 7.0K] builtins.h + │ │ │ ├── [-rw-r--r-- 171] bversion.h + │ │ │ ├── [drwxr-xr-x 7] c-family + │ │ │ │ ├── [-rw-r--r-- 4.1K] c-common.def + │ │ │ │ ├── [-rw-r--r-- 58K] c-common.h + │ │ │ │ ├── [-rw-r--r-- 7.8K] c-objc.h + │ │ │ │ ├── [-rw-r--r-- 9.3K] c-pragma.h + │ │ │ │ └── [-rw-r--r-- 5.2K] c-pretty-print.h + │ │ │ ├── [-rw-r--r-- 34K] c-tree.h + │ │ │ ├── [-rw-r--r-- 5.2K] calls.h + │ │ │ ├── [-rw-r--r-- 845] ccmp.h + │ │ │ ├── [-rw-r--r-- 6.7K] cfg-flags.def + │ │ │ ├── [-rw-r--r-- 6.3K] cfg.h + │ │ │ ├── [-rw-r--r-- 3.4K] cfganal.h + │ │ │ ├── [-rw-r--r-- 1016] cfgbuild.h + │ │ │ ├── [-rw-r--r-- 1.3K] cfgcleanup.h + │ │ │ ├── [-rw-r--r-- 966] cfgexpand.h + │ │ │ ├── [-rw-r--r-- 11K] cfghooks.h + │ │ │ ├── [-rw-r--r-- 27K] cfgloop.h + │ │ �� │ ├── [-rw-r--r-- 2.4K] cfgloopmanip.h + │ │ │ ├── [-rw-r--r-- 2.6K] cfgrtl.h + │ │ │ ├── [-rw-r--r-- 121K] cgraph.h + │ │ │ ├── [-rw-r--r-- 5.5K] cif-code.def + │ │ │ ├── [-rw-r--r-- 1.7K] collect-utils.h + │ │ │ ├── [-rw-r--r-- 8.4K] collect2-aix.h + │ │ │ ├── [-rw-r--r-- 1.3K] collect2.h + │ │ │ ├── [-rw-r--r-- 4.8K] color-macros.h + │ │ │ ├── [drwxr-xr-x 3] common + │ │ │ │ └── [drwxr-xr-x 3] config + │ │ │ │ └── [drwxr-xr-x 3] i386 + │ │ │ │ └── [-rw-r--r-- 5.9K] i386-cpuinfo.h + │ │ │ ├── [-rw-r--r-- 2.7K] conditions.h + │ │ │ ├── [drwxr-xr-x 11] config + │ │ │ │ ├── [-rw-r--r-- 18K] elfos.h + │ │ │ │ ├── [-rw-r--r-- 2.8K] glibc-stdint.h + │ │ │ │ ├── [-rw-r--r-- 5.8K] gnu-user.h + │ │ │ │ ├── [drwxr-xr-x 16] i386 + │ │ │ │ │ ├── [-rw-r--r-- 3.1K] att.h + │ │ │ │ │ ├── [-rw-r--r-- 1.3K] biarch64.h + │ │ │ │ │ ├── [-rw-r--r-- 2.6K] gnu-user-common.h + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] gnu-user64.h + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] i386-isa.def + │ │ │ │ │ ├── [-rw-r--r-- 3.2K] i386-opts.h + │ │ │ │ │ ├── [-rw-r--r-- 17K] i386-protos.h + │ │ │ │ │ ├── [-rw-r--r-- 111K] i386.h + │ │ │ │ │ ├── [-rw-r--r-- 2.3K] linux-common.h + │ │ │ │ │ ├── [-rw-r--r-- 1.6K] linux64.h + │ │ │ │ │ ├── [-rw-r--r-- 1.0K] stringop.def + │ │ │ │ │ ├── [-rw-r--r-- 2.8K] unix.h + │ │ │ │ │ ├── [-rw-r--r-- 2.9K] x86-64.h + │ │ │ │ │ └── [-rw-r--r-- 33K] x86-tune.def + │ │ │ │ ├── [-rw-r--r-- 1.6K] initfini-array.h + │ │ │ │ ├── [-rw-r--r-- 1.9K] linux-android.h + │ │ │ │ ├── [-rw-r--r-- 811] linux-protos.h + │ │ │ │ ├── [-rw-r--r-- 8.3K] linux.h + │ │ │ │ └── [-rw-r--r-- 1.6K] vxworks-dummy.h + │ │ │ ├── [-rw-r--r-- 217] config.h + │ │ │ 
├── [-rw-r--r-- 586] configargs.h + │ │ │ ├── [-rw-r--r-- 1.7K] context.h + │ │ │ ├── [-rw-r--r-- 1.8K] convert.h + │ │ │ ├── [-rw-r--r-- 15K] coretypes.h + │ │ │ ├── [-rw-r--r-- 1.9K] coroutine-builtins.def + │ │ │ ├── [-rw-r--r-- 2.3K] coverage.h + │ │ │ ├── [drwxr-xr-x 10] cp + │ │ │ │ ├── [-rw-r--r-- 9.5K] contracts.h + │ │ │ │ ├── [-rw-r--r-- 4.8K] cp-trait.def + │ │ │ │ ├── [-rw-r--r-- 27K] cp-tree.def + │ │ │ │ ├── [-rw-r--r-- 342K] cp-tree.h + │ │ │ │ ├── [-rw-r--r-- 4.8K] cxx-pretty-print.h + │ │ │ │ ├── [-rw-r--r-- 18K] name-lookup.h + │ │ │ │ ├── [-rw-r--r-- 6.7K] operators.def + │ │ │ │ └── [-rw-r--r-- 1.8K] type-utils.h + │ │ │ ├── [-rw-r--r-- 1.1K] cppbuiltin.h + │ │ │ ├── [-rw-r--r-- 2.9K] cppdefault.h + │ │ │ ├── [-rw-r--r-- 56K] cpplib.h + │ │ │ ├── [-rw-r--r-- 4.5K] cselib.h + │ │ │ ├── [-rw-r--r-- 15K] ctfc.h + │ │ │ ├── [drwxr-xr-x 3] d + │ │ │ │ └── [-rw-r--r-- 1.4K] d-tree.def + │ │ │ ├── [-rw-r--r-- 11K] data-streamer.h + │ │ │ ├── [-rw-r--r-- 6.5K] dbgcnt.def + │ │ │ ├── [-rw-r--r-- 1.2K] dbgcnt.h + │ │ │ ├── [-rw-r--r-- 877] dce.h + │ │ │ ├── [-rw-r--r-- 5.3K] ddg.h + │ │ │ ├── [-rw-r--r-- 11K] debug.h + │ │ │ ├── [-rw-r--r-- 41K] defaults.h + │ │ │ ├── [-rw-r--r-- 47K] df.h + │ │ │ ├── [-rw-r--r-- 2.3K] dfp.h + │ │ │ ├── [-rw-r--r-- 3.5K] diagnostic-client-data-hooks.h + │ │ │ ├── [-rw-r--r-- 2.2K] diagnostic-color.h + │ │ │ ├── [-rw-r--r-- 5.1K] diagnostic-core.h + │ │ │ ├── [-rw-r--r-- 2.0K] diagnostic-event-id.h + │ │ │ ├── [-rw-r--r-- 2.2K] diagnostic-metadata.h + │ │ │ ├── [-rw-r--r-- 6.7K] diagnostic-path.h + │ │ │ ├── [-rw-r--r-- 3.5K] diagnostic-spec.h + │ │ │ ├── [-rw-r--r-- 1.5K] diagnostic-url.h + │ │ │ ├── [-rw-r--r-- 2.6K] diagnostic.def + │ │ │ ├── [-rw-r--r-- 22K] diagnostic.h + │ │ │ ├── [-rw-r--r-- 6.6K] digraph.h + │ │ │ ├── [-rw-r--r-- 2.9K] dojump.h + │ │ │ ├── [-rw-r--r-- 3.6K] dominance.h + │ │ │ ├── [-rw-r--r-- 4.5K] domwalk.h + │ │ │ ├── [-rw-r--r-- 13K] double-int.h + │ │ │ ├── [-rw-r--r-- 9.1K] dump-context.h + │ │ │ ├── [-rw-r--r-- 23K] dumpfile.h + │ │ │ ├── [-rw-r--r-- 3.1K] dwarf2asm.h + │ │ │ ├── [-rw-r--r-- 1.9K] dwarf2ctf.h + │ │ │ ├── [-rw-r--r-- 16K] dwarf2out.h + │ │ │ ├── [-rw-r--r-- 2.1K] edit-context.h + │ │ │ ├── [-rw-r--r-- 20K] emit-rtl.h + │ │ │ ├── [-rw-r--r-- 1.6K] errors.h + │ │ │ ├── [-rw-r--r-- 1.3K] escaped_string.h + │ │ │ ├── [-rw-r--r-- 2.6K] et-forest.h + │ │ │ ├── [-rw-r--r-- 12K] except.h + ��� │ │ ├── [-rw-r--r-- 5.6K] explow.h + │ │ │ ├── [-rw-r--r-- 21K] expmed.h + │ │ │ ├── [-rw-r--r-- 13K] expr.h + │ │ │ ├── [-rw-r--r-- 16K] fibonacci_heap.h + │ │ │ ├── [-rw-r--r-- 1.7K] file-find.h + │ │ │ ├── [-rw-r--r-- 1.2K] file-prefix-map.h + │ │ │ ├── [-rw-r--r-- 3.4K] filenames.h + │ │ │ ├── [-rw-r--r-- 4.1K] fixed-value.h + │ │ │ ├── [-rw-r--r-- 15K] flag-types.h + │ │ │ ├── [-rw-r--r-- 3.5K] flags.h + │ │ │ ├── [-rw-r--r-- 1.0K] fold-const-call.h + │ │ │ ├── [-rw-r--r-- 13K] fold-const.h + │ │ │ ├── [-rw-r--r-- 11K] function-abi.h + │ │ │ ├── [-rw-r--r-- 26K] function.h + │ │ │ ├── [-rw-r--r-- 1.2K] gcc-plugin.h + │ │ │ ├── [-rw-r--r-- 6.3K] gcc-rich-location.h + │ │ │ ├── [-rw-r--r-- 942] gcc-symtab.h + │ │ │ ├── [-rw-r--r-- 3.0K] gcc.h + │ │ │ ├── [-rw-r--r-- 1.8K] gcov-counter.def + │ │ │ ├── [-rw-r--r-- 15K] gcov-io.h + │ │ │ ├── [-rw-r--r-- 1.4K] gcse-common.h + │ │ │ ├── [-rw-r--r-- 1.5K] gcse.h + │ │ │ ├── [-rw-r--r-- 1.2K] generic-match.h + │ │ │ ├── [-rw-r--r-- 17K] gengtype.h + │ │ │ ├── [-rw-r--r-- 46K] genrtl.h + │ │ │ ├── [-rw-r--r-- 6.8K] gensupport.h + │ │ │ ├── [-rw-r--r-- 3.8K] ggc-internal.h + 
│ │ │ ├── [-rw-r--r-- 11K] ggc.h + │ │ │ ├── [-rw-r--r-- 1.5K] gimple-array-bounds.h + │ │ │ ├── [-rw-r--r-- 1.5K] gimple-builder.h + │ │ │ ├── [-rw-r--r-- 5.0K] gimple-expr.h + │ │ │ ├── [-rw-r--r-- 11K] gimple-fold.h + │ │ │ ├── [-rw-r--r-- 10K] gimple-iterator.h + │ │ │ ├── [-rw-r--r-- 981] gimple-low.h + │ │ │ ├── [-rw-r--r-- 8.8K] gimple-match.h + │ │ │ ├── [-rw-r--r-- 4.9K] gimple-predicate-analysis.h + │ │ │ ├── [-rw-r--r-- 2.5K] gimple-predict.h + │ │ │ ├── [-rw-r--r-- 1.6K] gimple-pretty-print.h + │ │ │ ├── [-rw-r--r-- 4.0K] gimple-range-cache.h + │ │ │ ├── [-rw-r--r-- 2.1K] gimple-range-edge.h + │ │ │ ├── [-rw-r--r-- 6.2K] gimple-range-fold.h + │ │ │ ├── [-rw-r--r-- 8.5K] gimple-range-gori.h + │ │ │ ├── [-rw-r--r-- 3.0K] gimple-range-infer.h + │ │ │ ├── [-rw-r--r-- 2.0K] gimple-range-op.h + │ │ │ ├── [-rw-r--r-- 4.3K] gimple-range-path.h + │ │ │ ├── [-rw-r--r-- 2.3K] gimple-range-trace.h + │ │ │ ├── [-rw-r--r-- 3.8K] gimple-range.h + │ │ │ ├── [-rw-r--r-- 1.9K] gimple-ssa-warn-access.h + │ │ │ ├── [-rw-r--r-- 1.1K] gimple-ssa-warn-restrict.h + │ │ │ ├── [-rw-r--r-- 5.3K] gimple-ssa.h + │ │ │ ├── [-rw-r--r-- 1.1K] gimple-streamer.h + │ │ │ ├── [-rw-r--r-- 4.2K] gimple-walk.h + │ │ │ ├── [-rw-r--r-- 17K] gimple.def + │ │ │ ├── [-rw-r--r-- 158K] gimple.h + │ │ │ ├── [-rw-r--r-- 1.5K] gimplify-me.h + │ │ │ ├── [-rw-r--r-- 3.5K] gimplify.h + │ │ │ ├── [-rw-r--r-- 4.6K] glimits.h + │ │ │ ├── [-rw-r--r-- 15K] gomp-constants.h + │ │ │ ├── [-rw-r--r-- 951] graph.h + │ │ │ ├── [-rw-r--r-- 2.2K] graphds.h + │ │ │ ├── [-rw-r--r-- 12K] graphite.h + │ │ │ ├── [-rw-r--r-- 1.5K] graphviz.h + │ │ │ ├── [-rw-r--r-- 2.4K] gsstruct.def + │ │ │ ├── [-rw-r--r-- 1.7K] gsyms.h + │ │ │ ├── [-rw-r--r-- 330] gsyslimits.h + │ │ │ ├── [-rw-r--r-- 9.9K] gtm-builtins.def + │ │ │ ├── [-rw-r--r-- 173K] gtype-desc.h + │ │ │ ├── [-rw-r--r-- 16K] hard-reg-set.h + │ │ │ ├── [-rw-r--r-- 5.5K] hash-map-traits.h + │ │ │ ├── [-rw-r--r-- 11K] hash-map.h + │ │ │ ├── [-rw-r--r-- 5.6K] hash-set.h + │ │ │ ├── [-rw-r--r-- 39K] hash-table.h + │ │ │ ├── [-rw-r--r-- 11K] hash-traits.h + │ │ │ ├── [-rw-r--r-- 7.2K] hashtab.h + │ │ │ ├── [-rw-r--r-- 1.1K] highlev-plugin-common.h + │ │ │ ├── [-rw-r--r-- 6.1K] hooks.h + │ │ │ ├── [-rw-r--r-- 1.8K] hosthooks-def.h + │ │ │ ├── [-rw-r--r-- 1.9K] hosthooks.h + │ │ │ ├── [-rw-r--r-- 5.5K] hw-doloop.h + │ │ │ ├── [-rw-r--r-- 10K] hwint.h + │ │ │ ├── [-rw-r--r-- 4.2K] ifcvt.h + │ │ │ ├── [-rw-r--r-- 5.1K] inchash.h + │ │ │ ├── [-rw-r--r-- 1.7K] incpath.h + │ │ │ ├── [-rw-r--r-- 8.9K] input.h + │ │ │ ├── [-rw-r--r-- 1.8K] insn-addr.h + │ │ │ ├── [-rw-r--r-- 5.5K] insn-attr-common.h + │ │ │ ├── [-rw-r--r-- 11K] insn-attr.h + │ │ │ ├── [-rw-r--r-- 303K] insn-codes.h + │ │ │ ├── [-rw-r--r-- 526] insn-config.h + │ │ │ ├── [-rw-r--r-- 11K] insn-constants.h + │ │ │ ├── [-rw-r--r-- 1.3M] insn-flags.h + │ │ │ ├── [-rw-r--r-- 24K] insn-modes-inline.h + │ │ │ ├── [-rw-r--r-- 28K] insn-modes.h + │ │ │ ├── [-rw-r--r-- 3.5K] insn-notes.def + │ │ │ ├── [-rw-r--r-- 2.6K] int-vector-builder.h + │ │ │ ├── [-rw-r--r-- 22K] internal-fn.def + │ │ │ ├── [-rw-r--r-- 8.8K] internal-fn.h + │ │ │ ├── [-rw-r--r-- 2.0K] intl.h + │ │ │ ├── [-rw-r--r-- 16K] ipa-fnsummary.h + │ │ │ ├── [-rw-r--r-- 11K] ipa-icf-gimple.h + │ │ │ ├── [-rw-r--r-- 21K] ipa-icf.h + │ │ │ ├── [-rw-r--r-- 4.2K] ipa-inline.h + │ │ │ ├── [-rw-r--r-- 22K] ipa-modref-tree.h + │ │ │ ├── [-rw-r--r-- 4.9K] ipa-modref.h + │ │ │ ├── [-rw-r--r-- 18K] ipa-param-manipulation.h + │ │ │ ├── [-rw-r--r-- 8.7K] ipa-predicate.h + │ │ │ ├── [-rw-r--r-- 40K] 
ipa-prop.h + │ │ │ ├── [-rw-r--r-- 3.4K] ipa-ref.h + │ │ │ ├── [-rw-r--r-- 1.1K] ipa-reference.h + │ │ │ ├── [-rw-r--r-- 9.5K] ipa-utils.h + │ │ │ ├── [-rw-r--r-- 61K] ira-int.h + │ │ │ ├── [-rw-r--r-- 9.7K] ira.h + │ │ │ ├── [-rw-r--r-- 7.5K] is-a.h + │ │ │ ├── [-rw-r--r-- 5.8K] iterator-utils.h + │ │ │ ├── [-rw-r--r-- 4.6K] json.h + │ │ │ ├── [-rw-r--r-- 17K] langhooks-def.h + │ │ │ ├── [-rw-r--r-- 27K] langhooks.h + │ │ │ ├── [-rw-r--r-- 1.3K] lcm.h + │ │ │ ├── [-rw-r--r-- 2.5K] libfuncs.h + │ │ │ ├── [-rw-r--r-- 27K] libiberty.h + │ │ │ ├── [-rw-r--r-- 1.4K] limitx.h + │ │ │ ├── [-rw-r--r-- 270] limity.h + │ │ │ ├── [-rw-r--r-- 72K] line-map.h + │ │ │ ├── [-rw-r--r-- 2.4K] logical-location.h + │ │ │ ├── [-rw-r--r-- 893] loop-unroll.h + │ │ │ ├── [-rw-r--r-- 2.0K] lower-subreg.h + │ │ │ ├── [-rw-r--r-- 18K] lra-int.h + │ │ │ ├── [-rw-r--r-- 1.3K] lra.h + │ │ │ ├── [-rw-r--r-- 1.6K] lto-compress.h + │ │ │ ├── [-rw-r--r-- 1.6K] lto-section-names.h + │ │ │ ├── [-rw-r--r-- 37K] lto-streamer.h + │ │ │ ├── [drwxr-xr-x 3] m2 + │ │ │ │ └── [-rw-r--r-- 975] m2-tree.def + │ │ │ ├── [-rw-r--r-- 11K] machmode.def + │ │ │ ├── [-rw-r--r-- 35K] machmode.h + │ │ │ ├── [-rw-r--r-- 1.5K] make-unique.h + │ │ │ ├── [-rw-r--r-- 5.3K] md5.h + │ │ │ ├── [-rw-r--r-- 1.2K] mem-stats-traits.h + │ │ │ ├── [-rw-r--r-- 18K] mem-stats.h + │ │ │ ├── [-rw-r--r-- 3.3K] memmodel.h + │ │ │ ├── [-rw-r--r-- 2.4K] memory-block.h + │ │ │ ├── [-rw-r--r-- 2.0K] mode-classes.def + │ │ │ ├── [-rw-r--r-- 7.4K] mux-utils.h + │ │ │ ├── [drwxr-xr-x 3] objc + │ │ │ │ └── [-rw-r--r-- 3.3K] objc-tree.def + │ │ │ ├── [-rw-r--r-- 2.4K] obstack-utils.h + │ │ │ ├── [-rw-r--r-- 22K] obstack.h + │ │ │ ├── [-rw-r--r-- 22K] omp-builtins.def + │ │ │ ├── [-rw-r--r-- 1.1K] omp-expand.h + │ │ │ ├── [-rw-r--r-- 5.8K] omp-general.h + │ │ │ ├── [-rw-r--r-- 1.1K] omp-low.h + │ │ │ ├── [-rw-r--r-- 1.2K] omp-offload.h + │ │ │ ├── [-rw-r--r-- 880] omp-simd-clone.h + │ │ │ ├── [-rw-r--r-- 9.2K] opt-problem.h + │ │ │ ├── [-rw-r--r-- 2.5K] opt-suggestions.h + │ │ │ ├── [-rw-r--r-- 3.4K] optabs-libfuncs.h + │ │ │ ├── [-rw-r--r-- 6.9K] optabs-query.h + │ │ │ ├── [-rw-r--r-- 1.8K] optabs-tree.h + │ │ │ ├── [-rw-r--r-- 22K] optabs.def + │ │ │ ├── [-rw-r--r-- 14K] optabs.h + │ │ │ ├── [-rw-r--r-- 2.0K] optinfo-emit-json.h + │ │ │ ├── [-rw-r--r-- 5.0K] optinfo.h + │ │ │ ├── [-rw-r--r-- 492K] options.h + │ │ │ ├── [-rw-r--r-- 1.0K] opts-diagnostic.h + │ │ │ ├── [-rw-r--r-- 1.7K] opts-jobserver.h + │ │ │ ├── [-rw-r--r-- 20K] opts.h + │ │ │ ├── [-rw-r--r-- 4.9K] ordered-hash-map.h + │ │ │ ├── [-rw-r--r-- 25K] output.h + │ │ │ ├── [-rw-r--r-- 24K] pass-instances.def + │ │ │ ├── [-rw-r--r-- 4.0K] pass_manager.h + │ │ │ ├── [-rw-r--r-- 22K] passes.def + │ │ │ ├── [-rw-r--r-- 18K] plugin-api.h + │ │ │ ├── [-rw-r--r-- 595] plugin-version.h + │ │ │ ├── [-rw-r--r-- 3.3K] plugin.def + │ │ │ ├── [-rw-r--r-- 6.4K] plugin.h + │ │ │ ├── [-rw-r--r-- 9.5K] pointer-query.h + │ │ │ ├── [-rw-r--r-- 4.2K] poly-int-types.h + │ │ │ ├── [-rw-r--r-- 80K] poly-int.h + │ │ │ ├── [-rw-r--r-- 9.9K] predict.def + │ │ │ ├── [-rw-r--r-- 4.6K] predict.h + │ │ │ ├── [-rw-r--r-- 1.2K] prefix.h + │ │ │ ├── [-rw-r--r-- 16K] pretty-print.h + │ │ │ ├── [-rw-r--r-- 5.5K] print-rtl.h + │ │ │ ├── [-rw-r--r-- 1.9K] print-tree.h + │ │ │ ├── [-rw-r--r-- 38K] profile-count.h + │ │ │ ├── [-rw-r--r-- 2.3K] profile.h + │ │ │ ├── [-rw-r--r-- 11K] range-op.h + │ │ │ ├── [-rw-r--r-- 1.7K] range.h + │ │ │ ├── [-rw-r--r-- 13K] read-md.h + │ │ │ ├── [-rw-r--r-- 1002] read-rtl-function.h + │ │ │ ├── [-rw-r--r-- 21K] 
real.h + │ │ │ ├── [-rw-r--r-- 1.3K] realmpfr.h + │ │ │ ├── [-rw-r--r-- 18K] recog.h + │ │ │ ├── [-rw-r--r-- 11K] reg-notes.def + │ │ │ ├── [-rw-r--r-- 877] regcprop.h + │ │ │ ├── [-rw-r--r-- 3.5K] regrename.h + │ │ │ ├── [-rw-r--r-- 12K] regs.h + │ │ │ ├── [-rw-r--r-- 4.7K] regset.h + │ │ │ ├── [-rw-r--r-- 17K] reload.h + │ │ │ ├── [-rw-r--r-- 1.9K] resource.h + │ │ │ ├── [-rw-r--r-- 1.0K] rtl-error.h + │ │ │ ├── [-rw-r--r-- 8.2K] rtl-iter.h + │ │ │ ├── [-rw-r--r-- 1.9K] rtl-ssa.h + │ │ │ ├── [-rw-r--r-- 60K] rtl.def + │ │ │ ├── [-rw-r--r-- 156K] rtl.h + │ │ │ ├── [-rw-r--r-- 10K] rtlanal.h + │ │ │ ├── [-rw-r--r-- 850] rtlhash.h + │ │ │ ├── [-rw-r--r-- 1.8K] rtlhooks-def.h + │ │ │ ├── [-rw-r--r-- 3.8K] rtx-vector-builder.h + │ │ │ ├── [-rw-r--r-- 884] run-rtl-passes.h + │ │ │ ├── [-rw-r--r-- 5.5K] safe-ctype.h + │ │ │ ├── [-rw-r--r-- 33K] sanitizer.def + │ │ │ ├── [-rw-r--r-- 10K] sbitmap.h + │ │ │ ├── [-rw-r--r-- 60K] sched-int.h + │ │ │ ├── [-rw-r--r-- 6.8K] sel-sched-dump.h + │ │ │ ├── [-rw-r--r-- 48K] sel-sched-ir.h + │ │ │ ├── [-rw-r--r-- 920] sel-sched.h + │ │ │ ├── [-rw-r--r-- 1.5K] selftest-diagnostic.h + │ │ │ ├── [-rw-r--r-- 3.2K] selftest-rtl.h + │ │ │ ├── [-rw-r--r-- 15K] selftest.h + │ │ │ ├── [-rw-r--r-- 7.3K] sese.h + │ │ │ ├── [-rw-r--r-- 6.1K] shortest-paths.h + │ │ │ ├── [-rw-r--r-- 1.1K] shrink-wrap.h + │ │ │ ├── [-rw-r--r-- 1.0K] signop.h + │ │ │ ├── [-rw-r--r-- 6.7K] sparseset.h + │ │ │ ├── [-rw-r--r-- 1.4K] spellcheck-tree.h + │ │ │ ├── [-rw-r--r-- 7.2K] spellcheck.h + │ │ │ ├── [-rw-r--r-- 16K] splay-tree-utils.h + │ │ │ ├── [-rw-r--r-- 6.1K] splay-tree.h + │ │ │ ├── [-rw-r--r-- 6.4K] sreal.h + │ │ │ ├── [-rw-r--r-- 29K] ssa-iterators.h + │ │ │ ├── [-rw-r--r-- 1.0K] ssa.h + │ │ │ ├── [-rw-r--r-- 2.8K] statistics.h + │ │ │ ├── [-rw-r--r-- 2.0K] stmt.h + │ │ │ ├── [-rw-r--r-- 5.0K] stor-layout.h + │ │ │ ├── [-rw-r--r-- 3.6K] streamer-hooks.h + │ │ │ ├── [-rw-r--r-- 1.5K] stringpool.h + │ │ │ ├── [-rw-r--r-- 4.6K] substring-locations.h + │ │ │ ├── [-rw-r--r-- 27K] symbol-summary.h + │ │ │ ├── [-rw-r--r-- 2.0K] symtab-clones.h + │ │ │ ├── [-rw-r--r-- 4.9K] symtab-thunks.h + │ │ │ ├── [-rw-r--r-- 3.6K] symtab.h + │ │ │ ├── [-rw-r--r-- 27K] sync-builtins.def + │ │ │ ├── [-rw-r--r-- 41K] system.h + │ │ │ ├── [-rw-r--r-- 4.2K] target-def.h + │ │ │ ├── [-rw-r--r-- 3.3K] target-globals.h + │ │ │ ├── [-rw-r--r-- 4.0K] target-hooks-macros.h + │ │ │ ├── [-rw-r--r-- 5.3K] target-insns.def + │ │ │ ├── [-rw-r--r-- 317K] target.def + │ │ │ ├── [-rw-r--r-- 9.1K] target.h + │ │ │ ├── [-rw-r--r-- 14K] targhooks.h + │ │ │ ├── [-rw-r--r-- 18K] timevar.def + │ │ │ ├── [-rw-r--r-- 7.6K] timevar.h + │ │ │ ├── [-rw-r--r-- 15K] tm-preds.h + │ │ │ ├── [-rw-r--r-- 1.2K] tm.h + │ │ │ ├── [-rw-r--r-- 178] tm_p.h + │ │ │ ├── [-rw-r--r-- 2.8K] toplev.h + │ │ │ ├── [-rw-r--r-- 903] tracer.h + │ │ │ ├── [-rw-r--r-- 1.9K] trans-mem.h + │ │ │ ├── [-rw-r--r-- 3.8K] tree-affine.h + │ │ │ ├── [-rw-r--r-- 5.8K] tree-cfg.h + │ │ │ ├── [-rw-r--r-- 1.2K] tree-cfgcleanup.h + │ │ │ ├── [-rw-r--r-- 23K] tree-check.h + │ │ │ ├── [-rw-r--r-- 7.1K] tree-chrec.h + │ │ │ ├── [-rw-r--r-- 68K] tree-core.h + │ │ │ ├── [-rw-r--r-- 25K] tree-data-ref.h + │ │ │ ├── [-rw-r--r-- 1.9K] tree-dfa.h + │ │ │ ├── [-rw-r--r-- 2.7K] tree-diagnostic.h + │ │ │ ├── [-rw-r--r-- 2.8K] tree-dump.h + │ │ │ ├── [-rw-r--r-- 2.4K] tree-eh.h + │ │ │ ├── [-rw-r--r-- 1.2K] tree-hash-traits.h + │ │ │ ├── [-rw-r--r-- 1.9K] tree-hasher.h + │ │ │ ├── [-rw-r--r-- 845] tree-if-conv.h + │ │ │ ├── [-rw-r--r-- 8.6K] tree-inline.h + │ │ │ ├── 
[-rw-r--r-- 1.9K] tree-into-ssa.h + │ │ │ ├── [-rw-r--r-- 4.1K] tree-iterator.h + │ │ │ ├── [-rw-r--r-- 2.2K] tree-logical-location.h + │ │ │ ├── [-rw-r--r-- 2.7K] tree-nested.h + │ │ │ ├── [-rw-r--r-- 1.1K] tree-object-size.h + │ │ │ ├── [-rw-r--r-- 2.7K] tree-outof-ssa.h + │ │ │ ├── [-rw-r--r-- 864] tree-parloops.h + │ │ │ ├── [-rw-r--r-- 33K] tree-pass.h + │ │ │ ├── [-rw-r--r-- 2.2K] tree-phinodes.h + │ │ │ ├── [-rw-r--r-- 2.5K] tree-pretty-print.h + │ │ │ ├── [-rw-r--r-- 2.6K] tree-scalar-evolution.h + │ │ │ ├── [-rw-r--r-- 1.1K] tree-sra.h + │ │ │ ├── [-rw-r--r-- 1.6K] tree-ssa-address.h + │ │ │ ├── [-rw-r--r-- 1.3K] tree-ssa-alias-compare.h + │ │ │ ├── [-rw-r--r-- 7.7K] tree-ssa-alias.h + │ │ │ ├── [-rw-r--r-- 1.1K] tree-ssa-ccp.h + │ │ │ ├── [-rw-r--r-- 925] tree-ssa-coalesce.h + │ │ │ ├── [-rw-r--r-- 783] tree-ssa-dce.h + │ │ │ ├── [-rw-r--r-- 866] tree-ssa-dom.h + │ │ │ ├── [-rw-r--r-- 1.2K] tree-ssa-dse.h + │ │ │ ├── [-rw-r--r-- 9.6K] tree-ssa-live.h + │ │ │ ├── [-rw-r--r-- 1.5K] tree-ssa-loop-ivopts.h + │ │ │ ├── [-rw-r--r-- 2.2K] tree-ssa-loop-manip.h + │ │ │ ├── [-rw-r--r-- 3.0K] tree-ssa-loop-niter.h + │ │ │ ├── [-rw-r--r-- 2.8K] tree-ssa-loop.h + │ │ │ ├── [-rw-r--r-- 948] tree-ssa-math-opts.h + │ │ │ ├── [-rw-r--r-- 3.9K] tree-ssa-operands.h + │ │ │ ├── [-rw-r--r-- 4.1K] tree-ssa-propagate.h + │ │ │ ├── [-rw-r--r-- 1.3K] tree-ssa-reassoc.h + │ │ │ ├── [-rw-r--r-- 10K] tree-ssa-sccvn.h + │ │ │ ├── [-rw-r--r-- 6.8K] tree-ssa-scopedtables.h + │ │ │ ├── [-rw-r--r-- 1.5K] tree-ssa-strlen.h + │ │ │ ├── [-rw-r--r-- 917] tree-ssa-ter.h + │ │ │ ├── [-rw-r--r-- 4.1K] tree-ssa-threadedge.h + │ │ │ ├── [-rw-r--r-- 4.7K] tree-ssa-threadupdate.h + │ │ │ ├── [-rw-r--r-- 3.5K] tree-ssa.h + │ │ │ ├── [-rw-r--r-- 4.7K] tree-ssanames.h + │ │ │ ├── [-rw-r--r-- 1.1K] tree-stdarg.h + │ │ │ ├── [-rw-r--r-- 4.2K] tree-streamer.h + │ │ │ ├── [-rw-r--r-- 28K] tree-switch-conversion.h + │ │ │ ├── [-rw-r--r-- 4.3K] tree-vector-builder.h + │ │ │ ├── [-rw-r--r-- 92K] tree-vectorizer.h + │ │ │ ├── [-rw-r--r-- 1.7K] tree-vrp.h + │ │ │ ├── [-rw-r--r-- 71K] tree.def + │ │ │ ├── [-rw-r--r-- 257K] tree.h + │ │ │ ├── [-rw-r--r-- 2.8K] treestruct.def + │ │ │ ├── [-rw-r--r-- 2.0K] tristate.h + │ │ │ ├── [-rw-r--r-- 876] tsan.h + │ │ │ ├── [-rw-r--r-- 3.8K] tsystem.h + │ │ │ ├── [-rw-r--r-- 1.5K] typeclass.h + │ │ │ ├── [-rw-r--r-- 16K] typed-splay-tree.h + │ │ │ ├── [-rw-r--r-- 2.4K] ubsan.h + │ │ │ ├── [-rw-r--r-- 4.5K] valtrack.h + │ │ │ ├── [-rw-r--r-- 2.0K] value-pointer-equiv.h + │ │ │ ├── [-rw-r--r-- 4.7K] value-prof.h + │ │ │ ├── [-rw-r--r-- 5.2K] value-query.h + │ │ │ ├── [-rw-r--r-- 1.4K] value-range-pretty-print.h + │ │ │ ├── [-rw-r--r-- 6.4K] value-range-storage.h + │ │ │ ├── [-rw-r--r-- 33K] value-range.h + │ │ │ ├── [-rw-r--r-- 17K] value-relation.h + │ │ │ ├── [-rw-r--r-- 3.3K] varasm.h + │ │ │ ├── [-rw-r--r-- 5.3K] vec-perm-indices.h + │ │ │ ├── [-rw-r--r-- 67K] vec.h + │ │ │ ├── [-rw-r--r-- 20K] vector-builder.h + │ │ │ ├── [-rw-r--r-- 843] version.h + │ │ │ ├── [-rw-r--r-- 6.4K] vmsdbg.h + │ │ │ ├── [-rw-r--r-- 3.3K] vr-values.h + │ │ │ ├── [-rw-r--r-- 6.7K] vtable-verify.h + │ │ │ ├── [-rw-r--r-- 3.3K] wide-int-bitmask.h + │ │ │ ├── [-rw-r--r-- 1.4K] wide-int-print.h + │ │ │ ├── [-rw-r--r-- 111K] wide-int.h + │ │ │ └── [-rw-r--r-- 1.1K] xcoff.h + │ │ ├── [-rwxr-xr-x 1.0K] libcc1plugin.la + │ │ ├── [lrwxrwxrwx 21] libcc1plugin.so -> libcc1plugin.so.0.0.0 + │ │ ├── [lrwxrwxrwx 21] libcc1plugin.so.0 -> ↵ +libcc1plugin.so.0.0.0 + │ │ ├── [-rwxr-xr-x 55K] libcc1plugin.so.0.0.0 + │ │ ├── [-rwxr-xr-x 1.0K] 
libcp1plugin.la + │ │ ├── [lrwxrwxrwx 21] libcp1plugin.so -> libcp1plugin.so.0.0.0 + │ │ ├── [lrwxrwxrwx 21] libcp1plugin.so.0 -> libcp1plugin.so.0.0.0 + │ │ └── [-rwxr-xr-x 120K] libcp1plugin.so.0.0.0 + │ ├── [-rw-r--r-- 2.9M] libasan.a + │ ├── [-rwxr-xr-x 1000] libasan.la + │ ├── [lrwxrwxrwx 16] libasan.so -> libasan.so.8.0.0 + │ ├── [lrwxrwxrwx 16] libasan.so.8 -> libasan.so.8.0.0 + │ ├── [-rwxr-xr-x 1.4M] libasan.so.8.0.0 + │ ├── [-rw-r--r-- 9.3K] libasan_preinit.o + │ ├── [-rw-r--r-- 147K] libatomic.a + │ ├── [-rwxr-xr-x 964] libatomic.la + │ ├── [lrwxrwxrwx 18] libatomic.so -> libatomic.so.1.2.0 + │ ├── [lrwxrwxrwx 18] libatomic.so.1 -> libatomic.so.1.2.0 + │ ├── [-rwxr-xr-x 30K] libatomic.so.1.2.0 + │ ├── [-rwxr-xr-x 962] libcc1.la + │ ├── [lrwxrwxrwx 15] libcc1.so -> libcc1.so.0.0.0 + │ ├── [lrwxrwxrwx 15] libcc1.so.0 -> libcc1.so.0.0.0 + │ ├── [-rwxr-xr-x 123K] libcc1.so.0.0.0 + │ ├── [-rw-r--r-- 132] libgcc_s.so + │ ├── [-rw-r--r-- 711K] libgcc_s.so.1 + │ ├── [-rw-r--r-- 531K] libgomp.a + │ ├── [-rwxr-xr-x 946] libgomp.la + │ ├── [lrwxrwxrwx 16] libgomp.so -> libgomp.so.1.0.0 + │ ├── [lrwxrwxrwx 16] libgomp.so.1 -> libgomp.so.1.0.0 + │ ├── [-rwxr-xr-x 296K] libgomp.so.1.0.0 + │ ├── [-rw-r--r-- 164] libgomp.spec + │ ├── [-rw-r--r-- 1.1M] libhwasan.a + │ ├── [-rwxr-xr-x 1014] libhwasan.la + │ ├── [lrwxrwxrwx 18] libhwasan.so -> libhwasan.so.0.0.0 + │ ├── [lrwxrwxrwx 18] libhwasan.so.0 -> libhwasan.so.0.0.0 + │ ├── [-rwxr-xr-x 461K] libhwasan.so.0.0.0 + │ ├── [-rw-r--r-- 4.9K] libhwasan_preinit.o + │ ├── [-rw-r--r-- 201K] libitm.a + │ ├── [-rwxr-xr-x 934] libitm.la + │ ├── [lrwxrwxrwx 15] libitm.so -> libitm.so.1.0.0 + │ ├── [lrwxrwxrwx 15] libitm.so.1 -> libitm.so.1.0.0 + │ ├── [-rwxr-xr-x 98K] libitm.so.1.0.0 + │ ├── [-rw-r--r-- 162] libitm.spec + │ ├── [-rw-r--r-- 1.1M] liblsan.a + │ ├── [-rwxr-xr-x 1000] liblsan.la + │ ├── [lrwxrwxrwx 16] liblsan.so -> liblsan.so.0.0.0 + │ ├── [lrwxrwxrwx 16] liblsan.so.0 -> liblsan.so.0.0.0 + │ ├── [-rwxr-xr-x 445K] liblsan.so.0.0.0 + │ ├── [-rw-r--r-- 4.8K] liblsan_preinit.o + │ ├── [-rw-r--r-- 624K] libquadmath.a + │ ├── [-rwxr-xr-x 973] libquadmath.la + │ ├── [lrwxrwxrwx 20] libquadmath.so -> libquadmath.so.0.0.0 + │ ├── [lrwxrwxrwx 20] libquadmath.so.0 -> libquadmath.so.0.0.0 + │ ├── [-rwxr-xr-x 283K] libquadmath.so.0.0.0 + │ ├── [-rw-r--r-- 362] libsanitizer.spec + │ ├── [-rw-r--r-- 26K] libssp.a + │ ├── [-rwxr-xr-x 934] libssp.la + │ ├── [lrwxrwxrwx 15] libssp.so -> libssp.so.0.0.0 + │ ├── [lrwxrwxrwx 15] libssp.so.0 -> libssp.so.0.0.0 + │ ├── [-rwxr-xr-x 14K] libssp.so.0.0.0 + │ ├── [-rw-r--r-- 1.6K] libssp_nonshared.a + │ ├── [-rwxr-xr-x 916] libssp_nonshared.la + │ ├── [-rw-r--r-- 6.2M] libstdc++.a + │ ├── [-rwxr-xr-x 961] libstdc++.la + │ ├── [lrwxrwxrwx 19] libstdc++.so -> libstdc++.so.6.0.32 + │ ├── [lrwxrwxrwx 19] libstdc++.so.6 -> libstdc++.so.6.0.32 + │ ├── [-rwxr-xr-x 2.3M] libstdc++.so.6.0.32 + │ ├── [-rw-r--r-- 2.3K] libstdc++.so.6.0.32-gdb.py + │ ├── [-rw-r--r-- 5.3K] libstdc++exp.a + │ ├── [-rwxr-xr-x 904] libstdc++exp.la + │ ├── [-rw-r--r-- 705K] libstdc++fs.a + │ ├── [-rwxr-xr-x 901] libstdc++fs.la + │ ├── [-rw-r--r-- 370K] libsupc++.a + │ ├── [-rwxr-xr-x 895] libsupc++.la + │ ├── [-rw-r--r-- 2.3M] libtsan.a + │ ├── [-rwxr-xr-x 1000] libtsan.la + │ ├── [lrwxrwxrwx 16] libtsan.so -> libtsan.so.2.0.0 + │ ├── [lrwxrwxrwx 16] libtsan.so.2 -> libtsan.so.2.0.0 + │ ├── [-rwxr-xr-x 1.1M] libtsan.so.2.0.0 + │ ├── [-rw-r--r-- 3.9K] libtsan_preinit.o + │ ├── [-rw-r--r-- 997K] libubsan.a + │ ├── [-rwxr-xr-x 1007] libubsan.la + │ ├── 
[lrwxrwxrwx 17] libubsan.so -> libubsan.so.1.0.0 + │ ├── [lrwxrwxrwx 17] libubsan.so.1 -> libubsan.so.1.0.0 + │ └── [-rwxr-xr-x 426K] libubsan.so.1.0.0 + └── [drwxr-xr-x 3] libexec + └── [drwxr-xr-x 3] gcc + └── [drwxr-xr-x 3] x86_64-linux-gnu + └── [drwxr-xr-x 12] 13.2.0 + ├── [-rwxr-xr-x 32M] cc1 + ├── [-rwxr-xr-x 34M] cc1plus + ├── [-rwxr-xr-x 969K] collect2 + ├── [-rwxr-xr-x 215K] g++-mapper-server + ├── [drwxr-xr-x 6] install-tools + │ ├── [-rwxr-xr-x 14K] fixinc.sh + │ ├── [-rwxr-xr-x 169K] fixincl + │ ├── [-rwxr-xr-x 3.6K] mkheaders + │ └── [-rwxr-xr-x 3.5K] mkinstalldirs + ├── [-rwxr-xr-x 989] liblto_plugin.la + ├── [-rwxr-xr-x 82K] liblto_plugin.so + ├── [-rwxr-xr-x 1.5M] lto-wrapper + ├── [-rwxr-xr-x 31M] lto1 + └── [drwxr-xr-x 3] plugin + └── [-rwxr-xr-x 196K] gengtype + +78 directories, 1556 files +``` \ No newline at end of file diff --git a/images/packages/gcc/werf.inc.yaml b/images/packages/gcc/werf.inc.yaml index aca13e9d13..d64bb55201 100644 --- a/images/packages/gcc/werf.inc.yaml +++ b/images/packages/gcc/werf.inc.yaml @@ -9,6 +9,7 @@ import: before: setup includePaths: - usr/lib64/libgcc_s.so.1 + - usr/lib64/libstdc++.so.6 --- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ $.ImageName }} final: false @@ -31,11 +32,8 @@ secrets: value: {{ $.SOURCE_REPO_GIT }} shell: install: - - | - mkdir -p ~/.ssh && echo "StrictHostKeyChecking accept-new" > ~/.ssh/config + - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src - --- {{- $name := print $.ImageName "-dependencies" -}} {{- define "$name" -}} @@ -46,7 +44,7 @@ altPackages: - autogen dejagnu glibc-devel-static - tree packages: -- zlib +- zlib - zstd {{- end -}} @@ -78,7 +76,7 @@ shell: cp -a /$pkg/. / rm -rf /$pkg done - + OUTDIR=/out cd /src @@ -105,4 +103,4 @@ shell: make DESTDIR=$OUTDIR install-strip rm -rf $OUTDIR/usr/share - tree -sp $OUTDIR + tree -hp $OUTDIR diff --git a/images/packages/glib2/werf.inc.yaml b/images/packages/glib2/werf.inc.yaml index 7d33c9b5cd..6aa2190e7b 100644 --- a/images/packages/glib2/werf.inc.yaml +++ b/images/packages/glib2/werf.inc.yaml @@ -4,8 +4,10 @@ altPackages: - gcc gcc-c++ - git pkg-config meson ninja-build cmake - libunwind-devel libelf-devel sysprof-devel libgvdb-devel +- tree packages: -- libffi zlib pcre2 +- libffi zlib pcre2 util-linux +- selinux {{- end -}} {{- $builderDependencies := include "$name" . 
| fromYaml }} @@ -90,6 +92,11 @@ shell: -Dlibdir=/usr/lib64 \ -Dgtk_doc=false \ -Dbuildtype=release \ - -Dstrip=true + -Dstrip=true \ + --default-library=both \ + -Dselinux=enabled \ + -Dlibmount=enabled meson compile -C _build DESTDIR=${OUTDIR} meson install -C _build + + tree -hp $OUTDIR diff --git a/images/packages/gnutls/werf.inc.yaml b/images/packages/gnutls/werf.inc.yaml index 735f14270a..b363996d8e 100644 --- a/images/packages/gnutls/werf.inc.yaml +++ b/images/packages/gnutls/werf.inc.yaml @@ -45,12 +45,11 @@ altPackages: - make autoconf automake libtool makeinfo gettext-devel patch - perl-Net-SSLeay perl-IPC-Cmd perl-Pod-Html - gem-gettext-devel gettext po4a -- libunistring-devel - libtpm2-tss-devel libtrousers-devel - libunbound-devel bison gtk-doc texinfo texlive - libev4 libev-devel libopencdk-devel - liboqs-devel -- libssl-devel iproute2-devel +- iproute2-devel - wget packages: - libbrotli libidn2 libgcrypt libgmp @@ -58,6 +57,7 @@ packages: - openssl - readline - libtasn1 +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} diff --git a/images/packages/libcurl/werf.inc.yaml b/images/packages/libcurl/werf.inc.yaml index 85a0f84958..7fce5b1400 100644 --- a/images/packages/libcurl/werf.inc.yaml +++ b/images/packages/libcurl/werf.inc.yaml @@ -47,6 +47,7 @@ packages: - libpsl - cyrus-sasl2 - ngtcp2 libtasn1 +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} diff --git a/images/packages/libidn/werf.inc.yaml b/images/packages/libidn/werf.inc.yaml index cae01146f5..94d27ea6a0 100644 --- a/images/packages/libidn/werf.inc.yaml +++ b/images/packages/libidn/werf.inc.yaml @@ -4,11 +4,13 @@ {{- $name := print $.ImageName "-dependencies" -}} {{- define "$name" -}} -packages: +altPackages: - gcc git make makeinfo automake libtool patch gperf help2man gengetopt - python3 python3-module-docutils gtk-doc -- gettext texinfo indent glibc-gconv-modules libabigail-devel libunistring-devel +- gettext texinfo indent glibc-gconv-modules libabigail-devel - cvs +packages: +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -32,7 +34,6 @@ secrets: shell: install: - | - mkdir -p ~/.ssh && echo "StrictHostKeyChecking accept-new" > ~/.ssh/config git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src cd /src @@ -51,17 +52,25 @@ import: add: /src to: /src before: install +{{- include "importPackageImages" (list . $builderDependencies.packages "install") -}} shell: beforeInstall: {{- include "alt packages proxy" . | nindent 2 }} - | apt-get install -y \ - {{ $builderDependencies.packages | join " " }} + {{ $builderDependencies.altPackages | join " " }} {{- include "alt packages clean" . | nindent 2 }} install: - | + # Install packages + PKGS="{{ $builderDependencies.packages | join " " }}" + for pkg in $PKGS; do + cp -a /$pkg/. / + rm -rf /$pkg + done + OUTDIR=/out cd /src diff --git a/images/packages/libjson-glib/werf.inc.yaml b/images/packages/libjson-glib/werf.inc.yaml index 007c9abfb4..4f0b5d5691 100644 --- a/images/packages/libjson-glib/werf.inc.yaml +++ b/images/packages/libjson-glib/werf.inc.yaml @@ -28,8 +28,9 @@ shell: {{- define "$name" -}} altPackages: - gcc git make libtool gettext-tools meson ninja-build -- glib2-devel libgio-devel - tree +packages: +- glib2 util-linux {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -46,6 +47,7 @@ import: add: /src to: /src before: install +{{- include "importPackageImages" (list . 
$builderDependencies.packages "install") -}} shell: beforeInstall: {{- include "alt packages proxy" . | nindent 2 }} @@ -57,6 +59,13 @@ shell: install: - | + # Install packages + PKGS="{{ $builderDependencies.packages | join " " }}" + for pkg in $PKGS; do + cp -a /$pkg/. / + rm -rf /$pkg + done + OUTDIR=/out cd /src mkdir _build diff --git a/images/packages/libmnl/werf.inc.yaml b/images/packages/libmnl/werf.inc.yaml index 1c135879e5..e80625eca3 100644 --- a/images/packages/libmnl/werf.inc.yaml +++ b/images/packages/libmnl/werf.inc.yaml @@ -29,9 +29,11 @@ shell: altPackages: - gcc make git autoconf libtool gettext-tools - automake pkgconf glibc-devel systemd-devel -- texinfo indent glibc-gconv-modules -- libabigail-devel libunistring-devel cvs +- texinfo indent glibc-gconv-modules +- libabigail-devel cvs - tree +packages: +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -44,6 +46,7 @@ import: add: /src to: /src before: install +{{- include "importPackageImages" (list . $builderDependencies.packages "install") -}} shell: beforeInstall: {{- include "alt packages proxy" . | nindent 2 }} @@ -55,6 +58,13 @@ shell: install: - | + # Install packages + PKGS="{{ $builderDependencies.packages | join " " }}" + for pkg in $PKGS; do + cp -a /$pkg/. / + rm -rf /$pkg + done + OUTDIR=/out cd /src diff --git a/images/packages/libnbd/werf.inc.yaml b/images/packages/libnbd/werf.inc.yaml index 97a43c0f42..4fd6f5eeeb 100644 --- a/images/packages/libnbd/werf.inc.yaml +++ b/images/packages/libnbd/werf.inc.yaml @@ -30,12 +30,12 @@ altPackages: - gcc gcc-c++ make git autoconf libtool gettext-tools - automake pkgconf glibc-devel - bash-completion hardlink -- liburing-devel perl-podlators +- perl-podlators - ocaml ocaml-findlib ocaml-ocamldoc packages: - gnutls libidn2 libgmp ubdsrv xz libxml2 - libtasn1 -- libfuse3 +- liburing libfuse3 libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -80,7 +80,7 @@ shell: --libdir=/usr/lib64 \ --disable-ocaml \ --disable-golang \ - --disable-rust + --disable-rust make -j$(nproc) diff --git a/images/packages/libpsl/werf.inc.yaml b/images/packages/libpsl/werf.inc.yaml index 414ae2a2bf..abd827ec98 100644 --- a/images/packages/libpsl/werf.inc.yaml +++ b/images/packages/libpsl/werf.inc.yaml @@ -28,14 +28,13 @@ altPackages: - gcc git make libtool gettext-tools meson ninja-build - rpm-build-python3 - libicu-devel -- glib2-devel libgio-devel -- libunistring-devel - gtk-doc xsltproc - publicsuffix-list - publicsuffix-list-dafsa - tree packages: -- libidn2 +- glib2 +- libidn2 libunistring {{- end -}} {{ $builderDependencies := include "$name" . 
| fromYaml }} diff --git a/images/packages/libunistring/README.md b/images/packages/libunistring/README.md new file mode 100644 index 0000000000..9013b004a9 --- /dev/null +++ b/images/packages/libunistring/README.md @@ -0,0 +1,34 @@ +# libunistring +``` +└── [drwxr-xr-x 4] usr + ├── [drwxr-xr-x 16] include + │ ├── [-rw-r--r-- 20K] unicase.h + │ ├── [-rw-r--r-- 7.4K] uniconv.h + │ ├── [-rw-r--r-- 48K] unictype.h + │ ├── [-rw-r--r-- 11K] unigbrk.h + │ ├── [-rw-r--r-- 6.8K] unilbrk.h + │ ├── [-rw-r--r-- 1.2K] unimetadata.h + │ ├── [-rw-r--r-- 1.9K] uniname.h + │ ├── [-rw-r--r-- 11K] uninorm.h + │ ├── [-rw-r--r-- 10.0K] unistdio.h + │ ├── [-rw-r--r-- 45K] unistr.h + │ ├── [drwxr-xr-x 9] unistring + │ │ ├── [-rw-r--r-- 4.5K] cdefs.h + │ │ ├── [-rw-r--r-- 1.4K] iconveh.h + │ │ ├── [-rw-r--r-- 2.6K] inline.h + │ │ ├── [-rw-r--r-- 6.0K] localcharset.h + │ │ ├── [-rw-r--r-- 4.6K] stdint.h + │ │ ├── [-rw-r--r-- 1.6K] version.h + │ │ └── [-rw-r--r-- 1.4K] woe32dll.h + │ ├── [-rw-r--r-- 2.3K] unitypes.h + │ ├── [-rw-r--r-- 3.2K] uniwbrk.h + │ └── [-rw-r--r-- 2.1K] uniwidth.h + └── [drwxr-xr-x 7] lib64 + ├── [-rw-r--r-- 6.0M] libunistring.a + ├── [-rwxr-xr-x 947] libunistring.la + ├── [lrwxrwxrwx 21] libunistring.so -> libunistring.so.5.2.0 + ├── [lrwxrwxrwx 21] libunistring.so.5 -> libunistring.so.5.2.0 + └── [-rwxr-xr-x 1.9M] libunistring.so.5.2.0 + +5 directories, 25 files +``` \ No newline at end of file diff --git a/images/packages/libunistring/werf.inc.yaml b/images/packages/libunistring/werf.inc.yaml new file mode 100644 index 0000000000..660ad25325 --- /dev/null +++ b/images/packages/libunistring/werf.inc.yaml @@ -0,0 +1,84 @@ +--- +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }} +final: false +fromImage: builder/scratch +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder + add: /out + to: /{{ $.ImageName }} + before: setup + +--- +{{- $version := get .PackageVersion .ImageName }} +{{- $gitRepoUrl := "gnu/libunistring.git" }} +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact +final: false +fromImage: builder/src +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +shell: + install: + - | + git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src + cd /src + git clone $(cat /run/secrets/SOURCE_REPO)/gnulib.git ./gnulib + +--- + +{{- $name := print $.ImageName "-dependencies" -}} +{{- define "$name" -}} +altPackages: +- gcc gcc-c++ git make makeinfo libtool gettext-tools +- patch gperf +- tree +{{- end -}} + +{{ $builderDependencies := include "$name" . | fromYaml }} + + +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder +final: false +fromImage: builder/alt +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact + add: /src + to: /src + before: install +shell: + beforeInstall: + {{- include "alt packages proxy" . | nindent 2 }} + - | + apt-get install -y \ + {{ $builderDependencies.altPackages | join " " }} + + {{- include "alt packages clean" . 
| nindent 2 }} + + install: + - | + OUTDIR=/out + cd /src + export GNULIB_SRCDIR=$(pwd)/gnulib + export GNULIB_TOOL=$(pwd)/gnulib/gnulib-tool + + ./autogen.sh + touch tests/Makefile.in + ./configure \ + --prefix=/usr \ + --libdir=/usr/lib64 + + # remove non-existing tests from build to prevent failure + sed -i 's/SUBDIRS = doc gnulib-local lib tests/SUBDIRS = doc gnulib-local lib/' Makefile + make -j$(nproc) + make DESTDIR=$OUTDIR install + rm -rf $OUTDIR/usr/share + find $OUTDIR -type f -executable | while read -r execfile; do + if strip "$execfile"; then + echo "Stripped: $execfile" + fi + done + + tree -hp $OUTDIR diff --git a/images/packages/liburing/README.md b/images/packages/liburing/README.md new file mode 100644 index 0000000000..cf5a7815b1 --- /dev/null +++ b/images/packages/liburing/README.md @@ -0,0 +1,25 @@ +# liburing +``` +└── [drwxr-xr-x 4] usr + ├── [drwxr-xr-x 4] include + │ ├── [drwxr-xr-x 6] liburing + │ │ ├── [-rw-r--r-- 2.4K] barrier.h + │ │ ├── [-rw-r--r-- 276] compat.h + │ │ ├── [-rw-r--r-- 19K] io_uring.h + │ │ └── [-rw-r--r-- 164] io_uring_version.h + │ └── [-rw-r--r-- 44K] liburing.h + └── [drwxr-xr-x 11] lib64 + ├── [-rw-r--r-- 48K] liburing-ffi.a + ├── [lrwxrwxrwx 19] liburing-ffi.so -> liburing-ffi.so.2.6 + ├── [lrwxrwxrwx 19] liburing-ffi.so.2 -> liburing-ffi.so.2.6 + ├── [-rwxr-xr-x 42K] liburing-ffi.so.2.6 + ├── [-rw-r--r-- 28K] liburing.a + ├── [lrwxrwxrwx 15] liburing.so -> liburing.so.2.6 + ├── [lrwxrwxrwx 15] liburing.so.2 -> liburing.so.2.6 + ├── [-rwxr-xr-x 25K] liburing.so.2.6 + └── [drwxr-xr-x 4] pkgconfig + ├── [-rw-r--r-- 236] liburing-ffi.pc + └── [-rw-r--r-- 228] liburing.pc + +6 directories, 15 files +``` \ No newline at end of file diff --git a/images/packages/liburing/werf.inc.yaml b/images/packages/liburing/werf.inc.yaml new file mode 100644 index 0000000000..1d0f71af4e --- /dev/null +++ b/images/packages/liburing/werf.inc.yaml @@ -0,0 +1,73 @@ +--- +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }} +final: false +fromImage: builder/scratch +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder + add: /out + to: /{{ $.ImageName }} + before: setup + +--- +{{- $version := get .PackageVersion .ImageName }} +{{- $gitRepoUrl := "axboe/liburing.git" }} +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact +final: false +fromImage: builder/src +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +shell: + install: + - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src +--- + +{{- $name := print $.ImageName "-dependencies" -}} +{{- define "$name" -}} +altPackages: +- gcc gcc-c++ git make libtool gettext-tools +- tree +{{- end -}} + +{{ $builderDependencies := include "$name" . | fromYaml }} + + +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder +final: false +fromImage: builder/alt +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact + add: /src + to: /src + before: install +shell: + beforeInstall: + {{- include "alt packages proxy" . | nindent 2 }} + - | + apt-get install -y \ + {{ $builderDependencies.altPackages | join " " }} + + {{- include "alt packages clean" . 
| nindent 2 }} + + install: + - | + OUTDIR=/out + cd /src + ./configure \ + --prefix=/usr \ + --libdir=/usr/lib64 \ + --libdevdir=/usr/lib64 + + make -j$(nproc) + make DESTDIR=$OUTDIR install + rm -rf $OUTDIR/usr/man + find $OUTDIR -type f -executable | while read -r execfile; do + if strip "$execfile"; then + echo "Stripped: $execfile" + fi + done + + tree -shp $OUTDIR diff --git a/images/packages/libuserspace-rcu/README.md b/images/packages/libuserspace-rcu/README.md new file mode 100644 index 0000000000..1c47a5c5fc --- /dev/null +++ b/images/packages/libuserspace-rcu/README.md @@ -0,0 +1,151 @@ +# libuserspace-rcu +``` + └── [drwxr-xr-x 4] usr + ├── [drwxr-xr-x 10] include + │ ├── [drwxr-xr-x 43] urcu + │ │ ├── [drwxr-xr-x 18] arch + │ │ │ ├── [-rw-r--r-- 1.7K] aarch64.h + │ │ │ ├── [-rw-r--r-- 1.5K] alpha.h + │ │ │ ├── [-rw-r--r-- 2.6K] arm.h + │ │ │ ├── [-rw-r--r-- 1.2K] gcc.h + │ │ │ ├── [-rw-r--r-- 4.6K] generic.h + │ │ │ ├── [-rw-r--r-- 1.5K] hppa.h + │ │ │ ├── [-rw-r--r-- 1.4K] ia64.h + │ │ │ ├── [-rw-r--r-- 1.3K] m68k.h + │ │ │ ├── [-rw-r--r-- 1.3K] mips.h + │ │ │ ├── [-rw-r--r-- 1.1K] nios2.h + │ │ │ ├── [-rw-r--r-- 3.2K] ppc.h + │ │ │ ├── [-rw-r--r-- 1.4K] riscv.h + │ │ │ ├── [-rw-r--r-- 2.1K] s390.h + │ │ │ ├── [-rw-r--r-- 1.8K] sparc64.h + │ │ │ ├── [-rw-r--r-- 1.5K] tile.h + │ │ │ └── [-rw-r--r-- 4.0K] x86.h + │ │ ├── [-rw-r--r-- 4.7K] arch.h + │ │ ├── [-rw-r--r-- 1.6K] assert.h + │ │ ├── [-rw-r--r-- 2.8K] call-rcu.h + │ │ ├── [-rw-r--r-- 1.2K] cds.h + │ │ ├── [-rw-r--r-- 3.9K] compiler.h + │ │ ├── [-rw-r--r-- 973] config.h + │ │ ├── [-rw-r--r-- 979] debug.h + │ │ ├── [-rw-r--r-- 1.9K] defer.h + │ │ ├── [-rw-r--r-- 2.9K] flavor.h + │ │ ├── [-rw-r--r-- 5.5K] futex.h + │ │ ├── [-rw-r--r-- 3.3K] hlist.h + │ │ ├── [-rw-r--r-- 9.4K] lfstack.h + │ │ ├── [-rw-r--r-- 6.0K] list.h + │ │ ├── [drwxr-xr-x 9] map + │ │ │ ├── [-rw-r--r-- 2.3K] clear.h + │ │ │ ├── [-rw-r--r-- 6.4K] urcu-bp.h + │ │ │ ├── [-rw-r--r-- 6.1K] urcu-mb.h + │ │ │ ├── [-rw-r--r-- 6.4K] urcu-memb.h + │ │ │ ├── [-rw-r--r-- 6.4K] urcu-qsbr.h + │ │ │ ├── [-rw-r--r-- 6.5K] urcu-signal.h + │ │ │ └── [-rw-r--r-- 1.4K] urcu.h + │ │ ├── [-rw-r--r-- 4.0K] pointer.h + │ │ ├── [-rw-r--r-- 2.7K] rcuhlist.h + │ │ ├── [-rw-r--r-- 21K] rculfhash.h + │ │ ├── [-rw-r--r-- 2.5K] rculfqueue.h + │ │ ├── [-rw-r--r-- 2.6K] rculfstack.h + │ │ ├── [-rw-r--r-- 2.8K] rculist.h + │ │ ├── [-rw-r--r-- 2.2K] ref.h + │ │ ├── [drwxr-xr-x 17] static + │ │ │ ├── [-rw-r--r-- 9.4K] lfstack.h + │ │ │ ├── [-rw-r--r-- 6.7K] pointer.h + │ │ │ ├── [-rw-r--r-- 6.1K] rculfqueue.h + │ │ │ ├── [-rw-r--r-- 3.9K] rculfstack.h + │ │ │ ├── [-rw-r--r-- 6.6K] urcu-bp.h + │ │ │ ├── [-rw-r--r-- 3.6K] urcu-common.h + │ │ │ ├── [-rw-r--r-- 5.0K] urcu-mb.h + │ │ │ ├── [-rw-r--r-- 5.5K] urcu-memb.h + │ │ │ ├── [-rw-r--r-- 7.1K] urcu-qsbr.h + │ │ │ ├── [-rw-r--r-- 1.4K] urcu-signal-nr.h + │ │ │ ├── [-rw-r--r-- 5.1K] urcu-signal.h + │ │ │ ├── [-rw-r--r-- 1.5K] urcu.h + │ │ │ ├── [-rw-r--r-- 20K] wfcqueue.h + │ │ │ ├── [-rw-r--r-- 4.4K] wfqueue.h + │ │ │ └── [-rw-r--r-- 12K] wfstack.h + │ │ ├── [-rw-r--r-- 1.6K] syscall-compat.h + │ │ ├── [-rw-r--r-- 1.6K] system.h + │ │ ├── [-rw-r--r-- 4.9K] tls-compat.h + │ │ ├── [drwxr-xr-x 18] uatomic + │ │ │ ├── [-rw-r--r-- 1.3K] aarch64.h + │ │ │ ├── [-rw-r--r-- 1.4K] alpha.h + │ │ │ ├── [-rw-r--r-- 1.7K] arm.h + │ │ │ ├── [-rw-r--r-- 1.5K] gcc.h + │ │ │ ├── [-rw-r--r-- 13K] generic.h + │ │ │ ├── [-rw-r--r-- 229] hppa.h + │ │ │ ├── [-rw-r--r-- 1.3K] ia64.h + │ │ │ ├── [-rw-r--r-- 1.5K] m68k.h + │ │ │ ├── [-rw-r--r-- 1.4K] mips.h + │ │ │ 
├── [-rw-r--r-- 1.4K] nios2.h + │ │ │ ├── [-rw-r--r-- 5.8K] ppc.h + │ │ │ ├── [-rw-r--r-- 1.5K] riscv.h + │ │ │ ├── [-rw-r--r-- 4.7K] s390.h + │ │ │ ├── [-rw-r--r-- 2.2K] sparc64.h + │ │ │ ├── [-rw-r--r-- 1.3K] tile.h + │ │ │ └── [-rw-r--r-- 14K] x86.h + │ │ ├── [-rw-r--r-- 1.8K] uatomic.h + │ │ ├── [-rw-r--r-- 111] uatomic_arch.h + │ │ ├── [-rw-r--r-- 5.6K] urcu-bp.h + │ │ ├── [-rw-r--r-- 105] urcu-futex.h + │ │ ├── [-rw-r--r-- 3.3K] urcu-mb.h + │ │ ├── [-rw-r--r-- 3.4K] urcu-memb.h + │ │ ├── [-rw-r--r-- 1009] urcu-poll.h + │ │ ├── [-rw-r--r-- 4.2K] urcu-qsbr.h + │ │ ├── [-rw-r--r-- 3.4K] urcu-signal.h + │ │ ├── [-rw-r--r-- 1.4K] urcu.h + │ │ ├── [-rw-r--r-- 99] urcu_ref.h + │ │ ├── [-rw-r--r-- 19K] wfcqueue.h + │ │ ├── [-rw-r--r-- 3.1K] wfqueue.h + │ │ └── [-rw-r--r-- 13K] wfstack.h + │ ├── [-rw-r--r-- 47] urcu-bp.h + │ ├── [-rw-r--r-- 27] urcu-call-rcu.h + │ ├── [-rw-r--r-- 24] urcu-defer.h + │ ├── [-rw-r--r-- 25] urcu-flavor.h + │ ├── [-rw-r--r-- 26] urcu-pointer.h + │ ├── [-rw-r--r-- 49] urcu-qsbr.h + │ └── [-rw-r--r-- 44] urcu.h + └── [drwxr-xr-x 35] lib64 + ├── [-rwxr-xr-x 959] liburcu-bp.la + ├── [lrwxrwxrwx 19] liburcu-bp.so -> liburcu-bp.so.8.1.0 + ├── [lrwxrwxrwx 19] liburcu-bp.so.8 -> liburcu-bp.so.8.1.0 + ├── [-rwxr-xr-x 39K] liburcu-bp.so.8.1.0 + ├── [-rwxr-xr-x 965] liburcu-cds.la + ├── [lrwxrwxrwx 20] liburcu-cds.so -> liburcu-cds.so.8.1.0 + ├── [lrwxrwxrwx 20] liburcu-cds.so.8 -> liburcu-cds.so.8.1.0 + ├── [-rwxr-xr-x 43K] liburcu-cds.so.8.1.0 + ├── [-rwxr-xr-x 954] liburcu-common.la + ├── [lrwxrwxrwx 23] liburcu-common.so -> liburcu-common.so.8.1.0 + ├── [lrwxrwxrwx 23] liburcu-common.so.8 -> liburcu-common.so.8.1.0 + ├── [-rwxr-xr-x 22K] liburcu-common.so.8.1.0 + ├── [-rwxr-xr-x 959] liburcu-mb.la + ├── [lrwxrwxrwx 19] liburcu-mb.so -> liburcu-mb.so.8.1.0 + ├── [lrwxrwxrwx 19] liburcu-mb.so.8 -> liburcu-mb.so.8.1.0 + ├── [-rwxr-xr-x 35K] liburcu-mb.so.8.1.0 + ├── [-rwxr-xr-x 971] liburcu-memb.la + ├── [lrwxrwxrwx 21] liburcu-memb.so -> liburcu-memb.so.8.1.0 + ├── [lrwxrwxrwx 21] liburcu-memb.so.8 -> liburcu-memb.so.8.1.0 + ├── [-rwxr-xr-x 35K] liburcu-memb.so.8.1.0 + ├── [-rwxr-xr-x 971] liburcu-qsbr.la + ├── [lrwxrwxrwx 21] liburcu-qsbr.so -> liburcu-qsbr.so.8.1.0 + ├── [lrwxrwxrwx 21] liburcu-qsbr.so.8 -> liburcu-qsbr.so.8.1.0 + ├── [-rwxr-xr-x 35K] liburcu-qsbr.so.8.1.0 + ├── [-rwxr-xr-x 983] liburcu-signal.la + ├── [lrwxrwxrwx 23] liburcu-signal.so -> liburcu-signal.so.8.1.0 + ├── [lrwxrwxrwx 23] liburcu-signal.so.8 -> liburcu-signal.so.8.1.0 + ├── [-rwxr-xr-x 39K] liburcu-signal.so.8.1.0 + ├── [-rwxr-xr-x 941] liburcu.la + ├── [lrwxrwxrwx 16] liburcu.so -> liburcu.so.8.1.0 + ├── [lrwxrwxrwx 16] liburcu.so.8 -> liburcu.so.8.1.0 + ├── [-rwxr-xr-x 35K] liburcu.so.8.1.0 + └── [drwxr-xr-x 9] pkgconfig + ├── [-rw-r--r-- 284] liburcu-bp.pc + ├── [-rw-r--r-- 336] liburcu-cds.pc + ├── [-rw-r--r-- 292] liburcu-mb.pc + ├── [-rw-r--r-- 299] liburcu-memb.pc + ├── [-rw-r--r-- 283] liburcu-qsbr.pc + ├── [-rw-r--r-- 278] liburcu-signal.pc + └── [-rw-r--r-- 267] liburcu.pc + +10 directories, 137 files +``` \ No newline at end of file diff --git a/images/packages/libuserspace-rcu/werf.inc.yaml b/images/packages/libuserspace-rcu/werf.inc.yaml new file mode 100644 index 0000000000..c326d02478 --- /dev/null +++ b/images/packages/libuserspace-rcu/werf.inc.yaml @@ -0,0 +1,74 @@ +--- +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }} +final: false +fromImage: builder/scratch +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder + add: /out + 
to: /{{ $.ImageName }} + before: setup + +--- +{{- $version := get .PackageVersion .ImageName }} +{{- $gitRepoUrl := "liburcu/userspace-rcu.git" }} +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact +final: false +fromImage: builder/src +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +shell: + install: + - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src +--- + +{{- $name := print $.ImageName "-dependencies" -}} +{{- define "$name" -}} +altPackages: +- gcc gcc-c++ git make libtool gettext-tools +- tree +{{- end -}} + +{{ $builderDependencies := include "$name" . | fromYaml }} + + +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder +final: false +fromImage: builder/alt +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact + add: /src + to: /src + before: install +shell: + beforeInstall: + {{- include "alt packages proxy" . | nindent 2 }} + - | + apt-get install -y \ + {{ $builderDependencies.altPackages | join " " }} + + {{- include "alt packages clean" . | nindent 2 }} + + install: + - | + OUTDIR=/out + cd /src + ./bootstrap + ./configure \ + --prefix=/usr \ + --libdir=/usr/lib64 \ + --disable-static + + make -j$(nproc) + make DESTDIR=$OUTDIR install + rm -rf $OUTDIR/usr/share + find $OUTDIR -type f -executable | while read -r execfile; do + if strip "$execfile"; then + echo "Stripped: $execfile" + fi + done + + tree -shp $OUTDIR diff --git a/images/packages/libxkbcommon/README.md b/images/packages/libxkbcommon/README.md new file mode 100644 index 0000000000..0a3874f099 --- /dev/null +++ b/images/packages/libxkbcommon/README.md @@ -0,0 +1,42 @@ +# libxkbcommon +``` +└── [drwxr-xr-x 6] usr + ├── [drwxr-xr-x 3] bin + │ └── [-rwxr-xr-x 23K] xkbcli + ├── [drwxr-xr-x 3] include + │ └── [drwxr-xr-x 9] xkbcommon + │ ├── [-rw-r--r-- 3.1K] xkbcommon-compat.h + │ ├── [-rw-r--r-- 19K] xkbcommon-compose.h + │ ├── [-rw-r--r-- 244K] xkbcommon-keysyms.h + │ ├── [-rw-r--r-- 3.8K] xkbcommon-names.h + │ ├── [-rw-r--r-- 8.0K] xkbcommon-x11.h + │ ├── [-rw-r--r-- 72K] xkbcommon.h + │ └── [-rw-r--r-- 24K] xkbregistry.h + ├── [drwxr-xr-x 12] lib64 + │ ├── [lrwxrwxrwx 21] libxkbcommon-x11.so -> libxkbcommon-x11.so.0 + │ ├── [lrwxrwxrwx 26] libxkbcommon-x11.so.0 -> libxkbcommon-x11.so.0.10.0 + │ ├── [-rwxr-xr-x 51K] libxkbcommon-x11.so.0.10.0 + │ ├── [lrwxrwxrwx 17] libxkbcommon.so -> libxkbcommon.so.0 + │ ├── [lrwxrwxrwx 22] libxkbcommon.so.0 -> libxkbcommon.so.0.10.0 + │ ├── [-rwxr-xr-x 416K] libxkbcommon.so.0.10.0 + │ ├── [lrwxrwxrwx 19] libxkbregistry.so -> libxkbregistry.so.0 + │ ├── [lrwxrwxrwx 24] libxkbregistry.so.0 -> libxkbregistry.so.0.10.0 + │ ├── [-rwxr-xr-x 43K] libxkbregistry.so.0.10.0 + │ └── [drwxr-xr-x 5] pkgconfig + │ ├── [-rw-r--r-- 291] xkbcommon-x11.pc + │ ├── [-rw-r--r-- 202] xkbcommon.pc + │ └── [-rw-r--r-- 269] xkbregistry.pc + └── [drwxr-xr-x 3] libexec + └── [drwxr-xr-x 11] xkbcommon + ├── [-rwxr-xr-x 27K] xkbcli-compile-compose + ├── [-rwxr-xr-x 31K] xkbcli-compile-keymap + ├── [-rwxr-xr-x 47K] xkbcli-dump-keymap-wayland + ├── [-rwxr-xr-x 23K] xkbcli-dump-keymap-x11 + ├── [-rwxr-xr-x 31K] xkbcli-how-to-type + ├── [-rwxr-xr-x 31K] xkbcli-interactive-evdev + ├── [-rwxr-xr-x 47K] xkbcli-interactive-wayland + ├── [-rwxr-xr-x 27K] xkbcli-interactive-x11 + └── [-rwxr-xr-x 19K] xkbcli-list + +9 directories, 29 files +``` \ No newline at end of file diff --git 
a/images/packages/libxkbcommon/werf.inc.yaml b/images/packages/libxkbcommon/werf.inc.yaml new file mode 100644 index 0000000000..d2588dea23 --- /dev/null +++ b/images/packages/libxkbcommon/werf.inc.yaml @@ -0,0 +1,95 @@ +--- +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }} +final: false +fromImage: builder/scratch +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder + add: /out + to: /{{ $.ImageName }} + before: setup + +--- +{{- $version := get .PackageVersion .ImageName }} +{{- $gitRepoUrl := "xkbcommon/libxkbcommon.git" }} +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact +final: false +fromImage: builder/src +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +shell: + install: + - | + git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src + +--- + +{{- $name := print $.ImageName "-dependencies" -}} +{{- define "$name" -}} +altPackages: +- gcc git make libtool gettext-tools meson ninja-build bison +- xkeyboard-config-devel +- wayland-devel libwayland-client-devel wayland-protocols +- libxcb-devel +- bash-completion +- tree +packages: +- libxml2 +{{- end -}} + +{{ $builderDependencies := include "$name" . | fromYaml }} + + +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder +final: false +fromImage: builder/alt +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact + add: /src + to: /src + before: install +{{- include "importPackageImages" (list . $builderDependencies.packages "install") -}} +shell: + beforeInstall: + {{- include "alt packages proxy" . | nindent 2 }} + - | + apt-get install -y \ + {{ $builderDependencies.altPackages | join " " }} + + {{- include "alt packages clean" . | nindent 2 }} + + install: + - | + # Install packages + PKGS="{{ $builderDependencies.packages | join " " }}" + for pkg in $PKGS; do + cp -a /$pkg/. / + rm -rf /$pkg + done + + OUTDIR=/out + cd /src + mkdir _build + /usr/bin/meson setup _build \ + --prefix=/usr \ + --libdir=/usr/lib64 \ + --wrap-mode=nofallback \ + --wrap-mode=nodownload \ + -Denable-docs=false \ + -Denable-x11=true \ + -Denable-xkbregistry=true \ + -Ddefault_library=shared + + ninja-build -j$(nproc) -C _build + DESTDIR=$OUTDIR ninja-build install -C _build + rm -rf $OUTDIR/usr/share + find $OUTDIR -type f -executable | while read -r execfile; do + if strip "$execfile"; then + echo "Stripped: $execfile" + fi + done + tree -hp $OUTDIR + diff --git a/images/packages/multipath-tools/werf.inc.yaml b/images/packages/multipath-tools/werf.inc.yaml index aed06cb878..556ef400d1 100644 --- a/images/packages/multipath-tools/werf.inc.yaml +++ b/images/packages/multipath-tools/werf.inc.yaml @@ -32,11 +32,12 @@ altPackages: - libmultipath kpartx - dmsetup udev-rules-sgutils sg3_utils - libdevmapper-devel libudev-devel libsystemd-devel -- libuserspace-rcu-devel libjson-c-devel +- libjson-c-devel packages: - util-linux - readline - libaio +- liburing libuserspace-rcu {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -67,7 +68,7 @@ shell: cp -a /$pkg/. 
/ rm -rf /$pkg done - - | + OUTDIR=/out cd /src diff --git a/images/packages/nbdkit/werf.inc.yaml b/images/packages/nbdkit/werf.inc.yaml index 2cd5c4dad9..99ce57a8ca 100644 --- a/images/packages/nbdkit/werf.inc.yaml +++ b/images/packages/nbdkit/werf.inc.yaml @@ -43,6 +43,8 @@ packages: - zstd - libxml2 - libtasn1 +- libgcc1 +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} diff --git a/images/packages/ngtcp2/werf.inc.yaml b/images/packages/ngtcp2/werf.inc.yaml index 2d0ddbc9ef..ae942e436c 100644 --- a/images/packages/ngtcp2/werf.inc.yaml +++ b/images/packages/ngtcp2/werf.inc.yaml @@ -54,6 +54,7 @@ altPackages: packages: - openssl nghttp3 gnutls libidn2 libgmp - libtasn1 +- libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} diff --git a/images/packages/p11-kit/werf.inc.yaml b/images/packages/p11-kit/werf.inc.yaml index 5a6da62753..a892d442d4 100644 --- a/images/packages/p11-kit/werf.inc.yaml +++ b/images/packages/p11-kit/werf.inc.yaml @@ -20,18 +20,33 @@ secrets: value: {{ $.SOURCE_REPO_GIT }} shell: install: - - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src + - | + git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src + + # Download subprojects/pkcs11-json + cd /src + if [[ "$(cat /run/secrets/SOURCE_REPO)" =~ "github.com" ]] ; then + echo "Checkout submodules" + git submodule update --init --recursive --depth=1 + else + echo "Checkout submodules with URL rewrite" + git \ + -c url."$(cat /run/secrets/SOURCE_REPO)/".insteadOf=https://github.com/ \ + submodule update --init --recursive --depth=1 + fi + --- {{- $name := print $.ImageName "-dependencies" -}} {{- define "$name" -}} altPackages: - git gcc gcc-c++ make meson ninja-build pkg-config -- glib2-devel libffi-devel gettext-devel -- openssl-devel ca-certificates +- gettext-devel +- ca-certificates - tree packages: - libtasn1 +- libffi openssl {{- end -}} {{ $builderDependencies := include "$name" . 
| fromYaml }} diff --git a/images/packages/selinux/README.md b/images/packages/selinux/README.md index 668b9f9490..b89622b07c 100644 --- a/images/packages/selinux/README.md +++ b/images/packages/selinux/README.md @@ -1,355 +1,71 @@ # selinux /selinux ``` -[drwxr-xr-x 4.0K] ./ -├── [drwxr-xr-x 4.0K] etc/ -│   ├── [drwxr-xr-x 4.0K] dbus-1/ -│   │   └── [drwxr-xr-x 4.0K] system.d/ -│   │   └── [-rw-r--r-- 535] org.selinux.conf -│   ├── [drwxr-xr-x 4.0K] pam.d/ -│   │   ├── [-rw-r--r-- 284] newrole -│   │   └── [-rw-r--r-- 283] run_init -│   ├── [drwxr-xr-x 4.0K] rc.d/ -│   │   └── [drwxr-xr-x 4.0K] init.d/ -│   │   ├── [-rwxr-xr-x 1.7K] mcstrans* -│   │   └── [-rwxr-xr-x 1.8K] restorecond* -│   ├── [drwxr-xr-x 4.0K] selinux/ -│   │   ├── [-rw-r--r-- 118] restorecond.conf -│   │   ├── [-rw-r--r-- 93] restorecond_user.conf -│   │   └── [-rw-r--r-- 1.9K] semanage.conf -│   ├── [-rw-r--r-- 216] sestatus.conf -│   ├── [drwxr-xr-x 4.0K] sysconfig/ -│   │   └── [-rw-r--r-- 85] sandbox -│   └── [drwxr-xr-x 4.0K] xdg/ -│   └── [drwxr-xr-x 4.0K] autostart/ -│   └── [-rw-r--r-- 222] restorecond.desktop -├── [drwxr-xr-x 4.0K] usr/ -│   ├── [drwxr-xr-x 4.0K] bin/ -│   │   ├── [-rwxr-xr-x 15K] audit2allow* -│   │   ├── [lrwxrwxrwx 11] audit2why -> audit2allow* -│   │   ├── [-rwxr-xr-x 14K] chcat* -│   │   ├── [-rwxr-xr-x 443K] checkmodule* -│   │   ├── [-rwxr-xr-x 515K] checkpolicy* -│   │   ├── [-rwxr-xr-x 15K] chkcon* -│   │   ├── [-r-xr-xr-x 31K] newrole* -│   │   ├── [-rwxr-xr-x 18K] sandbox* -│   │   ├── [-rwxr-xr-x 15K] secil2conf* -│   │   ├── [-rwxr-xr-x 15K] secil2tree* -│   │   ├── [-rwxr-xr-x 27K] secilc* -│   │   ├── [-rwxr-xr-x 28K] secon* -│   │   ├── [-rwxr-xr-x 33K] selinux-polgengui* -│   │   ├── [-rwxr-xr-x 15K] semodule_expand* -│   │   ├── [-rwxr-xr-x 15K] semodule_link* -│   │   ├── [-rwxr-xr-x 15K] semodule_package* -│   │   ├── [-rwxr-xr-x 15K] semodule_unpackage* -│   │   ├── [-rwxr-xr-x 15K] sepol_check_access* -│   │   ├── [-rwxr-xr-x 15K] sepol_compute_av* -│   │   ├── [-rwxr-xr-x 15K] sepol_compute_member* -│   │   ├── [-rwxr-xr-x 15K] sepol_compute_relabel* -│   │   ├── [-rwxr-xr-x 15K] sepol_validate_transition* -│   │   ├── [lrwxrwxrwx 8] sepolgen -> sepolicy* -│   │   ├── [-rwxr-xr-x 4.2K] sepolgen-ifgen* -│   │   ├── [-rwxr-xr-x 235K] sepolgen-ifgen-attr-helper* -│   │   ├── [-rwxr-xr-x 29K] sepolicy* -│   │   ├── [-rwxr-xr-x 23K] sestatus* -│   │   └── [-rwxr-xr-x 90] system-config-selinux* -│   ├── [drwxr-xr-x 4.0K] include/ -│   │   ├── [drwxr-xr-x 4.0K] selinux/ -│   │   │   ├── [-rw-r--r-- 16K] avc.h -│   │   │   ├── [-rw-r--r-- 1.2K] context.h -│   │   │   ├── [-rw-r--r-- 2.9K] get_context_list.h -│   │   │   ├── [-rw-r--r-- 643] get_default_type.h -│   │   │   ├── [-rw-r--r-- 6.3K] label.h -│   │   │   ├── [-rw-r--r-- 7.3K] restorecon.h -│   │   │   └── [-rw-r--r-- 28K] selinux.h -│   │   ├── [drwxr-xr-x 4.0K] semanage/ -│   │   │   ├── [-rw-r--r-- 1.6K] boolean_record.h -│   │   │   ├── [-rw-r--r-- 1.0K] booleans_active.h -│   │   │   ├── [-rw-r--r-- 1.1K] booleans_local.h -│   │   │   ├── [-rw-r--r-- 820] booleans_policy.h -│   │   │   ├── [-rw-r--r-- 1.8K] context_record.h -│   │   │   ├── [-rw-r--r-- 1.8K] debug.h -│   │   │   ├── [-rw-r--r-- 2.4K] fcontext_record.h -│   │   │   ├── [-rw-r--r-- 1.2K] fcontexts_local.h -│   │   │   ├── [-rw-r--r-- 1020] fcontexts_policy.h -│   │   │   ├── [-rw-r--r-- 7.3K] handle.h -│   │   │   ├── [-rw-r--r-- 2.1K] ibendport_record.h -│   │   │   ├── [-rw-r--r-- 1.2K] ibendports_local.h -│   │   │   ├── [-rw-r--r-- 896] 
ibendports_policy.h -│   │   │   ├── [-rw-r--r-- 2.4K] ibpkey_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] ibpkeys_local.h -│   │   │   ├── [-rw-r--r-- 829] ibpkeys_policy.h -│   │   │   ├── [-rw-r--r-- 1.9K] iface_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] interfaces_local.h -│   │   │   ├── [-rw-r--r-- 834] interfaces_policy.h -│   │   │   ├── [-rw-r--r-- 9.9K] modules.h -│   │   │   ├── [-rw-r--r-- 2.8K] node_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] nodes_local.h -│   │   │   ├── [-rw-r--r-- 811] nodes_policy.h -│   │   │   ├── [-rw-r--r-- 2.1K] port_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] ports_local.h -│   │   │   ├── [-rw-r--r-- 811] ports_policy.h -│   │   │   ├── [-rw-r--r-- 2.1K] semanage.h -│   │   │   ├── [-rw-r--r-- 1.9K] seuser_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] seusers_local.h -│   │   │   ├── [-rw-r--r-- 835] seusers_policy.h -│   │   │   ├── [-rw-r--r-- 2.7K] user_record.h -│   │   │   ├── [-rw-r--r-- 1.1K] users_local.h -│   │   │   └── [-rw-r--r-- 811] users_policy.h -│   │   └── [drwxr-xr-x 4.0K] sepol/ -│   │   ├── [-rw-r--r-- 1.5K] boolean_record.h -│   │   ├── [-rw-r--r-- 1.3K] booleans.h -│   │   ├── [drwxr-xr-x 4.0K] cil/ -│   │   │   └── [-rw-r--r-- 3.7K] cil.h -│   │   ├── [-rw-r--r-- 752] context.h -│   │   ├── [-rw-r--r-- 1.6K] context_record.h -│   │   ├── [-rw-r--r-- 975] debug.h -│   │   ├── [-rw-r--r-- 826] errcodes.h -│   │   ├── [-rw-r--r-- 1.4K] handle.h -│   │   ├── [-rw-r--r-- 2.1K] ibendport_record.h -│   │   ├── [-rw-r--r-- 1.4K] ibendports.h -│   │   ├── [-rw-r--r-- 2.2K] ibpkey_record.h -│   │   ├── [-rw-r--r-- 1.3K] ibpkeys.h -│   │   ├── [-rw-r--r-- 1.8K] iface_record.h -│   │   ├── [-rw-r--r-- 1.4K] interfaces.h -│   │   ├── [-rw-r--r-- 125] kernel_to_cil.h -│   │   ├── [-rw-r--r-- 126] kernel_to_conf.h -│   │   ├── [-rw-r--r-- 2.6K] module.h -│   │   ├── [-rw-r--r-- 329] module_to_cil.h -│   │   ├── [-rw-r--r-- 2.7K] node_record.h -│   │   ├── [-rw-r--r-- 1.3K] nodes.h -│   │   ├── [drwxr-xr-x 4.0K] policydb/ -│   │   │   ├── [-rw-r--r-- 1.6K] avrule_block.h -│   │   │   ├── [-rw-r--r-- 4.7K] avtab.h -│   │   │   ├── [-rw-r--r-- 4.7K] conditional.h -│   │   │   ├── [-rw-r--r-- 2.5K] constraint.h -│   │   │   ├── [-rw-r--r-- 3.5K] context.h -│   │   │   ├── [-rw-r--r-- 3.5K] ebitmap.h -│   │   │   ├── [-rw-r--r-- 3.6K] expand.h -│   │   │   ├── [-rw-r--r-- 1.5K] flask_types.h -│   │   │   ├── [-rw-r--r-- 3.3K] hashtab.h -│   │   │   ├── [-rw-r--r-- 1.8K] hierarchy.h -│   │   │   ├── [-rw-r--r-- 517] link.h -│   │   │   ├── [-rw-r--r-- 5.0K] mls_types.h -│   │   │   ├── [-rw-r--r-- 1.5K] module.h -│   │   │   ├── [-rw-r--r-- 772] polcaps.h -│   │   │   ├── [-rw-r--r-- 26K] policydb.h -│   │   │   ├── [-rw-r--r-- 8.5K] services.h -│   │   │   ├── [-rw-r--r-- 1.9K] sidtab.h -│   │   │   ├── [-rw-r--r-- 1.1K] symtab.h -│   │   │   └── [-rw-r--r-- 1.5K] util.h -│   │   ├── [-rw-r--r-- 4.7K] policydb.h -│   │   ├── [-rw-r--r-- 2.0K] port_record.h -│   │   ├── [-rw-r--r-- 1.3K] ports.h -│   │   ├── [-rw-r--r-- 862] sepol.h -│   │   ├── [-rw-r--r-- 2.3K] user_record.h -│   │   └── [-rw-r--r-- 1.3K] users.h -│   ├── [drwxr-xr-x 4.0K] lib/ -│   │   ├── [drwxr-xr-x 4.0K] python3/ -│   │   │   └── [drwxr-xr-x 4.0K] site-packages/ -│   │   │   ├── [-rw-r--r-- 105K] seobject.py -│   │   │   ├── [drwxr-xr-x 4.0K] sepolgen/ -│   │   │   │   ├── [-rw-r--r-- 0] __init__.py -│   │   │   │   ├── [-rw-r--r-- 12K] access.py -│   │   │   │   ├── [-rw-r--r-- 21K] audit.py -│   │   │   │   ├── [-rw-r--r-- 2.8K] classperms.py -│   │   │   │   ├── 
[-rw-r--r-- 2.8K] defaults.py -│   │   │   │   ├── [-rw-r--r-- 16K] interfaces.py -│   │   │   │   ├── [-rw-r--r-- 42K] lex.py -│   │   │   │   ├── [-rw-r--r-- 8.5K] matching.py -│   │   │   │   ├── [-rw-r--r-- 7.1K] module.py -│   │   │   │   ├── [-rw-r--r-- 6.4K] objectmodel.py -│   │   │   │   ├── [-rw-r--r-- 5.0K] output.py -│   │   │   │   ├── [-rw-r--r-- 15K] policygen.py -│   │   │   │   ├── [-rw-r--r-- 31K] refparser.py -│   │   │   │   ├── [-rw-r--r-- 31K] refpolicy.py -│   │   │   │   ├── [-rw-r--r-- 1013] sepolgeni18n.py -│   │   │   │   ├── [-rw-r--r-- 5.4K] util.py -│   │   │   │   └── [-rw-r--r-- 134K] yacc.py -│   │   │   ├── [drwxr-xr-x 4.0K] sepolicy/ -│   │   │   │   ├── [-rw-r--r-- 37K] __init__.py -│   │   │   │   ├── [drwxr-xr-x 4.0K] __pycache__/ -│   │   │   │   │   ├── [-rw-r--r-- 54K] __init__.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 1.6K] booleans.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 2.1K] communicate.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 79K] generate.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 182K] gui.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 10K] interface.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 55K] manpage.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 2.8K] network.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 3.0K] sedbus.cpython-312.pyc -│   │   │   │   │   └── [-rw-r--r-- 5.0K] transition.cpython-312.pyc -│   │   │   │   ├── [-rw-r--r-- 1.5K] booleans.py -│   │   │   │   ├── [-rw-r--r-- 1.7K] communicate.py -│   │   │   │   ├── [-rw-r--r-- 50K] generate.py -│   │   │   │   ├── [-rw-r--r-- 131K] gui.py -│   │   │   │   ├── [-rw-r--r-- 8.0K] interface.py -│   │   │   │   ├── [-rw-r--r-- 39K] manpage.py -│   │   │   │   ├── [-rw-r--r-- 2.7K] network.py -│   │   │   │   ├── [-rw-r--r-- 1.5K] sedbus.py -│   │   │   │   ├── [-rw-r--r-- 307K] sepolicy.glade -│   │   │   │   ├── [drwxr-xr-x 4.0K] templates/ -│   │   │   │   │   ├── [-rw-r--r-- 724] __init__.py -│   │   │   │   │   ├── [drwxr-xr-x 4.0K] __pycache__/ -│   │   │   │   │   │   ├── [-rw-r--r-- 162] __init__.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 334] boolean.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.8K] etc_rw.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 8.9K] executable.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 13K] network.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.9K] rw.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 3.4K] script.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 476] semodule.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.4K] spec.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.9K] test_module.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.6K] tmp.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 1.2K] unit_file.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 3.6K] user.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 3.1K] var_cache.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 3.2K] var_lib.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.2K] var_log.cpython-312.pyc -│   │   │   │   │   │   ├── [-rw-r--r-- 2.1K] var_run.cpython-312.pyc -│   │   │   │   │   │   └── [-rw-r--r-- 3.0K] var_spool.cpython-312.pyc -│   │   │   │   │   ├── [-rw-r--r-- 1.2K] boolean.py -│   │   │   │   │   ├── [-rw-r--r-- 3.8K] etc_rw.py -│   │   │   │   │   ├── [-rw-r--r-- 9.7K] executable.py -│   │   │   │   │   ├── [-rw-r--r-- 13K] 
network.py -│   │   │   │   │   ├── [-rw-r--r-- 3.8K] rw.py -│   │   │   │   │   ├── [-rw-r--r-- 4.2K] script.py -│   │   │   │   │   ├── [-rw-r--r-- 1.3K] semodule.py -│   │   │   │   │   ├── [-rw-r--r-- 2.2K] spec.py -│   │   │   │   │   ├── [-rw-r--r-- 4.3K] test_module.py -│   │   │   │   │   ├── [-rw-r--r-- 3.4K] tmp.py -│   │   │   │   │   ├── [-rw-r--r-- 2.2K] unit_file.py -│   │   │   │   │   ├── [-rw-r--r-- 4.3K] user.py -│   │   │   │   │   ├── [-rw-r--r-- 4.1K] var_cache.py -│   │   │   │   │   ├── [-rw-r--r-- 4.2K] var_lib.py -│   │   │   │   │   ├── [-rw-r--r-- 3.2K] var_log.py -│   │   │   │   │   ├── [-rw-r--r-- 2.9K] var_run.py -│   │   │   │   │   └── [-rw-r--r-- 4.0K] var_spool.py -│   │   │   │   └── [-rw-r--r-- 3.1K] transition.py -│   │   │   └── [drwxr-xr-x 4.0K] sepolicy-3.6.dist-info/ -│   │   │   ├── [-rw-r--r-- 4] INSTALLER -│   │   │   ├── [-rw-r--r-- 207] METADATA -│   │   │   ├── [-rw-r--r-- 9.8K] RECORD -│   │   │   ├── [-rw-r--r-- 0] REQUESTED -│   │   │   ├── [-rw-r--r-- 91] WHEEL -│   │   │   ├── [-rw-r--r-- 54] direct_url.json -│   │   │   └── [-rw-r--r-- 9] top_level.txt -│   │   └── [drwxr-xr-x 4.0K] systemd/ -│   │   ├── [drwxr-xr-x 4.0K] system/ -│   │   │   ├── [-rw-r--r-- 353] mcstrans.service -│   │   │   └── [-rw-r--r-- 292] restorecond.service -│   │   └── [drwxr-xr-x 4.0K] user/ -│   │   └── [-rw-r--r-- 277] restorecond_user.service -│   ├── [drwxr-xr-x 4.0K] lib64/ -│   │   ├── [-rw-r--r-- 410K] libselinux.a -│   │   ├── [lrwxrwxrwx 15] libselinux.so -> libselinux.so.1* -│   │   ├── [-rwxr-xr-x 180K] libselinux.so.1* -│   │   ├── [-rw-r--r-- 567K] libsemanage.a -│   │   ├── [lrwxrwxrwx 16] libsemanage.so -> libsemanage.so.2* -│   │   ├── [-rwxr-xr-x 268K] libsemanage.so.2* -│   │   ├── [-rw-r--r-- 1.5M] libsepol.a -│   │   ├── [lrwxrwxrwx 13] libsepol.so -> libsepol.so.2* -│   │   ├── [-rwxr-xr-x 772K] libsepol.so.2* -│   │   ├── [drwxr-xr-x 4.0K] pkgconfig/ -│   │   │   ├── [-rw-r--r-- 276] libselinux.pc -│   │   │   ├── [-rw-r--r-- 301] libsemanage.pc -│   │   │   └── [-rw-r--r-- 233] libsepol.pc -│   │   └── [drwxr-xr-x 4.0K] python3/ -│   │   └── [drwxr-xr-x 4.0K] site-packages/ -│   │   ├── [lrwxrwxrwx 31] _selinux.cpython-312.so -> selinux/_selinux.cpython-312.so* -│   │   ├── [-rwxr-xr-x 303K] _semanage.cpython-312.so* -│   │   ├── [drwxr-xr-x 4.0K] selinux/ -│   │   │   ├── [-rw-r--r-- 38K] __init__.py -│   │   │   ├── [-rwxr-xr-x 263K] _selinux.cpython-312.so* -│   │   │   └── [-rwxr-xr-x 243K] audit2why.cpython-312.so* -│   │   ├── [drwxr-xr-x 4.0K] selinux-3.6.dist-info/ -│   │   │   ├── [-rw-r--r-- 4] INSTALLER -│   │   │   ├── [-rw-r--r-- 201] METADATA -│   │   │   ├── [-rw-r--r-- 742] RECORD -│   │   │   ├── [-rw-r--r-- 0] REQUESTED -│   │   │   ├── [-rw-r--r-- 104] WHEEL -│   │   │   ├── [-rw-r--r-- 53] direct_url.json -│   │   │   └── [-rw-r--r-- 8] top_level.txt -│   │   └── [-rw-r--r-- 38K] semanage.py -│   ├── [drwxr-xr-x 4.0K] libexec/ -│   │   └── [drwxr-xr-x 4.0K] selinux/ -│   │   ├── [drwxr-xr-x 4.0K] hll/ -│   │   │   └── [-rwxr-xr-x 15K] pp* -│   │   └── [-rwxr-xr-x 9.0K] semanage_migrate_store* -│   ├── [drwxr-xr-x 4.0K] sbin/ -│   │   ├── [-rwxr-xr-x 15K] avcstat* -│   │   ├── [-rwxr-xr-x 15K] compute_av* -│   │   ├── [-rwxr-xr-x 15K] compute_create* -│   │   ├── [-rwxr-xr-x 15K] compute_member* -│   │   ├── [-rwxr-xr-x 15K] compute_relabel* -│   │   ├── [-rwxr-xr-x 12K] fixfiles* -│   │   ├── [lrwxrwxrwx 8] genhomedircon -> semodule* -│   │   ├── [-rwxr-xr-x 15K] getconlist* -│   │   ├── [-rwxr-xr-x 15K] 
getdefaultcon* -│   │   ├── [-rwxr-xr-x 15K] getenforce* -│   │   ├── [-rwxr-xr-x 15K] getfilecon* -│   │   ├── [-rwxr-xr-x 15K] getpidcon* -│   │   ├── [-rwxr-xr-x 15K] getpidprevcon* -│   │   ├── [-rwxr-xr-x 15K] getpolicyload* -│   │   ├── [-rwxr-xr-x 15K] getsebool* -│   │   ├── [-rwxr-xr-x 15K] getseuser* -│   │   ├── [-rwxr-xr-x 15K] load_policy* -│   │   ├── [-rwxr-xr-x 15K] matchpathcon* -│   │   ├── [-rwxr-xr-x 239K] mcstransd* -│   │   ├── [-rwxr-xr-x 15K] open_init_pty* -│   │   ├── [-rwxr-xr-x 15K] policyvers* -│   │   ├── [lrwxrwxrwx 8] restorecon -> setfiles* -│   │   ├── [-rwxr-xr-x 15K] restorecon_xattr* -│   │   ├── [-rwxr-xr-x 27K] restorecond* -│   │   ├── [-rwxr-xr-x 15K] run_init* -│   │   ├── [-rwxr-xr-x 75K] sefcontext_compile* -│   │   ├── [-rwxr-xr-x 15K] selabel_digest* -│   │   ├── [-rwxr-xr-x 15K] selabel_get_digests_all_partial_matches* -│   │   ├── [-rwxr-xr-x 15K] selabel_lookup* -│   │   ├── [-rwxr-xr-x 15K] selabel_lookup_best_match* -│   │   ├── [-rwxr-xr-x 15K] selabel_partial_match* -│   │   ├── [-rwxr-xr-x 15K] selinux_check_access* -│   │   ├── [-rwxr-xr-x 15K] selinux_check_securetty_context* -│   │   ├── [-rwxr-xr-x 15K] selinuxenabled* -│   │   ├── [-rwxr-xr-x 15K] selinuxexeccon* -│   │   ├── [-rwxr-xr-x 41K] semanage* -│   │   ├── [-rwxr-xr-x 27K] semodule* -│   │   ├── [lrwxrwxrwx 15] sestatus -> ../bin/sestatus* -│   │   ├── [-rwxr-xr-x 15K] setenforce* -│   │   ├── [-rwxr-xr-x 15K] setfilecon* -│   │   ├── [-rwxr-xr-x 23K] setfiles* -│   │   ├── [-rwxr-xr-x 19K] setsebool* -│   │   ├── [-rwsr-xr-x 31K] seunshare* -│   │   ├── [-rwxr-xr-x 15K] togglesebool* -│   │   └── [-rwxr-xr-x 15K] validatetrans* -│   └── [drwxr-xr-x 4.0K] share/ -│   ├── [drwxr-xr-x 4.0K] polkit-1/ -│   │   └── [drwxr-xr-x 4.0K] actions/ -│   │   ├── [-rw-r--r-- 928] org.selinux.config.policy -│   │   └── [-rw-r--r-- 3.2K] org.selinux.policy -│   ├── [drwxr-xr-x 4.0K] sandbox/ -│   │   ├── [-rwxr-xr-x 991] sandboxX.sh* -│   │   └── [-rwxr-xr-x 250] start* -│   └── [drwxr-xr-x 4.0K] system-config-selinux/ -│   ├── [-rw-r--r-- 7.8K] booleansPage.py -│   ├── [-rw-r--r-- 5.1K] domainsPage.py -│   ├── [-rw-r--r-- 8.4K] fcontextPage.py -│   ├── [-rw-r--r-- 6.8K] loginsPage.py -│   ├── [-rw-r--r-- 6.8K] modulesPage.py -│   ├── [-rw-r--r-- 137K] polgen.ui -│   ├── [-rw-r--r-- 10K] portsPage.py -│   ├── [-rwxr-xr-x 6.4K] selinux_server.py* -│   ├── [-rw-r--r-- 5.3K] semanagePage.py -│   ├── [-rw-r--r-- 7.6K] statusPage.py -│   ├── [-rw-r--r-- 1.4K] system-config-selinux.png -│   ├── [-rwxr-xr-x 6.1K] system-config-selinux.py* -│   ├── [-rw-r--r-- 100K] system-config-selinux.ui -│   └── [-rw-r--r-- 5.3K] usersPage.py -└── [drwxr-xr-x 4.0K] var/ - └── [drwxr-xr-x 4.0K] lib/ - └── [drwxr-xr-x 4.0K] sepolgen/ - └── [-rw-r--r-- 33K] perm_map +└── [drwxr-xr-x 6] usr + ├── [drwxr-xr-x 3] include + │ └── [drwxr-xr-x 9] selinux + │ ├── [-rw-r--r-- 16K] avc.h + │ ├── [-rw-r--r-- 1.2K] context.h + │ ├── [-rw-r--r-- 2.9K] get_context_list.h + │ ├── [-rw-r--r-- 643] get_default_type.h + │ ├── [-rw-r--r-- 6.3K] label.h + │ ├── [-rw-r--r-- 7.3K] restorecon.h + │ └── [-rw-r--r-- 29K] selinux.h + ├── [drwxr-xr-x 7] lib64 + │ ├── [-rw-r--r-- 444K] libselinux.a + │ ├── [lrwxrwxrwx 15] libselinux.so -> libselinux.so.1 + │ ├── [-rwxr-xr-x 196K] libselinux.so.1 + │ ├── [drwxr-xr-x 3] pkgconfig + │ │ └── [-rw-r--r-- 276] libselinux.pc + │ └── [drwxr-xr-x 3] python3 + │ └── [drwxr-xr-x 5] site-packages + │ ├── [lrwxrwxrwx 31] _selinux.cpython-312.so -> ↵ +selinux/_selinux.cpython-312.so + │ ├── 
[drwxr-xr-x 5] selinux
+ │ │ ├── [-rw-r--r-- 38K] __init__.py
+ │ │ ├── [-rwxr-xr-x 267K] _selinux.cpython-312.so
+ │ │ └── [-rwxr-xr-x 247K] audit2why.cpython-312.so
+ │ └── [drwxr-xr-x 9] selinux-3.8.dist-info
+ │ ├── [-rw-r--r-- 4] INSTALLER
+ │ ├── [-rw-r--r-- 201] METADATA
+ │ ├── [-rw-r--r-- 743] RECORD
+ │ ├── [-rw-r--r-- 0] REQUESTED
+ │ ├── [-rw-r--r-- 104] WHEEL
+ │ ├── [-rw-r--r-- 53] direct_url.json
+ │ └── [-rw-r--r-- 8] top_level.txt
+ ├── [drwxr-xr-x 33] sbin
+ │ ├── [-rwxr-xr-x 15K] avcstat
+ │ ├── [-rwxr-xr-x 15K] compute_av
+ │ ├── [-rwxr-xr-x 15K] compute_create
+ │ ├── [-rwxr-xr-x 15K] compute_member
+ │ ├── [-rwxr-xr-x 15K] compute_relabel
+ │ ├── [-rwxr-xr-x 15K] getconlist
+ │ ├── [-rwxr-xr-x 15K] getdefaultcon
+ │ ├── [-rwxr-xr-x 15K] getenforce
+ │ ├── [-rwxr-xr-x 15K] getfilecon
+ │ ├── [-rwxr-xr-x 15K] getpidcon
+ │ ├── [-rwxr-xr-x 15K] getpidprevcon
+ │ ├── [-rwxr-xr-x 15K] getpolicyload
+ │ ├── [-rwxr-xr-x 15K] getsebool
+ │ ├── [-rwxr-xr-x 15K] getseuser
+ │ ├── [-rwxr-xr-x 15K] matchpathcon
+ │ ├── [-rwxr-xr-x 15K] policyvers
+ │ ├── [-rwxr-xr-x 115K] sefcontext_compile
+ │ ├── [-rwxr-xr-x 15K] selabel_compare
+ │ ├── [-rwxr-xr-x 15K] selabel_digest
+ │ ├── [-rwxr-xr-x 15K] selabel_get_digests_all_partial_matches
+ │ ├── [-rwxr-xr-x 15K] selabel_lookup
+ │ ├── [-rwxr-xr-x 15K] selabel_lookup_best_match
+ │ ├── [-rwxr-xr-x 15K] selabel_partial_match
+ │ ├── [-rwxr-xr-x 15K] selinux_check_access
+ │ ├── [-rwxr-xr-x 15K] selinux_check_securetty_context
+ │ ├── [-rwxr-xr-x 15K] selinuxenabled
+ │ ├── [-rwxr-xr-x 15K] selinuxexeccon
+ │ ├── [-rwxr-xr-x 15K] setenforce
+ │ ├── [-rwxr-xr-x 15K] setfilecon
+ │ ├── [-rwxr-xr-x 15K] togglesebool
+ │ └── [-rwxr-xr-x 15K] validatetrans
+ └── [drwxr-xr-x 2] share
-49 directories, 300 files
+12 directories, 53 files
 ```
diff --git a/images/packages/selinux/werf.inc.yaml b/images/packages/selinux/werf.inc.yaml
index 34fb101f30..ec7b36bb94 100644
--- a/images/packages/selinux/werf.inc.yaml
+++ b/images/packages/selinux/werf.inc.yaml
@@ -44,13 +44,12 @@ altPackages:
 - python3-module-wheel
 - python3-module-distro
 - ruby-devel
-- libgio-devel
-- libustr-devel libustr
+- libustr-devel libustr
 - policycoreutils-restorecond policycoreutils
 - tree
 packages:
 - libaudit
-- glib2 bzip2
+- util-linux bzip2
 - pcre2
 - libcap libcap-ng
 {{- end -}}
@@ -87,7 +86,8 @@ shell:
     cd /src
     OUTDIR=/out

-    make -j$(nproc) clean distclean
+    cd libselinux
+    # make -j$(nproc)

     make -j$(nproc) \
       DESTDIR=$OUTDIR \
diff --git a/images/packages/swtpm/werf.inc.yaml b/images/packages/swtpm/werf.inc.yaml
index 8cd55c12ca..cb9833aff5 100644
--- a/images/packages/swtpm/werf.inc.yaml
+++ b/images/packages/swtpm/werf.inc.yaml
@@ -33,14 +33,16 @@ altPackages:
 - cryptote
 - net-tools softhsm
 - tpm2-pkcs11 tpm2-pkcs11-tools tpm2-tools tpm2-abrmd
-- glib2-devel libgnutls-openssl-devel
-- libgnutls30 libfuse-devel libgnutls-devel gnutls-utils
+- libgnutls-openssl-devel
+- libfuse-devel
 - libseccomp-devel libseccomp
 - perl-podlators
-- glib2-devel libgio-devel
 packages:
 - libgmp libtpms openssl libjson-glib
 - libtasn1
+- glib2 util-linux
+- gnutls
+- libunistring
 {{- end -}}

 {{ $builderDependencies := include "$name" .
| fromYaml }} diff --git a/images/packages/systemd/README.md b/images/packages/systemd/README.md new file mode 100644 index 0000000000..19156f12b6 --- /dev/null +++ b/images/packages/systemd/README.md @@ -0,0 +1,797 @@ +# systemd +``` +├── [drwxr-xr-x 14] etc +│ ├── [drwxr-xr-x 3] X11 +│ │ └── [drwxr-xr-x 3] xinit.d +│ │ └── [-rwxr-xr-x 538] 50-systemd-user.sh +│ ├── [drwxr-xr-x 2] binfmt.d +│ ├── [drwx------ 2] credstore +│ ├── [drwx------ 2] credstore.encrypted +│ ├── [drwxr-xr-x 3] kernel +│ │ └── [drwxr-xr-x 2] install.d +│ ├── [drwxr-xr-x 3] pam.d +│ │ └── [-rw-r--r-- 562] systemd-user +│ ├── [drwxr-xr-x 3] rc.d +│ │ └── [drwxr-xr-x 3] init.d +│ │ └── [-rw-r--r-- 1.1K] README +│ ├── [drwxr-xr-x 2] sysctl.d +│ ├── [drwxr-xr-x 19] systemd +│ │ ├── [-rw-r--r-- 1018] coredump.conf +│ │ ├── [-rw-r--r-- 890] homed.conf +│ │ ├── [-rw-r--r-- 1.1K] journal-remote.conf +│ │ ├── [-rw-r--r-- 1.0K] journal-upload.conf +│ │ ├── [-rw-r--r-- 1.4K] journald.conf +│ │ ├── [-rw-r--r-- 1.6K] logind.conf +│ │ ├── [drwxr-xr-x 2] network +│ │ ├── [-rw-r--r-- 1.1K] networkd.conf +│ │ ├── [-rw-r--r-- 928] oomd.conf +│ │ ├── [-rw-r--r-- 879] pstore.conf +│ │ ├── [-rw-r--r-- 1.5K] resolved.conf +│ │ ├── [-rw-r--r-- 1.0K] sleep.conf +│ │ ├── [drwxr-xr-x 2] system +│ │ ├── [-rw-r--r-- 2.3K] system.conf +│ │ ├── [-rw-r--r-- 989] timesyncd.conf +│ │ ├── [drwxr-xr-x 2] user +│ │ └── [-rw-r--r-- 1.7K] user.conf +│ ├── [drwxr-xr-x 2] tmpfiles.d +│ ├── [drwxr-xr-x 6] udev +│ │ ├── [drwxr-xr-x 2] hwdb.d +│ │ ├── [-rw-r--r-- 865] iocost.conf +│ │ ├── [drwxr-xr-x 2] rules.d +│ │ └── [-rw-r--r-- 305] udev.conf +│ └── [drwxr-xr-x 3] xdg +│ └── [drwxr-xr-x 3] systemd +│ └── [lrwxrwxrwx 18] user -> ../../systemd/user +├── [drwxr-xr-x 7] usr +│ ├── [drwxr-xr-x 58] bin +│ │ ├── [-rwxr-xr-x 668K] bootctl +│ │ ├── [-rwxr-xr-x 91K] busctl +│ │ ├── [-rwxr-xr-x 79K] coredumpctl +│ │ ├── [-rwxr-xr-x 131K] homectl +│ │ ├── [-rwxr-xr-x 31K] hostnamectl +│ │ ├── [-rwxr-xr-x 1001K] journalctl +│ │ ├── [-rwxr-xr-x 55K] kernel-install +│ │ ├── [-rwxr-xr-x 27K] localectl +│ │ ├── [-rwxr-xr-x 63K] loginctl +│ │ ├── [-rwxr-xr-x 99K] machinectl +│ │ ├── [-rwxr-xr-x 814K] networkctl +│ │ ├── [-rwxr-xr-x 19K] oomctl +│ │ ├── [-rwxr-xr-x 466K] portablectl +│ │ ├── [-rwxr-xr-x 155K] resolvectl +│ │ ├── [-rwxr-xr-x 1.4M] systemctl +│ │ ├── [-rwxr-xr-x 15K] systemd-ac-power +│ │ ├── [-rwxr-xr-x 203K] systemd-analyze +│ │ ├── [-rwxr-xr-x 19K] systemd-ask-password +│ │ ├── [-rwxr-xr-x 19K] systemd-cat +│ │ ├── [-rwxr-xr-x 23K] systemd-cgls +│ │ ├── [-rwxr-xr-x 39K] systemd-cgtop +│ │ ├── [lrwxrwxrwx 14] systemd-confext -> systemd-sysext +│ │ ├── [-rwxr-xr-x 43K] systemd-creds +│ │ ├── [-rwxr-xr-x 67K] systemd-cryptenroll +│ │ ├── [-rwxr-xr-x 83K] systemd-cryptsetup +│ │ ├── [-rwxr-xr-x 27K] systemd-delta +│ │ ├── [-rwxr-xr-x 19K] systemd-detect-virt +│ │ ├── [-rwxr-xr-x 71K] systemd-dissect +│ │ ├── [-rwxr-xr-x 19K] systemd-escape +│ │ ├── [-rwxr-xr-x 155K] systemd-hwdb +│ │ ├── [-rwxr-xr-x 31K] systemd-id128 +│ │ ├── [-rwxr-xr-x 23K] systemd-inhibit +│ │ ├── [-rwxr-xr-x 19K] systemd-machine-id-setup +│ │ ├── [-rwxr-xr-x 51K] systemd-mount +│ │ ├── [-rwxr-xr-x 27K] systemd-notify +│ │ ├── [-rwxr-xr-x 352K] systemd-nspawn +│ │ ├── [-rwxr-xr-x 19K] systemd-path +│ │ ├── [-rwxr-xr-x 195K] systemd-repart +│ │ ├── [-rwxr-xr-x 868K] systemd-repart.standalone +│ │ ├── [lrwxrwxrwx 10] systemd-resolve -> resolvectl +│ │ ├── [-rwxr-xr-x 67K] systemd-run +│ │ ├── [-rwxr-xr-x 27K] systemd-socket-activate +│ │ ├── [-rwxr-xr-x 19K] systemd-stdio-bridge +│ │ ├── 
[-rwxr-xr-x 51K] systemd-sysext +│ │ ├── [-rwxr-xr-x 67K] systemd-sysusers +│ │ ├── [-rwxr-xr-x 243K] systemd-sysusers.standalone +│ │ ├── [-rwxr-xr-x 107K] systemd-tmpfiles +│ │ ├── [-rwxr-xr-x 327K] systemd-tmpfiles.standalone +│ │ ├── [-rwxr-xr-x 35K] systemd-tty-ask-password-agent +│ │ ├── [lrwxrwxrwx 13] systemd-umount -> systemd-mount +│ │ ├── [-rwxr-xr-x 43K] systemd-vmspawn +│ │ ├── [-rwxr-xr-x 47K] timedatectl +│ │ ├── [-rwxr-xr-x 1.4M] udevadm +│ │ ├── [-rwxr-xr-x 55K] ukify +│ │ ├── [-rwxr-xr-x 47K] userdbctl +│ │ └── [-rwxr-xr-x 31K] varlinkctl +│ ├── [drwxr-xr-x 4] include +│ │ ├── [-rw-r--r-- 9.6K] libudev.h +│ │ └── [drwxr-xr-x 16] systemd +│ │ ├── [-rw-r--r-- 3.7K] _sd-common.h +│ │ ├── [-rw-r--r-- 5.8K] sd-bus-protocol.h +│ │ ├── [-rw-r--r-- 23K] sd-bus-vtable.h +│ │ ├── [-rw-r--r-- 30K] sd-bus.h +│ │ ├── [-rw-r--r-- 14K] sd-daemon.h +│ │ ├── [-rw-r--r-- 8.3K] sd-device.h +│ │ ├── [-rw-r--r-- 8.9K] sd-event.h +│ │ ├── [-rw-r--r-- 25K] sd-gpt.h +│ │ ├── [-rw-r--r-- 1.6K] sd-hwdb.h +│ │ ├── [-rw-r--r-- 8.4K] sd-id128.h +│ │ ├── [-rw-r--r-- 8.5K] sd-journal.h +│ │ ├── [-rw-r--r-- 10K] sd-login.h +│ │ ├── [-rw-r--r-- 24K] sd-messages.h +│ │ └── [-rw-r--r-- 4.0K] sd-path.h +│ ├── [drwxr-xr-x 14] lib +│ │ ├── [drwxr-xr-x 2] binfmt.d +│ │ ├── [drwxr-xr-x 2] credstore +│ │ ├── [drwxr-xr-x 3] environment.d +│ │ │ └── [lrwxrwxrwx 24] 99-environment.conf -> ../../../etc/environment +│ │ ├── [drwxr-xr-x 4] kernel +│ │ │ ├── [-rw-r--r-- 407] install.conf +│ │ │ └── [drwxr-xr-x 6] install.d +│ │ │ ├── [-rwxr-xr-x 2.0K] 50-depmod.install +│ │ │ ├── [-rwxr-xr-x 8.5K] 60-ukify.install +│ │ │ ├── [-rwxr-xr-x 7.0K] 90-loaderentry.install +│ │ │ └── [-rwxr-xr-x 3.1K] 90-uki-copy.install +│ │ ├── [drwxr-xr-x 4] modprobe.d +│ │ │ ├── [-rw-r--r-- 306] README +│ │ │ └── [-rw-r--r-- 773] systemd.conf +│ │ ├── [drwxr-xr-x 12] pcrlock.d +│ │ │ ├── [-rw-r--r-- 494] 350-action-efi-application.pcrlock +│ │ │ ├── [drwxr-xr-x 4] 400-secureboot-separator.pcrlock.d +│ │ │ │ ├── [-rw-r--r-- 494] 300-0x00000000.pcrlock +│ │ │ │ └── [-rw-r--r-- 494] 600-0xffffffff.pcrlock +│ │ │ ├── [drwxr-xr-x 4] 500-separator.pcrlock.d +│ │ │ │ ├── [-rw-r--r-- 3.3K] 300-0x00000000.pcrlock +│ │ │ │ └── [-rw-r--r-- 3.3K] 600-0xffffffff.pcrlock +│ │ │ ├── [drwxr-xr-x 4] 700-action-efi-exit-boot-services.pcrlock.d +│ │ │ │ ├── [-rw-r--r-- 974] 300-present.pcrlock +│ │ │ │ └── [-rw-r--r-- 15] 600-absent.pcrlock +│ │ │ ├── [-rw-r--r-- 495] 750-enter-initrd.pcrlock +│ │ │ ├── [-rw-r--r-- 495] 800-leave-initrd.pcrlock +│ │ │ ├── [-rw-r--r-- 495] 850-sysinit.pcrlock +│ │ │ ├── [-rw-r--r-- 495] 900-ready.pcrlock +│ │ │ ├── [-rw-r--r-- 495] 950-shutdown.pcrlock +│ │ │ └── [-rw-r--r-- 495] 990-final.pcrlock +│ │ ├── [drwxr-xr-x 3] rpm +│ │ │ └── [drwxr-xr-x 3] macros.d +│ │ │ └── [-rw-r--r-- 6.9K] macros.systemd +│ │ ├── [drwxr-xr-x 6] sysctl.d +│ │ │ ├── [-rw-r--r-- 1.8K] 50-coredump.conf +│ │ │ ├── [-rw-r--r-- 1.9K] 50-default.conf +│ │ │ ├── [-rw-r--r-- 649] 50-pid-max.conf +│ │ │ └── [-rw-r--r-- 387] README +│ │ ├── [drwxr-xr-x 89] systemd +│ │ │ ├── [drwxr-xr-x 3] boot +│ │ │ │ └── [drwxr-xr-x 5] efi +│ │ │ │ ├── [-rw-r--r-- 2.0K] addonx64.efi.stub +│ │ │ │ ├── [-rw-r--r-- 69K] linuxx64.efi.stub +│ │ │ │ └── [-rw-r--r-- 97K] systemd-bootx64.efi +│ │ │ ├── [drwxr-xr-x 19] catalog +│ │ │ │ ├── [-rw-r--r-- 13K] systemd.be.catalog +│ │ │ │ ├── [-rw-r--r-- 10K] systemd.be@latin.catalog +│ │ │ │ ├── [-rw-r--r-- 29K] systemd.bg.catalog +│ │ │ │ ├── [-rw-r--r-- 28K] systemd.catalog +│ │ │ │ ├── [-rw-r--r-- 8.3K] systemd.da.catalog +│ │ │ 
│ ├── [-rw-r--r-- 748] systemd.de.catalog +│ │ │ │ ├── [-rw-r--r-- 14K] systemd.fr.catalog +│ │ │ │ ├── [-rw-r--r-- 11K] systemd.hr.catalog +│ │ │ │ ├── [-rw-r--r-- 8.7K] systemd.hu.catalog +│ │ │ │ ├── [-rw-r--r-- 16K] systemd.it.catalog +│ │ │ │ ├── [-rw-r--r-- 12K] systemd.ko.catalog +│ │ │ │ ├── [-rw-r--r-- 25K] systemd.pl.catalog +│ │ │ │ ├── [-rw-r--r-- 8.7K] systemd.pt_BR.catalog +│ │ │ │ ├── [-rw-r--r-- 21K] systemd.ru.catalog +│ │ │ │ ├── [-rw-r--r-- 11K] systemd.sr.catalog +│ │ │ │ ├── [-rw-r--r-- 7.7K] systemd.zh_CN.catalog +│ │ │ │ └── [-rw-r--r-- 7.7K] systemd.zh_TW.catalog +│ │ │ ├── [-rw-r--r-- 9.3K] import-pubring.gpg +│ │ │ ├── [drwxr-xr-x 14] network +│ │ │ │ ├── [-rw-r--r-- 819] 80-6rd-tunnel.network +│ │ │ │ ├── [-rw-r--r-- 719] 80-auto-link-local.network.example +│ │ │ │ ├── [-rw-r--r-- 947] 80-container-host0.network +│ │ │ │ ├── [-rw-r--r-- 940] 80-container-vb.network +│ │ │ │ ├── [-rw-r--r-- 1.0K] 80-container-ve.network +│ │ │ │ ├── [-rw-r--r-- 1023] 80-container-vz.network +│ │ │ │ ├── [-rw-r--r-- 984] 80-vm-vt.network +│ │ │ │ ├── [-rw-r--r-- 730] 80-wifi-adhoc.network +│ │ │ │ ├── [-rw-r--r-- 664] 80-wifi-ap.network.example +│ │ │ │ ├── [-rw-r--r-- 595] 80-wifi-station.network.example +│ │ │ │ ├── [-rw-r--r-- 636] 89-ethernet.network.example +│ │ │ │ └── [-rw-r--r-- 769] 99-default.link +│ │ │ ├── [drwxr-xr-x 3] ntp-units.d +│ │ │ │ └── [-rw-r--r-- 116] 80-systemd-timesync.list +│ │ │ ├── [drwxr-xr-x 3] portable +│ │ │ │ └── [drwxr-xr-x 6] profile +│ │ │ │ ├── [drwxr-xr-x 3] default +│ │ │ │ │ └── [-rw-r--r-- 1.0K] service.conf +│ │ │ │ ├── [drwxr-xr-x 3] nonetwork +│ │ │ │ │ └── [-rw-r--r-- 975] service.conf +│ │ │ │ ├── [drwxr-xr-x 3] strict +│ │ │ │ │ └── [-rw-r--r-- 712] service.conf +│ │ │ │ └── [drwxr-xr-x 3] trusted +│ │ │ │ └── [-rw-r--r-- 223] service.conf +│ │ │ ├── [drwxr-xr-x 3] repart +│ │ │ │ └── [drwxr-xr-x 5] definitions +│ │ │ │ ├── [drwxr-xr-x 5] confext.repart.d +│ │ │ │ │ ├── [-rw-r--r-- 437] 10-root.conf +│ │ │ │ │ ├── [-rw-r--r-- 415] 20-root-verity.conf +│ │ │ │ │ └── [-rw-r--r-- 410] 30-root-verity-sig.conf +│ │ │ │ ├── [drwxr-xr-x 5] portable.repart.d +│ │ │ │ │ ├── [-rw-r--r-- 433] 10-root.conf +│ │ │ │ │ ├── [-rw-r--r-- 415] 20-root-verity.conf +│ │ │ │ │ └── [-rw-r--r-- 410] 30-root-verity-sig.conf +│ │ │ │ └── [drwxr-xr-x 5] sysext.repart.d +│ │ │ │ ├── [-rw-r--r-- 453] 10-root.conf +│ │ │ │ ├── [-rw-r--r-- 415] 20-root-verity.conf +│ │ │ │ └── [-rw-r--r-- 410] 30-root-verity-sig.conf +│ │ │ ├── [-rw-r--r-- 710] resolv.conf +│ │ │ ├── [drwxr-xr-x 253] system +│ │ │ │ ├── [lrwxrwxrwx 14] autovt@.service -> getty@.service +│ │ │ │ ├── [-rw-r--r-- 927] basic.target +│ │ │ │ ├── [-rw-r--r-- 519] blockdev@.target +│ │ │ │ ├── [-rw-r--r-- 435] bluetooth.target +│ │ │ │ ├── [-rw-r--r-- 463] boot-complete.target +│ │ │ │ ├── [-rw-r--r-- 1.1K] console-getty.service +│ │ │ │ ├── [-rw-r--r-- 1.3K] container-getty@.service +│ │ │ │ ├── [-rw-r--r-- 473] cryptsetup-pre.target +│ │ │ │ ├── [-rw-r--r-- 420] cryptsetup.target +│ │ │ │ ├── [lrwxrwxrwx 13] ctrl-alt-del.target -> reboot.target +│ │ │ │ ├── [lrwxrwxrwx 25] dbus-org.freedesktop.hostname1.service -> systemd-hostnamed.service +│ │ │ │ ├── [lrwxrwxrwx 23] dbus-org.freedesktop.import1.service -> systemd-importd.service +│ │ │ │ ├── [lrwxrwxrwx 23] dbus-org.freedesktop.locale1.service -> systemd-localed.service +│ │ │ │ ├── [lrwxrwxrwx 22] dbus-org.freedesktop.login1.service -> systemd-logind.service +│ │ │ │ ├── [lrwxrwxrwx 24] dbus-org.freedesktop.machine1.service -> 
systemd-machined.service +│ │ │ │ ├── [lrwxrwxrwx 25] dbus-org.freedesktop.portable1.service -> systemd-portabled.service +│ │ │ │ ├── [lrwxrwxrwx 25] dbus-org.freedesktop.timedate1.service -> systemd-timedated.service +│ │ │ │ ├── [-rw-r--r-- 1.1K] debug-shell.service +│ │ │ │ ├── [lrwxrwxrwx 16] default.target -> graphical.target +│ │ │ │ ├── [-rw-r--r-- 775] dev-hugepages.mount +│ │ │ │ ├── [-rw-r--r-- 701] dev-mqueue.mount +│ │ │ │ ├── [-rw-r--r-- 813] emergency.service +│ │ │ │ ├── [-rw-r--r-- 479] emergency.target +│ │ │ │ ├── [-rw-r--r-- 549] exit.target +│ │ │ │ ├── [-rw-r--r-- 410] factory-reset.target +│ │ │ │ ├── [-rw-r--r-- 500] final.target +│ │ │ │ ├── [-rw-r--r-- 461] first-boot-complete.target +│ │ │ │ ├── [-rw-r--r-- 518] getty-pre.target +│ │ │ │ ├── [-rw-r--r-- 509] getty.target +│ │ │ │ ├── [-rw-r--r-- 2.0K] getty@.service +│ │ │ │ ├── [-rw-r--r-- 606] graphical.target +│ │ │ │ ├── [drwxr-xr-x 3] graphical.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 39] systemd-update-utmp-runlevel.service -> ../systemd-update-utmp-runlevel.service +│ │ │ │ ├── [-rw-r--r-- 542] halt.target +│ │ │ │ ├── [-rw-r--r-- 526] hibernate.target +│ │ │ │ ├── [-rw-r--r-- 538] hybrid-sleep.target +│ │ │ │ ├── [-rw-r--r-- 670] initrd-cleanup.service +│ │ │ │ ├── [-rw-r--r-- 598] initrd-fs.target +│ │ │ │ ├── [-rw-r--r-- 1.3K] initrd-parse-etc.service +│ │ │ │ ├── [-rw-r--r-- 566] initrd-root-device.target +│ │ │ │ ├── [drwxr-xr-x 4] initrd-root-device.target.wants +│ │ │ │ │ ├── [lrwxrwxrwx 27] remote-cryptsetup.target -> ../remote-cryptsetup.target +│ │ │ │ │ └── [lrwxrwxrwx 28] remote-veritysetup.target -> ../remote-veritysetup.target +│ │ │ │ ├── [-rw-r--r-- 571] initrd-root-fs.target +│ │ │ │ ├── [drwxr-xr-x 3] initrd-root-fs.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 25] systemd-repart.service -> ../systemd-repart.service +│ │ │ │ ├── [-rw-r--r-- 614] initrd-switch-root.service +│ │ │ │ ├── [-rw-r--r-- 779] initrd-switch-root.target +│ │ │ │ ├── [-rw-r--r-- 823] initrd-udevadm-cleanup-db.service +│ │ │ │ ├── [-rw-r--r-- 571] initrd-usr-fs.target +│ │ │ │ ├── [-rw-r--r-- 810] initrd.target +│ │ │ │ ├── [drwxr-xr-x 4] initrd.target.wants +│ │ │ │ │ ├── [lrwxrwxrwx 32] systemd-battery-check.service -> ../systemd-battery-check.service +│ │ │ │ │ └── [lrwxrwxrwx 34] systemd-pcrphase-initrd.service -> ../systemd-pcrphase-initrd.service +│ │ │ │ ├── [-rw-r--r-- 487] integritysetup-pre.target +│ │ │ │ ├── [-rw-r--r-- 430] integritysetup.target +│ │ │ │ ├── [-rw-r--r-- 549] kexec.target +│ │ │ │ ├── [-rw-r--r-- 756] ldconfig.service +│ │ │ │ ├── [-rw-r--r-- 453] local-fs-pre.target +│ │ │ │ ├── [-rw-r--r-- 556] local-fs.target +│ │ │ │ ├── [drwxr-xr-x 3] local-fs.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 12] tmp.mount -> ../tmp.mount +│ │ │ │ ├── [-rw-r--r-- 453] machine.slice +│ │ │ │ ├── [-rw-r--r-- 470] machines.target +│ │ │ │ ├── [drwxr-xr-x 3] machines.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 25] var-lib-machines.mount -> ../var-lib-machines.mount +│ │ │ │ ├── [-rw-r--r-- 573] modprobe@.service +│ │ │ │ ├── [-rw-r--r-- 540] multi-user.target +│ │ │ │ ├── [drwxr-xr-x 6] multi-user.target.wants +│ │ │ │ │ ├── [lrwxrwxrwx 15] getty.target -> ../getty.target +│ │ │ │ │ ├── [lrwxrwxrwx 33] systemd-ask-password-wall.path -> ../systemd-ask-password-wall.path +│ │ │ │ │ ├── [lrwxrwxrwx 25] systemd-logind.service -> ../systemd-logind.service +│ │ │ │ │ └── [lrwxrwxrwx 39] systemd-update-utmp-runlevel.service -> ../systemd-update-utmp-runlevel.service +│ │ │ │ ├── [-rw-r--r-- 483] network-online.target +│ │ │ │ ├── 
[-rw-r--r-- 490] network-pre.target +│ │ │ │ ├── [-rw-r--r-- 499] network.target +│ │ │ │ ├── [-rw-r--r-- 562] nss-lookup.target +│ │ │ │ ├── [-rw-r--r-- 521] nss-user-lookup.target +│ │ │ │ ├── [-rw-r--r-- 407] paths.target +│ │ │ │ ├── [-rw-r--r-- 607] poweroff.target +│ │ │ │ ├── [-rw-r--r-- 433] printer.target +│ │ │ │ ├── [-rw-r--r-- 789] proc-sys-fs-binfmt_misc.automount +│ │ │ │ ├── [-rw-r--r-- 711] proc-sys-fs-binfmt_misc.mount +│ │ │ │ ├── [-rw-r--r-- 626] quotaon.service +│ │ │ │ ├── [-rw-r--r-- 751] rc-local.service +│ │ │ │ ├── [-rw-r--r-- 598] reboot.target +│ │ │ │ ├── [-rw-r--r-- 557] remote-cryptsetup.target +│ │ │ │ ├── [-rw-r--r-- 454] remote-fs-pre.target +│ │ │ │ ├── [-rw-r--r-- 530] remote-fs.target +│ │ │ │ ├── [drwxr-xr-x 3] remote-fs.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 25] var-lib-machines.mount -> ../var-lib-machines.mount +│ │ │ │ ├── [-rw-r--r-- 565] remote-veritysetup.target +│ │ │ │ ├── [-rw-r--r-- 804] rescue.service +│ │ │ │ ├── [-rw-r--r-- 500] rescue.target +│ │ │ │ ├── [drwxr-xr-x 3] rescue.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 39] systemd-update-utmp-runlevel.service -> ../systemd-update-utmp-runlevel.service +│ │ │ │ ├── [-rw-r--r-- 548] rpcbind.target +│ │ │ │ ├── [lrwxrwxrwx 15] runlevel0.target -> poweroff.target +│ │ │ │ ├── [lrwxrwxrwx 13] runlevel1.target -> rescue.target +│ │ │ │ ├── [drwxr-xr-x 2] runlevel1.target.wants +│ │ │ │ ├── [lrwxrwxrwx 17] runlevel2.target -> multi-user.target +│ │ │ │ ├── [drwxr-xr-x 2] runlevel2.target.wants +│ │ │ │ ├── [lrwxrwxrwx 17] runlevel3.target -> multi-user.target +│ │ │ │ ├── [drwxr-xr-x 2] runlevel3.target.wants +│ │ │ │ ├── [lrwxrwxrwx 17] runlevel4.target -> multi-user.target +│ │ │ │ ├── [drwxr-xr-x 2] runlevel4.target.wants +│ │ │ │ ├── [lrwxrwxrwx 16] runlevel5.target -> graphical.target +│ │ │ │ ├── [drwxr-xr-x 2] runlevel5.target.wants +│ │ │ │ ├── [lrwxrwxrwx 13] runlevel6.target -> reboot.target +│ │ │ │ ├── [-rw-r--r-- 1.5K] serial-getty@.service +│ │ │ │ ├── [-rw-r--r-- 457] shutdown.target +│ │ │ │ ├── [-rw-r--r-- 410] sigpwr.target +│ │ │ │ ├── [-rw-r--r-- 468] sleep.target +│ │ │ │ ├── [-rw-r--r-- 462] slices.target +│ │ │ │ ├── [-rw-r--r-- 428] smartcard.target +│ │ │ │ ├── [-rw-r--r-- 409] sockets.target +│ │ │ │ ├── [drwxr-xr-x 10] sockets.target.wants +│ │ │ │ │ ├── [lrwxrwxrwx 26] systemd-coredump.socket -> ../systemd-coredump.socket +│ │ │ │ │ ├── [lrwxrwxrwx 25] systemd-initctl.socket -> ../systemd-initctl.socket +│ │ │ │ │ ├── [lrwxrwxrwx 34] systemd-journald-dev-log.socket -> ../systemd-journald-dev-log.socket +│ │ │ │ │ ├── [lrwxrwxrwx 26] systemd-journald.socket -> ../systemd-journald.socket +│ │ │ │ │ ├── [lrwxrwxrwx 27] systemd-pcrextend.socket -> ../systemd-pcrextend.socket +│ │ │ │ │ ├── [lrwxrwxrwx 24] systemd-sysext.socket -> ../systemd-sysext.socket +│ │ │ │ │ ├── [lrwxrwxrwx 31] systemd-udevd-control.socket -> ../systemd-udevd-control.socket +│ │ │ │ │ └── [lrwxrwxrwx 30] systemd-udevd-kernel.socket -> ../systemd-udevd-kernel.socket +│ │ │ │ ├── [-rw-r--r-- 586] soft-reboot.target +│ │ │ │ ├── [-rw-r--r-- 428] sound.target +│ │ │ │ ├── [-rw-r--r-- 943] storage-target-mode.target +│ │ │ │ ├── [-rw-r--r-- 585] suspend-then-hibernate.target +│ │ │ │ ├── [-rw-r--r-- 511] suspend.target +│ │ │ │ ├── [-rw-r--r-- 402] swap.target +│ │ │ │ ├── [-rw-r--r-- 1.1K] sys-fs-fuse-connections.mount +│ │ │ │ ├── [-rw-r--r-- 1.1K] sys-kernel-config.mount +│ │ │ │ ├── [-rw-r--r-- 730] sys-kernel-debug.mount +│ │ │ │ ├── [-rw-r--r-- 756] sys-kernel-tracing.mount +│ │ │ │ ├── [-rw-r--r-- 
574] sysinit.target +│ │ │ │ ├── [drwxr-xr-x 37] sysinit.target.wants +│ │ │ │ │ ├── [lrwxrwxrwx 20] cryptsetup.target -> ../cryptsetup.target +│ │ │ │ │ ├── [lrwxrwxrwx 22] dev-hugepages.mount -> ../dev-hugepages.mount +│ │ │ │ │ ├── [lrwxrwxrwx 19] dev-mqueue.mount -> ../dev-mqueue.mount +│ │ │ │ │ ├── [lrwxrwxrwx 24] integritysetup.target -> ../integritysetup.target +│ │ │ │ │ ├── [lrwxrwxrwx 19] ldconfig.service -> ../ldconfig.service +│ │ │ │ │ ├── [lrwxrwxrwx 36] proc-sys-fs-binfmt_misc.automount -> ../proc-sys-fs-binfmt_misc.automount +│ │ │ │ │ ├── [lrwxrwxrwx 32] sys-fs-fuse-connections.mount -> ../sys-fs-fuse-connections.mount +│ │ │ │ │ ├── [lrwxrwxrwx 26] sys-kernel-config.mount -> ../sys-kernel-config.mount +│ │ │ │ │ ├── [lrwxrwxrwx 25] sys-kernel-debug.mount -> ../sys-kernel-debug.mount +│ │ │ │ │ ├── [lrwxrwxrwx 27] sys-kernel-tracing.mount -> ../sys-kernel-tracing.mount +│ │ │ │ │ ├── [lrwxrwxrwx 36] systemd-ask-password-console.path -> ../systemd-ask-password-console.path +│ │ │ │ │ ├── [lrwxrwxrwx 25] systemd-binfmt.service -> ../systemd-binfmt.service +│ │ │ │ │ ├── [lrwxrwxrwx 35] systemd-boot-random-seed.service -> ../systemd-boot-random-seed.service +│ │ │ │ │ ├── [lrwxrwxrwx 30] systemd-hwdb-update.service -> ../systemd-hwdb-update.service +│ │ │ │ │ ├── [lrwxrwxrwx 41] systemd-journal-catalog-update.service -> ../systemd-journal-catalog-update.service +│ │ │ │ │ ├── [lrwxrwxrwx 32] systemd-journal-flush.service -> ../systemd-journal-flush.service +│ │ │ │ │ ├── [lrwxrwxrwx 27] systemd-journald.service -> ../systemd-journald.service +│ │ │ │ │ ├── [lrwxrwxrwx 36] systemd-machine-id-commit.service -> ../systemd-machine-id-commit.service +│ │ │ │ │ ├── [lrwxrwxrwx 29] systemd-pcrmachine.service -> ../systemd-pcrmachine.service +│ │ │ │ │ ├── [lrwxrwxrwx 35] systemd-pcrphase-sysinit.service -> ../systemd-pcrphase-sysinit.service +│ │ │ │ │ ├── [lrwxrwxrwx 27] systemd-pcrphase.service -> ../systemd-pcrphase.service +│ │ │ │ │ ├── [lrwxrwxrwx 30] systemd-random-seed.service -> ../systemd-random-seed.service +│ │ │ │ │ ├── [lrwxrwxrwx 25] systemd-repart.service -> ../systemd-repart.service +│ │ │ │ │ ├── [lrwxrwxrwx 25] systemd-sysctl.service -> ../systemd-sysctl.service +│ │ │ │ │ ├── [lrwxrwxrwx 27] systemd-sysusers.service -> ../systemd-sysusers.service +│ │ │ │ │ ├── [lrwxrwxrwx 43] systemd-tmpfiles-setup-dev-early.service -> ../systemd-tmpfiles-setup-dev-early.service +│ │ │ │ │ ├── [lrwxrwxrwx 37] systemd-tmpfiles-setup-dev.service -> ../systemd-tmpfiles-setup-dev.service +│ │ │ │ │ ├── [lrwxrwxrwx 33] systemd-tmpfiles-setup.service -> ../systemd-tmpfiles-setup.service +│ │ │ │ │ ├── [lrwxrwxrwx 35] systemd-tpm2-setup-early.service -> ../systemd-tpm2-setup-early.service +│ │ │ │ │ ├── [lrwxrwxrwx 29] systemd-tpm2-setup.service -> ../systemd-tpm2-setup.service +│ │ │ │ │ ├── [lrwxrwxrwx 31] systemd-udev-trigger.service -> ../systemd-udev-trigger.service +│ │ │ │ │ ├── [lrwxrwxrwx 24] systemd-udevd.service -> ../systemd-udevd.service +│ │ │ │ │ ├── [lrwxrwxrwx 30] systemd-update-done.service -> ../systemd-update-done.service +│ │ │ │ │ ├── [lrwxrwxrwx 30] systemd-update-utmp.service -> ../systemd-update-utmp.service +│ │ │ │ │ └── [lrwxrwxrwx 21] veritysetup.target -> ../veritysetup.target +│ │ │ │ ├── [-rw-r--r-- 1.4K] syslog.socket +│ │ │ │ ├── [-rw-r--r-- 468] system-systemd\x2dcryptsetup.slice +│ │ │ │ ├── [-rw-r--r-- 463] system-systemd\x2dveritysetup.slice +│ │ │ │ ├── [-rw-r--r-- 1.5K] system-update-cleanup.service +│ │ │ │ ├── [-rw-r--r-- 551] 
system-update-pre.target +│ │ │ │ ├── [-rw-r--r-- 625] system-update.target +│ │ │ │ ├── [-rw-r--r-- 771] systemd-ask-password-console.path +│ │ │ │ ├── [-rw-r--r-- 834] systemd-ask-password-console.service +│ │ │ │ ├── [-rw-r--r-- 695] systemd-ask-password-wall.path +│ │ │ │ ├── [-rw-r--r-- 747] systemd-ask-password-wall.service +│ │ │ │ ├── [-rw-r--r-- 777] systemd-backlight@.service +│ │ │ │ ├── [-rw-r--r-- 856] systemd-battery-check.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-binfmt.service +│ │ │ │ ├── [-rw-r--r-- 690] systemd-bless-boot.service +│ │ │ │ ├── [-rw-r--r-- 730] systemd-boot-check-no-failures.service +│ │ │ │ ├── [-rw-r--r-- 1.0K] systemd-boot-random-seed.service +│ │ │ │ ├── [-rw-r--r-- 733] systemd-boot-update.service +│ │ │ │ ├── [-rw-r--r-- 1.0K] systemd-confext.service +│ │ │ │ ├── [-rw-r--r-- 617] systemd-coredump.socket +│ │ │ │ ├── [-rw-r--r-- 1.1K] systemd-coredump@.service +│ │ │ │ ├── [-rw-r--r-- 564] systemd-exit.service +│ │ │ │ ├── [-rw-r--r-- 724] systemd-fsck-root.service +│ │ │ │ ├── [-rw-r--r-- 712] systemd-fsck@.service +│ │ │ │ ├── [-rw-r--r-- 667] systemd-growfs-root.service +│ │ │ │ ├── [-rw-r--r-- 664] systemd-growfs@.service +│ │ │ │ ├── [-rw-r--r-- 562] systemd-halt.service +│ │ │ │ ├── [-rw-r--r-- 666] systemd-hibernate-resume.service +│ │ │ │ ├── [-rw-r--r-- 555] systemd-hibernate.service +│ │ │ │ ├── [-rw-r--r-- 645] systemd-homed-activate.service +│ │ │ │ ├── [-rw-r--r-- 1.4K] systemd-homed.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-hostnamed.service +│ │ │ │ ├── [-rw-r--r-- 834] systemd-hwdb-update.service +│ │ │ │ ├── [-rw-r--r-- 576] systemd-hybrid-sleep.service +│ │ │ │ ├── [-rw-r--r-- 1.0K] systemd-importd.service +│ │ │ │ ├── [-rw-r--r-- 578] systemd-initctl.service +│ │ │ │ ├── [-rw-r--r-- 553] systemd-initctl.socket +│ │ │ │ ├── [-rw-r--r-- 750] systemd-journal-catalog-update.service +│ │ │ │ ├── [-rw-r--r-- 827] systemd-journal-flush.service +│ │ │ │ ├── [-rw-r--r-- 1.1K] systemd-journal-gatewayd.service +│ │ │ │ ├── [-rw-r--r-- 500] systemd-journal-gatewayd.socket +│ │ │ │ ├── [-rw-r--r-- 1.3K] systemd-journal-remote.service +│ │ │ │ ├── [-rw-r--r-- 450] systemd-journal-remote.socket +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-journal-upload.service +│ │ │ │ ├── [-rw-r--r-- 724] systemd-journald-audit.socket +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-journald-dev-log.socket +│ │ │ │ ├── [-rw-r--r-- 605] systemd-journald-varlink@.socket +│ │ │ │ ├── [-rw-r--r-- 2.4K] systemd-journald.service +│ │ │ │ ├── [-rw-r--r-- 934] systemd-journald.socket +│ │ │ │ ├── [-rw-r--r-- 1.6K] systemd-journald@.service +│ │ │ │ ├── [-rw-r--r-- 746] systemd-journald@.socket +│ │ │ │ ├── [-rw-r--r-- 569] systemd-kexec.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-localed.service +│ │ │ │ ├── [-rw-r--r-- 2.0K] systemd-logind.service +│ │ │ │ ├── [-rw-r--r-- 748] systemd-machine-id-commit.service +│ │ │ │ ├── [-rw-r--r-- 1.3K] systemd-machined.service +│ │ │ │ ├── [-rw-r--r-- 792] systemd-network-generator.service +│ │ │ │ ├── [-rw-r--r-- 785] systemd-networkd-wait-online.service +│ │ │ │ ├── [-rw-r--r-- 804] systemd-networkd-wait-online@.service +│ │ │ │ ├── [-rw-r--r-- 2.4K] systemd-networkd.service +│ │ │ │ ├── [-rw-r--r-- 682] systemd-networkd.socket +│ │ │ │ ├── [-rw-r--r-- 1.6K] systemd-nspawn@.service +│ │ │ │ ├── [-rw-r--r-- 1.7K] systemd-oomd.service +│ │ │ │ ├── [-rw-r--r-- 838] systemd-oomd.socket +│ │ │ │ ├── [-rw-r--r-- 649] systemd-pcrextend.socket +│ │ │ │ ├── [-rw-r--r-- 650] systemd-pcrextend@.service +│ │ │ │ ├── [-rw-r--r-- 738] 
systemd-pcrfs-root.service +│ │ │ │ ├── [-rw-r--r-- 762] systemd-pcrfs@.service +│ │ │ │ ├── [-rw-r--r-- 767] systemd-pcrlock-file-system.service +│ │ │ │ ├── [-rw-r--r-- 803] systemd-pcrlock-firmware-code.service +│ │ │ │ ├── [-rw-r--r-- 814] systemd-pcrlock-firmware-config.service +│ │ │ │ ├── [-rw-r--r-- 764] systemd-pcrlock-machine-id.service +│ │ │ │ ├── [-rw-r--r-- 758] systemd-pcrlock-make-policy.service +│ │ │ │ ├── [-rw-r--r-- 822] systemd-pcrlock-secureboot-authority.service +│ │ │ │ ├── [-rw-r--r-- 816] systemd-pcrlock-secureboot-policy.service +│ │ │ │ ├── [-rw-r--r-- 711] systemd-pcrmachine.service +│ │ │ │ ├── [-rw-r--r-- 892] systemd-pcrphase-initrd.service +│ │ │ │ ├── [-rw-r--r-- 794] systemd-pcrphase-sysinit.service +│ │ │ │ ├── [-rw-r--r-- 756] systemd-pcrphase.service +│ │ │ │ ├── [-rw-r--r-- 1.0K] systemd-portabled.service +│ │ │ │ ├── [-rw-r--r-- 575] systemd-poweroff.service +│ │ │ │ ├── [-rw-r--r-- 815] systemd-pstore.service +│ │ │ │ ├── [-rw-r--r-- 683] systemd-quotacheck.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-random-seed.service +│ │ │ │ ├── [-rw-r--r-- 568] systemd-reboot.service +│ │ │ │ ├── [-rw-r--r-- 787] systemd-remount-fs.service +│ │ │ │ ├── [-rw-r--r-- 1.3K] systemd-repart.service +│ │ │ │ ├── [-rw-r--r-- 1.8K] systemd-resolved.service +│ │ │ │ ├── [-rw-r--r-- 771] systemd-rfkill.service +│ │ │ │ ├── [-rw-r--r-- 776] systemd-rfkill.socket +│ │ │ │ ├── [-rw-r--r-- 588] systemd-soft-reboot.service +│ │ │ │ ├── [-rw-r--r-- 920] systemd-storagetm.service +│ │ │ │ ├── [-rw-r--r-- 623] systemd-suspend-then-hibernate.service +│ │ │ │ ├── [-rw-r--r-- 556] systemd-suspend.service +│ │ │ │ ├── [-rw-r--r-- 731] systemd-sysctl.service +│ │ │ │ ├── [-rw-r--r-- 1.0K] systemd-sysext.service +│ │ │ │ ├── [-rw-r--r-- 683] systemd-sysext.socket +│ │ │ │ ├── [-rw-r--r-- 657] systemd-sysext@.service +│ │ │ │ ├── [-rw-r--r-- 1.3K] systemd-sysusers.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-time-wait-sync.service +│ │ │ │ ├── [-rw-r--r-- 1.1K] systemd-timedated.service +│ │ │ │ ├── [-rw-r--r-- 1.7K] systemd-timesyncd.service +│ │ │ │ ├── [-rw-r--r-- 747] systemd-tmpfiles-clean.service +│ │ │ │ ├── [-rw-r--r-- 539] systemd-tmpfiles-clean.timer +│ │ │ │ ├── [-rw-r--r-- 852] systemd-tmpfiles-setup-dev-early.service +│ │ │ │ ├── [-rw-r--r-- 877] systemd-tmpfiles-setup-dev.service +│ │ │ │ ├── [-rw-r--r-- 1005] systemd-tmpfiles-setup.service +│ │ │ │ ├── [-rw-r--r-- 708] systemd-tpm2-setup-early.service +│ │ │ │ ├── [-rw-r--r-- 796] systemd-tpm2-setup.service +│ │ │ │ ├── [-rw-r--r-- 863] systemd-udev-settle.service +│ │ │ │ ├── [-rw-r--r-- 758] systemd-udev-trigger.service +│ │ │ │ ├── [-rw-r--r-- 650] systemd-udevd-control.socket +│ │ │ │ ├── [-rw-r--r-- 624] systemd-udevd-kernel.socket +│ │ │ │ ├── [-rw-r--r-- 1.3K] systemd-udevd.service +│ │ │ │ ├── [-rw-r--r-- 682] systemd-update-done.service +│ │ │ │ ├── [-rw-r--r-- 849] systemd-update-utmp-runlevel.service +│ │ │ │ ├── [-rw-r--r-- 856] systemd-update-utmp.service +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-userdbd.service +│ │ │ │ ├── [-rw-r--r-- 691] systemd-userdbd.socket +│ │ │ │ ├── [-rw-r--r-- 1.2K] systemd-vconsole-setup.service +│ │ │ │ ├── [-rw-r--r-- 743] systemd-volatile-root.service +│ │ │ │ ├── [-rw-r--r-- 434] time-set.target +│ │ │ │ ├── [-rw-r--r-- 487] time-sync.target +│ │ │ │ ├── [-rw-r--r-- 458] timers.target +│ │ │ │ ├── [drwxr-xr-x 3] timers.target.wants +│ │ │ │ │ └── [lrwxrwxrwx 31] systemd-tmpfiles-clean.timer -> ../systemd-tmpfiles-clean.timer +│ │ │ │ ├── [-rw-r--r-- 798] tmp.mount +│ │ │ │ 
├── [-rw-r--r-- 465] umount.target +│ │ │ │ ├── [-rw-r--r-- 426] usb-gadget.target +│ │ │ │ ├── [drwxr-xr-x 3] user-.slice.d +│ │ │ │ │ └── [-rw-r--r-- 458] 10-defaults.conf +│ │ │ │ ├── [-rw-r--r-- 674] user-runtime-dir@.service +│ │ │ │ ├── [-rw-r--r-- 440] user.slice +│ │ │ │ ├── [-rw-r--r-- 833] user@.service +│ │ │ │ ├── [drwxr-xr-x 3] user@.service.d +│ │ │ │ │ └── [-rw-r--r-- 605] 10-login-barrier.conf +│ │ │ │ ├── [drwxr-xr-x 3] user@0.service.d +│ │ │ │ │ └── [-rw-r--r-- 548] 10-login-barrier.conf +│ │ │ │ ├── [-rw-r--r-- 807] var-lib-machines.mount +│ │ │ │ ├── [-rw-r--r-- 481] veritysetup-pre.target +│ │ │ │ └── [-rw-r--r-- 427] veritysetup.target +│ │ │ ├── [drwxr-xr-x 15] system-generators +│ │ │ │ ├── [-rwxr-xr-x 75K] systemd-bless-boot-generator +│ │ │ │ ├── [-rwxr-xr-x 35K] systemd-cryptsetup-generator +│ │ │ │ ├── [-rwxr-xr-x 15K] systemd-debug-generator +│ │ │ │ ├── [-rwxr-xr-x 51K] systemd-fstab-generator +│ │ │ │ ├── [-rwxr-xr-x 23K] systemd-getty-generator +│ │ │ │ ├── [-rwxr-xr-x 35K] systemd-gpt-auto-generator +│ │ │ │ ├── [-rwxr-xr-x 27K] systemd-hibernate-resume-generator +│ │ │ │ ├── [-rwxr-xr-x 23K] systemd-integritysetup-generator +│ │ │ │ ├── [-rwxr-xr-x 15K] systemd-rc-local-generator +│ │ │ │ ├── [-rwxr-xr-x 15K] systemd-run-generator +│ │ │ │ ├── [-rwxr-xr-x 15K] systemd-system-update-generator +│ │ │ │ ├── [-rwxr-xr-x 31K] systemd-sysv-generator +│ │ │ │ └── [-rwxr-xr-x 31K] systemd-veritysetup-generator +│ │ │ ├── [drwxr-xr-x 3] system-preset +│ │ │ │ └── [-rw-r--r-- 1.4K] 90-systemd.preset +│ │ │ ├── [drwxr-xr-x 2] system-shutdown +│ │ │ ├── [drwxr-xr-x 2] system-sleep +│ │ │ ├── [-rwxr-xr-x 95K] systemd +│ │ │ ├── [-rwxr-xr-x 35K] systemd-backlight +│ │ │ ├── [-rwxr-xr-x 19K] systemd-battery-check +│ │ │ ├── [-rwxr-xr-x 23K] systemd-binfmt +│ │ │ ├── [-rwxr-xr-x 167K] systemd-bless-boot +│ │ │ ├── [-rwxr-xr-x 15K] systemd-boot-check-no-failures +│ │ │ ├── [-rwxr-xr-x 15K] systemd-cgroups-agent +│ │ │ ├── [-rwxr-xr-x 83K] systemd-coredump +│ │ │ ├── [lrwxrwxrwx 28] systemd-cryptsetup -> ../../bin/systemd-cryptsetup +│ │ │ ├── [-rwxr-xr-x 127K] systemd-executor +│ │ │ ├── [-rwxr-xr-x 39K] systemd-export +│ │ │ ├── [-rwxr-xr-x 27K] systemd-fsck +│ │ │ ├── [-rwxr-xr-x 23K] systemd-growfs +│ │ │ ├── [-rwxr-xr-x 27K] systemd-hibernate-resume +│ │ │ ├── [-rwxr-xr-x 207K] systemd-homed +│ │ │ ├── [-rwxr-xr-x 199K] systemd-homework +│ │ │ ├── [-rwxr-xr-x 47K] systemd-hostnamed +│ │ │ ├── [-rwxr-xr-x 51K] systemd-import +│ │ │ ├── [-rwxr-xr-x 31K] systemd-import-fs +│ │ │ ├── [-rwxr-xr-x 43K] systemd-importd +│ │ │ ├── [-rwxr-xr-x 23K] systemd-initctl +│ │ │ ├── [-rwxr-xr-x 23K] systemd-integritysetup +│ │ │ ├── [-rwxr-xr-x 43K] systemd-journal-gatewayd +│ │ │ ├── [-rwxr-xr-x 63K] systemd-journal-remote +│ │ │ ├── [-rwxr-xr-x 43K] systemd-journal-upload +│ │ │ ├── [-rwxr-xr-x 189K] systemd-journald +│ │ │ ├── [-rwxr-xr-x 47K] systemd-localed +│ │ │ ├── [-rwxr-xr-x 279K] systemd-logind +│ │ │ ├── [-rwxr-xr-x 127K] systemd-machined +│ │ │ ├── [-rwxr-xr-x 15K] systemd-makefs +│ │ │ ├── [-rwxr-xr-x 47K] systemd-measure +│ │ │ ├── [-rwxr-xr-x 135K] systemd-network-generator +│ │ │ ├── [-rwxr-xr-x 2.5M] systemd-networkd +│ │ │ ├── [-rwxr-xr-x 239K] systemd-networkd-wait-online +│ │ │ ├── [-rwxr-xr-x 59K] systemd-oomd +│ │ │ ├── [-rwxr-xr-x 27K] systemd-pcrextend +│ │ │ ├── [-rwxr-xr-x 135K] systemd-pcrlock +│ │ │ ├── [-rwxr-xr-x 762K] systemd-portabled +│ │ │ ├── [-rwxr-xr-x 23K] systemd-pstore +│ │ │ ├── [-rwxr-xr-x 103K] systemd-pull +│ │ │ ├── [-rwxr-xr-x 15K] 
systemd-quotacheck +│ │ │ ├── [-rwxr-xr-x 27K] systemd-random-seed +│ │ │ ├── [-rwxr-xr-x 19K] systemd-remount-fs +│ │ │ ├── [-rwxr-xr-x 15K] systemd-reply-password +│ │ │ ├── [-rwxr-xr-x 519K] systemd-resolved +│ │ │ ├── [-rwxr-xr-x 23K] systemd-rfkill +│ │ │ ├── [-rwxr-xr-x 55K] systemd-shutdown +│ │ │ ├── [-rwxr-xr-x 327K] systemd-shutdown.standalone +│ │ │ ├── [-rwxr-xr-x 47K] systemd-sleep +│ │ │ ├── [-rwxr-xr-x 31K] systemd-socket-proxyd +│ │ │ ├── [-rwxr-xr-x 51K] systemd-storagetm +│ │ │ ├── [-rwxr-xr-x 19K] systemd-sulogin-shell +│ │ │ ├── [-rwxr-xr-x 23K] systemd-sysctl +│ │ │ ├── [lrwxrwxrwx 41] systemd-sysroot-fstab-check -> system-generators/systemd-fstab-generator +│ │ │ ├── [-rwxr-xr-x 91K] systemd-time-wait-sync +│ │ │ ├── [-rwxr-xr-x 43K] systemd-timedated +│ │ │ ├── [-rwxr-xr-x 506K] systemd-timesyncd +│ │ │ ├── [-rwxr-xr-x 27K] systemd-tpm2-setup +│ │ │ ├── [lrwxrwxrwx 17] systemd-udevd -> ../../bin/udevadm +│ │ │ ├── [-rwxr-xr-x 15K] systemd-update-done +│ │ │ ├── [-rwxr-xr-x 3.8K] systemd-update-helper +│ │ │ ├── [-rwxr-xr-x 19K] systemd-update-utmp +│ │ │ ├── [-rwxr-xr-x 23K] systemd-user-runtime-dir +│ │ │ ├── [-rwxr-xr-x 27K] systemd-userdbd +│ │ │ ├── [-rwxr-xr-x 31K] systemd-userwork +│ │ │ ├── [-rwxr-xr-x 27K] systemd-vconsole-setup +│ │ │ ├── [-rwxr-xr-x 27K] systemd-veritysetup +│ │ │ ├── [-rwxr-xr-x 23K] systemd-volatile-root +│ │ │ ├── [-rwxr-xr-x 15K] systemd-xdg-autostart-condition +│ │ │ ├── [lrwxrwxrwx 15] ukify -> ../../bin/ukify +│ │ │ ├── [drwxr-xr-x 23] user +│ │ │ │ ├── [-rw-r--r-- 442] app.slice +│ │ │ │ ├── [-rw-r--r-- 446] background.slice +│ │ │ │ ├── [-rw-r--r-- 505] basic.target +│ │ │ │ ├── [-rw-r--r-- 427] bluetooth.target +│ │ │ │ ├── [-rw-r--r-- 471] default.target +│ │ │ │ ├── [-rw-r--r-- 510] exit.target +│ │ │ │ ├── [-rw-r--r-- 576] graphical-session-pre.target +│ │ │ │ ├── [-rw-r--r-- 492] graphical-session.target +│ │ │ │ ├── [-rw-r--r-- 402] paths.target +│ │ │ │ ├── [-rw-r--r-- 425] printer.target +│ │ │ │ ├── [-rw-r--r-- 443] session.slice +│ │ │ │ ├── [-rw-r--r-- 450] shutdown.target +│ │ │ │ ├── [-rw-r--r-- 428] smartcard.target +│ │ │ │ ├── [-rw-r--r-- 404] sockets.target +│ │ │ │ ├── [-rw-r--r-- 428] sound.target +│ │ │ │ ├── [-rw-r--r-- 598] systemd-exit.service +│ │ │ │ ├── [-rw-r--r-- 688] systemd-tmpfiles-clean.service +│ │ │ │ ├── [-rw-r--r-- 541] systemd-tmpfiles-clean.timer +│ │ │ │ ├── [-rw-r--r-- 728] systemd-tmpfiles-setup.service +│ │ │ │ ├── [-rw-r--r-- 453] timers.target +│ │ │ │ └── [-rw-r--r-- 477] xdg-desktop-autostart.target +│ │ │ ├── [drwxr-xr-x 3] user-environment-generators +│ │ │ │ └── [-rwxr-xr-x 15K] 30-systemd-environment-d-generator +│ │ │ ├── [drwxr-xr-x 3] user-generators +│ │ │ │ └── [-rwxr-xr-x 31K] systemd-xdg-autostart-generator +│ │ │ └── [drwxr-xr-x 3] user-preset +│ │ │ └── [-rw-r--r-- 580] 90-systemd.preset +│ │ ├── [drwxr-xr-x 11] sysusers.d +│ │ │ ├── [-rw-r--r-- 359] README +│ │ │ ├── [-rw-r--r-- 1.3K] basic.conf +│ │ │ ├── [-rw-r--r-- 335] systemd-coredump.conf +│ │ │ ├── [-rw-r--r-- 314] systemd-journal.conf +│ │ │ ├── [-rw-r--r-- 341] systemd-network.conf +│ │ │ ├── [-rw-r--r-- 339] systemd-oom.conf +│ │ │ ├── [-rw-r--r-- 398] systemd-remote.conf +│ │ │ ├── [-rw-r--r-- 331] systemd-resolve.conf +│ │ │ └── [-rw-r--r-- 344] systemd-timesync.conf +│ │ ├── [drwxr-xr-x 20] tmpfiles.d +│ │ │ ├── [-rw-r--r-- 400] README +│ │ │ ├── [-rw-r--r-- 473] credstore.conf +│ │ │ ├── [-rw-r--r-- 524] etc.conf +│ │ │ ├── [-rw-r--r-- 362] home.conf +│ │ │ ├── [-rw-r--r-- 1.1K] journal-nocow.conf +│ │ │ ├── 
[-rw-r--r-- 841] legacy.conf +│ │ │ ├── [-rw-r--r-- 104] portables.conf +│ │ │ ├── [-rw-r--r-- 851] provision.conf +│ │ │ ├── [-rw-r--r-- 798] static-nodes-permissions.conf +│ │ │ ├── [-rw-r--r-- 583] systemd-network.conf +│ │ │ ├── [-rw-r--r-- 976] systemd-nspawn.conf +│ │ │ ├── [-rw-r--r-- 1.5K] systemd-pstore.conf +│ │ │ ├── [-rw-r--r-- 393] systemd-resolve.conf +│ │ │ ├── [-rw-r--r-- 823] systemd-tmp.conf +│ │ │ ├── [-rw-r--r-- 1.7K] systemd.conf +│ │ │ ├── [-rw-r--r-- 449] tmp.conf +│ │ │ ├── [-rw-r--r-- 568] var.conf +│ │ │ └── [-rw-r--r-- 617] x11.conf +│ │ └── [drwxr-xr-x 12] udev +│ │ ├── [-rwxr-xr-x 79K] ata_id +│ │ ├── [-rwxr-xr-x 91K] cdrom_id +│ │ ├── [-rwxr-xr-x 87K] dmi_memory_id +│ │ ├── [-rwxr-xr-x 135K] fido_id +│ │ ├── [drwxr-xr-x 33] hwdb.d +│ │ │ ├── [-rw-r--r-- 2.5M] 20-OUI.hwdb +│ │ │ ├── [-rw-r--r-- 151K] 20-acpi-vendor.hwdb +│ │ │ ├── [-rw-r--r-- 137K] 20-bluetooth-vendor-product.hwdb +│ │ │ ├── [-rw-r--r-- 832] 20-dmi-id.hwdb +│ │ │ ├── [-rw-r--r-- 111] 20-net-ifname.hwdb +│ │ │ ├── [-rw-r--r-- 16K] 20-pci-classes.hwdb +│ │ │ ├── [-rw-r--r-- 3.5M] 20-pci-vendor-model.hwdb +│ │ │ ├── [-rw-r--r-- 783] 20-sdio-classes.hwdb +│ │ │ ├── [-rw-r--r-- 4.1K] 20-sdio-vendor-model.hwdb +│ │ │ ├── [-rw-r--r-- 8.8K] 20-usb-classes.hwdb +│ │ │ ├── [-rw-r--r-- 1.4M] 20-usb-vendor-model.hwdb +│ │ │ ├── [-rw-r--r-- 1.8K] 20-vmbus-class.hwdb +│ │ │ ├── [-rw-r--r-- 2.7K] 60-autosuspend-chromiumos.hwdb +│ │ │ ├── [-rw-r--r-- 6.7K] 60-autosuspend-fingerprint-reader.hwdb +│ │ │ ├── [-rw-r--r-- 2.6K] 60-autosuspend.hwdb +│ │ │ ├── [-rw-r--r-- 25K] 60-evdev.hwdb +│ │ │ ├── [-rw-r--r-- 2.5K] 60-input-id.hwdb +│ │ │ ├── [-rw-r--r-- 97K] 60-keyboard.hwdb +│ │ │ ├── [-rw-r--r-- 1.1K] 60-seat.hwdb +│ │ │ ├── [-rw-r--r-- 45K] 60-sensor.hwdb +│ │ │ ├── [-rw-r--r-- 1.2K] 70-analyzers.hwdb +│ │ │ ├── [-rw-r--r-- 2.9K] 70-av-production.hwdb +│ │ │ ├── [-rw-r--r-- 679] 70-cameras.hwdb +│ │ │ ├── [-rw-r--r-- 1.5K] 70-joystick.hwdb +│ │ │ ├── [-rw-r--r-- 25K] 70-mouse.hwdb +│ │ │ ├── [-rw-r--r-- 926] 70-pda.hwdb +│ │ │ ├── [-rw-r--r-- 6.1K] 70-pointingstick.hwdb +│ │ │ ├── [-rw-r--r-- 700] 70-sound-card.hwdb +│ │ │ ├── [-rw-r--r-- 2.0K] 70-touchpad.hwdb +│ │ │ ├── [-rw-r--r-- 49K] 80-ieee1394-unit-function.hwdb +│ │ │ └── [-rw-r--r-- 518] README +│ │ ├── [-rwxr-xr-x 151K] iocost +│ │ ├── [-rwxr-xr-x 35K] mtd_probe +│ │ ├── [drwxr-xr-x 38] rules.d +│ │ │ ├── [-rw-r--r-- 5.2K] 50-udev-default.rules +│ │ │ ├── [-rw-r--r-- 704] 60-autosuspend.rules +│ │ │ ├── [-rw-r--r-- 703] 60-block.rules +│ │ │ ├── [-rw-r--r-- 1.0K] 60-cdrom_id.rules +│ │ │ ├── [-rw-r--r-- 637] 60-dmi-id.rules +│ │ │ ├── [-rw-r--r-- 834] 60-drm.rules +│ │ │ ├── [-rw-r--r-- 1.1K] 60-evdev.rules +│ │ │ ├── [-rw-r--r-- 491] 60-fido-id.rules +│ │ │ ├── [-rw-r--r-- 379] 60-infiniband.rules +│ │ │ ├── [-rw-r--r-- 282] 60-input-id.rules +│ │ │ ├── [-rw-r--r-- 727] 60-persistent-alsa.rules +│ │ │ ├── [-rw-r--r-- 3.2K] 60-persistent-input.rules +│ │ │ ├── [-rw-r--r-- 411] 60-persistent-storage-mtd.rules +│ │ │ ├── [-rw-r--r-- 2.5K] 60-persistent-storage-tape.rules +│ │ │ ├── [-rw-r--r-- 9.2K] 60-persistent-storage.rules +│ │ │ ├── [-rw-r--r-- 1.1K] 60-persistent-v4l.rules +│ │ │ ├── [-rw-r--r-- 1.6K] 60-sensor.rules +│ │ │ ├── [-rw-r--r-- 1.4K] 60-serial.rules +│ │ │ ├── [-rw-r--r-- 616] 64-btrfs.rules +│ │ │ ├── [-rw-r--r-- 280] 70-camera.rules +│ │ │ ├── [-rw-r--r-- 432] 70-joystick.rules +│ │ │ ├── [-rw-r--r-- 184] 70-memory.rules +│ │ │ ├── [-rw-r--r-- 734] 70-mouse.rules +│ │ │ ├── [-rw-r--r-- 576] 70-power-switch.rules +│ │ │ ├── 
[-rw-r--r-- 473] 70-touchpad.rules +│ │ │ ├── [-rw-r--r-- 3.7K] 71-seat.rules +│ │ │ ├── [-rw-r--r-- 587] 73-seat-late.rules +│ │ │ ├── [-rw-r--r-- 452] 75-net-description.rules +│ │ │ ├── [-rw-r--r-- 174] 75-probe_mtd.rules +│ │ │ ├── [-rw-r--r-- 4.7K] 78-sound-card.rules +│ │ │ ├── [-rw-r--r-- 295] 80-net-setup-link.rules +│ │ │ ├── [-rw-r--r-- 528] 81-net-dhcp.rules +│ │ │ ├── [-rw-r--r-- 769] 90-iocost.rules +│ │ │ ├── [-rw-r--r-- 518] 90-vconsole.rules +│ │ │ ├── [-rw-r--r-- 5.0K] 99-systemd.rules +│ │ │ └── [-rw-r--r-- 435] README +│ │ ├── [-rwxr-xr-x 92K] scsi_id +│ │ └── [-rwxr-xr-x 35K] v4l_id +│ ├── [drwxr-xr-x 15] lib64 +│ │ ├── [drwxr-xr-x 5] cryptsetup +│ │ │ ├── [-rwxr-xr-x 18K] libcryptsetup-token-systemd-fido2.so +│ │ │ ├── [-rwxr-xr-x 18K] libcryptsetup-token-systemd-pkcs11.so +│ │ │ └── [-rwxr-xr-x 22K] libcryptsetup-token-systemd-tpm2.so +│ │ ├── [-rwxr-xr-x 159K] libnss_myhostname.so.2 +│ │ ├── [-rwxr-xr-x 341K] libnss_mymachines.so.2 +│ │ ├── [-rwxr-xr-x 167K] libnss_resolve.so.2 +│ │ ├── [-rwxr-xr-x 367K] libnss_systemd.so.2 +│ │ ├── [lrwxrwxrwx 15] libsystemd.so -> libsystemd.so.0 +│ │ ├── [lrwxrwxrwx 20] libsystemd.so.0 -> libsystemd.so.0.38.0 +│ │ ├── [-rwxr-xr-x 902K] libsystemd.so.0.38.0 +│ │ ├── [lrwxrwxrwx 12] libudev.so -> libudev.so.1 +│ │ ├── [lrwxrwxrwx 16] libudev.so.1 -> libudev.so.1.7.8 +│ │ ├── [-rwxr-xr-x 199K] libudev.so.1.7.8 +│ │ ├── [drwxr-xr-x 4] pkgconfig +│ │ │ ├── [-rw-r--r-- 545] libsystemd.pc +│ │ │ └── [-rw-r--r-- 571] libudev.pc +│ │ └── [drwxr-xr-x 4] systemd +│ │ ├── [-rwxr-xr-x 2.1M] libsystemd-core-255.so +│ │ └── [-rwxr-xr-x 3.7M] libsystemd-shared-255.so +│ └── [drwxr-xr-x 11] sbin +│ ├── [lrwxrwxrwx 16] halt -> ../bin/systemctl +│ ├── [lrwxrwxrwx 22] init -> ../lib/systemd/systemd +│ ├── [lrwxrwxrwx 22] mount.ddi -> ../bin/systemd-dissect +│ ├── [lrwxrwxrwx 16] poweroff -> ../bin/systemctl +│ ├── [lrwxrwxrwx 16] reboot -> ../bin/systemctl +│ ├── [lrwxrwxrwx 17] resolvconf -> ../bin/resolvectl +│ ├── [lrwxrwxrwx 16] runlevel -> ../bin/systemctl +│ ├── [lrwxrwxrwx 16] shutdown -> ../bin/systemctl +│ └── [lrwxrwxrwx 16] telinit -> ../bin/systemctl +└── [drwxr-xr-x 3] var + └── [drwxr-xr-x 3] lib + └── [drwxr-xr-x 2] systemd + +101 directories, 692 files +``` diff --git a/images/packages/systemd/werf.inc.yaml b/images/packages/systemd/werf.inc.yaml new file mode 100644 index 0000000000..cfb32d880b --- /dev/null +++ b/images/packages/systemd/werf.inc.yaml @@ -0,0 +1,212 @@ +--- +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }} +final: false +fromImage: builder/scratch +import: +- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder + add: /out + to: /{{ .ImageName }} + before: setup + includePaths: + - usr/lib64 + - usr/include + +--- +{{- $version := get .PackageVersion .ImageName }} +{{- $gitRepoUrl := "systemd/systemd.git" }} + +image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact +final: false +fromImage: builder/src +secrets: +- id: SOURCE_REPO + value: {{ $.SOURCE_REPO_GIT }} +shell: + install: + - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src +--- + +{{- $name := print $.ImageName "-dependencies" -}} +{{- define "$name" -}} +altPackages: +- gcc gcc-c++ +- make autoconf automake libtool pkgconfig +- meson ninja-build +- gettext-devel +- git +- gperf libpasswdqc-devel libseccomp-devel libmicrohttpd-devel +- libcryptsetup-devel libiptables-devel libgpg-error-devel +- libfido2-devel libtpm2-tss-devel libdw-devel 
python3-module-jinja2
+- python3-module-elftools
+- tree
+packages:
+- xz lz4 zlib zstd bzip2 openssl p11-kit libcurl libidn2 gnutls selinux libgcrypt
+- libcap util-linux swtpm libunistring libssh2
+{{- end -}}
+
+{{ $builderDependencies := include "$name" . | fromYaml }}
+
+image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-builder
+final: false
+fromImage: builder/alt
+import:
+- image: {{ .ModuleNamePrefix }}{{ .PackagePath }}/{{ .ImageName }}-src-artifact
+  add: /src
+  to: /src
+  before: install
+{{- include "importPackageImages" (list . $builderDependencies.packages "install") -}}
+shell:
+  beforeInstall:
+  {{- include "alt packages proxy" . | nindent 2 }}
+  - |
+    apt-get install -y \
+    {{ $builderDependencies.altPackages | join " " }}
+
+  {{- include "alt packages clean" . | nindent 2 }}
+
+  install:
+  - |
+    # Install packages
+    PKGS="{{ $builderDependencies.packages | join " " }}"
+    for pkg in $PKGS; do
+      cp -a /$pkg/. /
+      rm -rf /$pkg
+    done
+
+    OUTDIR=/out
+
+    cd /src
+
+    /usr/bin/meson setup _build . \
+      -Doptimization=2 \
+      --prefix=/usr \
+      --bindir=/usr/bin \
+      --libdir=/usr/lib64 \
+      --wrap-mode=nodownload \
+      -Dmode=release \
+      -Dlink-udev-shared=false \
+      -Dlink-systemctl-shared=false \
+      -Dlink-networkd-shared=false \
+      -Dlink-timesyncd-shared=false \
+      -Dlink-journalctl-shared=false \
+      -Dlink-boot-shared=false \
+      -Dlink-portabled-shared=false \
+      -Dstandalone-binaries=true \
+      -Dxinitrcdir=/etc/X11/xinit.d \
+      -Dpamlibdir=/lib64/security \
+      -Dsplit-bin=true \
+      -Dsysvinit-path=/etc/rc.d/init.d \
+      -Dsysvrcnd-path=/etc/rc.d \
+      -Drc-local=/etc/rc.d/rc.local \
+      -Dinstall-sysconfdir=true \
+      -Dkernel-install=true \
+      -Dpamconfdir=/etc/pam.d \
+      -Ddebug-shell=/bin/bash \
+      -Dquotaon-path=/sbin/quotaon \
+      -Dquotacheck-path=/sbin/quotacheck \
+      -Dkmod-path=/bin/kmod \
+      -Dkexec-path=/sbin/kexec \
+      -Dsulogin-path=/sbin/sulogin \
+      -Dmount-path=/bin/mount \
+      -Dumount-path=/bin/umount \
+      -Dloadkeys-path=/bin/loadkeys \
+      -Dsetfont-path=/bin/setfont \
+      -Dtelinit-path=/sbin/telinit \
+      -Dnologin-path=/sbin/nologin \
+      -Dcompat-mutable-uid-boundaries=true \
+      -Dadm-gid=4 \
+      -Daudio-gid=81 \
+      -Dcdrom-gid=22 \
+      -Ddisk-gid=6 \
+      -Dkmem-gid=9 \
+      -Dlp-gid=7 \
+      -Dtty-gid=5 \
+      -Dusers-gid=100 \
+      -Dutmp-gid=72 \
+      -Dwheel-gid=10 \
+      -Dnobody-user=nobody \
+      -Dnobody-group=nobody \
+      -Dbump-proc-sys-fs-file-max=false \
+      -Dbump-proc-sys-fs-nr-open=false \
+      -Delfutils=enabled \
+      -Dpasswdqc=enabled \
+      -Dxz=enabled \
+      -Dzlib=enabled \
+      -Dbzip2=enabled \
+      -Dlz4=enabled \
+      -Dzstd=enabled \
+      -Dlibcryptsetup=enabled \
+      -Dtpm2=enabled \
+      -Dlogind=true \
+      -Dvconsole=true \
+      -Dinitrd=true \
+      -Dquotacheck=true \
+      -Drandomseed=true \
+      -Dcoredump=true \
+      -Dpstore=true \
+      -Dgcrypt=enabled \
+      -Dqrencode=disabled \
+      -Dmicrohttpd=enabled \
+      -Dgnutls=enabled \
+      -Dopenssl=enabled \
+      -Dp11kit=enabled \
+      -Dlibfido2=enabled \
+      -Dlibcurl=enabled \
+      -Dlibidn2=enabled \
+      -Dlibiptc=enabled \
+      -Dpolkit=enabled \
+      -Defi=true \
+      -Dvmspawn=enabled \
+      -Dsbat-distro=DVP \
+      -Dsbat-distro-generation=1 \
+      -Dsbat-distro-summary=DVP \
+      -Dsbat-distro-pkgname=systemd-boot-efi \
+      -Dsbat-distro-version={{ (split "v" $version)._1 }} \
+      -Dsbat-distro-url=https://github.com/systemd/systemd.git \
+      -Dhomed=enabled \
+      -Dnetworkd=true \
+      -Dresolve=true \
+      -Ddns-servers= \
+      -Dtimesyncd=true \
+      -Dntp-servers= \
+      -Dsysusers=true \
+      -Dbootloader=enabled \
+      -Dukify=enabled \
+      -Dfirstboot=false \
+      -Dseccomp=enabled \
+      -Dima=true \
+      -Dselinux=enabled \
+      -Dutmp=true \
+      -Dfirst-boot-full-preset=true \
+      -Ddefault-timeout-sec=45 \
+      -Ddefault-user-timeout-sec=45 \
+      -Doomd=true \
+      -Dsysupdate=disabled \
+      -Dstatus-unit-format-default=combined \
+      -Dfallback-hostname=localhost \
+      -Ddefault-dnssec=no \
+      -Ddefault-mdns=no \
+      -Ddefault-llmnr=yes \
+      -Ddefault-hierarchy=unified \
+      -Db_lto=true \
+      -Db_pie=true \
+      -Db_ndebug=false \
+      -Dman=disabled \
+      -Dcreate-log-dirs=false \
+      -Durlify=false \
+      -Dinstall-tests=false \
+      -Dversion-tag={{ (split "v" $version)._1 }} \
+      -Dcertificate-root=/etc/pki/tls \
+      -Ddocdir=/usr/share/doc/systemd-{{ (split "v" $version)._1 }}
+
+    DESTDIR=${OUTDIR} meson install -C _build
+
+    rm -rf $OUTDIR/usr/share
+
+    find $OUTDIR -type f -executable | while read -r execfile; do
+      if strip "$execfile"; then
+        echo "Stripped: $execfile"
+      fi
+    done
+
+    tree -hp /out
diff --git a/images/packages/ubdsrv/werf.inc.yaml b/images/packages/ubdsrv/werf.inc.yaml
index 0a57261a4d..fd83759e9c 100644
--- a/images/packages/ubdsrv/werf.inc.yaml
+++ b/images/packages/ubdsrv/werf.inc.yaml
@@ -42,9 +42,9 @@ shell:
 altPackages:
 - gcc gcc-c++
 - make autoconf automake libtool pkgconfig
-- liburing-devel
 packages:
 - gnutls
+- liburing
 {{- end -}}

 {{ $builderDependencies := include "$name" . | fromYaml }}
@@ -85,12 +85,12 @@ shell:
       --exec-prefix=/usr \
       --libdir=/usr/lib64 \
       --disable-static
-
+
     make -j$(nproc)
     make DESTDIR=$OUTDIR install

     rm -rf $OUTDIR/usr/share
-
+
     find $OUTDIR -type f -executable | while read -r execfile; do
       if strip "$execfile"; then
         echo "Stripped: $execfile"
diff --git a/images/qemu/werf.inc.yaml b/images/qemu/werf.inc.yaml
index 87a9266ac3..241c7aa185 100644
--- a/images/qemu/werf.inc.yaml
+++ b/images/qemu/werf.inc.yaml
@@ -32,9 +32,10 @@ shell:
 {{- include "alt packages clean" . | nindent 2}}
   install:
   - |
-    git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /{{ $gitRepoName }}-{{ $version }}
+    mkdir -p ~/.ssh && echo "StrictHostKeyChecking accept-new" > ~/.ssh/config
+    git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $version }} /src/{{ $gitRepoName }}-{{ $version }}

-    cd /{{ $gitRepoName }}-{{ $version }}
+    cd /src/{{ $gitRepoName }}-{{ $version }}

     if [[ "$(cat /run/secrets/SOURCE_REPO)" =~ "github.com" ]] ; then
       echo "Checkout submodules"
@@ -105,20 +106,19 @@ altLibraries:
 - libalsa-devel libpulseaudio-devel
 - pipewire-libs pipewire-jack-libs-devel
 - libsoundio-devel
-- libjpeg-devel libxkbcommon-devel xkeyboard-config-devel
+- libjpeg-devel xkeyboard-config-devel
 - glusterfs11 libgtk+3-devel libvte libvte-devel libvte3-devel
-- libvirglrenderer-devel libusb-devel liburing-devel libbpf-devel
+- libvirglrenderer-devel libusb-devel libbpf-devel
 - libspice-server-devel spice-protocol ceph-devel
 - libnfs-devel libseccomp-devel
 - libgnutls-devel
-- libudev-devel libmultipath-devel libblkio-devel libpmem-devel
+- libblkio-devel libpmem-devel
 - libdaxctl-devel
 - bzlib-devel liblzo2-devel
 - libcacard-devel libusbredir-devel libepoxy-devel libgbm-devel
 - libvitastor-devel libiscsi-devel glusterfs-coreutils
 - libglusterfs11-api-devel
 - libvdeplug-devel
-- glib2-devel
 packages:
 - dmidecode libgcrypt nettle libcap-ng libcapstone
 - openssl libcurl e2fsprogs libxcrypt numactl
@@ -133,7 +133,12 @@ packages:
 - linux-pam
 - snappy
 - ngtcp2 libtasn1 ncurses
+- glib2 util-linux
+- libxkbcommon
+- libgcc1
 - libaio
+- liburing libuserspace-rcu libunistring systemd
+- multipath-tools
 {{- end -}}

 {{ $builderDependencies := include "$name" . | fromYaml }}
| fromYaml }} @@ -143,7 +148,7 @@ final: false fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/alt" "builder/alt-go-svace" }} import: - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact - add: /{{ $gitRepoName }}-{{ $version }} + add: /src/{{ $gitRepoName }}-{{ $version }} to: /{{ $gitRepoName }}-{{ $version }} before: install - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact diff --git a/images/virt-artifact/werf.inc.yaml b/images/virt-artifact/werf.inc.yaml index f76dcf3163..5d2afca419 100644 --- a/images/virt-artifact/werf.inc.yaml +++ b/images/virt-artifact/werf.inc.yaml @@ -1,9 +1,11 @@ --- # Source https://github.com/kubevirt/kubevirt/blob/v1.3.1/hack/dockerized#L15 {{- $gitRepoName := "3p-kubevirt" }} +{{- $gitRepoUrl := (printf "%s/%s" "deckhouse" $gitRepoName) }} {{- $tag := get $.Core $gitRepoName }} {{- $version := (split "-" $tag)._0 }} + --- image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact final: false @@ -13,7 +15,11 @@ secrets: value: {{ $.SOURCE_REPO }} shell: install: - - git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/deckhouse/3p-kubevirt --branch {{ $tag }} /kubevirt + - | + echo "Git clone {{ $gitRepoName }} repository..." + git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $tag }} /src/kubevirt + + rm -rf /src/kubevirt/.git --- @@ -54,7 +60,7 @@ secrets: value: {{ .GOPROXY }} import: - image: {{ .ModuleNamePrefix }}{{ .ImageName }}-src-artifact - add: /kubevirt + add: /src/kubevirt to: /kubevirt before: install {{- include "importPackageImages" (list . $builderDependencies.packages "install") -}} @@ -79,30 +85,8 @@ shell: export GOPROXY=$(cat /run/secrets/GOPROXY) mkdir -p ~/.ssh && echo "StrictHostKeyChecking accept-new" > ~/.ssh/config cd /kubevirt - go mod download - go get github.com/opencontainers/runc@v1.1.14 - go get github.com/containers/common@v0.60.4 - - | - echo Download Go modules. go mod download - - echo Update modules to mitigate CVEs... - go get github.com/opencontainers/runc@v1.1.14 - go get github.com/containers/common@v0.60.4 - - go get github.com/go-openapi/strfmt@v0.23.0 - go get github.com/onsi/gomega/matchers/support/goraph/bipartitegraph@v1.34.1 - go get github.com/cilium/ebpf/btf@v0.11.0 - go get github.com/cilium/ebpf/internal@v0.11.0 - - # CVE-2024-45337,CVE-2025-22869 - go get golang.org/x/crypto@v0.38.0 - # CVE-2025-22870, CVE-2025-22872 - go get golang.org/x/net@v0.40.0 - # CVE-2025-22868 - go get golang.org/x/oauth2@v0.27.0 - go mod vendor setup: diff --git a/images/virt-launcher/mount-points.yaml b/images/virt-launcher/mount-points.yaml index 643cfdb1fe..f07fda66e1 100644 --- a/images/virt-launcher/mount-points.yaml +++ b/images/virt-launcher/mount-points.yaml @@ -43,6 +43,6 @@ dirs: - /var/lib/libvirt/qemu/nvram - /var/lib/kubevirt-node-labeller - /var/lib/swtpm-localca - - /var/log + - /var/log/libvirt - /path # For hot-plugged disks, used in "hp Pods". - /init/usr/bin # For attaching images as "container disks". 
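Most of the Go churn in the hunks below is mechanical fallout from the new `importas` settings added to `images/virtualization-artifact/.golangci.yaml`: `github.com/deckhouse/virtualization/api/core/v1alpha2` is configured with an empty alias, so the old `virtv2`/`v1alpha` aliases are dropped, while `kubevirt.io/api/core/v1` keeps `virtv1` and the subresources package becomes `subv1alpha2`. A minimal sketch of an import block that satisfies the configured aliases — the file and variables are hypothetical; only the aliasing convention comes from the diff:

```go
// example.go — hypothetical file illustrating the importas convention.
package example

import (
	corev1 "k8s.io/api/core/v1"                   // listed alias: corev1
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // listed alias: metav1

	// Empty alias in .golangci.yaml: must be imported without a name.
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
	// Listed alias: subv1alpha2, keeping it distinct from the core package.
	subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2"
)

// Package-level vars use each import so the sketch compiles on its own.
var (
	vm = v1alpha2.VirtualMachine{ObjectMeta: metav1.ObjectMeta{Name: "example-vm"}}
	_  = corev1.Pod{}
	_  = subv1alpha2.SchemeGroupVersion
)
```

Because the core package's configured alias is empty, any named alias (such as the old `virtv2`) fails the check; conversely, `no-unaliased: true` rejects a bare import of a package that has a non-empty alias in the list, which keeps `metav1`, `corev1`, and `virtv1` spelled consistently across the codebase.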
diff --git a/images/virt-launcher/werf.inc.yaml b/images/virt-launcher/werf.inc.yaml index df494bfdc7..0d03e69fbe 100644 --- a/images/virt-launcher/werf.inc.yaml +++ b/images/virt-launcher/werf.inc.yaml @@ -39,7 +39,6 @@ altLibs: - libsoundio-devel - libjpeg-devel - libpng-devel - - libxkbcommon-devel - xkeyboard-config-devel - libgtk+3-devel - libvte @@ -48,7 +47,6 @@ altLibs: - libvirglrenderer-devel - libdbus - libusb-devel - - liburing-devel - libbpf-devel - libspice-server-devel - ceph-devel @@ -79,8 +77,6 @@ altLibs: - libfuse-devel - libsystemd-devel - systemtap-sdt-devel - - glib2-devel - - libgio-devel - libclocale - libLLVMSPIRVLib-devel - ethtool @@ -130,7 +126,10 @@ packages: - cyrus-sasl2 - snappy - libtasn1 libtirpc +- glib2 +- libxkbcommon - libaio +- liburing libunistring {{- end -}} {{ $builderDependencies := include "$name" . | fromYaml }} @@ -450,7 +449,7 @@ shell: --- image: {{ .ModuleNamePrefix }}{{ .ImageName }}-cbuilder final: false -fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.23" "builder/alt-go-svace" }} +fromImage: {{ eq $.SVACE_ENABLED "false" | ternary "builder/golang-bookworm-1.24" "builder/alt-go-svace" }} git: - add: {{ .ModuleDir }}/images/{{ .ImageName }}/static_binaries to: /static_binaries diff --git a/images/virtualization-artifact/.golangci.yaml b/images/virtualization-artifact/.golangci.yaml index 0867b18310..1be21e2a37 100644 --- a/images/virtualization-artifact/.golangci.yaml +++ b/images/virtualization-artifact/.golangci.yaml @@ -39,6 +39,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: subv1alpha2 + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -77,3 +105,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. 
+ - importas # checks import aliases against the configured convention diff --git a/images/virtualization-artifact/Taskfile.yaml b/images/virtualization-artifact/Taskfile.yaml index 6724b73dd4..2fe117d704 100644 --- a/images/virtualization-artifact/Taskfile.yaml +++ b/images/virtualization-artifact/Taskfile.yaml @@ -72,7 +72,7 @@ tasks: deps: - _ensure:mirrord cmd: | - ./hack/mirrord.sh run --app=$PWD/cmd/virtualization-controller/main.go \ + TMPDIR=/tmp ./hack/mirrord.sh run --app=$PWD/cmd/virtualization-controller/main.go \ --deployment=virtualization-controller \ --namespace={{ .BaseNamespace }} \ --container-name=virtualization-controller @@ -92,15 +92,15 @@ tasks: flags+=( "--kubevirt-cabundle=/etc/virt-api/certificates/ca.crt" ) flags+=( "--kubevirt-endpoint=virt-api.{{ .BaseNamespace }}.svc" ) flags+=( "--secure-port=8443" ) - flags+=( "--tls-private-key-file=/etc/virtualziation-api/certificates/tls.key" ) - flags+=( "--tls-cert-file=/etc/virtualziation-api/certificates/tls.crt" ) + flags+=( "--tls-private-key-file=/etc/virtualization-api/certificates/tls.key" ) + flags+=( "--tls-cert-file=/etc/virtualization-api/certificates/tls.crt" ) flags+=( "--v=7" ) - flags+=( "--proxy-client-cert-file=/etc/virtualziation-api-proxy/certificates/tls.crt" ) - flags+=( "--proxy-client-key-file=/etc/virtualziation-api-proxy/certificates/tls.key" ) + flags+=( "--proxy-client-cert-file=/etc/virtualization-api-proxy/certificates/tls.crt" ) + flags+=( "--proxy-client-key-file=/etc/virtualization-api-proxy/certificates/tls.key" ) flags+=( "--service-account-name=virtualization-api" ) flags+=( "--service-account-namespace={{ .BaseNamespace }}" ) - ./hack/mirrord.sh run --app="$PWD/cmd/virtualization-api/main.go" \ + TMPDIR=/tmp ./hack/mirrord.sh run --app="$PWD/cmd/virtualization-api/main.go" \ --deployment="virtualization-api" \ --namespace="{{ .BaseNamespace }}" \ --flags="\"${flags[@]}\"" diff --git a/images/virtualization-artifact/cmd/virtualization-controller/main.go b/images/virtualization-artifact/cmd/virtualization-controller/main.go index d43fba35b0..f4b0f9d26a 100644 --- a/images/virtualization-artifact/cmd/virtualization-controller/main.go +++ b/images/virtualization-artifact/cmd/virtualization-controller/main.go @@ -66,7 +66,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/migration" "github.com/deckhouse/virtualization-controller/pkg/version" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -221,7 +221,7 @@ func main() { for _, f := range []func(*apiruntime.Scheme) error{ clientgoscheme.AddToScheme, extv1.AddToScheme, - virtv2alpha1.AddToScheme, + v1alpha2.AddToScheme, cdiv1beta1.AddToScheme, virtv1.AddToScheme, vsv1.AddToScheme, diff --git a/images/virtualization-artifact/go.mod b/images/virtualization-artifact/go.mod index 81a3b679f6..2a65806fc1 100644 --- a/images/virtualization-artifact/go.mod +++ b/images/virtualization-artifact/go.mod @@ -67,7 +67,7 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/docker/docker v25.0.6+incompatible // indirect + github.com/docker/docker v28.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible 
// indirect @@ -155,6 +155,7 @@ replace ( k8s.io/client-go => k8s.io/client-go v0.33.3 k8s.io/component-base => k8s.io/component-base v0.33.3 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 + kubevirt.io/api => github.com/deckhouse/3p-kubevirt/staging/src/kubevirt.io/api v1.3.1-v12n.17 ) // CVE Replaces diff --git a/images/virtualization-artifact/go.sum b/images/virtualization-artifact/go.sum index 409ba2d635..ec2bb8a255 100644 --- a/images/virtualization-artifact/go.sum +++ b/images/virtualization-artifact/go.sum @@ -45,14 +45,16 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckhouse/3p-kubevirt/staging/src/kubevirt.io/api v1.3.1-v12n.17 h1:IQPK5oGRSONOKPH8TIuDq7vCjbFTj0NEWQzo6ZBD7uY= +github.com/deckhouse/3p-kubevirt/staging/src/kubevirt.io/api v1.3.1-v12n.17/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= github.com/deckhouse/deckhouse/pkg/log v0.0.0-20250226105106-176cd3afcdd5 h1:PsN1E0oxC/+4zdA977txrqUCuObFL3HAuu5Xnud8m8c= github.com/deckhouse/deckhouse/pkg/log v0.0.0-20250226105106-176cd3afcdd5/go.mod h1:Mk5HRzkc5pIcDIZ2JJ6DPuuqnwhXVkb3you8M8Mg+4w= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= -github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM= +github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -643,8 +645,6 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= -kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= kubevirt.io/containerized-data-importer-api v1.60.3 h1:kQEXi7scpzUa0RPf3/3MKk1Kmem0ZlqqiuK3kDF5L2I= kubevirt.io/containerized-data-importer-api v1.60.3/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs= kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= diff --git a/images/virtualization-artifact/hack/mirrord.sh 
b/images/virtualization-artifact/hack/mirrord.sh index 0b335fa979..1349afe0be 100755 --- a/images/virtualization-artifact/hack/mirrord.sh +++ b/images/virtualization-artifact/hack/mirrord.sh @@ -103,7 +103,7 @@ chmod +x "${BIN_DIR}/${BINARY}" if ! kubectl -n "${NAMESPACE}" get "deployment/${NEW_NAME}" &>/dev/null; then kubectl -n "${NAMESPACE}" get "deployment/${DEPLOYMENT}" -ojson | \ jq --arg CONTAINER_NAME "$CONTAINER_NAME" --arg NEW_NAME "$NEW_NAME" '.metadata.name = $NEW_NAME | - (.spec.template.spec.containers[] | select(.name == $CONTAINER_NAME) ) |= (.command= [ "/bin/sh", "-c", "--" ] | .args = [ "while true; do sleep 60; done;" ] | .image = "alpine:3.20.1") | + (.spec.template.spec.containers[] | select(.name == $CONTAINER_NAME) ) |= (.command= [ "/bin/sh", "-c", "--" ] | .args = [ "while true; do sleep 60; done;" ] | .image = "alpine:3.20.1" | del(.livenessProbe) | del(.readinessProbe)) | .spec.replicas = 1 | .spec.template.metadata.labels.mirror = "true" | .spec.template.metadata.labels.ownerName = $NEW_NAME' | \ diff --git a/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go b/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go index cbf532813d..2d9cbad7bb 100644 --- a/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go +++ b/images/virtualization-artifact/pkg/apiserver/api/generated/openapi/zz_generated.openapi.go @@ -4912,14 +4912,14 @@ func schema_virtualization_api_core_v1alpha2_VirtualMachineOperationCloneCustomi Properties: map[string]spec.Schema{ "namePrefix": { SchemaProps: spec.SchemaProps{ - Description: "NamePrefix adds a prefix to resource names during cloning. Applied to VirtualDisk, VirtualMachineIPAddress, VirtualMachineMACAddress, and Secret resources.", + Description: "NamePrefix adds a prefix to resource names during cloning. Applied to VirtualMachine, VirtualDisk, VirtualMachineBlockDeviceAttachment, and Secret resources.", Type: []string{"string"}, Format: "", }, }, "nameSuffix": { SchemaProps: spec.SchemaProps{ - Description: "NameSuffix adds a suffix to resource names during cloning. Applied to VirtualDisk, VirtualMachineIPAddress, VirtualMachineMACAddress, and Secret resources.", + Description: "NameSuffix adds a suffix to resource names during cloning. 
Applied to VirtualMachine, VirtualDisk, VirtualMachineBlockDeviceAttachment, and Secret resources.", Type: []string{"string"}, Format: "", }, diff --git a/images/virtualization-artifact/pkg/apiserver/api/install.go b/images/virtualization-artifact/pkg/apiserver/api/install.go index 071c333b55..c4575f66fe 100644 --- a/images/virtualization-artifact/pkg/apiserver/api/install.go +++ b/images/virtualization-artifact/pkg/apiserver/api/install.go @@ -32,7 +32,7 @@ import ( virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2" "github.com/deckhouse/virtualization/api/subresources" "github.com/deckhouse/virtualization/api/subresources/install" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) var ( @@ -68,7 +68,7 @@ func Build(store *storage.VirtualMachineStorage) genericapiserver.APIGroupInfo { "virtualmachines/unfreeze": store.UnfreezeREST(), "virtualmachines/cancelevacuation": store.CancelEvacuationREST(), } - apiGroupInfo.VersionedResourcesStorageMap[v1alpha2.SchemeGroupVersion.Version] = resources + apiGroupInfo.VersionedResourcesStorageMap[subv1alpha2.SchemeGroupVersion.Version] = resources return apiGroupInfo } diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go index 3cba0d50e9..edbfc11f16 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/rest/stream.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/tls/certmanager" virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -67,17 +67,17 @@ func (p pather) Path(namespace, name string) string { return fmt.Sprintf(p.template, namespace, name, p.subresource) } -type preconditionVirtualMachine func(vm *virtv2.VirtualMachine) error +type preconditionVirtualMachine func(vm *v1alpha2.VirtualMachine) error -func virtualMachineShouldBeRunning(vm *virtv2.VirtualMachine) error { - if vm == nil || vm.Status.Phase != virtv2.MachineRunning { +func virtualMachineShouldBeRunning(vm *v1alpha2.VirtualMachine) error { + if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning { return fmt.Errorf("VirtualMachine is not Running") } return nil } -func virtualMachineShouldBeRunningOrMigrating(vm *virtv2.VirtualMachine) error { - if vm == nil || (vm.Status.Phase != virtv2.MachineRunning && vm.Status.Phase != virtv2.MachineMigrating) { +func virtualMachineShouldBeRunningOrMigrating(vm *v1alpha2.VirtualMachine) error { + if vm == nil || (vm.Status.Phase != v1alpha2.MachineRunning && vm.Status.Phase != v1alpha2.MachineMigrating) { return fmt.Errorf("VirtualMachine is not Running or Migrating") } return nil diff --git a/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go b/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go index fe13f2887d..c0fa67b68c 100644 --- a/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go +++ b/images/virtualization-artifact/pkg/apiserver/registry/vm/storage/storage.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/tls/certmanager" versionedv1alpha2 
"github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" virtlisters "github.com/deckhouse/virtualization/api/client/generated/listers/core/v1alpha2" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineStorage struct { @@ -138,7 +138,7 @@ func (store VirtualMachineStorage) CancelEvacuationREST() *vmrest.CancelEvacuati // New implements rest.Storage interface func (store VirtualMachineStorage) New() runtime.Object { - return &virtv2.VirtualMachine{} + return &v1alpha2.VirtualMachine{} } // Destroy implements rest.Storage interface @@ -173,7 +173,7 @@ func (store VirtualMachineStorage) Get(ctx context.Context, name string, _ *meta } func (store VirtualMachineStorage) NewList() runtime.Object { - return &virtv2.VirtualMachineList{} + return &v1alpha2.VirtualMachineList{} } func (store VirtualMachineStorage) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) { @@ -199,8 +199,8 @@ func (store VirtualMachineStorage) List(ctx context.Context, options *internalve return nil, k8serrors.NewInternalError(err) } - filtered := &virtv2.VirtualMachineList{} - filtered.Items = make([]virtv2.VirtualMachine, 0, len(items)) + filtered := &v1alpha2.VirtualMachineList{} + filtered.Items = make([]v1alpha2.VirtualMachine, 0, len(items)) for _, vm := range items { if matches(vm, name) { filtered.Items = append(filtered.Items, *vm) diff --git a/images/virtualization-artifact/pkg/apiserver/server/config.go b/images/virtualization-artifact/pkg/apiserver/server/config.go index f98fcdf1bf..d924f42481 100644 --- a/images/virtualization-artifact/pkg/apiserver/server/config.go +++ b/images/virtualization-artifact/pkg/apiserver/server/config.go @@ -30,7 +30,7 @@ import ( vmrest "github.com/deckhouse/virtualization-controller/pkg/apiserver/registry/vm/rest" "github.com/deckhouse/virtualization-controller/pkg/tls/certmanager/filesystem" virtClient "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var ErrConfigInvalid = errors.New("configuration is invalid") @@ -89,7 +89,7 @@ func (c Config) Complete() (*Server, error) { if err != nil { return nil, err } - crd, err := kubeclient.CustomResourceDefinitions().Get(context.Background(), virtv2.Resource(virtv2.VirtualMachineResource).String(), metav1.GetOptions{}) + crd, err := kubeclient.CustomResourceDefinitions().Get(context.Background(), v1alpha2.Resource(v1alpha2.VirtualMachineResource).String(), metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go index 590e6a7b88..cef7a71b7d 100644 --- a/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go +++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_access_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmAccessTestArgs struct { @@ -50,8 +50,8 @@ type vmAccessTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vm *v1alpha.VirtualMachine - var vd 
*v1alpha.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node currentTime := time.Now() @@ -77,29 +77,29 @@ var _ = Describe("VMOP Events", func() { }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: v1alpha2.Versions{ Qemu: "9.9.9", Libvirt: "1.1.1", }, }, } - vd = &v1alpha.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"}, - Status: v1alpha.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "test-storageclass", }, } diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go index 8c41e18abf..c4987fb80f 100644 --- a/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go +++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_control_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmControlTestArgs struct { @@ -54,8 +54,8 @@ type vmControlTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vm *v1alpha.VirtualMachine - var vd *v1alpha.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node var pod *corev1.Pod @@ -103,29 +103,29 @@ var _ = Describe("VMOP Events", func() { }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: v1alpha2.Versions{ Qemu: "9.9.9", Libvirt: "1.1.1", }, }, } - vd = &v1alpha.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"}, - Status: v1alpha.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "test-storageclass", }, } diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go 
b/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go index b6b8843d86..8af16eed25 100644 --- a/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go +++ b/images/virtualization-artifact/pkg/audit/events/vm/vm_manage_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmManageTestArgs struct { @@ -52,8 +52,8 @@ type vmManageTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vm *v1alpha.VirtualMachine - var vd *v1alpha.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node currentTime := time.Now() @@ -78,29 +78,29 @@ var _ = Describe("VMOP Events", func() { }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: v1alpha2.Versions{ Qemu: "9.9.9", Libvirt: "1.1.1", }, }, } - vd = &v1alpha.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"}, - Status: v1alpha.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "test-storageclass", }, } diff --git a/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go b/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go index 6622ffde5d..5f0dc9d177 100644 --- a/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go +++ b/images/virtualization-artifact/pkg/audit/events/vm/vmop_control_test.go @@ -34,11 +34,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/audit/events" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - v1alpha "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type vmopTestArgs struct { - vmopType v1alpha.VMOPType + vmopType v1alpha2.VMOPType expectedName string expectedLevel string expectedActionType string @@ -55,9 +55,9 @@ type vmopTestArgs struct { var _ = Describe("VMOP Events", func() { var event *audit.Event - var vmop *v1alpha.VirtualMachineOperation - var vm *v1alpha.VirtualMachine - var vd *v1alpha.VirtualDisk + var vmop *v1alpha2.VirtualMachineOperation + var vm *v1alpha2.VirtualMachine + var vd *v1alpha2.VirtualDisk var node *corev1.Node currentTime := time.Now() @@ -82,35 +82,35 @@ var _ = Describe("VMOP Events", func() { }, } - vmop = &v1alpha.VirtualMachineOperation{ - Spec: v1alpha.VirtualMachineOperationSpec{ + vmop = &v1alpha2.VirtualMachineOperation{ + Spec: v1alpha2.VirtualMachineOperationSpec{ VirtualMachine: "test-vm", }, } - vm = &v1alpha.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ 
ObjectMeta: metav1.ObjectMeta{Name: "test-vm", Namespace: "test", UID: "0000-0000-4567"}, - Spec: v1alpha.VirtualMachineSpec{ - BlockDeviceRefs: []v1alpha.BlockDeviceSpecRef{ - {Kind: v1alpha.VirtualDiskKind, Name: "test-disk"}, - {Kind: v1alpha.VirtualImageKind, Name: "test-image"}, + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Kind: v1alpha2.VirtualDiskKind, Name: "test-disk"}, + {Kind: v1alpha2.VirtualImageKind, Name: "test-image"}, }, }, - Status: v1alpha.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Node: "test-node", GuestOSInfo: virtv1.VirtualMachineInstanceGuestOSInfo{ Name: "test-os", }, - Versions: v1alpha.Versions{ + Versions: v1alpha2.Versions{ Qemu: "9.9.9", Libvirt: "1.1.1", }, }, } - vd = &v1alpha.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "test-disk", Namespace: "test", UID: "0000-0000-4567"}, - Status: v1alpha.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "test-storageclass", }, } @@ -263,65 +263,65 @@ var _ = Describe("VMOP Events", func() { shouldFailMatch: true, }), Entry("Start VMOP event should filled without errors", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", }), Entry("Stop VMOP event should filled without errors", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStop, + vmopType: v1alpha2.VMOPTypeStop, expectedName: "VM stopped", expectedLevel: "warn", expectedActionType: "stop", }), Entry("Restart VMOP event should filled without errors", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeRestart, + vmopType: v1alpha2.VMOPTypeRestart, expectedName: "VM restarted", expectedLevel: "warn", expectedActionType: "restart", }), Entry("Migrate VMOP event should filled without errors", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeMigrate, + vmopType: v1alpha2.VMOPTypeMigrate, expectedName: "VM migrated", expectedLevel: "warn", expectedActionType: "migrate", }), Entry("Evict VMOP event should filled without errors", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeEvict, + vmopType: v1alpha2.VMOPTypeEvict, expectedName: "VM evicted", expectedLevel: "warn", expectedActionType: "evict", }), Entry("Evict VMOP event should filled without errors, but with unknown VDs", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", shouldLostVD: true, }), Entry("Evict VMOP event should filled without errors, but with unknown Node's IPs", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", shouldLostNode: true, }), Entry("VMOP event should filled with VM exist error", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", shouldLostVM: true, }), Entry("VMOP event should filled with VMOP exist error", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", shouldLostVMOP: true, }), Entry("VMOP event should filled with JSON encode error", vmopTestArgs{ - vmopType: v1alpha.VMOPTypeStart, + vmopType: v1alpha2.VMOPTypeStart, expectedName: "VM started", expectedLevel: "info", expectedActionType: "start", diff --git 
a/images/virtualization-artifact/pkg/builder/vd/option.go b/images/virtualization-artifact/pkg/builder/vd/option.go index 36817f8277..1d191f37fb 100644 --- a/images/virtualization-artifact/pkg/builder/vd/option.go +++ b/images/virtualization-artifact/pkg/builder/vd/option.go @@ -42,15 +42,11 @@ func WithDatasource(datasource *v1alpha2.VirtualDiskDataSource) func(vd *v1alpha } } -func WithDataSourceHTTP(url string, checksum *v1alpha2.Checksum, caBundle []byte) Option { +func WithDataSourceHTTP(dataSourceHTTP *v1alpha2.DataSourceHTTP) Option { return func(vd *v1alpha2.VirtualDisk) { vd.Spec.DataSource = &v1alpha2.VirtualDiskDataSource{ Type: v1alpha2.DataSourceTypeHTTP, - HTTP: &v1alpha2.DataSourceHTTP{ - URL: url, - Checksum: checksum, - CABundle: caBundle, - }, + HTTP: dataSourceHTTP, } } } diff --git a/images/virtualization-artifact/pkg/builder/vm/option.go b/images/virtualization-artifact/pkg/builder/vm/option.go index 3a34e6e7db..ada9473c89 100644 --- a/images/virtualization-artifact/pkg/builder/vm/option.go +++ b/images/virtualization-artifact/pkg/builder/vm/option.go @@ -35,6 +35,12 @@ var ( WithAnnotations = meta.WithAnnotations[*v1alpha2.VirtualMachine] ) +func WithBootloader(bootloader v1alpha2.BootloaderType) Option { + return func(vm *v1alpha2.VirtualMachine) { + vm.Spec.Bootloader = bootloader + } +} + func WithCPU(cores int, coreFraction *string) Option { return func(vm *v1alpha2.VirtualMachine) { vm.Spec.CPU.Cores = cores diff --git a/images/virtualization-artifact/pkg/builder/vmbda/option.go b/images/virtualization-artifact/pkg/builder/vmbda/option.go new file mode 100644 index 0000000000..ecb902244a --- /dev/null +++ b/images/virtualization-artifact/pkg/builder/vmbda/option.go @@ -0,0 +1,50 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vmbda + +import ( + "github.com/deckhouse/virtualization-controller/pkg/builder/meta" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +type Option func(vd *v1alpha2.VirtualMachineBlockDeviceAttachment) + +var ( + WithName = meta.WithName[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithNamespace = meta.WithNamespace[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithGenerateName = meta.WithGenerateName[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithLabel = meta.WithLabel[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithLabels = meta.WithLabels[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithAnnotation = meta.WithAnnotation[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithAnnotations = meta.WithAnnotations[*v1alpha2.VirtualMachineBlockDeviceAttachment] + WithFinalizer = meta.WithFinalizer[*v1alpha2.VirtualMachineBlockDeviceAttachment] +) + +func WithVirtualMachineName(name string) func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + return func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + vmbda.Spec.VirtualMachineName = name + } +} + +func WithBlockDeviceRef(kind v1alpha2.VMBDAObjectRefKind, name string) func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + return func(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) { + vmbda.Spec.BlockDeviceRef = v1alpha2.VMBDAObjectRef{ + Kind: kind, + Name: name, + } + } +} diff --git a/images/virtualization-artifact/pkg/builder/vmbda/vd.go b/images/virtualization-artifact/pkg/builder/vmbda/vd.go new file mode 100644 index 0000000000..e5f9f6c075 --- /dev/null +++ b/images/virtualization-artifact/pkg/builder/vmbda/vd.go @@ -0,0 +1,51 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmbda + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func New(options ...Option) *v1alpha2.VirtualMachineBlockDeviceAttachment { + vmbda := NewEmpty("", "") + ApplyOptions(vmbda, options...) + return vmbda +} + +func ApplyOptions(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment, opts ...Option) { + if vmbda == nil { + return + } + for _, opt := range opts { + opt(vmbda) + } +} + +func NewEmpty(name, namespace string) *v1alpha2.VirtualMachineBlockDeviceAttachment { + return &v1alpha2.VirtualMachineBlockDeviceAttachment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualMachineBlockDeviceAttachmentKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } +} diff --git a/images/virtualization-artifact/pkg/common/annotations/annotations.go b/images/virtualization-artifact/pkg/common/annotations/annotations.go index 43a15840f8..ba7453b2a8 100644 --- a/images/virtualization-artifact/pkg/common/annotations/annotations.go +++ b/images/virtualization-artifact/pkg/common/annotations/annotations.go @@ -160,6 +160,9 @@ const ( // AnnAccessMode is the annotation for indicating that access mode. 
(USED IN STORAGE sds controllers) AnnAccessModes = AnnAPIGroupV + "/access-mode" AnnAccessModesDeprecated = "accessModes" + // AnnStorageProvisioner is the annotation for indicating storage provisioner + AnnStorageProvisioner = "volume.kubernetes.io/storage-provisioner" + AnnStorageProvisionerDeprecated = "volume.beta.kubernetes.io/storage-provisioner" // AppLabel is the app name label. AppLabel = "app" diff --git a/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go b/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go index 7452951176..a4b0b66150 100644 --- a/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go +++ b/images/virtualization-artifact/pkg/common/datasource/ca_bundle.go @@ -19,12 +19,12 @@ package datasource import ( "k8s.io/apimachinery/pkg/types" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type CABundle struct { - Type virtv2.DataSourceType - HTTP *virtv2.DataSourceHTTP + Type v1alpha2.DataSourceType + HTTP *v1alpha2.DataSourceHTTP ContainerImage *ContainerRegistry } @@ -34,14 +34,14 @@ type ContainerRegistry struct { CABundle []byte } -func NewCABundleForCVMI(ds virtv2.ClusterVirtualImageDataSource) *CABundle { +func NewCABundleForCVMI(ds v1alpha2.ClusterVirtualImageDataSource) *CABundle { switch ds.Type { - case virtv2.DataSourceTypeHTTP: + case v1alpha2.DataSourceTypeHTTP: return &CABundle{ Type: ds.Type, HTTP: ds.HTTP, } - case virtv2.DataSourceTypeContainerImage: + case v1alpha2.DataSourceTypeContainerImage: return &CABundle{ Type: ds.Type, ContainerImage: &ContainerRegistry{ @@ -58,14 +58,14 @@ func NewCABundleForCVMI(ds virtv2.ClusterVirtualImageDataSource) *CABundle { return &CABundle{Type: ds.Type} } -func NewCABundleForVMI(namespace string, ds virtv2.VirtualImageDataSource) *CABundle { +func NewCABundleForVMI(namespace string, ds v1alpha2.VirtualImageDataSource) *CABundle { switch ds.Type { - case virtv2.DataSourceTypeHTTP: + case v1alpha2.DataSourceTypeHTTP: return &CABundle{ Type: ds.Type, HTTP: ds.HTTP, } - case virtv2.DataSourceTypeContainerImage: + case v1alpha2.DataSourceTypeContainerImage: return &CABundle{ Type: ds.Type, ContainerImage: &ContainerRegistry{ @@ -82,14 +82,14 @@ func NewCABundleForVMI(namespace string, ds virtv2.VirtualImageDataSource) *CABu return &CABundle{Type: ds.Type} } -func NewCABundleForVMD(namespace string, ds *virtv2.VirtualDiskDataSource) *CABundle { +func NewCABundleForVMD(namespace string, ds *v1alpha2.VirtualDiskDataSource) *CABundle { switch ds.Type { - case virtv2.DataSourceTypeHTTP: + case v1alpha2.DataSourceTypeHTTP: return &CABundle{ Type: ds.Type, HTTP: ds.HTTP, } - case virtv2.DataSourceTypeContainerImage: + case v1alpha2.DataSourceTypeContainerImage: return &CABundle{ Type: ds.Type, ContainerImage: &ContainerRegistry{ @@ -115,11 +115,11 @@ func (ds *CABundle) GetCABundle() string { return "" } switch ds.Type { - case virtv2.DataSourceTypeHTTP: + case v1alpha2.DataSourceTypeHTTP: if ds.HTTP != nil { return string(ds.HTTP.CABundle) } - case virtv2.DataSourceTypeContainerImage: + case v1alpha2.DataSourceTypeContainerImage: if ds.ContainerImage != nil { return string(ds.ContainerImage.CABundle) } diff --git a/images/virtualization-artifact/pkg/common/mac/mac.go b/images/virtualization-artifact/pkg/common/mac/mac.go index a45f3dc0d8..81a7420ea1 100644 --- a/images/virtualization-artifact/pkg/common/mac/mac.go +++ b/images/virtualization-artifact/pkg/common/mac/mac.go @@ -20,12 +20,12 @@ import ( "regexp" 
"strings" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const macPrefix = "mac-" -type AllocatedMACs map[string]*virtv2.VirtualMachineMACAddressLease +type AllocatedMACs map[string]*v1alpha2.VirtualMachineMACAddressLease // AddressToLeaseName generate the Virtual Machine MAC Address Lease's name from the MAC address func AddressToLeaseName(address string) string { diff --git a/images/virtualization-artifact/pkg/common/network/network.go b/images/virtualization-artifact/pkg/common/network/network.go index b294285bfe..c53142c755 100644 --- a/images/virtualization-artifact/pkg/common/network/network.go +++ b/images/virtualization-artifact/pkg/common/network/network.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -48,7 +48,7 @@ type InterfaceStatus struct { type InterfaceSpecList []InterfaceSpec -func CreateNetworkSpec(vm *virtv2.VirtualMachine, vmmacs []*virtv2.VirtualMachineMACAddress) InterfaceSpecList { +func CreateNetworkSpec(vm *v1alpha2.VirtualMachine, vmmacs []*v1alpha2.VirtualMachineMACAddress) InterfaceSpecList { var ( all []string status []struct{ Name, MAC string } @@ -64,7 +64,7 @@ func CreateNetworkSpec(vm *virtv2.VirtualMachine, vmmacs []*virtv2.VirtualMachin } } for _, n := range vm.Status.Networks { - if n.Type == virtv2.NetworksTypeMain { + if n.Type == v1alpha2.NetworksTypeMain { continue } status = append(status, struct{ Name, MAC string }{n.Name, n.MAC}) @@ -76,7 +76,7 @@ func CreateNetworkSpec(vm *virtv2.VirtualMachine, vmmacs []*virtv2.VirtualMachin } } for _, n := range vm.Spec.Networks { - if n.Type == virtv2.NetworksTypeMain { + if n.Type == v1alpha2.NetworksTypeMain { continue } var mac string @@ -118,9 +118,9 @@ func generateInterfaceName(macAddress, networkType string) string { hashHex := hex.EncodeToString(hash[:]) switch networkType { - case virtv2.NetworksTypeNetwork: + case v1alpha2.NetworksTypeNetwork: name = fmt.Sprintf("veth_n%s", hashHex[:8]) - case virtv2.NetworksTypeClusterNetwork: + case v1alpha2.NetworksTypeClusterNetwork: name = fmt.Sprintf("veth_cn%s", hashHex[:8]) } return name diff --git a/images/virtualization-artifact/pkg/common/network/network_test.go b/images/virtualization-artifact/pkg/common/network/network_test.go index ef21d93f68..2ac2f823a0 100644 --- a/images/virtualization-artifact/pkg/common/network/network_test.go +++ b/images/virtualization-artifact/pkg/common/network/network_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestHandlers(t *testing.T) { @@ -32,11 +32,11 @@ func TestHandlers(t *testing.T) { } var _ = Describe("Network Config Generation", func() { - vm := &virtv2.VirtualMachine{} - var vmmacs []*virtv2.VirtualMachineMACAddress + vm := &v1alpha2.VirtualMachine{} + var vmmacs []*v1alpha2.VirtualMachineMACAddress - newMACAddress := func(name, address string, phase virtv2.VirtualMachineMACAddressPhase, attachedVM string) *virtv2.VirtualMachineMACAddress { - mac := &virtv2.VirtualMachineMACAddress{ + newMACAddress := func(name, address string, phase v1alpha2.VirtualMachineMACAddressPhase, attachedVM string) *v1alpha2.VirtualMachineMACAddress { + mac := &v1alpha2.VirtualMachineMACAddress{ TypeMeta: metav1.TypeMeta{ Kind: "VirtualMachineMACAddress", APIVersion: "virtualization.deckhouse.io/v1alpha2", @@ -45,7 +45,7 @@ var _ = Describe("Network Config Generation", func() { Name: name, Namespace: "ns", }, - Status: virtv2.VirtualMachineMACAddressStatus{ + Status: v1alpha2.VirtualMachineMACAddressStatus{ Address: address, }, } @@ -59,16 +59,16 @@ var _ = Describe("Network Config Generation", func() { } BeforeEach(func() { - vm.Spec.Networks = []virtv2.NetworksSpec{} - vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", virtv2.VirtualMachineMACAddressPhaseBound, "vm1") - vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", virtv2.VirtualMachineMACAddressPhaseBound, "vm2") - vmmacs = []*virtv2.VirtualMachineMACAddress{vmmac1, vmmac2} + vm.Spec.Networks = []v1alpha2.NetworksSpec{} + vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", v1alpha2.VirtualMachineMACAddressPhaseBound, "vm1") + vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", v1alpha2.VirtualMachineMACAddressPhaseBound, "vm2") + vmmacs = []*v1alpha2.VirtualMachineMACAddress{vmmac1, vmmac2} }) It("should return empty list interfaces", func() { - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, } @@ -78,12 +78,12 @@ var _ = Describe("Network Config Generation", func() { }) It("should generate correct interface name for Network type", func() { - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "mynet", }, } @@ -91,18 +91,18 @@ var _ = Describe("Network Config Generation", func() { configs := CreateNetworkSpec(vm, vmmacs) Expect(configs).To(HaveLen(1)) - Expect(configs[0].Type).To(Equal(virtv2.NetworksTypeNetwork)) + Expect(configs[0].Type).To(Equal(v1alpha2.NetworksTypeNetwork)) Expect(configs[0].Name).To(Equal("mynet")) Expect(configs[0].InterfaceName).To(HavePrefix("veth_n")) }) It("should generate correct interface name for ClusterNetwork type", func() { - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeClusterNetwork, + Type: v1alpha2.NetworksTypeClusterNetwork, Name: "clusternet", }, } @@ -110,22 +110,22 @@ var _ = Describe("Network Config Generation", func() { configs := CreateNetworkSpec(vm, vmmacs) Expect(configs).To(HaveLen(1)) - Expect(configs[0].Type).To(Equal(virtv2.NetworksTypeClusterNetwork)) + 
Expect(configs[0].Type).To(Equal(v1alpha2.NetworksTypeClusterNetwork)) Expect(configs[0].Name).To(Equal("clusternet")) Expect(configs[0].InterfaceName).To(HavePrefix("veth_cn")) }) It("should generate unique names for different networks", func() { - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "net1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "net1", }, } @@ -137,12 +137,12 @@ var _ = Describe("Network Config Generation", func() { }) It("should preserve MAC order for existing networks and assign free MAC to new network", func() { - vm.Status.Networks = []virtv2.NetworksStatus{ + vm.Status.Networks = []v1alpha2.NetworksStatus{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", MAC: "00:1A:2B:3C:4D:5E", }, @@ -156,30 +156,30 @@ var _ = Describe("Network Config Generation", func() { }, } - vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac3 := newMACAddress("mac3", "00:1A:2B:3C:4D:6A", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac4 := newMACAddress("mac4", "00:1A:2B:3C:4D:7F", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac3 := newMACAddress("mac3", "00:1A:2B:3C:4D:6A", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac4 := newMACAddress("mac4", "00:1A:2B:3C:4D:7F", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") vmmacs = append(vmmacs, vmmac1, vmmac2, vmmac3, vmmac4) - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name2", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, } @@ -202,12 +202,12 @@ var _ = Describe("Network Config Generation", func() { }) It("should preserve MAC order when delete network", func() { - vm.Status.Networks = []virtv2.NetworksStatus{ + vm.Status.Networks = []v1alpha2.NetworksStatus{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", MAC: "00:1A:2B:3C:4D:5E", }, @@ -216,7 +216,7 @@ var _ = Describe("Network Config Generation", func() { MAC: "00:1A:2B:3C:4D:5F", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name2", MAC: "00:1A:2B:3C:4D:7F", }, @@ -226,26 +226,26 @@ var _ = Describe("Network Config Generation", func() { }, } - vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", 
virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac3 := newMACAddress("mac3", "00:1A:2B:3C:4D:6A", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") - vmmac4 := newMACAddress("mac4", "00:1A:2B:3C:4D:7F", virtv2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac1 := newMACAddress("mac1", "00:1A:2B:3C:4D:5E", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac2 := newMACAddress("mac2", "00:1A:2B:3C:4D:5F", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac3 := newMACAddress("mac3", "00:1A:2B:3C:4D:6A", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") + vmmac4 := newMACAddress("mac4", "00:1A:2B:3C:4D:7F", v1alpha2.VirtualMachineMACAddressPhaseAttached, "vm1") vmmacs = append(vmmacs, vmmac1, vmmac2, vmmac3, vmmac4) - vm.Spec.Networks = []virtv2.NetworksSpec{ + vm.Spec.Networks = []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "name1", }, } diff --git a/images/virtualization-artifact/pkg/common/steptaker/runner.go b/images/virtualization-artifact/pkg/common/steptaker/runner.go index fa015bbe43..cc8ed7e6eb 100644 --- a/images/virtualization-artifact/pkg/common/steptaker/runner.go +++ b/images/virtualization-artifact/pkg/common/steptaker/runner.go @@ -22,11 +22,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Resource interface { - *virtv2.VirtualDisk | *virtv2.VirtualImage | *virtv2.VirtualMachineIPAddress | *virtv2.VirtualMachineMACAddress | *virtv2.VirtualMachineOperation + *v1alpha2.VirtualDisk | *v1alpha2.VirtualImage | *v1alpha2.VirtualMachineIPAddress | *v1alpha2.VirtualMachineMACAddress | *v1alpha2.VirtualMachineOperation } type StepTaker[R Resource] interface { diff --git a/images/virtualization-artifact/pkg/common/testutil/testutil.go b/images/virtualization-artifact/pkg/common/testutil/testutil.go index 3d29063a63..05fa961d64 100644 --- a/images/virtualization-artifact/pkg/common/testutil/testutil.go +++ b/images/virtualization-artifact/pkg/common/testutil/testutil.go @@ -32,13 +32,13 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewFakeClientWithObjects(objs ...client.Object) (client.WithWatch, error) { scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, cdiv1.AddToScheme, clientgoscheme.AddToScheme, @@ -66,7 +66,7 @@ func NewFakeClientWithObjects(objs ...client.Object) (client.WithWatch, error) { func NewFakeClientWithInterceptorWithObjects(interceptor interceptor.Funcs, objs ...client.Object) (client.WithWatch, error) { scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, cdiv1.AddToScheme, clientgoscheme.AddToScheme, diff --git a/images/virtualization-artifact/pkg/common/vm/vm.go b/images/virtualization-artifact/pkg/common/vm/vm.go index cf3c996ae5..86b3f42593 100644 --- 
a/images/virtualization-artifact/pkg/common/vm/vm.go +++ b/images/virtualization-artifact/pkg/common/vm/vm.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -63,14 +63,14 @@ func CalculateCoresAndSockets(desiredCores int) (sockets, coresPerSocket int) { return sockets, coresPerSocket } -func ApprovalMode(vm *virtv2.VirtualMachine) virtv2.RestartApprovalMode { +func ApprovalMode(vm *v1alpha2.VirtualMachine) v1alpha2.RestartApprovalMode { if vm.Spec.Disruptions == nil { - return virtv2.Manual + return v1alpha2.Manual } return vm.Spec.Disruptions.RestartApprovalMode } -func RestartRequired(vm *virtv2.VirtualMachine) bool { +func RestartRequired(vm *v1alpha2.VirtualMachine) bool { if vm == nil { return false } diff --git a/images/virtualization-artifact/pkg/common/vmop/vmop.go b/images/virtualization-artifact/pkg/common/vmop/vmop.go index 882d51570d..3cac1a3d9d 100644 --- a/images/virtualization-artifact/pkg/common/vmop/vmop.go +++ b/images/virtualization-artifact/pkg/common/vmop/vmop.go @@ -17,7 +17,11 @@ limitations under the License. package vmop import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) func IsInProgressOrPending(vmop *v1alpha2.VirtualMachineOperation) bool { @@ -44,3 +48,8 @@ func InProgressOrPendingExists(vmops []v1alpha2.VirtualMachineOperation) bool { } return false } + +func IsOperationInProgress(vmop *v1alpha2.VirtualMachineOperation) bool { + sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) + return sent.Status == metav1.ConditionTrue && !IsFinished(vmop) +} diff --git a/images/virtualization-artifact/pkg/controller/conditions/builder.go b/images/virtualization-artifact/pkg/controller/conditions/builder.go index 4ffbf9edaf..8fdefacd44 100644 --- a/images/virtualization-artifact/pkg/controller/conditions/builder.go +++ b/images/virtualization-artifact/pkg/controller/conditions/builder.go @@ -66,6 +66,8 @@ func SetCondition(c Conder, conditions *[]metav1.Condition) { if !newCondition.LastTransitionTime.IsZero() && newCondition.LastTransitionTime.After(existingCondition.LastTransitionTime.Time) { existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } else { + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } } diff --git a/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go index c7f4bbb370..cd4631c642 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go +++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_controller.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" cvicollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/cvi" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -46,7 +46,7 @@ const ( ) type Condition interface { - Handle(ctx context.Context, cvi 
*virtv2.ClusterVirtualImage) error + Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error } func NewController( @@ -60,17 +60,17 @@ func NewController( ns string, ) (controller.Controller, error) { stat := service.NewStatService(log) - protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerCVIProtection) + protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerCVIProtection) importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) disk := service.NewDiskService(mgr.GetClient(), dvcr, protection, ControllerName) recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName) sources := source.NewSources() - sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, ns)) - sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), ns)) - sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient(), ns)) - sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, ns)) + sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, ns)) + sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), ns)) + sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient(), ns)) + sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, ns)) reconciler := NewReconciler( mgr.GetClient(), @@ -96,7 +96,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.ClusterVirtualImage{}). + For(&v1alpha2.ClusterVirtualImage{}). WithValidator(NewValidator(log)). 
Complete(); err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go index 7898b2ac50..a568441588 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_reconciler.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/watchers" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Watcher interface { @@ -41,7 +41,7 @@ type Watcher interface { } type Handler interface { - Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) + Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) } type Reconciler struct { @@ -85,10 +85,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.ClusterVirtualImage{}, - &handler.TypedEnqueueRequestForObject[*virtv2.ClusterVirtualImage]{}, - predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool { + &v1alpha2.ClusterVirtualImage{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.ClusterVirtualImage]{}, + predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool { return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() }, }, @@ -109,13 +109,13 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr } } - cviFromVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.ClusterVirtualImageObjectRefKindVirtualImage) + cviFromVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage) viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), cviFromVIEnqueuer) if err := viWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on VIs: %w", err) } - cviFromCVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage) + cviFromCVIEnqueuer := watchers.NewClusterVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage) cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), cviFromCVIEnqueuer) if err := cviWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on CVIs: %w", err) @@ -124,10 +124,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr return nil } -func (r *Reconciler) factory() *virtv2.ClusterVirtualImage { - return &virtv2.ClusterVirtualImage{} +func (r *Reconciler) factory() *v1alpha2.ClusterVirtualImage { + return &v1alpha2.ClusterVirtualImage{} } -func (r *Reconciler) statusGetter(obj *virtv2.ClusterVirtualImage) virtv2.ClusterVirtualImageStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.ClusterVirtualImage) v1alpha2.ClusterVirtualImageStatus { return obj.Status } diff --git 
a/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go b/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go index dc48e9b882..f85200f208 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go +++ b/images/virtualization-artifact/pkg/controller/cvi/cvi_webhook.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/validate" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -43,7 +43,7 @@ func NewValidator(logger *log.Logger) *Validator { } func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - cvi, ok := obj.(*virtv2.ClusterVirtualImage) + cvi, ok := obj.(*v1alpha2.ClusterVirtualImage) if !ok { return nil, fmt.Errorf("expected a new ClusterVirtualImage but got a %T", obj) } @@ -60,12 +60,12 @@ func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) (admis } func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - oldCVI, ok := oldObj.(*virtv2.ClusterVirtualImage) + oldCVI, ok := oldObj.(*v1alpha2.ClusterVirtualImage) if !ok { return nil, fmt.Errorf("expected an old ClusterVirtualImage but got a %T", newObj) } - newCVI, ok := newObj.(*virtv2.ClusterVirtualImage) + newCVI, ok := newObj.(*v1alpha2.ClusterVirtualImage) if !ok { return nil, fmt.Errorf("expected a new ClusterVirtualImage but got a %T", newObj) } @@ -79,7 +79,7 @@ func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Obj } ready, _ := conditions.GetCondition(cvicondition.ReadyType, newCVI.Status.Conditions) - if newCVI.Status.Phase == virtv2.ImageReady || ready.Status == metav1.ConditionTrue { + if newCVI.Status.Phase == v1alpha2.ImageReady || ready.Status == metav1.ConditionTrue { return nil, fmt.Errorf("ClusterVirtualImage is in a Ready state: configuration changes are not available") } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go b/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go index 6668de0150..3f59de235c 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/attachee.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type AttacheeHandler struct { @@ -38,7 +38,7 @@ func NewAttacheeHandler(client client.Client) *AttacheeHandler { } } -func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (h AttacheeHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("attachee")) hasAttachedVM, err := h.hasAttachedVM(ctx, cvi) @@ -49,10 +49,10 @@ func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualI switch { case !hasAttachedVM: log.Debug("Allow cluster virtual image deletion") - controllerutil.RemoveFinalizer(cvi, virtv2.FinalizerCVIProtection) + controllerutil.RemoveFinalizer(cvi, 
v1alpha2.FinalizerCVIProtection) case cvi.DeletionTimestamp == nil: log.Debug("Protect cluster virtual image from deletion") - controllerutil.AddFinalizer(cvi, virtv2.FinalizerCVIProtection) + controllerutil.AddFinalizer(cvi, v1alpha2.FinalizerCVIProtection) default: log.Debug("Cluster virtual image deletion is delayed: it's protected by virtual machines") } @@ -61,7 +61,7 @@ func (h AttacheeHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualI } func (h AttacheeHandler) hasAttachedVM(ctx context.Context, cvi client.Object) (bool, error) { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := h.client.List(ctx, &vms, &client.ListOptions{}) if err != nil { return false, fmt.Errorf("error getting virtual machines: %w", err) @@ -76,9 +76,9 @@ func (h AttacheeHandler) hasAttachedVM(ctx context.Context, cvi client.Object) ( return false, nil } -func (h AttacheeHandler) isCVIAttachedToVM(cviName string, vm virtv2.VirtualMachine) bool { +func (h AttacheeHandler) isCVIAttachedToVM(cviName string, vm v1alpha2.VirtualMachine) bool { for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.ClusterImageDevice && bda.Name == cviName { + if bda.Kind == v1alpha2.ClusterImageDevice && bda.Name == cviName { return true } } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go index c87e55f72c..4969630ee1 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/datasource_ready.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -41,7 +41,7 @@ func NewDatasourceReadyHandler(sources *source.Sources) *DatasourceReadyHandler } } -func (h DatasourceReadyHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (h DatasourceReadyHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(cvicondition.DatasourceReadyType).Generation(cvi.Generation) defer func() { conditions.SetCondition(cb, &cvi.Status.Conditions) }() diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go b/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go index a3f90a5303..80c94dfb71 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/deletion.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -40,7 +40,7 @@ func NewDeletionHandler(sources *source.Sources) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (h DeletionHandler) 
Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if cvi.DeletionTimestamp != nil { @@ -54,10 +54,10 @@ func (h DeletionHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualI } log.Info("Deletion observed: remove cleanup finalizer from ClusterVirtualImage") - controllerutil.RemoveFinalizer(cvi, virtv2.FinalizerCVICleanup) + controllerutil.RemoveFinalizer(cvi, v1alpha2.FinalizerCVICleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(cvi, virtv2.FinalizerCVICleanup) + controllerutil.AddFinalizer(cvi, v1alpha2.FinalizerCVICleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go index d3c281ef5d..5f89d827b0 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/life_cycle.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/cvi/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -42,7 +42,7 @@ func NewLifeCycleHandler(sources *source.Sources, client client.Client) *LifeCyc } } -func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { readyCondition, ok := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions) if !ok { cb := conditions.NewConditionBuilder(cvicondition.ReadyType). 
@@ -55,12 +55,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtual } if cvi.DeletionTimestamp != nil { - cvi.Status.Phase = virtv2.ImageTerminating + cvi.Status.Phase = v1alpha2.ImageTerminating return reconcile.Result{}, nil } if cvi.Status.Phase == "" { - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending } dataSourceReadyCondition, exists := conditions.GetCondition(cvicondition.DatasourceReadyType, cvi.Status.Conditions) @@ -73,8 +73,8 @@ func (h LifeCycleHandler) Handle(ctx context.Context, cvi *virtv2.ClusterVirtual } if readyCondition.Status != metav1.ConditionTrue && h.sources.Changed(ctx, cvi) { - cvi.Status = virtv2.ClusterVirtualImageStatus{ - Phase: virtv2.ImagePending, + cvi.Status = v1alpha2.ClusterVirtualImageStatus{ + Phase: v1alpha2.ImagePending, Conditions: cvi.Status.Conditions, ObservedGeneration: cvi.Status.ObservedGeneration, } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go index 2f0b23e55e..5a8ba64cdf 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -66,7 +66,7 @@ func NewHTTPDataSource( } } -func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (ds HTTPDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "http") condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions) @@ -92,7 +92,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. err = ds.importerService.Unprotect(ctx, pod) @@ -107,14 +107,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma return reconcile.Result{}, nil case object.IsTerminating(pod): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The HTTP DataSource import has started", ) @@ -126,14 +126,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb.Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). Message("DVCR Provisioner not found: create the new one.") @@ -144,11 +144,11 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb.Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error() + ".")) @@ -161,7 +161,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The HTTP DataSource import has completed", ) @@ -169,7 +169,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -181,7 +181,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -190,7 +190,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb.Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error() + ".")) @@ -209,7 +209,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma Reason(cvicondition.Provisioning). 
Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) cvi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(cvi.GetUID(), pod) @@ -220,7 +220,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) requeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -231,11 +231,11 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtual return requeue, nil } -func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualImage) error { +func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.ClusterVirtualImage) error { return nil } -func (ds HTTPDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { +func (ds HTTPDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyHTTPSourceSettings(&settings, cvi.Spec.DataSource.HTTP, supgen) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go index 97a7c94a62..09cc02d606 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/uploader" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
Importer Uploader Stat @@ -62,9 +62,9 @@ type Uploader interface { type Stat interface { GetFormat(pod *corev1.Pod) string GetCDROM(pod *corev1.Pod) bool - GetSize(pod *corev1.Pod) virtv2.ImageStatusSize + GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize GetDVCRImageName(pod *corev1.Pod) string - GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed + GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed GetProgress(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string IsUploaderReady(pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) bool IsUploadStarted(ownerUID types.UID, pod *corev1.Pod) bool diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go index 56d87ea1bd..270a719a89 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go @@ -10,7 +10,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/uploader" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" corev1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1090,7 +1090,7 @@ var _ Stat = &StatMock{} // GetDVCRImageNameFunc: func(pod *corev1.Pod) string { // panic("mock out the GetDVCRImageName method") // }, -// GetDownloadSpeedFunc: func(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed { +// GetDownloadSpeedFunc: func(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed { // panic("mock out the GetDownloadSpeed method") // }, // GetFormatFunc: func(pod *corev1.Pod) string { @@ -1099,7 +1099,7 @@ var _ Stat = &StatMock{} // GetProgressFunc: func(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string { // panic("mock out the GetProgress method") // }, -// GetSizeFunc: func(pod *corev1.Pod) virtv2.ImageStatusSize { +// GetSizeFunc: func(pod *corev1.Pod) v1alpha2.ImageStatusSize { // panic("mock out the GetSize method") // }, // IsUploadStartedFunc: func(ownerUID types.UID, pod *corev1.Pod) bool { @@ -1125,7 +1125,7 @@ type StatMock struct { GetDVCRImageNameFunc func(pod *corev1.Pod) string // GetDownloadSpeedFunc mocks the GetDownloadSpeed method. - GetDownloadSpeedFunc func(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed + GetDownloadSpeedFunc func(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed // GetFormatFunc mocks the GetFormat method. GetFormatFunc func(pod *corev1.Pod) string @@ -1134,7 +1134,7 @@ type StatMock struct { GetProgressFunc func(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string // GetSizeFunc mocks the GetSize method. - GetSizeFunc func(pod *corev1.Pod) virtv2.ImageStatusSize + GetSizeFunc func(pod *corev1.Pod) v1alpha2.ImageStatusSize // IsUploadStartedFunc mocks the IsUploadStarted method. IsUploadStartedFunc func(ownerUID types.UID, pod *corev1.Pod) bool @@ -1312,7 +1312,7 @@ func (mock *StatMock) GetDVCRImageNameCalls() []struct { } // GetDownloadSpeed calls GetDownloadSpeedFunc. 
-func (mock *StatMock) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed { +func (mock *StatMock) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed { if mock.GetDownloadSpeedFunc == nil { panic("StatMock.GetDownloadSpeedFunc: method is nil but Stat.GetDownloadSpeed was just called") } @@ -1424,7 +1424,7 @@ func (mock *StatMock) GetProgressCalls() []struct { } // GetSize calls GetSizeFunc. -func (mock *StatMock) GetSize(pod *corev1.Pod) virtv2.ImageStatusSize { +func (mock *StatMock) GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize { if mock.GetSizeFunc == nil { panic("StatMock.GetSizeFunc: method is nil but Stat.GetSize was just called") } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go index f6f36249f1..466c875aa4 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go @@ -41,7 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -83,7 +83,7 @@ func NewObjectRefDataSource( } } -func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "objectref") condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions) @@ -96,9 +96,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu }() switch cvi.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: viKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace} - vi, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{}) + vi, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{}) if err != nil { return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err) } @@ -107,12 +107,12 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu return reconcile.Result{}, fmt.Errorf("VI object ref source %s is nil", cvi.Spec.DataSource.ObjectRef.Name) } - if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim { + if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim { return ds.viOnPvcSyncer.Sync(ctx, cvi, vi, cb) } - case virtv2.VirtualDiskKind: + case v1alpha2.VirtualDiskKind: vdKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace} - vd, err := object.FetchObject(ctx, vdKey, ds.client, &virtv2.VirtualDisk{}) + vd, err := object.FetchObject(ctx, vdKey, ds.client, &v1alpha2.VirtualDisk{}) if err != nil { return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", vdKey, err) } @@ -123,9 +123,9 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu return 
ds.vdSyncer.Sync(ctx, cvi, vd, cb) - case virtv2.VirtualDiskSnapshotKind: + case v1alpha2.VirtualDiskSnapshotKind: vdSnapshotKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace} - vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return reconcile.Result{}, fmt.Errorf("unable to get VDSnapshot %s: %w", vdSnapshotKey, err) } @@ -152,7 +152,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. err = ds.importerService.Unprotect(ctx, pod) @@ -167,14 +167,14 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu return reconcile.Result{}, nil case object.IsTerminating(pod): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) var dvcrDataSource controller.DVCRDataSource @@ -194,14 +194,14 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -213,11 +213,11 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -233,7 +233,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu Reason(cvicondition.Ready). 
Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady var dvcrDataSource controller.DVCRDataSource dvcrDataSource, err = controller.NewDVCRDataSourcesForCVMI(ctx, cvi.Spec.DataSource, ds.client) @@ -259,7 +259,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -269,7 +269,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -285,7 +285,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu Reason(cvicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) log.Info("Ready", "progress", cvi.Status.Progress, "pod.phase", pod.Status.Phase) @@ -294,7 +294,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtu return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { viRefResult, err := ds.viOnPvcSyncer.CleanUp(ctx, cvi) if err != nil { return false, err @@ -315,15 +315,15 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVi return viRefResult || vdRefResult || objRefRequeue, nil } -func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error { +func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error { if cvi.Spec.DataSource.ObjectRef == nil { return fmt.Errorf("nil object ref: %s", cvi.Spec.DataSource.Type) } switch cvi.Spec.DataSource.ObjectRef.Kind { - case virtv2.ClusterVirtualImageObjectRefKindVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage: viKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace} - vi, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{}) + vi, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{}) if err != nil { return fmt.Errorf("unable to get VI %s: %w", viKey, err) } @@ -332,8 +332,8 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV return NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) } - if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim { - if vi.Status.Phase != virtv2.ImageReady { + if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim { + if vi.Status.Phase != v1alpha2.ImageReady { return 
NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) } return nil @@ -349,7 +349,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV } return NewImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) - case virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage: dvcrDataSource, err := controller.NewDVCRDataSourcesForCVMI(ctx, cvi.Spec.DataSource, ds.client) if err != nil { return err @@ -360,9 +360,9 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV } return NewClusterImageNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) - case virtv2.VirtualDiskSnapshotKind: + case v1alpha2.VirtualDiskSnapshotKind: vdSnapshotKey := types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace} - vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, ds.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return fmt.Errorf("unable to get VDSnapshot %s: %w", vdSnapshotKey, err) } @@ -372,14 +372,14 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterV } return ds.vdSnapshotSyncer.Validate(ctx, cvi) - case virtv2.ClusterVirtualImageObjectRefKindVirtualDisk: + case v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk: return ds.vdSyncer.Validate(ctx, cvi) default: return fmt.Errorf("unexpected object ref kind: %s", cvi.Spec.DataSource.ObjectRef.Kind) } } -func (ds ObjectRefDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { +func (ds ObjectRefDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) { if !dvcrDataSource.IsReady() { return nil, errors.New("dvcr data source is not ready") } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go index 3c67e1bed4..33f590be4c 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go @@ -40,7 +40,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -65,7 +65,7 @@ func NewObjectRefVirtualDisk(recorder eventrecord.EventRecorderLogger, importerS } } -func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "objectref") supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, vdRef.Namespace, cvi.UID) @@ -84,7 +84,7 @@ func (ds 
ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -98,14 +98,14 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt return reconcile.Result{}, nil case object.IsTerminating(pod): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) @@ -120,14 +120,14 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -139,11 +139,11 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -159,7 +159,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -170,7 +170,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -180,7 +180,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). 
@@ -201,7 +201,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt Reason(cvicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -211,11 +211,11 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *virtv2.ClusterVirt return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { return ds.importerService.DeletePod(ctx, cvi, controllerName) } -func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -228,20 +228,20 @@ func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *virtv2.ClusterVirtualImage, s return &settings } -func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error { - if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDisk { - return fmt.Errorf("not a %s data source", virtv2.ClusterVirtualImageObjectRefKindVirtualDisk) +func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error { + if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk { + return fmt.Errorf("not a %s data source", v1alpha2.ClusterVirtualImageObjectRefKindVirtualDisk) } - vd, err := object.FetchObject(ctx, types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}, ds.client, &virtv2.VirtualDisk{}) + vd, err := object.FetchObject(ctx, types.NamespacedName{Name: cvi.Spec.DataSource.ObjectRef.Name, Namespace: cvi.Spec.DataSource.ObjectRef.Namespace}, ds.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } - if vd == nil || vd.Status.Phase != virtv2.DiskReady { + if vd == nil || vd.Status.Phase != v1alpha2.DiskReady { return NewVirtualDiskNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) } - if cvi.Status.Phase != virtv2.ImageReady { + if cvi.Status.Phase != v1alpha2.ImageReady { inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions) if inUseCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(inUseCondition, vd) { return NewVirtualDiskNotReadyForUseError(vd.Name) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go index 1ac44b4504..6c81e18a8c 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go @@ -42,7 +42,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" 
"github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -76,7 +76,7 @@ func NewObjectRefVirtualDiskSnapshot( } } -func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, vdSnapshotRef *virtv2.VirtualDiskSnapshot, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, vdSnapshotRef *v1alpha2.VirtualDiskSnapshot, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "objectref") supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, vdSnapshotRef.Namespace, cvi.UID) @@ -105,7 +105,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu Reason(vicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -114,7 +114,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu return ds.CleanUpSupplements(ctx, cvi) case object.AnyTerminating(pod, pvc): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionTrue). @@ -126,7 +126,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) @@ -193,7 +193,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -216,14 +216,14 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -235,11 +235,11 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). 
@@ -255,7 +255,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu Reason(vicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -266,12 +266,12 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): if strings.Contains(err.Error(), "pod has unbound immediate PersistentVolumeClaims") { - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -286,7 +286,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -307,7 +307,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -317,7 +317,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *virtv2.Clu return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, cvi.Spec.DataSource.ObjectRef.Namespace, cvi.UID) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) @@ -342,7 +342,7 @@ func (ds ObjectRefVirtualDiskSnapshot) CleanUpSupplements(ctx context.Context, c } } -func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, cvi.Spec.DataSource.ObjectRef.Namespace, cvi.UID) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -358,7 +358,7 @@ func (ds ObjectRefVirtualDiskSnapshot) CleanUp(ctx context.Context, cvi *virtv2. 
return importerRequeue || diskRequeue, nil } -func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -371,9 +371,9 @@ func (ds ObjectRefVirtualDiskSnapshot) getEnvSettings(cvi *virtv2.ClusterVirtual return &settings } -func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error { - if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { - return fmt.Errorf("not a %s data source", virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot) +func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error { + if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { + return fmt.Errorf("not a %s data source", v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot) } vdSnapshot, err := ds.diskService.GetVirtualDiskSnapshot(ctx, cvi.Spec.DataSource.ObjectRef.Name, cvi.Spec.DataSource.ObjectRef.Namespace) @@ -381,7 +381,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, cvi *virtv2 return err } - if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady { return NewVirtualDiskSnapshotNotReadyError(cvi.Spec.DataSource.ObjectRef.Name) } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go index 4a8d243abc..0eee3e9f81 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -60,7 +60,7 @@ func NewObjectRefVirtualImageOnPvc(recorder eventrecord.EventRecorderLogger, imp } } -func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { +func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "objectref") supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, viRef.Namespace, cvi.UID) @@ -78,7 +78,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust Reason(cvicondition.Ready). 
Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -92,14 +92,14 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust return reconcile.Result{}, nil case object.IsTerminating(pod): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) @@ -114,14 +114,14 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -133,11 +133,11 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -153,7 +153,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = viRef.Status.Size cvi.Status.CDROM = viRef.Status.CDROM cvi.Status.Format = viRef.Status.Format @@ -164,7 +164,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -174,7 +174,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -195,7 +195,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust Reason(cvicondition.Provisioning). 
Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -205,11 +205,11 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *virtv2.Clust return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { return ds.importerService.DeletePod(ctx, cvi, controllerName) } -func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *virtv2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { +func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go index 27341c0f45..f2fded59c7 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go @@ -40,7 +40,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -71,7 +71,7 @@ func NewRegistryDataSource( } } -func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (ds RegistryDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "registry") condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions) @@ -98,7 +98,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. err = ds.importerService.Unprotect(ctx, pod) @@ -113,14 +113,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua return reconcile.Result{}, nil case object.IsTerminating(pod): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Registry DataSource import has started", ) cvi.Status.Progress = "0%" @@ -131,14 +131,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -150,11 +150,11 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -168,7 +168,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Registry DataSource import has completed", ) @@ -177,7 +177,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -188,7 +188,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua default: err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -198,7 +198,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -214,7 +214,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua Reason(cvicondition.Provisioning).
Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = "0%" cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) @@ -224,13 +224,13 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtua return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds RegistryDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds RegistryDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) return ds.importerService.CleanUp(ctx, supgen) } -func (ds RegistryDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error { +func (ds RegistryDataSource) Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error { if cvi.Spec.DataSource.ContainerImage.ImagePullSecret.Name != "" { secretName := types.NamespacedName{ Namespace: cvi.Spec.DataSource.ContainerImage.ImagePullSecret.Namespace, @@ -249,7 +249,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, cvi *virtv2.ClusterVi return nil } -func (ds RegistryDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { +func (ds RegistryDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go index 631a115902..744b915c9a 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/sources.go @@ -27,40 +27,40 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) type Handler interface { - Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) - CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) - Validate(ctx context.Context, cvi *virtv2.ClusterVirtualImage) error + Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) + CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) + Validate(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) error } type Sources struct { - sources map[virtv2.DataSourceType]Handler + sources map[v1alpha2.DataSourceType]Handler } func NewSources() *Sources { return &Sources{ - sources: make(map[virtv2.DataSourceType]Handler), + sources: make(map[v1alpha2.DataSourceType]Handler), } } -func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) { +func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) { s.sources[dsType] = h } -func (s Sources) Get(dsType virtv2.DataSourceType) (Handler, bool) { +func (s Sources) Get(dsType v1alpha2.DataSourceType) (Handler, bool) { source, ok := s.sources[dsType] return source, ok } 
-func (s Sources) Changed(_ context.Context, cvi *virtv2.ClusterVirtualImage) bool { +func (s Sources) Changed(_ context.Context, cvi *v1alpha2.ClusterVirtualImage) bool { return cvi.Generation != cvi.Status.ObservedGeneration } -func (s Sources) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (s Sources) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { var requeue bool for _, source := range s.sources { @@ -76,10 +76,10 @@ func (s Sources) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) ( } type Cleaner interface { - CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) + CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) } -func CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage, c Cleaner) (bool, error) { +func CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage, c Cleaner) (bool, error) { if object.ShouldCleanupSubResources(cvi) { return c.CleanUp(ctx, cvi) } @@ -93,8 +93,8 @@ func isDiskProvisioningFinished(c metav1.Condition) bool { const retryPeriod = 1 -func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result { - *phase = virtv2.ImageFailed +func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result { + *phase = v1alpha2.ImageFailed cb.Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed) @@ -107,8 +107,8 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt return reconcile.Result{RequeueAfter: retryPeriod * time.Minute} } -func setPhaseConditionToFailed(cbReady *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) { - *phase = virtv2.ImageFailed +func setPhaseConditionToFailed(cbReady *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) { + *phase = v1alpha2.ImageFailed cbReady.Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error())) diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go index bd0009ede8..ece31eb6ed 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) @@ -66,7 +66,7 @@ func NewUploadDataSource( } } -func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "upload") condition, _ := conditions.GetCondition(cvicondition.ReadyType, cvi.Status.Conditions) @@ -101,7 +101,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.Ready). 
Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady // Unprotect upload time supplements to delete them later. err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) @@ -116,7 +116,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI return reconcile.Result{}, nil case object.AnyTerminating(pod, svc, ing): - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil || svc == nil || ing == nil: @@ -126,14 +126,14 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &cvi.Status.Phase, err, cvi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &cvi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(cvicondition.Provisioning). @@ -145,11 +145,11 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(cvicondition.ProvisioningFailed). @@ -163,7 +163,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI ds.recorder.Event( cvi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Upload DataSource import has completed", ) @@ -172,7 +172,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.Ready). Message("") - cvi.Status.Phase = virtv2.ImageReady + cvi.Status.Phase = v1alpha2.ImageReady cvi.Status.Size = ds.statService.GetSize(pod) cvi.Status.CDROM = ds.statService.GetCDROM(pod) cvi.Status.Format = ds.statService.GetFormat(pod) @@ -184,7 +184,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI case ds.statService.IsUploadStarted(cvi.GetUID(), pod): err = ds.statService.CheckPod(pod) if err != nil { - cvi.Status.Phase = virtv2.ImageFailed + cvi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): @@ -194,7 +194,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Message(service.CapitalizeFirstLetter(err.Error() + ".")) return reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(cvi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(cvi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). 
Reason(cvicondition.ProvisioningFailed). @@ -210,7 +210,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - cvi.Status.Phase = virtv2.ImageProvisioning + cvi.Status.Phase = v1alpha2.ImageProvisioning cvi.Status.Progress = ds.statService.GetProgress(cvi.GetUID(), pod, cvi.Status.Progress) cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) cvi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(cvi.GetUID(), pod) @@ -227,9 +227,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.WaitForUserUpload). Message("Waiting for the user upload.") - cvi.Status.Phase = virtv2.ImageWaitForUserUpload + cvi.Status.Phase = v1alpha2.ImageWaitForUserUpload cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) - cvi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + cvi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } @@ -241,7 +241,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI Reason(cvicondition.ProvisioningNotStarted). Message(fmt.Sprintf("Waiting for the uploader %q to be ready to process the user's upload.", pod.Name)) - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) } @@ -249,17 +249,17 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *virtv2.ClusterVirtualI return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) CleanUp(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (bool, error) { +func (ds UploadDataSource) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) return ds.uploaderService.CleanUp(ctx, supgen) } -func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.ClusterVirtualImage) error { +func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.ClusterVirtualImage) error { return nil } -func (ds UploadDataSource) getEnvSettings(cvi *virtv2.ClusterVirtualImage, supgen supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go index f026d4af2a..58591441ee 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/vdsnapshot_watcher.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -45,17 +45,17 @@ type VirtualDiskSnapshotWatcher struct { func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher { return 
&VirtualDiskSnapshotWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)), client: client, } } func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -66,8 +66,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { - var cvis virtv2.ClusterVirtualImageList +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { + var cvis v1alpha2.ClusterVirtualImageList err := w.client.List(ctx, &cvis, &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldCVIByVDSnapshot, types.NamespacedName{ Namespace: vdSnapshot.Namespace, @@ -95,12 +95,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps return } -func isSnapshotDataSource(ds virtv2.ClusterVirtualImageDataSource, vdSnapshot metav1.Object) bool { - if ds.Type != virtv2.DataSourceTypeObjectRef { +func isSnapshotDataSource(ds v1alpha2.ClusterVirtualImageDataSource, vdSnapshot metav1.Object) bool { + if ds.Type != v1alpha2.DataSourceTypeObjectRef { return false } - if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { + if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { return false } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go index 13f543ed37..a47aa9751b 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualdisk_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -50,10 +50,10 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualDisk{}, + &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequestsFromVDs), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { oldInUseCondition, _ := 
conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions) newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions) @@ -71,8 +71,8 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle return nil } -func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var cviList virtv2.ClusterVirtualImageList +func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var cviList v1alpha2.ClusterVirtualImageList err := w.client.List(ctx, &cviList, &client.ListOptions{}) if err != nil { slog.Default().Error(fmt.Sprintf("failed to list cvi: %s", err)) @@ -80,11 +80,11 @@ func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *vir } for _, cvi := range cviList.Items { - if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || cvi.Spec.DataSource.ObjectRef == nil { + if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || cvi.Spec.DataSource.ObjectRef == nil { continue } - if cvi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskKind || cvi.Spec.DataSource.ObjectRef.Name != vd.GetName() && cvi.Spec.DataSource.ObjectRef.Namespace != vd.GetNamespace() { + if cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskKind || cvi.Spec.DataSource.ObjectRef.Name != vd.GetName() || cvi.Spec.DataSource.ObjectRef.Namespace != vd.GetNamespace() { continue } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go index 8c37696d9d..aaf2c5a5aa 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/watcher/virtualmachine_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct{} @@ -41,16 +41,16 @@ func NewVirtualMachineWatcher() *VirtualMachineWatcher { func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( source.Kind(mgr.GetCache(), - &virtv2.VirtualMachine{}, + &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueClusterImagesAttachedToVM), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.Object) }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedClusterImages(e.ObjectOld) || w.vmHasAttachedClusterImages(e.ObjectNew) }, }, @@ -61,11 +61,11 @@ func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Contro return nil } -func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_
context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { +func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_ context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { var requests []reconcile.Request for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind != virtv2.ClusterImageDevice { + if bda.Kind != v1alpha2.ClusterImageDevice { continue } @@ -77,9 +77,9 @@ func (w *VirtualMachineWatcher) enqueueClusterImagesAttachedToVM(_ context.Conte return requests } -func (w *VirtualMachineWatcher) vmHasAttachedClusterImages(vm *virtv2.VirtualMachine) bool { +func (w *VirtualMachineWatcher) vmHasAttachedClusterImages(vm *v1alpha2.VirtualMachine) bool { for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.ClusterImageDevice { + if bda.Kind == v1alpha2.ClusterImageDevice { return true } } diff --git a/images/virtualization-artifact/pkg/controller/dvcr_data_source.go b/images/virtualization-artifact/pkg/controller/dvcr_data_source.go index d74970833f..5def1931d2 100644 --- a/images/virtualization-artifact/pkg/controller/dvcr_data_source.go +++ b/images/virtualization-artifact/pkg/controller/dvcr_data_source.go @@ -26,11 +26,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/imageformat" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DVCRDataSource struct { - size virtv2.ImageStatusSize + size v1alpha2.ImageStatusSize meta metav1.Object uid types.UID format string @@ -38,7 +38,7 @@ type DVCRDataSource struct { isReady bool } -func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImageDataSource, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForCVMI(ctx context.Context, ds v1alpha2.ClusterVirtualImageDataSource, client client.Client) (DVCRDataSource, error) { if ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -46,11 +46,11 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.ClusterVirtualImageObjectRefKindVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := ds.ObjectRef.Namespace if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -60,14 +60,14 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.ClusterVirtualImageObjectRefKindClusterVirtualImage: + case v1alpha2.ClusterVirtualImageObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -77,7 +77,7 @@ func 
NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -86,7 +86,7 @@ func NewDVCRDataSourcesForCVMI(ctx context.Context, ds virtv2.ClusterVirtualImag return dsDVCR, nil } -func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForVMI(ctx context.Context, ds v1alpha2.VirtualImageDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { if ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -94,17 +94,17 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.VirtualImageObjectRefKindVirtualImage: + case v1alpha2.VirtualImageObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := obj.GetNamespace() if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } if vmi != nil { - if vmi.Spec.Storage == virtv2.StorageKubernetes || vmi.Spec.Storage == virtv2.StoragePersistentVolumeClaim { + if vmi.Spec.Storage == v1alpha2.StorageKubernetes || vmi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim { return DVCRDataSource{}, fmt.Errorf("the DVCR not used for virtual images with storage type '%s'", vmi.Spec.Storage) } @@ -112,14 +112,14 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.VirtualImageObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualImageObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -129,7 +129,7 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -138,7 +138,7 @@ func NewDVCRDataSourcesForVMI(ctx context.Context, ds virtv2.VirtualImageDataSou return dsDVCR, nil } -func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { +func NewDVCRDataSourcesForVMD(ctx context.Context, ds *v1alpha2.VirtualDiskDataSource, obj metav1.Object, client client.Client) (DVCRDataSource, error) { if ds == nil || ds.ObjectRef == nil { return DVCRDataSource{}, nil } @@ -146,11 +146,11 @@ 
func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou var dsDVCR DVCRDataSource switch ds.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage: vmiName := ds.ObjectRef.Name vmiNS := obj.GetNamespace() if vmiName != "" && vmiNS != "" { - vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &virtv2.VirtualImage{}) + vmi, err := object.FetchObject(ctx, types.NamespacedName{Name: vmiName, Namespace: vmiNS}, client, &v1alpha2.VirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -160,14 +160,14 @@ func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou dsDVCR.size = vmi.Status.Size dsDVCR.format = vmi.Status.Format dsDVCR.meta = vmi.GetObjectMeta() - dsDVCR.isReady = vmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = vmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = vmi.Status.Target.RegistryURL } } - case virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: cvmiName := ds.ObjectRef.Name if cvmiName != "" { - cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &virtv2.ClusterVirtualImage{}) + cvmi, err := object.FetchObject(ctx, types.NamespacedName{Name: cvmiName}, client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return DVCRDataSource{}, err } @@ -177,7 +177,7 @@ func NewDVCRDataSourcesForVMD(ctx context.Context, ds *virtv2.VirtualDiskDataSou dsDVCR.size = cvmi.Status.Size dsDVCR.meta = cvmi.GetObjectMeta() dsDVCR.format = cvmi.Status.Format - dsDVCR.isReady = cvmi.Status.Phase == virtv2.ImageReady + dsDVCR.isReady = cvmi.Status.Phase == v1alpha2.ImageReady dsDVCR.target = cvmi.Status.Target.RegistryURL } } @@ -198,7 +198,7 @@ func (ds *DVCRDataSource) GetUID() types.UID { return ds.uid } -func (ds *DVCRDataSource) GetSize() virtv2.ImageStatusSize { +func (ds *DVCRDataSource) GetSize() v1alpha2.ImageStatusSize { return ds.size } diff --git a/images/virtualization-artifact/pkg/controller/importer/settings.go b/images/virtualization-artifact/pkg/controller/importer/settings.go index 6e01588a5c..8a312f65b7 100644 --- a/images/virtualization-artifact/pkg/controller/importer/settings.go +++ b/images/virtualization-artifact/pkg/controller/importer/settings.go @@ -21,7 +21,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/datasource" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" - virtv2alpha1 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -77,7 +77,7 @@ func ApplyDVCRDestinationSettings(podEnvVars *Settings, dvcrSettings *dvcr.Setti } // ApplyHTTPSourceSettings updates importer Pod settings to use http source. 
-func ApplyHTTPSourceSettings(podEnvVars *Settings, http *virtv2alpha1.DataSourceHTTP, supGen supplements.Generator) { +func ApplyHTTPSourceSettings(podEnvVars *Settings, http *v1alpha2.DataSourceHTTP, supGen supplements.Generator) { podEnvVars.Source = SourceHTTP podEnvVars.Endpoint = http.URL diff --git a/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go index d0c764fce3..f5c4dfa630 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/cvi_indexer.go @@ -20,21 +20,21 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexCVIByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.ClusterVirtualImage{}, IndexFieldCVIByVDSnapshot, func(object client.Object) []string { - cvi, ok := object.(*virtv2.ClusterVirtualImage) + return &v1alpha2.ClusterVirtualImage{}, IndexFieldCVIByVDSnapshot, func(object client.Object) []string { + cvi, ok := object.(*v1alpha2.ClusterVirtualImage) if !ok || cvi == nil { return nil } - if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != virtv2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { + if cvi.Spec.DataSource.ObjectRef == nil || cvi.Spec.DataSource.ObjectRef.Kind != v1alpha2.ClusterVirtualImageObjectRefKindVirtualDiskSnapshot { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/indexer.go b/images/virtualization-artifact/pkg/controller/indexer/indexer.go index bc24be429f..0e3a302269 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/indexer.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -94,8 +94,8 @@ func IndexALL(ctx context.Context, mgr manager.Manager) error { } func IndexVMByClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByClass, func(object client.Object) []string { - vm, ok := object.(*virtv2.VirtualMachine) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByClass, func(object client.Object) []string { + vm, ok := object.(*v1alpha2.VirtualMachine) if !ok || vm == nil { return nil } @@ -104,26 +104,26 @@ func IndexVMByClass() (obj client.Object, field string, extractValue client.Inde } func IndexVMByVD() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByVD, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, virtv2.DiskDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByVD, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.DiskDevice) } } func IndexVMByVI() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByVI, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, 
virtv2.ImageDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByVI, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.ImageDevice) } } func IndexVMByCVI() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByCVI, func(object client.Object) []string { - return getBlockDeviceNamesByKind(object, virtv2.ClusterImageDevice) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByCVI, func(object client.Object) []string { + return getBlockDeviceNamesByKind(object, v1alpha2.ClusterImageDevice) } } func IndexVMByNode() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachine{}, IndexFieldVMByNode, func(object client.Object) []string { - vm, ok := object.(*virtv2.VirtualMachine) + return &v1alpha2.VirtualMachine{}, IndexFieldVMByNode, func(object client.Object) []string { + vm, ok := object.(*v1alpha2.VirtualMachine) if !ok || vm == nil || vm.Status.Node == "" { return nil } @@ -131,8 +131,8 @@ func IndexVMByNode() (obj client.Object, field string, extractValue client.Index } } -func getBlockDeviceNamesByKind(obj client.Object, kind virtv2.BlockDeviceKind) []string { - vm, ok := obj.(*virtv2.VirtualMachine) +func getBlockDeviceNamesByKind(obj client.Object, kind v1alpha2.BlockDeviceKind) []string { + vm, ok := obj.(*v1alpha2.VirtualMachine) if !ok || vm == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go index 0a4e96d6e5..7c6c4a0d43 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vd_indexer.go @@ -19,21 +19,21 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVDByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualDisk{}, IndexFieldVDByVDSnapshot, func(object client.Object) []string { - vd, ok := object.(*virtv2.VirtualDisk) + return &v1alpha2.VirtualDisk{}, IndexFieldVDByVDSnapshot, func(object client.Object) []string { + vd, ok := object.(*v1alpha2.VirtualDisk) if !ok || vd == nil { return nil } - if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if vd.Spec.DataSource.ObjectRef == nil || vd.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot { + if vd.Spec.DataSource.ObjectRef == nil || vd.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot { return nil } @@ -42,8 +42,8 @@ func IndexVDByVDSnapshot() (obj client.Object, field string, extractValue client } func IndexVDByStorageClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualDisk{}, IndexFieldVDByStorageClass, func(object client.Object) []string { - vd, ok := object.(*virtv2.VirtualDisk) + return &v1alpha2.VirtualDisk{}, IndexFieldVDByStorageClass, func(object client.Object) []string { + vd, ok := object.(*v1alpha2.VirtualDisk) if !ok || vd == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go index 
a00f0c3ed2..b7682995b0 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vi_indexer.go @@ -19,21 +19,21 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVIByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualImage{}, IndexFieldVIByVDSnapshot, func(object client.Object) []string { - vi, ok := object.(*virtv2.VirtualImage) + return &v1alpha2.VirtualImage{}, IndexFieldVIByVDSnapshot, func(object client.Object) []string { + vi, ok := object.(*v1alpha2.VirtualImage) if !ok || vi == nil { return nil } - if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { return nil } - if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { + if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return nil } @@ -42,8 +42,8 @@ func IndexVIByVDSnapshot() (obj client.Object, field string, extractValue client } func IndexVIByStorageClass() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualImage{}, IndexFieldVIByStorageClass, func(object client.Object) []string { - vi, ok := object.(*virtv2.VirtualImage) + return &v1alpha2.VirtualImage{}, IndexFieldVIByStorageClass, func(object client.Object) []string { + vi, ok := object.(*v1alpha2.VirtualImage) if !ok || vi == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go index 0bfb0b3ac9..d488b6c05f 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vm_restore_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMRestoreByVMSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineRestore{}, IndexFieldVMRestoreByVMSnapshot, func(object client.Object) []string { - vmRestore, ok := object.(*virtv2.VirtualMachineRestore) + return &v1alpha2.VirtualMachineRestore{}, IndexFieldVMRestoreByVMSnapshot, func(object client.Object) []string { + vmRestore, ok := object.(*v1alpha2.VirtualMachineRestore) if !ok || vmRestore == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go index 7098a20f74..939e20b650 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vm_snapshot_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMSnapshotByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineSnapshot{}, 
IndexFieldVMSnapshotByVM, func(object client.Object) []string { - vmSnapshot, ok := object.(*virtv2.VirtualMachineSnapshot) + return &v1alpha2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVM, func(object client.Object) []string { + vmSnapshot, ok := object.(*v1alpha2.VirtualMachineSnapshot) if !ok || vmSnapshot == nil { return nil } @@ -34,8 +34,8 @@ func IndexVMSnapshotByVM() (obj client.Object, field string, extractValue client } func IndexVMSnapshotByVDSnapshot() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVDSnapshot, func(object client.Object) []string { - vmSnapshot, ok := object.(*virtv2.VirtualMachineSnapshot) + return &v1alpha2.VirtualMachineSnapshot{}, IndexFieldVMSnapshotByVDSnapshot, func(object client.Object) []string { + vmSnapshot, ok := object.(*v1alpha2.VirtualMachineSnapshot) if !ok || vmSnapshot == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go index 1f9a87f6f9..c72ff07de0 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmbda_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMBDAByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineBlockDeviceAttachment{}, IndexFieldVMBDAByVM, func(object client.Object) []string { - vmbda, ok := object.(*virtv2.VirtualMachineBlockDeviceAttachment) + return &v1alpha2.VirtualMachineBlockDeviceAttachment{}, IndexFieldVMBDAByVM, func(object client.Object) []string { + vmbda, ok := object.(*v1alpha2.VirtualMachineBlockDeviceAttachment) if !ok || vmbda == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go index f7f9aecbfb..44743ae9bb 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmip_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMIPByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineIPAddress{}, IndexFieldVMIPByVM, func(object client.Object) []string { - vmip, ok := object.(*virtv2.VirtualMachineIPAddress) + return &v1alpha2.VirtualMachineIPAddress{}, IndexFieldVMIPByVM, func(object client.Object) []string { + vmip, ok := object.(*v1alpha2.VirtualMachineIPAddress) if !ok || vmip == nil { return nil } @@ -35,7 +35,7 @@ func IndexVMIPByVM() (obj client.Object, field string, extractValue client.Index } for _, ownerRef := range vmip.OwnerReferences { - if ownerRef.Kind != virtv2.VirtualMachineKind { + if ownerRef.Kind != v1alpha2.VirtualMachineKind { continue } @@ -51,8 +51,8 @@ func IndexVMIPByVM() (obj client.Object, field string, extractValue client.Index } func IndexVMIPByAddress() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineIPAddress{}, IndexFieldVMIPByAddress, func(object 
client.Object) []string { - vmip, ok := object.(*virtv2.VirtualMachineIPAddress) + return &v1alpha2.VirtualMachineIPAddress{}, IndexFieldVMIPByAddress, func(object client.Object) []string { + vmip, ok := object.(*v1alpha2.VirtualMachineIPAddress) if !ok || vmip == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmmac_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmmac_indexer.go index 3544229f93..e5f17fba54 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmmac_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmmac_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMMACByVM() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineMACAddress{}, IndexFieldVMMACByVM, func(object client.Object) []string { - vmmac, ok := object.(*virtv2.VirtualMachineMACAddress) + return &v1alpha2.VirtualMachineMACAddress{}, IndexFieldVMMACByVM, func(object client.Object) []string { + vmmac, ok := object.(*v1alpha2.VirtualMachineMACAddress) if !ok || vmmac == nil { return nil } @@ -35,7 +35,7 @@ func IndexVMMACByVM() (obj client.Object, field string, extractValue client.Inde } for _, ownerRef := range vmmac.OwnerReferences { - if ownerRef.Kind != virtv2.VirtualMachineKind { + if ownerRef.Kind != v1alpha2.VirtualMachineKind { continue } @@ -51,8 +51,8 @@ func IndexVMMACByVM() (obj client.Object, field string, extractValue client.Inde } func IndexVMMACByAddress() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineMACAddress{}, IndexFieldVMMACByAddress, func(object client.Object) []string { - vmmac, ok := object.(*virtv2.VirtualMachineMACAddress) + return &v1alpha2.VirtualMachineMACAddress{}, IndexFieldVMMACByAddress, func(object client.Object) []string { + vmmac, ok := object.(*v1alpha2.VirtualMachineMACAddress) if !ok || vmmac == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/indexer/vmmaclease_indexer.go b/images/virtualization-artifact/pkg/controller/indexer/vmmaclease_indexer.go index 0b7298913d..8c740533b7 100644 --- a/images/virtualization-artifact/pkg/controller/indexer/vmmaclease_indexer.go +++ b/images/virtualization-artifact/pkg/controller/indexer/vmmaclease_indexer.go @@ -19,12 +19,12 @@ package indexer import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func IndexVMMACLeaseByVMMAC() (obj client.Object, field string, extractValue client.IndexerFunc) { - return &virtv2.VirtualMachineMACAddressLease{}, IndexFieldVMMACLeaseByVMMAC, func(object client.Object) []string { - lease, ok := object.(*virtv2.VirtualMachineMACAddressLease) + return &v1alpha2.VirtualMachineMACAddressLease{}, IndexFieldVMMACLeaseByVMMAC, func(object client.Object) []string { + lease, ok := object.(*v1alpha2.VirtualMachineMACAddressLease) if !ok || lease == nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go b/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go deleted file mode 100644 index ad900f5108..0000000000 --- a/images/virtualization-artifact/pkg/controller/kvapi/kvapi.go +++ /dev/null @@ -1,274 +0,0 @@ -/* -Copyright 2024 Flant JSC - -Licensed under 
the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kvapi - -import ( - "context" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - virtv1 "kubevirt.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/deckhouse/virtualization-controller/pkg/common/patch" -) - -type Kubevirt interface { - HotplugVolumesEnabled() bool -} - -// Deprecated: use virt client. -func New(cli client.Client, kv Kubevirt) *KvAPI { - return &KvAPI{ - Client: cli, - kubevirt: kv, - } -} - -// Deprecated: use virt client. -type KvAPI struct { - client.Client - kubevirt Kubevirt -} - -// Deprecated: use virt client. -func (api *KvAPI) AddVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.AddVolumeOptions) error { - return api.addVolume(ctx, kvvm, opts) -} - -// Deprecated: use virt client. -func (api *KvAPI) RemoveVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.RemoveVolumeOptions) error { - return api.removeVolume(ctx, kvvm, opts) -} - -func (api *KvAPI) addVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.AddVolumeOptions) error { - if kvvm == nil { - return nil - } - if !api.kubevirt.HotplugVolumesEnabled() { - return fmt.Errorf("unable to add volume because HotplugVolumes feature gate is not enabled") - } - // Validate AddVolumeOptions - switch { - case opts.Name == "": - return fmt.Errorf("AddVolumeOptions requires name to be set") - case opts.Disk == nil: - return fmt.Errorf("AddVolumeOptions requires disk to not be nil") - case opts.VolumeSource == nil: - return fmt.Errorf("AddVolumeOptions requires VolumeSource to not be nil") - } - - opts.Disk.Name = opts.Name - - volumeRequest := virtv1.VirtualMachineVolumeRequest{ - AddVolumeOptions: opts, - } - - switch { - case opts.VolumeSource.DataVolume != nil: - opts.VolumeSource.DataVolume.Hotpluggable = true - case opts.VolumeSource.PersistentVolumeClaim != nil: - opts.VolumeSource.PersistentVolumeClaim.Hotpluggable = true - } - - return api.vmVolumePatchStatus(ctx, kvvm, &volumeRequest) -} - -func (api *KvAPI) removeVolume(ctx context.Context, kvvm *virtv1.VirtualMachine, opts *virtv1.RemoveVolumeOptions) error { - if kvvm == nil { - return nil - } - if !api.kubevirt.HotplugVolumesEnabled() { - return fmt.Errorf("unable to remove volume because HotplugVolumes feature gate is not enabled") - } - - if opts.Name == "" { - return fmt.Errorf("RemoveVolumeOptions requires name to be set") - } - - volumeRequest := virtv1.VirtualMachineVolumeRequest{ - RemoveVolumeOptions: opts, - } - - return api.vmVolumePatchStatus(ctx, kvvm, &volumeRequest) -} - -func (api *KvAPI) vmVolumePatchStatus(ctx context.Context, kvvm *virtv1.VirtualMachine, volumeRequest *virtv1.VirtualMachineVolumeRequest) error { - if kvvm == nil { - return nil - } - err := verifyVolumeOption(kvvm.Spec.Template.Spec.Volumes, volumeRequest) - if err != nil { - return err - } - - jp, err := generateVMVolumeRequestPatch(kvvm, volumeRequest) - if err != nil { - return err - } - - dryRunOption := 
api.getDryRunOption(volumeRequest) - err = api.Client.Status().Patch(ctx, kvvm, - client.RawPatch(types.JSONPatchType, []byte(jp)), - &client.SubResourcePatchOptions{ - PatchOptions: client.PatchOptions{DryRun: dryRunOption}, - }) - if err != nil { - return fmt.Errorf("unable to patch kvvm: %w", err) - } - - return nil -} - -func (api *KvAPI) getDryRunOption(volumeRequest *virtv1.VirtualMachineVolumeRequest) []string { - var dryRunOption []string - if options := volumeRequest.AddVolumeOptions; options != nil && options.DryRun != nil && options.DryRun[0] == metav1.DryRunAll { - dryRunOption = volumeRequest.AddVolumeOptions.DryRun - } else if options := volumeRequest.RemoveVolumeOptions; options != nil && options.DryRun != nil && options.DryRun[0] == metav1.DryRunAll { - dryRunOption = volumeRequest.RemoveVolumeOptions.DryRun - } - return dryRunOption -} - -func verifyVolumeOption(volumes []virtv1.Volume, volumeRequest *virtv1.VirtualMachineVolumeRequest) error { - foundRemoveVol := false - for _, volume := range volumes { - if volumeRequest.AddVolumeOptions != nil { - volSourceName := volumeSourceName(volumeRequest.AddVolumeOptions.VolumeSource) - if volumeNameExists(volume, volumeRequest.AddVolumeOptions.Name) { - return fmt.Errorf("unable to add volume [%s] because volume with that name already exists", volumeRequest.AddVolumeOptions.Name) - } - if volumeSourceExists(volume, volSourceName) { - return fmt.Errorf("unable to add volume source [%s] because it already exists", volSourceName) - } - } else if volumeRequest.RemoveVolumeOptions != nil && VolumeExists(volume, volumeRequest.RemoveVolumeOptions.Name) { - if !volumeHotpluggable(volume) { - return fmt.Errorf("unable to remove volume [%s] because it is not hotpluggable", volume.Name) - } - foundRemoveVol = true - break - } - } - - if volumeRequest.RemoveVolumeOptions != nil && !foundRemoveVol { - return fmt.Errorf("unable to remove volume [%s] because it does not exist", volumeRequest.RemoveVolumeOptions.Name) - } - - return nil -} - -func volumeSourceName(volumeSource *virtv1.HotplugVolumeSource) string { - if volumeSource.DataVolume != nil { - return volumeSource.DataVolume.Name - } - if volumeSource.PersistentVolumeClaim != nil { - return volumeSource.PersistentVolumeClaim.ClaimName - } - return "" -} - -func VolumeExists(volume virtv1.Volume, volumeName string) bool { - return volumeNameExists(volume, volumeName) || volumeSourceExists(volume, volumeName) -} - -func volumeNameExists(volume virtv1.Volume, volumeName string) bool { - return volume.Name == volumeName -} - -func volumeSourceExists(volume virtv1.Volume, volumeName string) bool { - // Do not add ContainerDisk!!! - return (volume.DataVolume != nil && volume.DataVolume.Name == volumeName) || - (volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == volumeName) -} - -func volumeHotpluggable(volume virtv1.Volume) bool { - return (volume.DataVolume != nil && volume.DataVolume.Hotpluggable) || (volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable) -} - -func generateVMVolumeRequestPatch(vm *virtv1.VirtualMachine, volumeRequest *virtv1.VirtualMachineVolumeRequest) (string, error) { - vmCopy := vm.DeepCopy() - - // We only validate the list against other items in the list at this point. 
- // The VM validation webhook will validate the list against the VMI spec - // during the Patch command - if volumeRequest.AddVolumeOptions != nil { - if err := addAddVolumeRequests(vmCopy, volumeRequest); err != nil { - return "", err - } - } else if volumeRequest.RemoveVolumeOptions != nil { - if err := addRemoveVolumeRequests(vmCopy, volumeRequest); err != nil { - return "", err - } - } - - verb := patch.PatchAddOp - if len(vm.Status.VolumeRequests) > 0 { - verb = patch.PatchReplaceOp - } - jop := patch.NewJSONPatchOperation(verb, "/status/volumeRequests", vmCopy.Status.VolumeRequests) - jp := patch.NewJSONPatch(jop) - - return jp.String() -} - -func addAddVolumeRequests(vm *virtv1.VirtualMachine, volumeRequest *virtv1.VirtualMachineVolumeRequest) error { - name := volumeRequest.AddVolumeOptions.Name - for _, request := range vm.Status.VolumeRequests { - if err := validateAddVolumeRequest(request, name); err != nil { - return err - } - } - vm.Status.VolumeRequests = append(vm.Status.VolumeRequests, *volumeRequest) - return nil -} - -func validateAddVolumeRequest(request virtv1.VirtualMachineVolumeRequest, name string) error { - if addVolumeRequestExists(request, name) { - return fmt.Errorf("add volume request for volume [%s] already exists", name) - } - if removeVolumeRequestExists(request, name) { - return fmt.Errorf("unable to add volume since a remove volume request for volume [%s] already exists and is still being processed", name) - } - return nil -} - -func addRemoveVolumeRequests(vm *virtv1.VirtualMachine, volumeRequest *virtv1.VirtualMachineVolumeRequest) error { - name := volumeRequest.RemoveVolumeOptions.Name - var volumeRequestsList []virtv1.VirtualMachineVolumeRequest - for _, request := range vm.Status.VolumeRequests { - if addVolumeRequestExists(request, name) { - // Filter matching AddVolume requests from the new list. - continue - } - if removeVolumeRequestExists(request, name) { - return fmt.Errorf("a remove volume request for volume [%s] already exists and is still being processed", name) - } - volumeRequestsList = append(volumeRequestsList, request) - } - volumeRequestsList = append(volumeRequestsList, *volumeRequest) - vm.Status.VolumeRequests = volumeRequestsList - return nil -} - -func addVolumeRequestExists(request virtv1.VirtualMachineVolumeRequest, name string) bool { - return request.AddVolumeOptions != nil && request.AddVolumeOptions.Name == name -} - -func removeVolumeRequestExists(request virtv1.VirtualMachineVolumeRequest, name string) bool { - return request.RemoveVolumeOptions != nil && request.RemoveVolumeOptions.Name == name -} diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go index 41d0fb3719..07c3df0628 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/common/resource_builder" "github.com/deckhouse/virtualization-controller/pkg/common/vm" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // TODO(VM): Implement at this level some mechanics supporting "effectiveSpec" logic @@ -46,12 +46,12 @@ const ( SysprepDiskName = "sysprep" // GenericCPUModel specifies the base CPU model for Features and Discovery CPU model types. 
- GenericCPUModel = "kvm64" + GenericCPUModel = "qemu64" ) type KVVMOptions struct { EnableParavirtualization bool - OsType virtv2.OsType + OsType v1alpha2.OsType // These options are for local development mode DisableHypervSyNIC bool @@ -69,7 +69,7 @@ func NewKVVM(currentKVVM *virtv1.VirtualMachine, opts KVVMOptions) *KVVM { } } -func DefaultOptions(current *virtv2.VirtualMachine) KVVMOptions { +func DefaultOptions(current *v1alpha2.VirtualMachine) KVVMOptions { return KVVMOptions{ EnableParavirtualization: current.Spec.EnableParavirtualization, OsType: current.Spec.OsType, @@ -120,32 +120,40 @@ func (b *KVVM) SetKVVMIAnnotation(annoKey, annoValue string) { b.Resource.Spec.Template.ObjectMeta.SetAnnotations(anno) } -func (b *KVVM) SetCPUModel(class *virtv2.VirtualMachineClass) error { +func (b *KVVM) SetCPUModel(class *v1alpha2.VirtualMachineClass) error { if b.Resource.Spec.Template.Spec.Domain.CPU == nil { b.Resource.Spec.Template.Spec.Domain.CPU = &virtv1.CPU{} } cpu := b.Resource.Spec.Template.Spec.Domain.CPU switch class.Spec.CPU.Type { - case virtv2.CPUTypeHost: + case v1alpha2.CPUTypeHost: cpu.Model = virtv1.CPUModeHostModel - case virtv2.CPUTypeHostPassthrough: + case v1alpha2.CPUTypeHostPassthrough: cpu.Model = virtv1.CPUModeHostPassthrough - case virtv2.CPUTypeModel: + case v1alpha2.CPUTypeModel: cpu.Model = class.Spec.CPU.Model - case virtv2.CPUTypeDiscovery, virtv2.CPUTypeFeatures: + case v1alpha2.CPUTypeDiscovery, v1alpha2.CPUTypeFeatures: cpu.Model = GenericCPUModel - features := make([]virtv1.CPUFeature, len(class.Status.CpuFeatures.Enabled)) + l := len(class.Status.CpuFeatures.Enabled) + features := make([]virtv1.CPUFeature, l, l+1) + hasSvm := false for i, feature := range class.Status.CpuFeatures.Enabled { policy := "require" if feature == "invtsc" { policy = "optional" } + if feature == "svm" { + hasSvm = true + } features[i] = virtv1.CPUFeature{ Name: feature, Policy: policy, } } + if !hasSvm { + features = append(features, virtv1.CPUFeature{Name: "svm", Policy: "optional"}) + } cpu.Features = features default: return fmt.Errorf("unexpected cpu type: %q", class.Spec.CPU.Type) @@ -153,13 +161,13 @@ func (b *KVVM) SetCPUModel(class *virtv2.VirtualMachineClass) error { return nil } -func (b *KVVM) SetRunPolicy(runPolicy virtv2.RunPolicy) error { +func (b *KVVM) SetRunPolicy(runPolicy v1alpha2.RunPolicy) error { switch runPolicy { - case virtv2.AlwaysOnPolicy, - virtv2.AlwaysOffPolicy, - virtv2.ManualPolicy: + case v1alpha2.AlwaysOnPolicy, + v1alpha2.AlwaysOffPolicy, + v1alpha2.ManualPolicy: b.Resource.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyManual) - case virtv2.AlwaysOnUnlessStoppedManually: + case v1alpha2.AlwaysOnUnlessStoppedManually: if !b.ResourceExists { // initialize only b.Resource.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyAlways) @@ -294,7 +302,7 @@ func GetCPULimit(cores int) *resource.Quantity { } type SetDiskOptions struct { - Provisioning *virtv2.Provisioning + Provisioning *v1alpha2.Provisioning ContainerDisk *string PersistentVolumeClaim *string @@ -365,18 +373,19 @@ func (b *KVVM) SetDisk(name string, opts SetDiskOptions) error { case opts.ContainerDisk != nil: vs.ContainerDisk = &virtv1.ContainerDiskSource{ - Image: *opts.ContainerDisk, + Image: *opts.ContainerDisk, + Hotpluggable: opts.IsHotplugged, } case opts.Provisioning != nil: switch opts.Provisioning.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: if opts.Provisioning.SysprepRef == nil { return fmt.Errorf("nil sysprep ref: 
%s", opts.Provisioning.Type) } switch opts.Provisioning.SysprepRef.Kind { - case virtv2.SysprepRefKindSecret: + case v1alpha2.SysprepRefKindSecret: vs.Sysprep = &virtv1.SysprepSource{ Secret: &corev1.LocalObjectReference{ Name: opts.Provisioning.SysprepRef.Name, @@ -385,17 +394,17 @@ func (b *KVVM) SetDisk(name string, opts SetDiskOptions) error { default: return fmt.Errorf("unexpected sysprep ref kind: %s", opts.Provisioning.SysprepRef.Kind) } - case virtv2.ProvisioningTypeUserData: + case v1alpha2.ProvisioningTypeUserData: vs.CloudInitNoCloud = &virtv1.CloudInitNoCloudSource{ UserData: opts.Provisioning.UserData, } - case virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserDataRef: if opts.Provisioning.UserDataRef == nil { return fmt.Errorf("nil user data ref: %s", opts.Provisioning.Type) } switch opts.Provisioning.UserDataRef.Kind { - case virtv2.UserDataRefKindSecret: + case v1alpha2.UserDataRefKindSecret: vs.CloudInitNoCloud = &virtv1.CloudInitNoCloudSource{ UserDataSecretRef: &corev1.LocalObjectReference{ Name: opts.Provisioning.UserDataRef.Name, @@ -450,24 +459,24 @@ func (b *KVVM) HasTablet(name string) bool { return false } -func (b *KVVM) SetProvisioning(p *virtv2.Provisioning) error { +func (b *KVVM) SetProvisioning(p *v1alpha2.Provisioning) error { if p == nil { return nil } switch p.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: return b.SetDisk(SysprepDiskName, SetDiskOptions{Provisioning: p, IsCdrom: true}) - case virtv2.ProvisioningTypeUserData, virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserData, v1alpha2.ProvisioningTypeUserDataRef: return b.SetDisk(CloudInitDiskName, SetDiskOptions{Provisioning: p}) default: return fmt.Errorf("unexpected provisioning type %s. 
%w", p.Type, common.ErrUnknownType) } } -func (b *KVVM) SetOsType(osType virtv2.OsType) error { +func (b *KVVM) SetOsType(osType v1alpha2.OsType) error { switch osType { - case virtv2.Windows: + case v1alpha2.Windows: // Need for `029-use-OFVM_CODE-for-linux.patch` // b.SetKVVMIAnnotation(annotations.AnnOsType, string(virtv2.Windows)) @@ -505,7 +514,7 @@ func (b *KVVM) SetOsType(osType virtv2.OsType) error { } } - case virtv2.GenericOs: + case v1alpha2.GenericOs: b.Resource.Spec.Template.Spec.Domain.Machine = &virtv1.Machine{ Type: "q35", } @@ -583,21 +592,21 @@ func (b *KVVM) SetNetworkInterface(name, macAddress string) { } } -func (b *KVVM) SetBootloader(bootloader virtv2.BootloaderType) error { +func (b *KVVM) SetBootloader(bootloader v1alpha2.BootloaderType) error { if b.Resource.Spec.Template.Spec.Domain.Firmware == nil { b.Resource.Spec.Template.Spec.Domain.Firmware = &virtv1.Firmware{} } switch bootloader { - case "", virtv2.BIOS: + case "", v1alpha2.BIOS: b.Resource.Spec.Template.Spec.Domain.Firmware.Bootloader = nil - case virtv2.EFI: + case v1alpha2.EFI: b.Resource.Spec.Template.Spec.Domain.Firmware.Bootloader = &virtv1.Bootloader{ EFI: &virtv1.EFI{ SecureBoot: pointer.GetPointer(false), }, } - case virtv2.EFIWithSecureBoot: + case v1alpha2.EFIWithSecureBoot: if b.Resource.Spec.Template.Spec.Domain.Features == nil { b.Resource.Spec.Template.Spec.Domain.Features = &virtv1.Features{} } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go index 1bb9b0f0df..8a14050251 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_test.go @@ -23,7 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestSetAffinity(t *testing.T) { @@ -126,7 +126,7 @@ func TestSetOsType(t *testing.T) { t.Run("Change from Windows to Generic should remove TPM", func(t *testing.T) { builder := NewEmptyKVVM(types.NamespacedName{Name: name, Namespace: namespace}, KVVMOptions{}) - err := builder.SetOsType(virtv2.Windows) + err := builder.SetOsType(v1alpha2.Windows) if err != nil { t.Fatalf("SetOsType(Windows) failed: %v", err) } @@ -135,7 +135,7 @@ func TestSetOsType(t *testing.T) { t.Error("TPM should be present after setting Windows OS") } - err = builder.SetOsType(virtv2.GenericOs) + err = builder.SetOsType(v1alpha2.GenericOs) if err != nil { t.Fatalf("SetOsType(GenericOs) failed: %v", err) } diff --git a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go index 4ac6ae376f..4b703cc0a8 100644 --- a/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go +++ b/images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/netmanager" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -56,14 +56,14 @@ func GenerateCVIDiskName(name string) string { return CVIDiskPrefix + name } -func 
GetOriginalDiskName(prefixedName string) (string, virtv2.BlockDeviceKind) { +func GetOriginalDiskName(prefixedName string) (string, v1alpha2.BlockDeviceKind) { switch { case strings.HasPrefix(prefixedName, VDDiskPrefix): - return strings.TrimPrefix(prefixedName, VDDiskPrefix), virtv2.DiskDevice + return strings.TrimPrefix(prefixedName, VDDiskPrefix), v1alpha2.DiskDevice case strings.HasPrefix(prefixedName, VIDiskPrefix): - return strings.TrimPrefix(prefixedName, VIDiskPrefix), virtv2.ImageDevice + return strings.TrimPrefix(prefixedName, VIDiskPrefix), v1alpha2.ImageDevice case strings.HasPrefix(prefixedName, CVIDiskPrefix): - return strings.TrimPrefix(prefixedName, CVIDiskPrefix), virtv2.ClusterImageDevice + return strings.TrimPrefix(prefixedName, CVIDiskPrefix), v1alpha2.ClusterImageDevice } return prefixedName, "" @@ -83,15 +83,17 @@ func GenerateSerial(input string) string { type HotPlugDeviceSettings struct { VolumeName string PVCName string + Image string DataVolumeName string } func ApplyVirtualMachineSpec( - kvvm *KVVM, vm *virtv2.VirtualMachine, - vdByName map[string]*virtv2.VirtualDisk, - viByName map[string]*virtv2.VirtualImage, - cviByName map[string]*virtv2.ClusterVirtualImage, - class *virtv2.VirtualMachineClass, + kvvm *KVVM, vm *v1alpha2.VirtualMachine, + vdByName map[string]*v1alpha2.VirtualDisk, + viByName map[string]*v1alpha2.VirtualImage, + cviByName map[string]*v1alpha2.ClusterVirtualImage, + vmbdas map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment, + class *v1alpha2.VirtualMachineClass, ipAddress string, networkSpec network.InterfaceSpecList, ) error { @@ -113,7 +115,7 @@ func ApplyVirtualMachineSpec( kvvm.SetTablet("default-0") kvvm.SetNodeSelector(vm.Spec.NodeSelector, class.Spec.NodeSelector.MatchLabels) kvvm.SetTolerations(vm.Spec.Tolerations, class.Spec.Tolerations) - kvvm.SetAffinity(virtv2.NewAffinityFromVMAffinity(vm.Spec.Affinity), class.Spec.NodeSelector.MatchExpressions) + kvvm.SetAffinity(v1alpha2.NewAffinityFromVMAffinity(vm.Spec.Affinity), class.Spec.NodeSelector.MatchExpressions) kvvm.SetPriorityClassName(vm.Spec.PriorityClassName) kvvm.SetTerminationGracePeriod(vm.Spec.TerminationGracePeriodSeconds) kvvm.SetTopologySpreadConstraint(vm.Spec.TopologySpreadConstraints) @@ -130,11 +132,11 @@ func ApplyVirtualMachineSpec( PVCName: volume.PersistentVolumeClaim.ClaimName, }) } - // FIXME(VM): not used, now only supports PVC - if volume.DataVolume != nil && volume.DataVolume.Hotpluggable { + + if volume.ContainerDisk != nil && volume.ContainerDisk.Hotpluggable { hotpluggedDevices = append(hotpluggedDevices, HotPlugDeviceSettings{ - VolumeName: volume.Name, - DataVolumeName: volume.DataVolume.Name, + VolumeName: volume.Name, + Image: volume.ContainerDisk.Image, }) } } @@ -144,7 +146,7 @@ func ApplyVirtualMachineSpec( for _, bd := range vm.Spec.BlockDeviceRefs { // bootOrder starts from 1. switch bd.Kind { - case virtv2.ImageDevice: + case v1alpha2.ImageDevice: // Attach ephemeral disk for storage: Kubernetes. // Attach containerDisk for storage: ContainerRegistry (i.e. image from DVCR). @@ -152,8 +154,8 @@ func ApplyVirtualMachineSpec( name := GenerateVIDiskName(bd.Name) switch vi.Spec.Storage { - case virtv2.StorageKubernetes, - virtv2.StoragePersistentVolumeClaim: + case v1alpha2.StorageKubernetes, + v1alpha2.StoragePersistentVolumeClaim: // Attach PVC as ephemeral volume: its data will be restored to initial state on VM restart. 
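A note on the naming scheme used throughout kvvm_utils.go: the kind of a block device is encoded into the KVVM disk name via a prefix, and GetOriginalDiskName reverses it. A minimal sketch of that round-trip, assuming illustrative prefix values (the real literals are the VDDiskPrefix, VIDiskPrefix and CVIDiskPrefix constants, which are not shown in this hunk):

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative prefix values; the real ones are the VDDiskPrefix,
// VIDiskPrefix and CVIDiskPrefix constants in kvvm_utils.go.
const (
	vdDiskPrefix  = "vd-"
	viDiskPrefix  = "vi-"
	cviDiskPrefix = "cvi-"
)

// generateVDDiskName mirrors GenerateVDDiskName: the prefix encodes the kind.
func generateVDDiskName(name string) string { return vdDiskPrefix + name }

// getOriginalDiskName mirrors GetOriginalDiskName: strip the prefix and
// report the kind; an empty kind means the name carried no known prefix.
func getOriginalDiskName(prefixed string) (string, string) {
	switch {
	case strings.HasPrefix(prefixed, vdDiskPrefix):
		return strings.TrimPrefix(prefixed, vdDiskPrefix), "VirtualDisk"
	case strings.HasPrefix(prefixed, viDiskPrefix):
		return strings.TrimPrefix(prefixed, viDiskPrefix), "VirtualImage"
	case strings.HasPrefix(prefixed, cviDiskPrefix):
		return strings.TrimPrefix(prefixed, cviDiskPrefix), "ClusterVirtualImage"
	}
	return prefixed, ""
}

func main() {
	disk := generateVDDiskName("data")
	name, kind := getOriginalDiskName(disk)
	fmt.Println(disk, "->", name, kind) // vd-data -> data VirtualDisk
}
```

The empty kind returned for an unprefixed name lets callers treat unknown volumes as pass-through, matching the final return in the real helper.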
if err := kvvm.SetDisk(name, SetDiskOptions{ PersistentVolumeClaim: pointer.GetPointer(vi.Status.Target.PersistentVolumeClaim), @@ -163,7 +165,7 @@ func ApplyVirtualMachineSpec( }); err != nil { return err } - case virtv2.StorageContainerRegistry: + case v1alpha2.StorageContainerRegistry: if err := kvvm.SetDisk(name, SetDiskOptions{ ContainerDisk: pointer.GetPointer(vi.Status.Target.RegistryURL), IsCdrom: imageformat.IsISO(vi.Status.Format), @@ -177,7 +179,7 @@ func ApplyVirtualMachineSpec( } bootOrder++ - case virtv2.ClusterImageDevice: + case v1alpha2.ClusterImageDevice: // ClusterVirtualImage is attached as containerDisk. cvi := cviByName[bd.Name] @@ -193,7 +195,7 @@ func ApplyVirtualMachineSpec( } bootOrder++ - case virtv2.DiskDevice: + case v1alpha2.DiskDevice: // VirtualDisk is attached as a regular disk. vd := vdByName[bd.Name] @@ -218,6 +220,10 @@ func ApplyVirtualMachineSpec( } } + if err := kvvm.SetProvisioning(vm.Spec.Provisioning); err != nil { + return err + } + for _, device := range hotpluggedDevices { switch { case device.PVCName != "": @@ -227,20 +233,22 @@ func ApplyVirtualMachineSpec( }); err != nil { return err } - // FIXME(VM): not used, now only supports PVC - case device.DataVolumeName != "": + case device.Image != "": + if err := kvvm.SetDisk(device.VolumeName, SetDiskOptions{ + ContainerDisk: pointer.GetPointer(device.Image), + IsHotplugged: true, + }); err != nil { + return err + } } } - if err := kvvm.SetProvisioning(vm.Spec.Provisioning); err != nil { - return err - } kvvm.SetOwnerRef(vm, schema.GroupVersionKind{ - Group: virtv2.SchemeGroupVersion.Group, - Version: virtv2.SchemeGroupVersion.Version, + Group: v1alpha2.SchemeGroupVersion.Group, + Version: v1alpha2.SchemeGroupVersion.Version, Kind: "VirtualMachine", }) - kvvm.AddFinalizer(virtv2.FinalizerKVVMProtection) + kvvm.AddFinalizer(v1alpha2.FinalizerKVVMProtection) // Set ip address cni request annotation. 
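The hotplug-preservation loop above is the key behavioral change in this hunk: previously only hotplugged PVC-backed volumes survived a full spec rebuild, now hotplugged container disks do as well. A self-contained sketch of the collection step, with reduced stand-in types instead of the KubeVirt API (the image reference is made up):

```go
package main

import "fmt"

// Simplified stand-ins for the KubeVirt volume types used in kvvm_utils.go.
type pvcSource struct {
	ClaimName    string
	Hotpluggable bool
}

type containerDiskSource struct {
	Image        string
	Hotpluggable bool
}

type volume struct {
	Name                  string
	PersistentVolumeClaim *pvcSource
	ContainerDisk         *containerDiskSource
}

// hotPlugDeviceSettings mirrors the struct from the diff: either PVCName or
// Image is set, never both.
type hotPlugDeviceSettings struct {
	VolumeName string
	PVCName    string
	Image      string
}

// collectHotplugged keeps hotplugged volumes aside so they can be re-added
// after the spec is regenerated, matching the loop in ApplyVirtualMachineSpec.
func collectHotplugged(volumes []volume) []hotPlugDeviceSettings {
	var out []hotPlugDeviceSettings
	for _, v := range volumes {
		switch {
		case v.PersistentVolumeClaim != nil && v.PersistentVolumeClaim.Hotpluggable:
			out = append(out, hotPlugDeviceSettings{VolumeName: v.Name, PVCName: v.PersistentVolumeClaim.ClaimName})
		case v.ContainerDisk != nil && v.ContainerDisk.Hotpluggable:
			out = append(out, hotPlugDeviceSettings{VolumeName: v.Name, Image: v.ContainerDisk.Image})
		}
	}
	return out
}

func main() {
	vols := []volume{
		{Name: "vd-data", PersistentVolumeClaim: &pvcSource{ClaimName: "vd-data", Hotpluggable: true}},
		{Name: "cvi-tools", ContainerDisk: &containerDiskSource{Image: "registry.example.com/cvi/tools:latest", Hotpluggable: true}},
		{Name: "root", PersistentVolumeClaim: &pvcSource{ClaimName: "root"}}, // not hotplugged, dropped
	}
	for _, d := range collectHotplugged(vols) {
		fmt.Printf("%+v\n", d)
	}
}
```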
kvvm.SetKVVMIAnnotation(netmanager.AnnoIPAddressCNIRequest, ipAddress) @@ -257,12 +265,12 @@ func ApplyVirtualMachineSpec( return nil } -func ApplyMigrationVolumes(kvvm *KVVM, vm *virtv2.VirtualMachine, vdsByName map[string]*virtv2.VirtualDisk) error { +func ApplyMigrationVolumes(kvvm *KVVM, vm *v1alpha2.VirtualMachine, vdsByName map[string]*v1alpha2.VirtualDisk) error { bootOrder := uint(1) - var updateVolumesStrategy *virtv1.UpdateVolumesStrategy + var updateVolumesStrategy *virtv1.UpdateVolumesStrategy = nil for _, bd := range vm.Spec.BlockDeviceRefs { - if bd.Kind != virtv2.DiskDevice { + if bd.Kind != v1alpha2.DiskDevice { bootOrder++ continue } diff --git a/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go b/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go index 936e9fb352..471680fb74 100644 --- a/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go +++ b/images/virtualization-artifact/pkg/controller/moduleconfig/remove_cidrs_validator.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" mcapi "github.com/deckhouse/virtualization-controller/pkg/controller/moduleconfig/api" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type removeCIDRsValidator struct { @@ -65,7 +65,7 @@ loop: return nil, nil } - leases := &virtv2.VirtualMachineIPAddressLeaseList{} + leases := &v1alpha2.VirtualMachineIPAddressLeaseList{} if err := v.client.List(ctx, leases); err != nil { return nil, fmt.Errorf("failed to list VirtualMachineIPAddressLeases: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/netmanager/ipam.go b/images/virtualization-artifact/pkg/controller/netmanager/ipam.go index 5aa0c620f5..5f2e4a5cc9 100644 --- a/images/virtualization-artifact/pkg/controller/netmanager/ipam.go +++ b/images/virtualization-artifact/pkg/controller/netmanager/ipam.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -38,7 +38,7 @@ func NewIPAM() *IPAM { type IPAM struct{} -func (m IPAM) IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool { +func (m IPAM) IsBound(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) bool { if vmip == nil { return false } @@ -56,7 +56,7 @@ func (m IPAM) IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool return vmip.Status.VirtualMachine == vmName } -func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.VirtualMachineIPAddress) error { +func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) error { if vmip == nil { return errors.New("cannot to bind with empty ip address") } @@ -64,9 +64,9 @@ func (m IPAM) CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.Virt return nil } -func (m IPAM) CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client) error { +func (m IPAM) CreateIPAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client) error { ownerRef := metav1.NewControllerRef(vm, vm.GroupVersionKind()) - return client.Create(ctx, &virtv2.VirtualMachineIPAddress{ + return 
client.Create(ctx, &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineUID: string(vm.GetUID()), @@ -75,22 +75,22 @@ func (m IPAM) CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, cl Namespace: vm.Namespace, OwnerReferences: []metav1.OwnerReference{*ownerRef}, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, }) } const generateNameSuffix = "-" -func GenerateName(vm *virtv2.VirtualMachine) string { +func GenerateName(vm *v1alpha2.VirtualMachine) string { if vm == nil { return "" } return vm.GetName() + generateNameSuffix } -func GetVirtualMachineName(vmip *virtv2.VirtualMachineIPAddress) string { +func GetVirtualMachineName(vmip *v1alpha2.VirtualMachineIPAddress) string { if vmip == nil { return "" } @@ -100,7 +100,7 @@ func GetVirtualMachineName(vmip *virtv2.VirtualMachineIPAddress) string { name := vmip.GetName() for _, ow := range vmip.GetOwnerReferences() { - if ow.Kind == virtv2.VirtualMachineKind { + if ow.Kind == v1alpha2.VirtualMachineKind { name = ow.Name break } diff --git a/images/virtualization-artifact/pkg/controller/netmanager/mac.go b/images/virtualization-artifact/pkg/controller/netmanager/mac.go index c267a14454..a9daf74a56 100644 --- a/images/virtualization-artifact/pkg/controller/netmanager/mac.go +++ b/images/virtualization-artifact/pkg/controller/netmanager/mac.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewMACManager() *MACManager { @@ -33,19 +33,19 @@ func NewMACManager() *MACManager { type MACManager struct{} -func (m MACManager) IsBound(vmName string, vmmac *virtv2.VirtualMachineMACAddress) bool { +func (m MACManager) IsBound(vmName string, vmmac *v1alpha2.VirtualMachineMACAddress) bool { if vmmac == nil { return false } - if vmmac.Status.Phase != virtv2.VirtualMachineMACAddressPhaseBound && vmmac.Status.Phase != virtv2.VirtualMachineMACAddressPhaseAttached { + if vmmac.Status.Phase != v1alpha2.VirtualMachineMACAddressPhaseBound && vmmac.Status.Phase != v1alpha2.VirtualMachineMACAddressPhaseAttached { return false } return vmmac.Status.VirtualMachine == vmName } -func (m MACManager) CheckMACAddressAvailableForBinding(vmmac *virtv2.VirtualMachineMACAddress) error { +func (m MACManager) CheckMACAddressAvailableForBinding(vmmac *v1alpha2.VirtualMachineMACAddress) error { if vmmac == nil { return errors.New("cannot to bind with empty MAC address") } @@ -53,9 +53,9 @@ func (m MACManager) CheckMACAddressAvailableForBinding(vmmac *virtv2.VirtualMach return nil } -func (m MACManager) CreateMACAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client, macAddress string) error { +func (m MACManager) CreateMACAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client, macAddress string) error { ownerRef := metav1.NewControllerRef(vm, vm.GroupVersionKind()) - vmmac := &virtv2.VirtualMachineMACAddress{ + vmmac := &v1alpha2.VirtualMachineMACAddress{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineUID: string(vm.GetUID()), diff --git a/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go 
b/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go index 1e4366034c..63d3fa7616 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/kvvm_request.go @@ -21,7 +21,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/equality" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" "github.com/deckhouse/virtualization-controller/pkg/common/patch" ) @@ -37,17 +37,17 @@ var ErrChangesAlreadyExist = errors.New("changes already exist in the current st // start replace error error // restart(stop+start) replace error error // empty add add add -func BuildPatch(vm *kvv1.VirtualMachine, changes ...kvv1.VirtualMachineStateChangeRequest) ([]byte, error) { +func BuildPatch(vm *virtv1.VirtualMachine, changes ...virtv1.VirtualMachineStateChangeRequest) ([]byte, error) { jp := patch.NewJSONPatch() // Special case: if there's no status field at all, add one. - newStatus := kvv1.VirtualMachineStatus{} + newStatus := virtv1.VirtualMachineStatus{} if equality.Semantic.DeepEqual(vm.Status, newStatus) { newStatus.StateChangeRequests = changes jp.Append(patch.NewJSONPatchOperation(patch.PatchAddOp, "/status", newStatus)) } else { verb := patch.PatchAddOp failOnConflict := true - if len(changes) == 1 && changes[0].Action == kvv1.StopRequest { + if len(changes) == 1 && changes[0].Action == virtv1.StopRequest { // If this is a stopRequest, replace all existing StateChangeRequests. failOnConflict = false } @@ -72,18 +72,18 @@ func BuildPatch(vm *kvv1.VirtualMachine, changes ...kvv1.VirtualMachineStateChan // BuildPatchSafeRestart creates a patch to restart a VM in case no other operations are present. // This method respects other operations that were issued during VM reboot. -func BuildPatchSafeRestart(kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance) ([]byte, error) { +func BuildPatchSafeRestart(kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) ([]byte, error) { // Restart only if current request is empty. if len(kvvm.Status.StateChangeRequests) > 0 { return nil, nil } - restartRequest := []kvv1.VirtualMachineStateChangeRequest{ - {Action: kvv1.StopRequest, UID: &kvvmi.UID}, - {Action: kvv1.StartRequest}, + restartRequest := []virtv1.VirtualMachineStateChangeRequest{ + {Action: virtv1.StopRequest, UID: &kvvmi.UID}, + {Action: virtv1.StartRequest}, } jp := patch.NewJSONPatch() - newStatus := kvv1.VirtualMachineStatus{} + newStatus := virtv1.VirtualMachineStatus{} if equality.Semantic.DeepEqual(kvvm.Status, newStatus) { // Add /status if it does not exist. newStatus.StateChangeRequests = restartRequest diff --git a/images/virtualization-artifact/pkg/controller/powerstate/operations.go b/images/virtualization-artifact/pkg/controller/powerstate/operations.go index 82e6d3d929..2d3b7e490b 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/operations.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/operations.go @@ -22,7 +22,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" kvvmutil "github.com/deckhouse/virtualization-controller/pkg/common/kvvm" @@ -30,12 +30,12 @@ import ( ) // StartVM starts VM via adding change request to the KVVM status.
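For context on BuildPatch above: the resulting document is a plain JSON Patch applied to the KVVM status subresource. A sketch of what a restart serializes to, assuming the KubeVirt serialized field names (action, uid) and a made-up UID:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// stateChangeRequest approximates the serialized form of
// virtv1.VirtualMachineStateChangeRequest.
type stateChangeRequest struct {
	Action string `json:"action"`
	UID    string `json:"uid,omitempty"`
}

// patchOp is a single JSON Patch operation.
type patchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value any    `json:"value"`
}

func main() {
	// A restart is a Stop pinned to the current KVVMI UID followed by a
	// Start, as in RestartVM and BuildPatchSafeRestart.
	requests := []stateChangeRequest{
		{Action: "Stop", UID: "4f4d2a4a-example-uid"}, // hypothetical UID
		{Action: "Start"},
	}

	// BuildPatch picks "add" or "replace" depending on whether requests
	// are already present in the status.
	jp := []patchOp{{Op: "add", Path: "/status/stateChangeRequests", Value: requests}}

	b, err := json.MarshalIndent(jp, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```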
-func StartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine) error { +func StartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } jp, err := BuildPatch(kvvm, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StartRequest}) + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest}) if err != nil { if errors.Is(err, ErrChangesAlreadyExist) { return nil @@ -47,7 +47,7 @@ func StartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine) e // StopVM stops VM via deleting kvvmi. // It implements force stop by immediately deleting VM's Pod. -func StopVM(ctx context.Context, cl client.Client, kvvmi *kvv1.VirtualMachineInstance, force *bool) error { +func StopVM(ctx context.Context, cl client.Client, kvvmi *virtv1.VirtualMachineInstance, force *bool) error { if kvvmi == nil { return fmt.Errorf("kvvmi must not be empty") } @@ -62,7 +62,7 @@ func StopVM(ctx context.Context, cl client.Client, kvvmi *kvv1.VirtualMachineIns // RestartVM restarts VM via adding stop and start change requests to the KVVM status. // It implements force stop by immediately deleting VM's Pod. -func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance, force bool) error { +func RestartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, force bool) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } @@ -71,8 +71,8 @@ func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, } jp, err := BuildPatch(kvvm, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StopRequest, UID: &kvvmi.UID}, - kvv1.VirtualMachineStateChangeRequest{Action: kvv1.StartRequest}) + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StopRequest, UID: &kvvmi.UID}, + virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest}) if err != nil { if errors.Is(err, ErrChangesAlreadyExist) { return nil @@ -91,7 +91,7 @@ func RestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, } // SafeRestartVM restarts VM via adding stop and start change requests to the KVVM status if no other requests are in progress. 
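SafeRestartVM differs from RestartVM only in its guard: it refuses to queue anything while another state change request is pending, rather than appending to it. A condensed sketch of that decision, using a local stand-in for the KubeVirt request type:

```go
package main

import "fmt"

// changeRequest is a local stand-in for virtv1.VirtualMachineStateChangeRequest.
type changeRequest struct {
	Action string
	UID    string
}

// buildSafeRestart mirrors the guard in BuildPatchSafeRestart: if any request
// is already pending, return nothing and let the pending operation finish.
func buildSafeRestart(pending []changeRequest, kvvmiUID string) []changeRequest {
	if len(pending) > 0 {
		return nil
	}
	return []changeRequest{
		{Action: "Stop", UID: kvvmiUID}, // stop exactly the running instance
		{Action: "Start"},               // then start a fresh one
	}
}

func main() {
	fmt.Println(buildSafeRestart(nil, "uid-1"))                               // [{Stop uid-1} {Start}]
	fmt.Println(buildSafeRestart([]changeRequest{{Action: "Stop"}}, "uid-1")) // []
}
```

Pinning the Stop request to the KVVMI UID ensures the restart targets the instance that was observed, not one created concurrently.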
-func SafeRestartVM(ctx context.Context, cl client.Client, kvvm *kvv1.VirtualMachine, kvvmi *kvv1.VirtualMachineInstance) error { +func SafeRestartVM(ctx context.Context, cl client.Client, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) error { if kvvm == nil { return fmt.Errorf("kvvm must not be empty") } diff --git a/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go b/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go index cbc49ae3bc..40f05de2b8 100644 --- a/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go +++ b/images/virtualization-artifact/pkg/controller/powerstate/shutdown_reason.go @@ -21,7 +21,7 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - kvv1 "kubevirt.io/api/core/v1" + virtv1 "kubevirt.io/api/core/v1" vmutil "github.com/deckhouse/virtualization-controller/pkg/common/vm" ) @@ -46,8 +46,8 @@ const ( // Reset termination message // {"event":"SHUTDOWN","details":"{\"guest\":true,\"reason\":\"guest-reset\"}"} // {"event":"SHUTDOWN","details":"{\"guest\":false,\"reason\":\"host-signal\"}"} -func ShutdownReason(kvvmi *kvv1.VirtualMachineInstance, kvPods *corev1.PodList) ShutdownInfo { - if kvvmi == nil || kvvmi.Status.Phase != kvv1.Succeeded { +func ShutdownReason(kvvmi *virtv1.VirtualMachineInstance, kvPods *corev1.PodList) ShutdownInfo { + if kvvmi == nil || kvvmi.Status.Phase != virtv1.Succeeded { return ShutdownInfo{} } if kvPods == nil || len(kvPods.Items) == 0 { diff --git a/images/virtualization-artifact/pkg/controller/reconciler/resource.go b/images/virtualization-artifact/pkg/controller/reconciler/resource.go index 4529797772..73a5f66139 100644 --- a/images/virtualization-artifact/pkg/controller/reconciler/resource.go +++ b/images/virtualization-artifact/pkg/controller/reconciler/resource.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ResourceObject[T, ST any] interface { @@ -111,41 +111,41 @@ func rewriteObject(obj client.Object) { var conds []metav1.Condition switch obj.GetObjectKind().GroupVersionKind().Kind { - case virtv2.VirtualMachineKind: - vm := obj.(*virtv2.VirtualMachine) + case v1alpha2.VirtualMachineKind: + vm := obj.(*v1alpha2.VirtualMachine) conds = vm.Status.Conditions - case virtv2.VirtualDiskKind: - vd := obj.(*virtv2.VirtualDisk) + case v1alpha2.VirtualDiskKind: + vd := obj.(*v1alpha2.VirtualDisk) conds = vd.Status.Conditions - case virtv2.VirtualImageKind: - vi := obj.(*virtv2.VirtualImage) + case v1alpha2.VirtualImageKind: + vi := obj.(*v1alpha2.VirtualImage) conds = vi.Status.Conditions - case virtv2.ClusterVirtualImageKind: - cvi := obj.(*virtv2.ClusterVirtualImage) + case v1alpha2.ClusterVirtualImageKind: + cvi := obj.(*v1alpha2.ClusterVirtualImage) conds = cvi.Status.Conditions - case virtv2.VirtualMachineBlockDeviceAttachmentKind: - vmbda := obj.(*virtv2.VirtualMachineBlockDeviceAttachment) + case v1alpha2.VirtualMachineBlockDeviceAttachmentKind: + vmbda := obj.(*v1alpha2.VirtualMachineBlockDeviceAttachment) conds = vmbda.Status.Conditions - case virtv2.VirtualMachineIPAddressKind: - ip := obj.(*virtv2.VirtualMachineIPAddress) + case v1alpha2.VirtualMachineIPAddressKind: + ip := obj.(*v1alpha2.VirtualMachineIPAddress) conds = 
ip.Status.Conditions - case virtv2.VirtualMachineIPAddressLeaseKind: - ipl := obj.(*virtv2.VirtualMachineIPAddressLease) + case v1alpha2.VirtualMachineIPAddressLeaseKind: + ipl := obj.(*v1alpha2.VirtualMachineIPAddressLease) conds = ipl.Status.Conditions - case virtv2.VirtualMachineOperationKind: - vmop := obj.(*virtv2.VirtualMachineOperation) + case v1alpha2.VirtualMachineOperationKind: + vmop := obj.(*v1alpha2.VirtualMachineOperation) conds = vmop.Status.Conditions - case virtv2.VirtualDiskSnapshotKind: - snap := obj.(*virtv2.VirtualDiskSnapshot) + case v1alpha2.VirtualDiskSnapshotKind: + snap := obj.(*v1alpha2.VirtualDiskSnapshot) conds = snap.Status.Conditions - case virtv2.VirtualMachineClassKind: - class := obj.(*virtv2.VirtualMachineClass) + case v1alpha2.VirtualMachineClassKind: + class := obj.(*v1alpha2.VirtualMachineClass) conds = class.Status.Conditions - case virtv2.VirtualMachineRestoreKind: - restore := obj.(*virtv2.VirtualMachineRestore) + case v1alpha2.VirtualMachineRestoreKind: + restore := obj.(*v1alpha2.VirtualMachineRestore) conds = restore.Status.Conditions - case virtv2.VirtualMachineSnapshotKind: - snap := obj.(*virtv2.VirtualMachineSnapshot) + case v1alpha2.VirtualMachineSnapshotKind: + snap := obj.(*v1alpha2.VirtualMachineSnapshot) conds = snap.Status.Conditions } diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service.go b/images/virtualization-artifact/pkg/controller/service/attachment_service.go index b63aec3ff5..6fda25d52c 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service.go @@ -30,8 +30,8 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type AttachmentService struct { @@ -54,7 +54,7 @@ var ( ErrHotPlugRequestAlreadySent = errors.New("attachment request is already sent") ) -func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { +func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) (bool, error) { if ad == nil { return false, errors.New("cannot check if a empty AttachmentDisk is hot plugged") } @@ -80,7 +80,7 @@ func (s AttachmentService) IsHotPlugged(ad *AttachmentDisk, vm *virtv2.VirtualMa return false, nil } -func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { +func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) (bool, error) { if ad == nil { return false, errors.New("cannot hot plug a nil AttachmentDisk") } @@ -126,7 +126,7 @@ func (s AttachmentService) CanHotPlug(ad *AttachmentDisk, vm *virtv2.VirtualMach return true, nil } -func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error { +func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, vm *v1alpha2.VirtualMachine, kvvm 
*virtv1.VirtualMachine) error { if ad == nil { return errors.New("cannot hot plug a nil AttachmentDisk") } @@ -139,7 +139,7 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, return errors.New("cannot hot plug a disk into a nil KVVM") } - return s.virtClient.VirtualMachines(vm.GetNamespace()).AddVolume(ctx, vm.GetName(), v1alpha2.VirtualMachineAddVolume{ + return s.virtClient.VirtualMachines(vm.GetNamespace()).AddVolume(ctx, vm.GetName(), subv1alpha2.VirtualMachineAddVolume{ VolumeKind: string(ad.Kind), Name: ad.GenerateName, Image: ad.Image, @@ -149,13 +149,13 @@ func (s AttachmentService) HotPlugDisk(ctx context.Context, ad *AttachmentDisk, }) } -func (s AttachmentService) IsAttached(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) bool { +func (s AttachmentService) IsAttached(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) bool { if vm == nil || kvvm == nil { return false } for _, bdRef := range vm.Status.BlockDeviceRefs { - if bdRef.Kind == virtv2.BlockDeviceKind(vmbda.Spec.BlockDeviceRef.Kind) && bdRef.Name == vmbda.Spec.BlockDeviceRef.Name { + if bdRef.Kind == v1alpha2.BlockDeviceKind(vmbda.Spec.BlockDeviceRef.Kind) && bdRef.Name == vmbda.Spec.BlockDeviceRef.Name { return bdRef.Hotplugged && bdRef.VirtualMachineBlockDeviceAttachmentName == vmbda.Name } } @@ -170,7 +170,7 @@ func (s AttachmentService) UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualM if diskName == "" { return errors.New("cannot unplug a disk with an empty DiskName") } - return s.virtClient.VirtualMachines(kvvm.GetNamespace()).RemoveVolume(ctx, kvvm.GetName(), v1alpha2.VirtualMachineRemoveVolume{ + return s.virtClient.VirtualMachines(kvvm.GetNamespace()).RemoveVolume(ctx, kvvm.GetName(), subv1alpha2.VirtualMachineRemoveVolume{ Name: diskName, }) } @@ -195,13 +195,13 @@ func (s AttachmentService) UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualM // // T1: -->VMBDA A Should be Non-Conflicted lexicographically // T1: VMBDA B Phase: "" -func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (bool, string, error) { +func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (bool, string, error) { // CVI and VI always have no conflicts. Skip - if vmbda.Spec.BlockDeviceRef.Kind == virtv2.ClusterVirtualImageKind || vmbda.Spec.BlockDeviceRef.Kind == virtv2.VirtualImageKind { + if vmbda.Spec.BlockDeviceRef.Kind == v1alpha2.ClusterVirtualImageKind || vmbda.Spec.BlockDeviceRef.Kind == v1alpha2.VirtualImageKind { return false, "", nil } - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdas, &client.ListOptions{Namespace: vmbda.Namespace}) if err != nil { return false, "", err } @@ -214,7 +214,7 @@ func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *vi } // There is already a Non-Conflicted VMBDA.
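The T1 comment blocks above, together with the test file further down, spell out the arbitration rules among competing VMBDAs for the same disk. A condensed paraphrase with reduced types; the real method also filters by matching spec and returns any error from the List call:

```go
package main

import (
	"fmt"
	"time"
)

// vmbda is a reduced stand-in for VirtualMachineBlockDeviceAttachment.
type vmbda struct {
	Name    string
	Created time.Time
	Phase   string // "", "Failed", "Attached", ...
}

// isConflicted reproduces the ordering rules: an in-progress rival (non-empty,
// non-Failed phase) always wins; Failed rivals are ignored; otherwise the
// earliest CreationTimestamp wins, with the lexicographically smaller name
// breaking ties.
func isConflicted(cur vmbda, others []vmbda) (bool, string) {
	for _, o := range others {
		if o.Name == cur.Name {
			continue
		}
		if o.Phase != "" && o.Phase != "Failed" {
			return true, o.Name
		}
		if o.Phase == "Failed" {
			continue
		}
		if o.Created.Before(cur.Created) {
			return true, o.Name
		}
		if o.Created.Equal(cur.Created) && o.Name < cur.Name {
			return true, o.Name
		}
	}
	return false, ""
}

func main() {
	now := time.Now()
	a := vmbda{Name: "vmbda-a", Created: now}
	b := vmbda{Name: "vmbda-b", Created: now, Phase: "Attached"}
	conflicted, winner := isConflicted(a, []vmbda{b})
	fmt.Println(conflicted, winner) // true vmbda-b
}
```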
- if vmbdas.Items[i].Status.Phase != "" && vmbdas.Items[i].Status.Phase != virtv2.BlockDeviceAttachmentPhaseFailed { + if vmbdas.Items[i].Status.Phase != "" && vmbdas.Items[i].Status.Phase != v1alpha2.BlockDeviceAttachmentPhaseFailed { return true, vmbdas.Items[i].Name, nil } @@ -234,40 +234,40 @@ func (s AttachmentService) IsConflictedAttachment(ctx context.Context, vmbda *vi return false, "", nil } -func (s AttachmentService) GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDisk{}) +func (s AttachmentService) GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDisk{}) } -func (s AttachmentService) GetVirtualImage(ctx context.Context, name, namespace string) (*virtv2.VirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualImage{}) +func (s AttachmentService) GetVirtualImage(ctx context.Context, name, namespace string) (*v1alpha2.VirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualImage{}) } -func (s AttachmentService) GetClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &virtv2.ClusterVirtualImage{}) +func (s AttachmentService) GetClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &v1alpha2.ClusterVirtualImage{}) } func (s AttachmentService) GetPersistentVolumeClaim(ctx context.Context, ad *AttachmentDisk) (*corev1.PersistentVolumeClaim, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: ad.Namespace, Name: ad.PVCName}, s.client, &corev1.PersistentVolumeClaim{}) } -func (s AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualMachine{}) +func (s AttachmentService) GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualMachine{}) } -func (s AttachmentService) GetKVVM(ctx context.Context, vm *virtv2.VirtualMachine) (*virtv1.VirtualMachine, error) { +func (s AttachmentService) GetKVVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (*virtv1.VirtualMachine, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: vm.Namespace, Name: vm.Name}, s.client, &virtv1.VirtualMachine{}) } -func (s AttachmentService) GetKVVMI(ctx context.Context, vm *virtv2.VirtualMachine) (*virtv1.VirtualMachineInstance, error) { +func (s AttachmentService) GetKVVMI(ctx context.Context, vm *v1alpha2.VirtualMachine) (*virtv1.VirtualMachineInstance, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: vm.Namespace, Name: vm.Name}, s.client, &virtv1.VirtualMachineInstance{}) } -func isSameBlockDeviceRefs(a, b virtv2.VMBDAObjectRef) bool { +func isSameBlockDeviceRefs(a, b v1alpha2.VMBDAObjectRef) bool { return a.Kind == b.Kind 
&& a.Name == b.Name } type AttachmentDisk struct { - Kind virtv2.BlockDeviceKind + Kind v1alpha2.BlockDeviceKind Name string Namespace string GenerateName string @@ -277,9 +277,9 @@ type AttachmentDisk struct { IsCdrom bool } -func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { +func NewAttachmentDiskFromVirtualDisk(vd *v1alpha2.VirtualDisk) *AttachmentDisk { return &AttachmentDisk{ - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.GetName(), Namespace: vd.GetNamespace(), GenerateName: kvbuilder.GenerateVDDiskName(vd.GetName()), @@ -288,13 +288,13 @@ func NewAttachmentDiskFromVirtualDisk(vd *virtv2.VirtualDisk) *AttachmentDisk { } } -func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk { +func NewAttachmentDiskFromVirtualImage(vi *v1alpha2.VirtualImage) *AttachmentDisk { serial := "" if !vi.Status.CDROM { serial = kvbuilder.GenerateSerialFromObject(vi) } ad := AttachmentDisk{ - Kind: virtv2.ImageDevice, + Kind: v1alpha2.ImageDevice, Name: vi.GetName(), Namespace: vi.GetNamespace(), GenerateName: kvbuilder.GenerateVIDiskName(vi.GetName()), @@ -302,7 +302,7 @@ func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk IsCdrom: vi.Status.CDROM, } - if vi.Spec.Storage == virtv2.StorageContainerRegistry { + if vi.Spec.Storage == v1alpha2.StorageContainerRegistry { ad.Image = vi.Status.Target.RegistryURL } else { ad.PVCName = vi.Status.Target.PersistentVolumeClaim @@ -311,13 +311,13 @@ func NewAttachmentDiskFromVirtualImage(vi *virtv2.VirtualImage) *AttachmentDisk return &ad } -func NewAttachmentDiskFromClusterVirtualImage(cvi *virtv2.ClusterVirtualImage) *AttachmentDisk { +func NewAttachmentDiskFromClusterVirtualImage(cvi *v1alpha2.ClusterVirtualImage) *AttachmentDisk { serial := "" if !cvi.Status.CDROM { serial = kvbuilder.GenerateSerialFromObject(cvi) } return &AttachmentDisk{ - Kind: virtv2.ClusterImageDevice, + Kind: v1alpha2.ClusterImageDevice, Name: cvi.GetName(), GenerateName: kvbuilder.GenerateCVIDiskName(cvi.GetName()), Image: cvi.Status.Target.RegistryURL, diff --git a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go index 8fa52a3c21..3643c0ef2a 100644 --- a/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go +++ b/images/virtualization-artifact/pkg/controller/service/attachment_service_test.go @@ -25,24 +25,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("AttachmentService method IsConflictedAttachment", func() { var clientMock *ClientMock - var vmbdaAlpha *virtv2.VirtualMachineBlockDeviceAttachment - var vmbdaBeta *virtv2.VirtualMachineBlockDeviceAttachment + var vmbdaAlpha *v1alpha2.VirtualMachineBlockDeviceAttachment + var vmbdaBeta *v1alpha2.VirtualMachineBlockDeviceAttachment - spec := virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + spec := v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: "vm", - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindVirtualDisk, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindVirtualDisk, Name: "vd", }, } BeforeEach(func() { - vmbdaAlpha = &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaAlpha = &v1alpha2.VirtualMachineBlockDeviceAttachment{ 
ObjectMeta: metav1.ObjectMeta{ Name: "vmbda-a", CreationTimestamp: metav1.Time{ @@ -52,7 +52,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { Spec: spec, } - vmbdaBeta = &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaBeta = &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: "vmbda-b", CreationTimestamp: vmbdaAlpha.CreationTimestamp, @@ -66,9 +66,9 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: -->VMBDA A Should be Conflicted // T1: VMBDA B Phase: "Attached" It("Should be Conflicted: there is another vmbda that is not Failed", func() { - vmbdaBeta.Status.Phase = virtv2.BlockDeviceAttachmentPhaseAttached + vmbdaBeta.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseAttached clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -85,9 +85,9 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: -->VMBDA A Should be Non-Conflicted // T1: VMBDA B Phase: "Failed" It("Should be Non-Conflicted: there is another vmbda that is Failed", func() { - vmbdaBeta.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed + vmbdaBeta.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -106,7 +106,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Should be Conflicted: there is another vmbda that created earlier", func() { vmbdaBeta.CreationTimestamp = metav1.Time{Time: vmbdaBeta.CreationTimestamp.Add(-time.Hour)} clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -125,7 +125,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Should be Non-Conflicted: there is another vmbda that created later", func() { vmbdaBeta.CreationTimestamp = metav1.Time{Time: vmbdaBeta.CreationTimestamp.Add(time.Hour)} clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -143,7 +143,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { // T1: VMBDA B Phase: "" It("Should be Non-Conflicted lexicographically", func() { clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = 
[]virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, *vmbdaBeta, } @@ -159,7 +159,7 @@ var _ = Describe("AttachmentService method IsConflictedAttachment", func() { It("Only one vmbda", func() { clientMock.ListFunc = func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - list.(*virtv2.VirtualMachineBlockDeviceAttachmentList).Items = []virtv2.VirtualMachineBlockDeviceAttachment{ + list.(*v1alpha2.VirtualMachineBlockDeviceAttachmentList).Items = []v1alpha2.VirtualMachineBlockDeviceAttachment{ *vmbdaAlpha, } return nil diff --git a/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go b/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go index d0d6ee63c4..4b747590dd 100644 --- a/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go +++ b/images/virtualization-artifact/pkg/controller/service/blockdevice_service.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type BlockDeviceService struct { @@ -36,10 +36,10 @@ func NewBlockDeviceService(client client.Client) *BlockDeviceService { } } -func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) { +func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) { count := len(vm.Spec.BlockDeviceRefs) - var vmbdaList virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdaList v1alpha2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdaList, client.InNamespace(vm.Namespace), &client.MatchingFields{ @@ -56,7 +56,7 @@ func (s *BlockDeviceService) CountBlockDevicesAttachedToVM(ctx context.Context, func (s *BlockDeviceService) CountBlockDevicesAttachedToVMName(ctx context.Context, vmName, namespace string) (int, error) { count := 0 - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine err := s.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: namespace}, &vm) if err == nil { @@ -65,7 +65,7 @@ func (s *BlockDeviceService) CountBlockDevicesAttachedToVMName(ctx context.Conte return 0, err } - var vmbdaList virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdaList v1alpha2.VirtualMachineBlockDeviceAttachmentList err = s.client.List(ctx, &vmbdaList, client.InNamespace(namespace), &client.MatchingFields{ diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go index 2cdca7c8d0..4865cf173f 100644 --- a/images/virtualization-artifact/pkg/controller/service/disk_service.go +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -45,7 +45,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DiskService struct { @@ -561,16 +561,16 @@ func (s DiskService) GetVolumeSnapshot(ctx context.Context, name, namespace stri return object.FetchObject(ctx, 
types.NamespacedName{Name: name, Namespace: namespace}, s.client, &vsv1.VolumeSnapshot{}) } -func (s DiskService) GetVirtualImage(ctx context.Context, name, namespace string) (*virtv2.VirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &virtv2.VirtualImage{}) +func (s DiskService) GetVirtualImage(ctx context.Context, name, namespace string) (*v1alpha2.VirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &v1alpha2.VirtualImage{}) } -func (s DiskService) GetClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &virtv2.ClusterVirtualImage{}) +func (s DiskService) GetClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &v1alpha2.ClusterVirtualImage{}) } -func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace string) ([]virtv2.VirtualDiskSnapshot, error) { - var vdSnapshots virtv2.VirtualDiskSnapshotList +func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace string) ([]v1alpha2.VirtualDiskSnapshot, error) { + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: namespace, }) @@ -581,8 +581,8 @@ func (s DiskService) ListVirtualDiskSnapshots(ctx context.Context, namespace str return vdSnapshots.Items, nil } -func (s DiskService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &virtv2.VirtualDiskSnapshot{}) +func (s DiskService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name, Namespace: namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{}) } func (s DiskService) CheckImportProcess(ctx context.Context, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/common/common.go b/images/virtualization-artifact/pkg/controller/service/restorer/common/common.go index f50955ac75..8373eb78a2 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/common/common.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/common/common.go @@ -20,13 +20,13 @@ import ( "errors" "fmt" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization-controller/pkg/common/validate" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( - // MaxKubernetesResourceNameLength defines the maximum allowed length for Kubernetes resource names // according to DNS label standard (RFC 1123) - 63 characters for labels - MaxKubernetesResourceNameLength = 63 + // MaxKubernetesResourceNameLength specifies the maximum allowed length for Kubernetes resource names according to the DNS subdomain standard (RFC 1123): 253 characters.
+ MaxKubernetesResourceNameLength = 253 ) var ( @@ -54,7 +54,7 @@ var ( ) // OverrideName overrides the name of the resource with the given rules -func OverrideName(kind, name string, rules []virtv2.NameReplacement) string { +func OverrideName(kind, name string, rules []v1alpha2.NameReplacement) string { if name == "" { return "" } @@ -72,12 +72,22 @@ func OverrideName(kind, name string, rules []virtv2.NameReplacement) string { return name } -// ValidateResourceNameLength validates that the resource name does not exceed -// the maximum allowed length for Kubernetes resources -func ValidateResourceNameLength(resourceName string) error { - if len(resourceName) > MaxKubernetesResourceNameLength { +// ValidateResourceNameLength checks if the given resource name exceeds +// the maximum allowed length for the specified Kubernetes resource kind. +// By default, the maximum length is set to MaxKubernetesResourceNameLength, +// but for VirtualMachine and VirtualDisk resources, it uses +// MaxVirtualMachineNameLen and MaxDiskNameLen respectively. +func ValidateResourceNameLength(resourceName, kind string) error { + maxLength := MaxKubernetesResourceNameLength + switch kind { + case v1alpha2.VirtualMachineKind: + maxLength = validate.MaxVirtualMachineNameLen + case v1alpha2.VirtualDiskKind: + maxLength = validate.MaxDiskNameLen + } + if len(resourceName) > maxLength { return fmt.Errorf("name %q too long (%d > %d): %w", - resourceName, len(resourceName), MaxKubernetesResourceNameLength, ErrResourceNameTooLong) + resourceName, len(resourceName), maxLength, ErrResourceNameTooLong) } return nil } diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/mock.go b/images/virtualization-artifact/pkg/controller/service/restorer/mock.go new file mode 100644 index 0000000000..1a16956c90 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/restorer/mock.go @@ -0,0 +1,340 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package restorer + +import ( + "context" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sync" +) + +// Ensure, that ObjectHandlerMock does implement ObjectHandler. +// If this is not the case, regenerate this file with moq. +var _ ObjectHandler = &ObjectHandlerMock{} + +// ObjectHandlerMock is a mock implementation of ObjectHandler. +// +// func TestSomethingThatUsesObjectHandler(t *testing.T) { +// +// // make and configure a mocked ObjectHandler +// mockedObjectHandler := &ObjectHandlerMock{ +// CustomizeFunc: func(prefix string, suffix string) { +// panic("mock out the Customize method") +// }, +// ObjectFunc: func() client.Object { +// panic("mock out the Object method") +// }, +// OverrideFunc: func(rules []v1alpha2.NameReplacement) { +// panic("mock out the Override method") +// }, +// ProcessCloneFunc: func(ctx context.Context) error { +// panic("mock out the ProcessClone method") +// }, +// ProcessRestoreFunc: func(ctx context.Context) error { +// panic("mock out the ProcessRestore method") +// }, +// ValidateCloneFunc: func(ctx context.Context) error { +// panic("mock out the ValidateClone method") +// }, +// ValidateRestoreFunc: func(ctx context.Context) error { +// panic("mock out the ValidateRestore method") +// }, +// } +// +// // use mockedObjectHandler in code that requires ObjectHandler +// // and then make assertions. +// +// } +type ObjectHandlerMock struct { + // CustomizeFunc mocks the Customize method. 
+ CustomizeFunc func(prefix string, suffix string) + + // ObjectFunc mocks the Object method. + ObjectFunc func() client.Object + + // OverrideFunc mocks the Override method. + OverrideFunc func(rules []v1alpha2.NameReplacement) + + // ProcessCloneFunc mocks the ProcessClone method. + ProcessCloneFunc func(ctx context.Context) error + + // ProcessRestoreFunc mocks the ProcessRestore method. + ProcessRestoreFunc func(ctx context.Context) error + + // ValidateCloneFunc mocks the ValidateClone method. + ValidateCloneFunc func(ctx context.Context) error + + // ValidateRestoreFunc mocks the ValidateRestore method. + ValidateRestoreFunc func(ctx context.Context) error + + // calls tracks calls to the methods. + calls struct { + // Customize holds details about calls to the Customize method. + Customize []struct { + // Prefix is the prefix argument value. + Prefix string + // Suffix is the suffix argument value. + Suffix string + } + // Object holds details about calls to the Object method. + Object []struct { + } + // Override holds details about calls to the Override method. + Override []struct { + // Rules is the rules argument value. + Rules []v1alpha2.NameReplacement + } + // ProcessClone holds details about calls to the ProcessClone method. + ProcessClone []struct { + // Ctx is the ctx argument value. + Ctx context.Context + } + // ProcessRestore holds details about calls to the ProcessRestore method. + ProcessRestore []struct { + // Ctx is the ctx argument value. + Ctx context.Context + } + // ValidateClone holds details about calls to the ValidateClone method. + ValidateClone []struct { + // Ctx is the ctx argument value. + Ctx context.Context + } + // ValidateRestore holds details about calls to the ValidateRestore method. + ValidateRestore []struct { + // Ctx is the ctx argument value. + Ctx context.Context + } + } + lockCustomize sync.RWMutex + lockObject sync.RWMutex + lockOverride sync.RWMutex + lockProcessClone sync.RWMutex + lockProcessRestore sync.RWMutex + lockValidateClone sync.RWMutex + lockValidateRestore sync.RWMutex +} + +// Customize calls CustomizeFunc. +func (mock *ObjectHandlerMock) Customize(prefix string, suffix string) { + if mock.CustomizeFunc == nil { + panic("ObjectHandlerMock.CustomizeFunc: method is nil but ObjectHandler.Customize was just called") + } + callInfo := struct { + Prefix string + Suffix string + }{ + Prefix: prefix, + Suffix: suffix, + } + mock.lockCustomize.Lock() + mock.calls.Customize = append(mock.calls.Customize, callInfo) + mock.lockCustomize.Unlock() + mock.CustomizeFunc(prefix, suffix) +} + +// CustomizeCalls gets all the calls that were made to Customize. +// Check the length with: +// +// len(mockedObjectHandler.CustomizeCalls()) +func (mock *ObjectHandlerMock) CustomizeCalls() []struct { + Prefix string + Suffix string +} { + var calls []struct { + Prefix string + Suffix string + } + mock.lockCustomize.RLock() + calls = mock.calls.Customize + mock.lockCustomize.RUnlock() + return calls +} + +// Object calls ObjectFunc. +func (mock *ObjectHandlerMock) Object() client.Object { + if mock.ObjectFunc == nil { + panic("ObjectHandlerMock.ObjectFunc: method is nil but ObjectHandler.Object was just called") + } + callInfo := struct { + }{} + mock.lockObject.Lock() + mock.calls.Object = append(mock.calls.Object, callInfo) + mock.lockObject.Unlock() + return mock.ObjectFunc() +} + +// ObjectCalls gets all the calls that were made to Object. 
+// Check the length with: +// +// len(mockedObjectHandler.ObjectCalls()) +func (mock *ObjectHandlerMock) ObjectCalls() []struct { +} { + var calls []struct { + } + mock.lockObject.RLock() + calls = mock.calls.Object + mock.lockObject.RUnlock() + return calls +} + +// Override calls OverrideFunc. +func (mock *ObjectHandlerMock) Override(rules []v1alpha2.NameReplacement) { + if mock.OverrideFunc == nil { + panic("ObjectHandlerMock.OverrideFunc: method is nil but ObjectHandler.Override was just called") + } + callInfo := struct { + Rules []v1alpha2.NameReplacement + }{ + Rules: rules, + } + mock.lockOverride.Lock() + mock.calls.Override = append(mock.calls.Override, callInfo) + mock.lockOverride.Unlock() + mock.OverrideFunc(rules) +} + +// OverrideCalls gets all the calls that were made to Override. +// Check the length with: +// +// len(mockedObjectHandler.OverrideCalls()) +func (mock *ObjectHandlerMock) OverrideCalls() []struct { + Rules []v1alpha2.NameReplacement +} { + var calls []struct { + Rules []v1alpha2.NameReplacement + } + mock.lockOverride.RLock() + calls = mock.calls.Override + mock.lockOverride.RUnlock() + return calls +} + +// ProcessClone calls ProcessCloneFunc. +func (mock *ObjectHandlerMock) ProcessClone(ctx context.Context) error { + if mock.ProcessCloneFunc == nil { + panic("ObjectHandlerMock.ProcessCloneFunc: method is nil but ObjectHandler.ProcessClone was just called") + } + callInfo := struct { + Ctx context.Context + }{ + Ctx: ctx, + } + mock.lockProcessClone.Lock() + mock.calls.ProcessClone = append(mock.calls.ProcessClone, callInfo) + mock.lockProcessClone.Unlock() + return mock.ProcessCloneFunc(ctx) +} + +// ProcessCloneCalls gets all the calls that were made to ProcessClone. +// Check the length with: +// +// len(mockedObjectHandler.ProcessCloneCalls()) +func (mock *ObjectHandlerMock) ProcessCloneCalls() []struct { + Ctx context.Context +} { + var calls []struct { + Ctx context.Context + } + mock.lockProcessClone.RLock() + calls = mock.calls.ProcessClone + mock.lockProcessClone.RUnlock() + return calls +} + +// ProcessRestore calls ProcessRestoreFunc. +func (mock *ObjectHandlerMock) ProcessRestore(ctx context.Context) error { + if mock.ProcessRestoreFunc == nil { + panic("ObjectHandlerMock.ProcessRestoreFunc: method is nil but ObjectHandler.ProcessRestore was just called") + } + callInfo := struct { + Ctx context.Context + }{ + Ctx: ctx, + } + mock.lockProcessRestore.Lock() + mock.calls.ProcessRestore = append(mock.calls.ProcessRestore, callInfo) + mock.lockProcessRestore.Unlock() + return mock.ProcessRestoreFunc(ctx) +} + +// ProcessRestoreCalls gets all the calls that were made to ProcessRestore. +// Check the length with: +// +// len(mockedObjectHandler.ProcessRestoreCalls()) +func (mock *ObjectHandlerMock) ProcessRestoreCalls() []struct { + Ctx context.Context +} { + var calls []struct { + Ctx context.Context + } + mock.lockProcessRestore.RLock() + calls = mock.calls.ProcessRestore + mock.lockProcessRestore.RUnlock() + return calls +} + +// ValidateClone calls ValidateCloneFunc. 
+func (mock *ObjectHandlerMock) ValidateClone(ctx context.Context) error { + if mock.ValidateCloneFunc == nil { + panic("ObjectHandlerMock.ValidateCloneFunc: method is nil but ObjectHandler.ValidateClone was just called") + } + callInfo := struct { + Ctx context.Context + }{ + Ctx: ctx, + } + mock.lockValidateClone.Lock() + mock.calls.ValidateClone = append(mock.calls.ValidateClone, callInfo) + mock.lockValidateClone.Unlock() + return mock.ValidateCloneFunc(ctx) +} + +// ValidateCloneCalls gets all the calls that were made to ValidateClone. +// Check the length with: +// +// len(mockedObjectHandler.ValidateCloneCalls()) +func (mock *ObjectHandlerMock) ValidateCloneCalls() []struct { + Ctx context.Context +} { + var calls []struct { + Ctx context.Context + } + mock.lockValidateClone.RLock() + calls = mock.calls.ValidateClone + mock.lockValidateClone.RUnlock() + return calls +} + +// ValidateRestore calls ValidateRestoreFunc. +func (mock *ObjectHandlerMock) ValidateRestore(ctx context.Context) error { + if mock.ValidateRestoreFunc == nil { + panic("ObjectHandlerMock.ValidateRestoreFunc: method is nil but ObjectHandler.ValidateRestore was just called") + } + callInfo := struct { + Ctx context.Context + }{ + Ctx: ctx, + } + mock.lockValidateRestore.Lock() + mock.calls.ValidateRestore = append(mock.calls.ValidateRestore, callInfo) + mock.lockValidateRestore.Unlock() + return mock.ValidateRestoreFunc(ctx) +} + +// ValidateRestoreCalls gets all the calls that were made to ValidateRestore. +// Check the length with: +// +// len(mockedObjectHandler.ValidateRestoreCalls()) +func (mock *ObjectHandlerMock) ValidateRestoreCalls() []struct { + Ctx context.Context +} { + var calls []struct { + Ctx context.Context + } + mock.lockValidateRestore.RLock() + calls = mock.calls.ValidateRestore + mock.lockValidateRestore.RUnlock() + return calls +} diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/provisioner_restorer.go b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/provisioner_restorer.go index 7976788556..c23da32b86 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/provisioner_restorer.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/provisioner_restorer.go @@ -97,7 +97,7 @@ func (v *ProvisionerHandler) ValidateRestore(ctx context.Context) error { } func (v *ProvisionerHandler) ValidateClone(ctx context.Context) error { - if err := common.ValidateResourceNameLength(v.secret.Name); err != nil { + if err := common.ValidateResourceNameLength(v.secret.Name, v.secret.Kind); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vd_restorer.go b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vd_restorer.go index 10fa9dbf77..a47326ffe0 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vd_restorer.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vd_restorer.go @@ -97,7 +97,7 @@ func (v *VirtualDiskHandler) ValidateRestore(ctx context.Context) error { } func (v *VirtualDiskHandler) ValidateClone(ctx context.Context) error { - if err := common.ValidateResourceNameLength(v.vd.Name); err != nil { + if err := common.ValidateResourceNameLength(v.vd.Name, v.vd.Kind); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vm_restorer.go 
b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vm_restorer.go index cdbb3d76c3..d81bcf74cf 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vm_restorer.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vm_restorer.go @@ -138,7 +138,7 @@ func (v *VirtualMachineHandler) ValidateRestore(ctx context.Context) error { } func (v *VirtualMachineHandler) ValidateClone(ctx context.Context) error { - if err := common.ValidateResourceNameLength(v.vm.Name); err != nil { + if err := common.ValidateResourceNameLength(v.vm.Name, v.vm.Kind); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vmbda_restorer.go b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vmbda_restorer.go index eff74abed7..9e58b924be 100644 --- a/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vmbda_restorer.go +++ b/images/virtualization-artifact/pkg/controller/service/restorer/restorers/vmbda_restorer.go @@ -112,7 +112,7 @@ func (v *VMBlockDeviceAttachmentHandler) ValidateRestore(ctx context.Context) er } func (v *VMBlockDeviceAttachmentHandler) ValidateClone(ctx context.Context) error { - if err := common.ValidateResourceNameLength(v.vmbda.Name); err != nil { + if err := common.ValidateResourceNameLength(v.vmbda.Name, v.vmbda.Kind); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/controller/service/size_policy_service.go b/images/virtualization-artifact/pkg/controller/service/size_policy_service.go index b2d1540f92..df49f9c727 100644 --- a/images/virtualization-artifact/pkg/controller/service/size_policy_service.go +++ b/images/virtualization-artifact/pkg/controller/service/size_policy_service.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/common" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type SizePolicyService struct{} @@ -34,7 +34,7 @@ func NewSizePolicyService() *SizePolicyService { return &SizePolicyService{} } -func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineClass) error { +func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *v1alpha2.VirtualMachine, vmClass *v1alpha2.VirtualMachineClass) error { // check if no sizing policy requirements are set if vmClass == nil || len(vmClass.Spec.SizingPolicies) == 0 { return nil @@ -58,7 +58,7 @@ func (s *SizePolicyService) CheckVMMatchedSizePolicy(vm *virtv2.VirtualMachine, return nil } -func getVMSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineClass) *virtv2.SizingPolicy { +func getVMSizePolicy(vm *v1alpha2.VirtualMachine, vmClass *v1alpha2.VirtualMachineClass) *v1alpha2.SizingPolicy { for _, sp := range vmClass.Spec.SizingPolicies { if sp.Cores == nil { continue @@ -72,7 +72,7 @@ func getVMSizePolicy(vm *virtv2.VirtualMachine, vmClass *virtv2.VirtualMachineCl return nil } -func validateCoreFraction(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { +func validateCoreFraction(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if len(sp.CoreFractions) == 0 { return } @@ -98,7 +98,7 @@ func validateCoreFraction(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (e return } -func validateMemory(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { 
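// Illustrative sketch, not part of the patch: the restorer ValidateClone call
// sites above now pass the object kind into common.ValidateResourceNameLength,
// which selects a per-kind cap. A minimal, self-contained restatement of that
// selection follows; the real MaxVirtualMachineNameLen and MaxDiskNameLen live
// in pkg/common/validate and are not shown in this diff, so the values below
// are hypothetical stand-ins.
package main

import "fmt"

const (
	maxKubernetesResourceNameLength = 253 // generic cap: DNS subdomain, RFC 1123
	maxVirtualMachineNameLen        = 63  // hypothetical stand-in value
	maxDiskNameLen                  = 60  // hypothetical stand-in value
)

// maxNameLength mirrors the switch introduced above: VirtualMachine and
// VirtualDisk get tighter limits; everything else falls back to 253.
func maxNameLength(kind string) int {
	switch kind {
	case "VirtualMachine":
		return maxVirtualMachineNameLen
	case "VirtualDisk":
		return maxDiskNameLen
	default:
		return maxKubernetesResourceNameLength
	}
}

func validateResourceNameLength(name, kind string) error {
	if limit := maxNameLength(kind); len(name) > limit {
		return fmt.Errorf("name %q too long (%d > %d)", name, len(name), limit)
	}
	return nil
}

func main() {
	// A name under 253 characters can still fail the tighter VM cap.
	fmt.Println(validateResourceNameLength("clone-of-a-very-long-virtual-machine-name-exceeding-the-vm-limit", "VirtualMachine"))
}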
+func validateMemory(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if sp.Memory == nil || sp.Memory.Max.IsZero() { return } @@ -131,7 +131,7 @@ func validateMemory(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsA return } -func validatePerCoreMemory(vm *virtv2.VirtualMachine, sp *virtv2.SizingPolicy) (errorsArray []error) { +func validatePerCoreMemory(vm *v1alpha2.VirtualMachine, sp *v1alpha2.SizingPolicy) (errorsArray []error) { if sp.Memory == nil || sp.Memory.PerCore.Max.IsZero() { return } diff --git a/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go b/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go index a6b826daf9..171a04b7e7 100644 --- a/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go +++ b/images/virtualization-artifact/pkg/controller/service/size_policy_service_test.go @@ -22,25 +22,25 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("SizePolicyService", func() { Context("when VM's class has no valid size policy", func() { // Virtual machine with non-matching CPU parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 5, CoreFraction: "10%"}, + CPU: v1alpha2.CPUSpec{Cores: 5, CoreFraction: "10%"}, }, } // Initialize a virtual machine class with policies that do not match the VM's parameters - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, }, }, }, @@ -56,19 +56,19 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's class has correct policy without memory requirements", func() { // Virtual machine with appropriate CPU parameters and no memory requirements - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, }, } // Set mock VM class data with valid policies for the VM without memory requirements - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, }, }, }, @@ -84,22 +84,22 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory does not match with policy", func() { // Virtual machine with non-matching memory parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: 
resource.MustParse("1Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("1Gi")}, }, } // Set mock VM class data with policies that match memory requirements for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("512Mi"), Max: resource.MustParse("2Gi"), }, @@ -119,22 +119,22 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory matches the policy", func() { // Virtual machine with matching memory parameters - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid memory policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -154,21 +154,21 @@ var _ = Describe("SizePolicyService", func() { Context("when class policy has empty memory requirements", func() { // Virtual machine with memory size that matches an empty memory requirement policy - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ // No specific memory policies defined - SizingPolicies: []virtv2.SizingPolicy{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{}, }, }, }, @@ -184,23 +184,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory is correct per core", func() { // Virtual machine with memory size that adheres to per-core memory policies - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: 
"vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ // Setting policies with per-core memory requirements - SizingPolicies: []virtv2.SizingPolicy{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -221,23 +221,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory is incorrect per core", func() { // Virtual machine with incorrect per-core memory size - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 4, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 4, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } // Set mock VM class data with invalid per-core memory policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("2Gi"), Max: resource.MustParse("3Gi"), }, @@ -258,21 +258,21 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's core fraction is correct", func() { // Virtual machine with a correct core fraction - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid core fraction policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - CoreFractions: []virtv2.CoreFractionValue{10, 25, 50, 100}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + CoreFractions: []v1alpha2.CoreFractionValue{10, 25, 50, 100}, }, }, }, @@ -288,21 +288,21 @@ var _ = Describe("SizePolicyService", 
func() { Context("when VM's core fraction is incorrect", func() { // Virtual machine with an incorrect core fraction - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 1, CoreFraction: "11%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 1, CoreFraction: "11%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid core fraction policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - CoreFractions: []virtv2.CoreFractionValue{10, 25, 50, 100}, + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + CoreFractions: []v1alpha2.CoreFractionValue{10, 25, 50, 100}, }, }, }, @@ -318,23 +318,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory step is correct", func() { // Virtual machine with a correct memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")}, }, } // Set mock VM class data with valid memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - MemoryMinMax: virtv2.MemoryMinMax{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -354,23 +354,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's memory step is incorrect", func() { // Virtual machine with an incorrect memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("2001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("2001Mi")}, }, } // Set mock VM class data with invalid memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - MemoryMinMax: virtv2.MemoryMinMax{ + 
MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -390,23 +390,23 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's per core memory step is correct", func() { // Virtual machine with a correct per-core memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4Gi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4Gi")}, }, } - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -427,24 +427,24 @@ var _ = Describe("SizePolicyService", func() { Context("when VM's per core memory step is incorrect", func() { // Virtual machine with an incorrect per-core memory step - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4001Mi")}, }, } // Set mock VM class data with invalid per-core memory step policies for the VM - vmClass := &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - SizingPolicies: []virtv2.SizingPolicy{ + vmClass := &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + SizingPolicies: []v1alpha2.SizingPolicy{ { - Cores: &virtv2.SizingPolicyCores{Min: 1, Max: 4}, - Memory: &virtv2.SizingPolicyMemory{ + Cores: &v1alpha2.SizingPolicyCores{Min: 1, Max: 4}, + Memory: &v1alpha2.SizingPolicyMemory{ Step: resource.MustParse("1Gi"), - PerCore: virtv2.SizingPolicyMemoryPerCore{ - MemoryMinMax: virtv2.MemoryMinMax{ + PerCore: v1alpha2.SizingPolicyMemoryPerCore{ + MemoryMinMax: v1alpha2.MemoryMinMax{ Min: resource.MustParse("1Gi"), Max: resource.MustParse("3Gi"), }, @@ -464,14 +464,14 @@ var _ = Describe("SizePolicyService", func() { }) Context("When size policy not provided", func() { - vm := &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ + vm := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ VirtualMachineClassName: "vmclasstest", - CPU: virtv2.CPUSpec{Cores: 2, CoreFraction: "10%"}, - Memory: virtv2.MemorySpec{Size: resource.MustParse("4001Mi")}, + CPU: v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"}, + Memory: v1alpha2.MemorySpec{Size: resource.MustParse("4001Mi")}, }, } - vmClass := &virtv2.VirtualMachineClass{} + vmClass := &v1alpha2.VirtualMachineClass{} It("should pass validation cause no requirements", func() { service := service.NewSizePolicyService() 
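// Illustrative usage, condensed from the specs above (not part of the patch):
// the size-policy check with the renamed v1alpha2 types. Cores within the
// [1,4] range and a core fraction from the allowed list pass; "11%" or
// 5 cores would return a non-nil error, and an absent Memory policy leaves
// memory unconstrained.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func main() {
	vm := &v1alpha2.VirtualMachine{
		Spec: v1alpha2.VirtualMachineSpec{
			VirtualMachineClassName: "vmclasstest",
			CPU:                     v1alpha2.CPUSpec{Cores: 2, CoreFraction: "10%"},
			Memory:                  v1alpha2.MemorySpec{Size: resource.MustParse("2Gi")},
		},
	}
	vmClass := &v1alpha2.VirtualMachineClass{
		Spec: v1alpha2.VirtualMachineClassSpec{
			SizingPolicies: []v1alpha2.SizingPolicy{{
				Cores:         &v1alpha2.SizingPolicyCores{Min: 1, Max: 4},
				CoreFractions: []v1alpha2.CoreFractionValue{10, 25, 50, 100},
			}},
		},
	}
	// nil: the VM matches the class policy.
	fmt.Println(service.NewSizePolicyService().CheckVMMatchedSizePolicy(vm, vmClass))
}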
diff --git a/images/virtualization-artifact/pkg/controller/service/snapshot_service.go b/images/virtualization-artifact/pkg/controller/service/snapshot_service.go index e2588b205a..bcc45dce48 100644 --- a/images/virtualization-artifact/pkg/controller/service/snapshot_service.go +++ b/images/virtualization-artifact/pkg/controller/service/snapshot_service.go @@ -30,9 +30,9 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type SnapshotService struct { @@ -49,7 +49,7 @@ func NewSnapshotService(virtClient kubeclient.Client, client Client, protection } } -func (s *SnapshotService) IsFrozen(vm *virtv2.VirtualMachine) bool { +func (s *SnapshotService) IsFrozen(vm *v1alpha2.VirtualMachine) bool { if vm == nil { return false } @@ -59,8 +59,8 @@ func (s *SnapshotService) IsFrozen(vm *virtv2.VirtualMachine) bool { return filesystemFrozen.Status == metav1.ConditionTrue && filesystemFrozen.Reason == vmcondition.ReasonFilesystemFrozen.String() } -func (s *SnapshotService) CanFreeze(vm *virtv2.VirtualMachine) bool { - if vm == nil || vm.Status.Phase != virtv2.MachineRunning || s.IsFrozen(vm) { +func (s *SnapshotService) CanFreeze(vm *v1alpha2.VirtualMachine) bool { + if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning || s.IsFrozen(vm) { return false } @@ -70,7 +70,7 @@ func (s *SnapshotService) CanFreeze(vm *virtv2.VirtualMachine) bool { } func (s *SnapshotService) Freeze(ctx context.Context, name, namespace string) error { - err := s.virtClient.VirtualMachines(namespace).Freeze(ctx, name, v1alpha2.VirtualMachineFreeze{}) + err := s.virtClient.VirtualMachines(namespace).Freeze(ctx, name, subv1alpha2.VirtualMachineFreeze{}) if err != nil { return fmt.Errorf("failed to freeze virtual machine %s/%s: %w", namespace, name, err) } @@ -78,19 +78,19 @@ func (s *SnapshotService) Freeze(ctx context.Context, name, namespace string) er return nil } -func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { if vm == nil || !s.IsFrozen(vm) { return false, nil } vdByName := make(map[string]struct{}) for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind == virtv2.DiskDevice { + if bdr.Kind == v1alpha2.DiskDevice { vdByName[bdr.Name] = struct{}{} } } - var vdSnapshots virtv2.VirtualDiskSnapshotList + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -104,12 +104,12 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context } _, ok := vdByName[vdSnapshot.Spec.VirtualDiskName] - if ok && vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseInProgress { + if ok && vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress { return false, nil } } - var vmSnapshots virtv2.VirtualMachineSnapshotList + var vmSnapshots v1alpha2.VirtualMachineSnapshotList 
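// Condensed sketch of the shared unfreeze guard above, not part of the patch:
// unfreezing stays blocked while any in-progress VirtualDiskSnapshot targets a
// disk attached to the VM, or any in-progress VirtualMachineSnapshot targets
// the VM itself. The real CanUnfreezeWith* methods also fetch both lists from
// the VM's namespace and skip the snapshot that initiated the check; here the
// lists and the attached-disk set are assumed to be prepared by the caller.
package sketch

import "github.com/deckhouse/virtualization/api/core/v1alpha2"

func canUnfreeze(
	vm *v1alpha2.VirtualMachine,
	attachedDisks map[string]struct{}, // disk names collected from vm.Status.BlockDeviceRefs
	vdSnapshots []v1alpha2.VirtualDiskSnapshot,
	vmSnapshots []v1alpha2.VirtualMachineSnapshot,
) bool {
	for _, s := range vdSnapshots {
		if _, ok := attachedDisks[s.Spec.VirtualDiskName]; ok && s.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress {
			return false // a snapshot of an attached disk is still in progress
		}
	}
	for _, s := range vmSnapshots {
		if s.Spec.VirtualMachineName == vm.Name && s.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress {
			return false // a snapshot of this VM is still in progress
		}
	}
	return true
}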
err = s.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -118,7 +118,7 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context } for _, vmSnapshot := range vmSnapshots.Items { - if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseInProgress { + if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress { return false, nil } } @@ -126,19 +126,19 @@ func (s *SnapshotService) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context return true, nil } -func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { if vm == nil || !s.IsFrozen(vm) { return false, nil } vdByName := make(map[string]struct{}) for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind == virtv2.DiskDevice { + if bdr.Kind == v1alpha2.DiskDevice { vdByName[bdr.Name] = struct{}{} } } - var vdSnapshots virtv2.VirtualDiskSnapshotList + var vdSnapshots v1alpha2.VirtualDiskSnapshotList err := s.client.List(ctx, &vdSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -148,12 +148,12 @@ func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Cont for _, vdSnapshot := range vdSnapshots.Items { _, ok := vdByName[vdSnapshot.Spec.VirtualDiskName] - if ok && vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseInProgress { + if ok && vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress { return false, nil } } - var vmSnapshots virtv2.VirtualMachineSnapshotList + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err = s.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vm.Namespace, }) @@ -166,7 +166,7 @@ func (s *SnapshotService) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Cont continue } - if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseInProgress { + if vmSnapshot.Spec.VirtualMachineName == vm.Name && vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress { return false, nil } } @@ -211,20 +211,20 @@ func (s *SnapshotService) DeleteVolumeSnapshot(ctx context.Context, vs *vsv1.Vol return nil } -func (s *SnapshotService) GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDisk{}) +func (s *SnapshotService) GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDisk{}) } func (s *SnapshotService) GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error) { return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &corev1.PersistentVolumeClaim{}) } -func (s *SnapshotService) GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualDiskSnapshot{}) +func (s *SnapshotService) GetVirtualDiskSnapshot(ctx 
context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualDiskSnapshot{}) } -func (s *SnapshotService) GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { - return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &virtv2.VirtualMachine{}) +func (s *SnapshotService) GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) { + return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &v1alpha2.VirtualMachine{}) } func (s *SnapshotService) GetVolumeSnapshot(ctx context.Context, name, namespace string) (*vsv1.VolumeSnapshot, error) { @@ -235,7 +235,7 @@ func (s *SnapshotService) GetSecret(ctx context.Context, name, namespace string) return object.FetchObject(ctx, types.NamespacedName{Namespace: namespace, Name: name}, s.client, &corev1.Secret{}) } -func (s *SnapshotService) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) { +func (s *SnapshotService) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) { err := s.client.Create(ctx, vdSnapshot) if err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/service/stat_service.go b/images/virtualization-artifact/pkg/controller/service/stat_service.go index 2f5be8267a..efa67b5dcf 100644 --- a/images/virtualization-artifact/pkg/controller/service/stat_service.go +++ b/images/virtualization-artifact/pkg/controller/service/stat_service.go @@ -36,7 +36,7 @@ import ( podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/monitoring" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type StatService struct { @@ -77,20 +77,20 @@ func (s StatService) GetCDROM(pod *corev1.Pod) bool { return imageformat.IsISO(finalReport.Format) } -func (s StatService) GetSize(pod *corev1.Pod) virtv2.ImageStatusSize { +func (s StatService) GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize { finalReport, err := monitoring.GetFinalReportFromPod(pod) if err != nil { s.logger.Error("GetSize: Cannot get final report from pod", "err", err) - return virtv2.ImageStatusSize{} + return v1alpha2.ImageStatusSize{} } if finalReport == nil { - return virtv2.ImageStatusSize{} + return v1alpha2.ImageStatusSize{} } unpackedSizeBytes := resource.NewQuantity(int64(finalReport.UnpackedSizeBytes), resource.BinarySI) - return virtv2.ImageStatusSize{ + return v1alpha2.ImageStatusSize{ Stored: humanize_bytes.HumanizeIBytes(finalReport.StoredSizeBytes), StoredBytes: strconv.FormatUint(finalReport.StoredSizeBytes, 10), Unpacked: humanize_bytes.HumanizeIBytes(uint64(unpackedSizeBytes.Value())), @@ -135,7 +135,7 @@ func (s StatService) CheckPod(pod *corev1.Pod) error { return nil } -func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed { +func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed { report, err := monitoring.GetFinalReportFromPod(pod) if err != nil && !errors.Is(err, 
monitoring.ErrTerminationMessageNotFound) { s.logger.Error("GetDownloadSpeed: Cannot get final report from pod", "err", err) @@ -143,7 +143,7 @@ func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virt } if report != nil { - return &virtv2.StatusSpeed{ + return &v1alpha2.StatusSpeed{ Avg: report.GetAverageSpeed(), AvgBytes: strconv.FormatUint(report.GetAverageSpeedRaw(), 10), } @@ -159,7 +159,7 @@ func (s StatService) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virt return nil } - return &virtv2.StatusSpeed{ + return &v1alpha2.StatusSpeed{ Avg: progress.AvgSpeed(), AvgBytes: strconv.FormatUint(progress.AvgSpeedRaw(), 10), Current: progress.CurSpeed(), diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go index 11c52048b7..00c73d1155 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -47,7 +47,7 @@ func NewDatasourceReadyHandler(recorder eventrecord.EventRecorderLogger, blank s } } -func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.DeletionTimestamp != nil { conditions.RemoveCondition(vdcondition.DatasourceReadyType, &vd.Status.Conditions) return reconcile.Result{}, nil @@ -86,7 +86,7 @@ func (h DatasourceReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDi h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDContainerRegistrySecretNotFound, + v1alpha2.ReasonVDContainerRegistrySecretNotFound, "Container registry secret not found", ) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go index 540ca54533..a7d2979b8e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/datasource_ready_test.go @@ -27,19 +27,19 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) func TestDatasourceReadyHandler_Handle(t *testing.T) { ctx := t.Context() blank := &HandlerMock{ - ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return nil }, } sources := &SourcesMock{ - GetFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) { + GetFunc: func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { return 
blank, true }, } @@ -48,7 +48,7 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { } t.Run("VirtualDisk with DeletionTimestamp", func(t *testing.T) { - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, }, @@ -60,7 +60,7 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with Blank DataSource", func(t *testing.T) { - vd := virtv2.VirtualDisk{} + vd := v1alpha2.VirtualDisk{} handler := NewDatasourceReadyHandler(recorder, blank, nil) _, err := handler.Handle(ctx, &vd) @@ -73,9 +73,9 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with Non Blank DataSource", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, @@ -92,15 +92,15 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with missing VI reference", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, } - sources.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + sources.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return source.NewImageNotFoundError("missing-vi") }}, true } @@ -114,15 +114,15 @@ func TestDatasourceReadyHandler_Handle(t *testing.T) { }) t.Run("VirtualDisk with missing CVI reference", func(t *testing.T) { - vd := virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ + vd := v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ Type: "NonBlank", }, }, } - sources.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *virtv2.VirtualDisk) error { + sources.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{ValidateFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) error { return source.NewClusterImageNotFoundError("missing-cvi") }}, true } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go index 8a856e9f44..eb4e008726 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/deletion.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -44,11 +44,11 @@ func NewDeletionHandler(sources *source.Sources, client client.Client) *Deletion } } -func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx 
context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if vd.DeletionTimestamp != nil { - if controllerutil.ContainsFinalizer(vd, virtv2.FinalizerVDProtection) { + if controllerutil.ContainsFinalizer(vd, v1alpha2.FinalizerVDProtection) { return reconcile.Result{}, nil } @@ -66,15 +66,15 @@ func (h DeletionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re } log.Info("Deletion observed: remove cleanup finalizer from VirtualDisk") - controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDCleanup) + controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDCleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vd, virtv2.FinalizerVDCleanup) + controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDCleanup) return reconcile.Result{}, nil } -func (h DeletionHandler) cleanupPersistentVolumeClaims(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (h DeletionHandler) cleanupPersistentVolumeClaims(ctx context.Context, vd *v1alpha2.VirtualDisk) error { pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client) if err != nil { return err diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/init.go b/images/virtualization-artifact/pkg/controller/vd/internal/init.go index a16f52b59d..236a59ac3f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/init.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/init.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pwgen" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type InitHandler struct{} @@ -35,7 +35,7 @@ func NewInitHandler() *InitHandler { return &InitHandler{} } -func (h *InitHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h *InitHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { // INIT PersistentVolumeClaim Name. // Required for correct work virtual disk supplements. // We should have different names for support migration volumes. diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go index d08b0bf983..7f4bb44e89 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/interfaces.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
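The deletion flow above hinges on two finalizers: FinalizerVDProtection blocks teardown entirely, while FinalizerVDCleanup is held until the handler has deleted the disk's PersistentVolumeClaims. Condensed straight from the hunk:

    if vd.DeletionTimestamp != nil {
        if controllerutil.ContainsFinalizer(vd, v1alpha2.FinalizerVDProtection) {
            return reconcile.Result{}, nil // still protected: do nothing yet
        }
        // PVC cleanup (cleanupPersistentVolumeClaims) happens here.
        controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDCleanup)
        return reconcile.Result{}, nil
    }
    controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDCleanup)
    return reconcile.Result{}, nil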
Handler Sources DiskService StorageClassService @@ -33,9 +33,9 @@ import ( type Handler = source.Handler type Sources interface { - Changed(_ context.Context, vi *virtv2.VirtualDisk) bool - Get(dsType virtv2.DataSourceType) (source.Handler, bool) - CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + Changed(_ context.Context, vi *v1alpha2.VirtualDisk) bool + Get(dsType v1alpha2.DataSourceType) (source.Handler, bool) + CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) } type DiskService interface { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go index edaf34c4ec..e581028475 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse.go @@ -33,11 +33,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) -var imagePhasesUsingDisk = []virtv2.ImagePhase{virtv2.ImageProvisioning, virtv2.ImagePending} +var imagePhasesUsingDisk = []v1alpha2.ImagePhase{v1alpha2.ImageProvisioning, v1alpha2.ImagePending} type InUseHandler struct { client client.Client @@ -49,7 +49,7 @@ func NewInUseHandler(client client.Client) *InUseHandler { } } -func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h InUseHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { err := h.updateAttachedVirtualMachines(ctx, vd) if err != nil { return reconcile.Result{}, err @@ -103,9 +103,9 @@ func (h InUseHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon return reconcile.Result{}, nil } -func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) bool { +func (h InUseHandler) isVDAttachedToVM(vdName string, vm v1alpha2.VirtualMachine) bool { for _, bda := range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.DiskDevice && bda.Name == vdName { + if bda.Kind == v1alpha2.DiskDevice && bda.Name == vdName { return true } } @@ -113,7 +113,7 @@ func (h InUseHandler) isVDAttachedToVM(vdName string, vm virtv2.VirtualMachine) return false } -func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { pvcName := vd.Status.Target.PersistentVolumeClaim if pvcName == "" { return false, nil @@ -130,9 +130,9 @@ func (h InUseHandler) checkDataExportUsage(ctx context.Context, vd *virtv2.Virtu return pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true", nil } -func (h InUseHandler) checkImageUsage(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (h InUseHandler) checkImageUsage(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { // If disk is not ready, it cannot be used for create image - if vd.Status.Phase != virtv2.DiskReady { + if vd.Status.Phase != v1alpha2.DiskReady { return false, nil } @@ -150,8 +150,8 @@ func (h InUseHandler) checkImageUsage(ctx context.Context, vd *virtv2.VirtualDis return usedByImage, nil } -func (h InUseHandler) 
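In the getVirtualMachineUsageMap hunk that follows, each VM in the namespace is classified into a usage map keyed by name. The phase handling, condensed (the assignment in the stopped case and the remaining phases are elided by the diff, so they are assumptions here):

    switch vm.Status.Phase {
    case "":
        usageMap[vm.GetName()] = false // no phase recorded yet: not a user
    case v1alpha2.MachinePending:
        usageMap[vm.GetName()] = true
    case v1alpha2.MachineStopped:
        // A stopped VM still counts while its underlying KubeVirt VM is active.
        vmIsActive, err := h.isVMActive(ctx, vm)
        if err != nil {
            return nil, err
        }
        usageMap[vm.GetName()] = vmIsActive // assumed: the hunk cuts off before this line
    }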
updateAttachedVirtualMachines(ctx context.Context, vd *virtv2.VirtualDisk) error { - var vms virtv2.VirtualMachineList +func (h InUseHandler) updateAttachedVirtualMachines(ctx context.Context, vd *v1alpha2.VirtualDisk) error { + var vms v1alpha2.VirtualMachineList err := h.client.List(ctx, &vms, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -168,7 +168,7 @@ func (h InUseHandler) updateAttachedVirtualMachines(ctx context.Context, vd *vir return nil } -func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2.VirtualDisk, vms virtv2.VirtualMachineList) (map[string]bool, error) { +func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *v1alpha2.VirtualDisk, vms v1alpha2.VirtualMachineList) (map[string]bool, error) { usageMap := make(map[string]bool) for _, vm := range vms.Items { @@ -179,9 +179,9 @@ func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2. switch vm.Status.Phase { case "": usageMap[vm.GetName()] = false - case virtv2.MachinePending: + case v1alpha2.MachinePending: usageMap[vm.GetName()] = true - case virtv2.MachineStopped: + case v1alpha2.MachineStopped: vmIsActive, err := h.isVMActive(ctx, vm) if err != nil { return nil, err @@ -196,7 +196,7 @@ func (h InUseHandler) getVirtualMachineUsageMap(ctx context.Context, vd *virtv2. return usageMap, nil } -func (h InUseHandler) isVMActive(ctx context.Context, vm virtv2.VirtualMachine) (bool, error) { +func (h InUseHandler) isVMActive(ctx context.Context, vm v1alpha2.VirtualMachine) (bool, error) { kvvm, err := object.FetchObject(ctx, types.NamespacedName{Name: vm.Name, Namespace: vm.Namespace}, h.client, &virtv1.VirtualMachine{}) if err != nil { return false, fmt.Errorf("error getting kvvms: %w", err) @@ -223,21 +223,21 @@ func (h InUseHandler) isVMActive(ctx context.Context, vm virtv2.VirtualMachine) return false, nil } -func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk, usageMap map[string]bool) { +func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *v1alpha2.VirtualDisk, usageMap map[string]bool) { currentlyMountedVM := commonvd.GetCurrentlyMountedVMName(vd) - attachedVMs := make([]virtv2.AttachedVirtualMachine, 0, len(usageMap)) + attachedVMs := make([]v1alpha2.AttachedVirtualMachine, 0, len(usageMap)) setAnyToTrue := false if used, exists := usageMap[currentlyMountedVM]; exists && used { for key := range usageMap { if key == currentlyMountedVM { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: true, }) } else { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: false, }) @@ -246,13 +246,13 @@ func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk } else { for key, value := range usageMap { if !setAnyToTrue && value { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: true, }) setAnyToTrue = true } else { - attachedVMs = append(attachedVMs, virtv2.AttachedVirtualMachine{ + attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{ Name: key, Mounted: false, }) @@ -263,7 +263,7 @@ func (h InUseHandler) updateAttachedVirtualMachinesStatus(vd *virtv2.VirtualDisk vd.Status.AttachedToVirtualMachines = attachedVMs } -func (h InUseHandler) checkUsageByVM(vd *virtv2.VirtualDisk) 
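updateAttachedVirtualMachinesStatus elects exactly one Mounted entry: the currently mounted VM keeps the slot if it still uses the disk; otherwise the first user encountered takes it (map iteration makes that fallback choice arbitrary). An equivalent condensed form of the logic in this hunk:

    attachedVMs := make([]v1alpha2.AttachedVirtualMachine, 0, len(usageMap))
    if used, exists := usageMap[currentlyMountedVM]; exists && used {
        for name := range usageMap {
            attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{
                Name: name, Mounted: name == currentlyMountedVM,
            })
        }
    } else {
        picked := false
        for name, uses := range usageMap {
            mount := !picked && uses // first user wins the Mounted slot
            attachedVMs = append(attachedVMs, v1alpha2.AttachedVirtualMachine{
                Name: name, Mounted: mount,
            })
            picked = picked || mount
        }
    }
    vd.Status.AttachedToVirtualMachines = attachedVMs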
bool { +func (h InUseHandler) checkUsageByVM(vd *v1alpha2.VirtualDisk) bool { for _, attachedVM := range vd.Status.AttachedToVirtualMachines { if attachedVM.Mounted { return true @@ -273,8 +273,8 @@ func (h InUseHandler) checkUsageByVM(vd *virtv2.VirtualDisk) bool { return false } -func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - var vis virtv2.VirtualImageList +func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { + var vis v1alpha2.VirtualImageList err := h.client.List(ctx, &vis, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -284,9 +284,9 @@ func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk for _, vi := range vis.Items { if slices.Contains(imagePhasesUsingDisk, vi.Status.Phase) && - vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + vi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && - vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind && vi.Spec.DataSource.ObjectRef.Name == vd.Name { return true, nil } @@ -295,17 +295,17 @@ func (h InUseHandler) checkUsageByVI(ctx context.Context, vd *virtv2.VirtualDisk return false, nil } -func (h InUseHandler) checkUsageByCVI(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { - var cvis virtv2.ClusterVirtualImageList +func (h InUseHandler) checkUsageByCVI(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { + var cvis v1alpha2.ClusterVirtualImageList err := h.client.List(ctx, &cvis, &client.ListOptions{}) if err != nil { return false, fmt.Errorf("error getting cluster virtual images: %w", err) } for _, cvi := range cvis.Items { if slices.Contains(imagePhasesUsingDisk, cvi.Status.Phase) && - cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && + cvi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && - cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind && + cvi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind && cvi.Spec.DataSource.ObjectRef.Name == vd.Name { return true, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go index fc974ef44c..fe5e4b17b0 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/inuse_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -46,7 +46,7 @@ var _ = Describe("InUseHandler", func() { BeforeEach(func() { scheme = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() @@ -54,14 +54,14 @@ var _ = Describe("InUseHandler", func() { Context("when handling VirtualDisk usage", func() { It("should correctly update status for a disk used by a running VM", func() { - vd := 
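Both image checks above share one predicate: an image still holds the disk while it is Pending or Provisioning and its ObjectRef data source points at the disk. A hypothetical helper capturing that shape for the namespaced case (checkUsageByCVI differs only in using the ClusterVirtualImage types):

    func viUsesDisk(vi v1alpha2.VirtualImage, vdName string) bool {
        ds := vi.Spec.DataSource
        return slices.Contains(imagePhasesUsingDisk, vi.Status.Phase) &&
            ds.Type == v1alpha2.DataSourceTypeObjectRef &&
            ds.ObjectRef != nil &&
            ds.ObjectRef.Kind == v1alpha2.VirtualDiskKind &&
            ds.ObjectRef.Name == vdName
    }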
&virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-vm", Mounted: false, @@ -78,72 +78,72 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, } - vm2 := &virtv2.VirtualMachine{ + vm2 := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm2", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, } - vm3 := &virtv2.VirtualMachine{ + vm3 := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm3", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, @@ -175,14 +175,14 @@ var _ = Describe("InUseHandler", func() { }) It("should correctly update status for a disk used by a stopped VM", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-vm", Mounted: true, @@ -191,24 +191,24 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: 
[]v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineStopped, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineStopped, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "test-vd", }, }, @@ -233,12 +233,12 @@ var _ = Describe("InUseHandler", func() { }) It("should update the status to NotInUse if no VM uses the disk", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } @@ -259,14 +259,14 @@ var _ = Describe("InUseHandler", func() { }) It("should handle VM disappearance and update status accordingly", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ {Name: "missing-vm", Mounted: true}, }, }, @@ -290,12 +290,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use", func() { It("must set status Unknown and reason Unknown", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } @@ -313,12 +313,12 @@ var _ = Describe("InUseHandler", func() { }) It("must set condition generation equal resource generation", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -346,34 +346,34 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by running VirtualMachine", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -396,22 +396,22 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by not ready VirtualMachine", func() { It("it sets 
Unknown", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Type: vmcondition.TypeMigrating.String(), @@ -422,9 +422,9 @@ var _ = Describe("InUseHandler", func() { Status: metav1.ConditionFalse, }, }, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -446,33 +446,33 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{}, }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -493,34 +493,34 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by ClusterVirtualImage", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{}, }, } - cvi := &virtv2.ClusterVirtualImage{ + cvi := &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.ClusterVirtualImageSpec{ - DataSource: virtv2.ClusterVirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.ClusterVirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.ClusterVirtualImageSpec{ + DataSource: v1alpha2.ClusterVirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.ClusterVirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", Namespace: "default", }, }, }, - Status: virtv2.ClusterVirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.ClusterVirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -541,46 +541,46 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage and 
VirtualMachine", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineStarting, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineStarting, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -603,12 +603,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualMachine after create image", func() { It("must set status True and reason AllowedForVirtualMachineUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -619,16 +619,16 @@ var _ = Describe("InUseHandler", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vm", Namespace: "default", }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -651,13 +651,13 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is used by VirtualImage after running VM", func() { It("must set status True and reason AllowedForImageUsage", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -668,22 +668,22 @@ var _ = Describe("InUseHandler", func() { }, } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vi", Namespace: "default", }, - Spec: virtv2.VirtualImageSpec{ - DataSource: virtv2.VirtualImageDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualImageObjectRef{ - Kind: virtv2.VirtualDiskKind, + Spec: 
v1alpha2.VirtualImageSpec{ + DataSource: v1alpha2.VirtualImageDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualImageObjectRef{ + Kind: v1alpha2.VirtualDiskKind, Name: "test-vd", }, }, }, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageProvisioning, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageProvisioning, Conditions: []metav1.Condition{}, }, } @@ -704,12 +704,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use after image creation", func() { It("must set status False and reason NotInUse", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -736,12 +736,12 @@ var _ = Describe("InUseHandler", func() { Context("when VirtualDisk is not in use after VM deletion", func() { It("must set status False and reason NotInUse", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.InUseType.String(), @@ -767,14 +767,14 @@ var _ = Describe("InUseHandler", func() { }) Context("when VirtualDisk is used by DataExport", func() { It("must set status True and reason UsedForDataExport", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-vd", Namespace: "default", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go index cd7a2be6f6..b53e426b50 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle.go @@ -19,7 +19,6 @@ package internal import ( "context" "fmt" - "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -49,7 +48,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, blank source. 
} } -func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { readyCondition, ok := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) if !ok { readyCondition = metav1.Condition{ @@ -62,17 +61,17 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r } if vd.DeletionTimestamp != nil { - vd.Status.Phase = virtv2.DiskTerminating + vd.Status.Phase = v1alpha2.DiskTerminating return reconcile.Result{}, nil } if vd.Status.Phase == "" { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending } migrating, _ := conditions.GetCondition(vdcondition.MigratingType, vd.Status.Conditions) if migrating.Status == metav1.ConditionTrue { - vd.Status.Phase = virtv2.DiskMigrating + vd.Status.Phase = v1alpha2.DiskMigrating return reconcile.Result{}, nil } @@ -80,13 +79,13 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDSpecHasBeenChanged, + v1alpha2.ReasonVDSpecHasBeenChanged, "Spec changes are detected: import process is restarted by controller", ) // Reset status and start import again. - vd.Status = virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskPending, + vd.Status = v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskPending, } _, err := h.sources.CleanUp(ctx, vd) @@ -153,10 +152,5 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (r return reconcile.Result{}, fmt.Errorf("failed to sync virtual disk data source %s: %w", ds.Name(), err) } - readyConditionAfterSync, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) - if readyConditionAfterSync.Status == metav1.ConditionTrue && conditions.IsLastUpdated(readyConditionAfterSync, vd) { - return reconcile.Result{RequeueAfter: 1 * time.Second}, nil - } - return result, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go index 607116f502..fe561d8663 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/life_cycle_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -42,8 +42,8 @@ var _ = Describe("LifeCycleHandler Run", func() { var sourcesMock SourcesMock args.ReadyCondition.Type = vdcondition.ReadyType.String() cleanUpCalled := false - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: "", Conditions: []metav1.Condition{ args.ReadyCondition, @@ -57,26 +57,26 @@ var _ = Describe("LifeCycleHandler Run", func() { }, }, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeHTTP, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeHTTP, }, }, } - sourcesMock.CleanUpFunc = func(ctx 
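Beyond the rename, the life_cycle.go hunk deletes the one-second requeue that re-checked the Ready condition after each sync; the remaining flow keeps the phase bookkeeping and the spec-change restart, condensed here from the hunk:

    if vd.DeletionTimestamp != nil {
        vd.Status.Phase = v1alpha2.DiskTerminating
        return reconcile.Result{}, nil
    }
    if h.sources.Changed(ctx, vd) {
        h.recorder.Event(vd, corev1.EventTypeNormal, v1alpha2.ReasonVDSpecHasBeenChanged,
            "Spec changes are detected: import process is restarted by controller")
        // Reset status wholesale and start the import over.
        vd.Status = v1alpha2.VirtualDiskStatus{Phase: v1alpha2.DiskPending}
        if _, err := h.sources.CleanUp(ctx, vd); err != nil {
            return reconcile.Result{}, err // exact error handling elided in the hunk
        }
    }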
context.Context, vd *virtv2.VirtualDisk) (bool, error) { + sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { cleanUpCalled = true return false, nil } - sourcesMock.ChangedFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) bool { return args.SpecChanged } - sourcesMock.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { + sourcesMock.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { var handler HandlerMock - handler.SyncFunc = func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + handler.SyncFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil } @@ -147,8 +147,8 @@ var _ = Describe("LifeCycleHandler Run", func() { args.StorageClassReadyCondition.Type = vdcondition.StorageClassReadyType.String() var sourcesMock SourcesMock cleanUpCalled := false - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ args.ReadyCondition, args.StorageClassReadyCondition, @@ -158,26 +158,26 @@ var _ = Describe("LifeCycleHandler Run", func() { }, }, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeHTTP, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeHTTP, }, }, } - sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { + sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { cleanUpCalled = true return false, nil } - sourcesMock.ChangedFunc = func(ctx context.Context, vd *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(ctx context.Context, vd *v1alpha2.VirtualDisk) bool { return false } - sourcesMock.GetFunc = func(dsType virtv2.DataSourceType) (source.Handler, bool) { + sourcesMock.GetFunc = func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { var handler HandlerMock - handler.SyncFunc = func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + handler.SyncFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil } @@ -242,8 +242,8 @@ var _ = Describe("LifeCycleHandler Run", func() { EventFunc: func(_ client.Object, _, _, _ string) {}, } ctx := logger.ToContext(context.TODO(), testutil.NewNoOpSlogLogger()) - vd := virtv2.VirtualDisk{ - Status: virtv2.VirtualDiskStatus{ + vd := v1alpha2.VirtualDisk{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.DatasourceReadyType.String(), @@ -259,11 +259,11 @@ var _ = Describe("LifeCycleHandler Run", func() { }, } - sourcesMock.ChangedFunc = func(_ context.Context, _ *virtv2.VirtualDisk) bool { + sourcesMock.ChangedFunc = func(_ context.Context, _ *v1alpha2.VirtualDisk) bool { return false } - sourcesMock.GetFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) { - return &source.HandlerMock{SyncFunc: func(_ context.Context, _ *virtv2.VirtualDisk) (reconcile.Result, error) { + sourcesMock.GetFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) { + return &source.HandlerMock{SyncFunc: func(_ context.Context, _ *v1alpha2.VirtualDisk) (reconcile.Result, error) { return reconcile.Result{}, nil }}, true } diff --git 
a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go index 468b374fed..6a2dad99a1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go @@ -18,11 +18,12 @@ package internal import ( "context" + "errors" "fmt" "log/slog" corev1 "k8s.io/api/core/v1" - storev1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,11 +50,11 @@ const migrationHandlerName = "MigrationHandler" type storageClassValidator interface { IsStorageClassAllowed(scName string) bool - IsStorageClassDeprecated(sc *storev1.StorageClass) bool + IsStorageClassDeprecated(sc *storagev1.StorageClass) bool } type volumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) + GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) } type MigrationHandler struct { @@ -306,9 +307,10 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 } // Reset migration info + targetPVCName := vd.Status.MigrationState.TargetPVC vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{} - var targetStorageClass *storev1.StorageClass + var targetStorageClass *storagev1.StorageClass var err error storageClassName := "" @@ -318,7 +320,7 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 switch { case storageClassName != "": - targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: storageClassName}, h.client, &storev1.StorageClass{}) + targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: storageClassName}, h.client, &storagev1.StorageClass{}) if err != nil { return err } @@ -345,7 +347,7 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 } } default: - targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, h.client, &storev1.StorageClass{}) + targetStorageClass, err = object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, h.client, &storagev1.StorageClass{}) if err != nil { return err } @@ -398,11 +400,20 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 } log.Info("Start creating target PersistentVolumeClaim", slog.String("storageClass", targetStorageClass.Name), slog.String("capacity", size.String())) - pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size) + pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size, targetPVCName, vd.Status.Target.PersistentVolumeClaim) if err != nil { return err } - log.Info("Target PersistentVolumeClaim was created or was already exists", slog.String("pvc.name", pvc.Name), slog.String("pvc.namespace", pvc.Namespace)) + + log.Info( + "The target PersistentVolumeClaim has been created or already exists", + slog.String("state.source.pvc", vd.Status.Target.PersistentVolumeClaim), + slog.String("state.target.pvc", pvc.Name), + ) + + if vd.Status.Target.PersistentVolumeClaim == pvc.Name { + return errors.New("the target 
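The prepare-target rework around this point captures the old target name before the MigrationState reset wipes it, threads both PVC names into the create call explicitly, and adds a guard against the target colliding with the source. Condensed from the hunk:

    targetPVCName := vd.Status.MigrationState.TargetPVC
    vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{}
    // ... storage class and size resolution as above ...
    pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size,
        targetPVCName, vd.Status.Target.PersistentVolumeClaim)
    if err != nil {
        return err
    }
    if vd.Status.Target.PersistentVolumeClaim == pvc.Name {
        // A matching name means the selection logic returned the source PVC,
        // which would corrupt the migration, so fail loudly.
        return errors.New("target PVC name matched the source PVC name")
    }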
PersistentVolumeClaim name matched the source PersistentVolumeClaim name, please report a bug") + } vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{ SourcePVC: vd.Status.Target.PersistentVolumeClaim, @@ -410,7 +421,7 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1 StartTimestamp: metav1.Now(), } - cb.Status(metav1.ConditionTrue). + cb.Status(metav1.ConditionFalse). Reason(vdcondition.MigratingWaitForTargetReadyReason). Message("Migration started.") conditions.SetCondition(cb, &vd.Status.Conditions) @@ -426,19 +437,17 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi cb := conditions.NewConditionBuilder(vdcondition.MigratingType). Generation(vd.Generation). - Status(metav1.ConditionTrue). + Status(metav1.ConditionFalse). Reason(vdcondition.MigratingWaitForTargetReadyReason) if pvc == nil { - cb.Status(metav1.ConditionFalse). - Reason(vdcondition.MigratingWaitForTargetReadyReason). - Message("Target persistent volume claim is not found.") + cb.Message("Target persistent volume claim is not found.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } if pvc.Status.Phase == corev1.ClaimBound { - cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is bound.") + cb.Status(metav1.ConditionTrue).Reason(vdcondition.InProgress).Message("Target persistent volume claim is bound.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } @@ -454,7 +463,7 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi return nil } - sc := &storev1.StorageClass{} + sc := &storagev1.StorageClass{} err = h.client.Get(ctx, types.NamespacedName{Name: storageClassName}, sc) if err != nil { if k8serrors.IsNotFound(err) { @@ -465,17 +474,15 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi return err } - isWaitForFistConsumer := sc.VolumeBindingMode == nil || *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer + isWaitForFistConsumer := sc.VolumeBindingMode == nil || *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer if isWaitForFistConsumer { - cb.Reason(vdcondition.InProgress).Message("Target persistent volume claim is waiting for first consumer.") + cb.Status(metav1.ConditionTrue).Reason(vdcondition.InProgress).Message("Target persistent volume claim is waiting for first consumer.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } } - cb.Status(metav1.ConditionFalse). - Reason(vdcondition.MigratingWaitForTargetReadyReason). 
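handleMigrateSync also inverts the condition defaults: Migrating now starts as ConditionFalse with the wait-for-target reason and flips to ConditionTrue/InProgress only once the target PVC is Bound, or is parked on a WaitForFirstConsumer storage class. The resulting state machine, condensed:

    cb := conditions.NewConditionBuilder(vdcondition.MigratingType).
        Generation(vd.Generation).
        Status(metav1.ConditionFalse).
        Reason(vdcondition.MigratingWaitForTargetReadyReason)
    switch {
    case pvc == nil:
        cb.Message("Target persistent volume claim is not found.")
    case pvc.Status.Phase == corev1.ClaimBound:
        cb.Status(metav1.ConditionTrue).Reason(vdcondition.InProgress).
            Message("Target persistent volume claim is bound.")
    default:
        // The WaitForFirstConsumer branch (shown in the hunk) also sets
        // True/InProgress; every other state keeps the False default.
        cb.Message("Target persistent volume claim is not bound or not waiting for first consumer.")
    }
    conditions.SetCondition(cb, &vd.Status.Conditions)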
- Message("Target persistent volume claim is not bound or not waiting for first consumer.") + cb.Message("Target persistent volume claim is not bound or not waiting for first consumer.") conditions.SetCondition(cb, &vd.Status.Conditions) return nil } @@ -483,7 +490,10 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi func (h MigrationHandler) handleRevert(ctx context.Context, vd *v1alpha2.VirtualDisk) error { log := logger.FromContext(ctx) log.Info("Start reverting...") - log.Info("Delete target PersistentVolumeClaim", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace)) + + if vd.Status.MigrationState.TargetPVC == vd.Status.Target.PersistentVolumeClaim { + return errors.New("cannot revert: the target PersistentVolumeClaim name matched the source PersistentVolumeClaim name, please report a bug") + } err := h.deleteTargetPersistentVolumeClaim(ctx, vd) if err != nil { @@ -577,7 +587,7 @@ func (h MigrationHandler) getInProgressMigratingVMOP(ctx context.Context, vm *v1 return nil, nil } -func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storev1.StorageClass, size resource.Quantity) (*corev1.PersistentVolumeClaim, error) { +func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storagev1.StorageClass, size resource.Quantity, targetPVCName, sourcePVCName string) (*corev1.PersistentVolumeClaim, error) { pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client) if err != nil { return nil, err @@ -588,7 +598,7 @@ func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, for _, pvc := range pvcs { // If TargetPVC is empty, that means previous reconciliation failed and not updated TargetPVC in status. // So, we should use pvc, that is not equal to SourcePVC. - if pvc.Name == vd.Status.MigrationState.TargetPVC || pvc.Name != vd.Status.MigrationState.SourcePVC { + if pvc.Name == targetPVCName || pvc.Name != sourcePVCName { return &pvc, nil } } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go index 96f73bf540..54334af750 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
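createTargetPersistentVolumeClaim now receives the target and source names as parameters instead of reading them back out of MigrationState, so a reconcile that created the target but crashed before persisting TargetPVC can still recover the orphaned claim:

    for _, pvc := range pvcs {
        // If TargetPVC was never recorded, targetPVCName is empty; any claim
        // that is not the source must then be the half-created target.
        if pvc.Name == targetPVCName || pvc.Name != sourcePVCName {
            return &pvc, nil
        }
    }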
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - storev1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -52,7 +52,7 @@ func (m *fakeStorageClassValidator) IsStorageClassAllowed(scName string) bool { return m.allowedStorageClasses[scName] } -func (m *fakeStorageClassValidator) IsStorageClassDeprecated(sc *storev1.StorageClass) bool { +func (m *fakeStorageClassValidator) IsStorageClassDeprecated(sc *storagev1.StorageClass) bool { return m.deprecatedStorageClasses[sc.Name] } @@ -62,7 +62,7 @@ type fakeVolumeAndAccessModesGetter struct { shouldError bool } -func (m *fakeVolumeAndAccessModesGetter) GetVolumeAndAccessModes(_ context.Context, _ client.Object, _ *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { +func (m *fakeVolumeAndAccessModesGetter) GetVolumeAndAccessModes(_ context.Context, _ client.Object, _ *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { if m.shouldError { return "", "", fmt.Errorf("mock error") } @@ -80,7 +80,7 @@ var _ = Describe("MigrationHandler", func() { migrationHandler *MigrationHandler vd *v1alpha2.VirtualDisk vm *v1alpha2.VirtualMachine - storageClass *storev1.StorageClass + storageClass *storagev1.StorageClass pvc *corev1.PersistentVolumeClaim ) @@ -139,11 +139,11 @@ var _ = Describe("MigrationHandler", func() { } // Create test StorageClass - storageClass = &storev1.StorageClass{ + storageClass = &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "allowed-sc", }, - VolumeBindingMode: ptr.To(storev1.VolumeBindingWaitForFirstConsumer), + VolumeBindingMode: ptr.To(storagev1.VolumeBindingWaitForFirstConsumer), } // Create test PVC diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go index e688c6b981..1f183e3b62 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/mock.go @@ -7,7 +7,7 @@ import ( "context" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -25,16 +25,16 @@ var _ Handler = &HandlerMock{} // // // make and configure a mocked Handler // mockedHandler := &HandlerMock{ -// CleanUpFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +// CleanUpFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { // panic("mock out the CleanUp method") // }, // NameFunc: func() string { // panic("mock out the Name method") // }, -// SyncFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +// SyncFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { // panic("mock out the Sync method") // }, -// ValidateFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) error { +// ValidateFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) error { // panic("mock out the Validate method") // }, // } @@ -45,16 +45,16 @@ var _ Handler = &HandlerMock{} // } type HandlerMock struct { // CleanUpFunc mocks the CleanUp method. 
- CleanUpFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + CleanUpFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) // NameFunc mocks the Name method. NameFunc func() string // SyncFunc mocks the Sync method. - SyncFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) + SyncFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) // ValidateFunc mocks the Validate method. - ValidateFunc func(ctx context.Context, vd *virtv2.VirtualDisk) error + ValidateFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) error // calls tracks calls to the methods. calls struct { @@ -63,7 +63,7 @@ type HandlerMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } // Name holds details about calls to the Name method. Name []struct { @@ -73,14 +73,14 @@ type HandlerMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } // Validate holds details about calls to the Validate method. Validate []struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } } lockCleanUp sync.RWMutex @@ -90,13 +90,13 @@ type HandlerMock struct { } // CleanUp calls CleanUpFunc. -func (mock *HandlerMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (mock *HandlerMock) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { if mock.CleanUpFunc == nil { panic("HandlerMock.CleanUpFunc: method is nil but Handler.CleanUp was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -113,11 +113,11 @@ func (mock *HandlerMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (b // len(mockedHandler.CleanUpCalls()) func (mock *HandlerMock) CleanUpCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -153,13 +153,13 @@ func (mock *HandlerMock) NameCalls() []struct { } // Sync calls SyncFunc. -func (mock *HandlerMock) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (mock *HandlerMock) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if mock.SyncFunc == nil { panic("HandlerMock.SyncFunc: method is nil but Handler.Sync was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -176,11 +176,11 @@ func (mock *HandlerMock) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco // len(mockedHandler.SyncCalls()) func (mock *HandlerMock) SyncCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockSync.RLock() calls = mock.calls.Sync @@ -189,13 +189,13 @@ func (mock *HandlerMock) SyncCalls() []struct { } // Validate calls ValidateFunc. 
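The mock.go hunks are regenerated moq output; every method records its arguments under a per-method mutex before delegating to the corresponding func field, and panics if that field was left nil. Typical test usage, following the doc comment in this hunk:

    // Configure only the methods a test actually needs.
    mocked := &HandlerMock{
        SyncFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
            return reconcile.Result{}, nil
        },
    }
    _, _ = mocked.Sync(context.TODO(), &v1alpha2.VirtualDisk{})
    // The recorded calls are safe to read even from parallel tests:
    if len(mocked.SyncCalls()) != 1 {
        panic("expected exactly one Sync call")
    }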
-func (mock *HandlerMock) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (mock *HandlerMock) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if mock.ValidateFunc == nil { panic("HandlerMock.ValidateFunc: method is nil but Handler.Validate was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -212,11 +212,11 @@ func (mock *HandlerMock) Validate(ctx context.Context, vd *virtv2.VirtualDisk) e // len(mockedHandler.ValidateCalls()) func (mock *HandlerMock) ValidateCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockValidate.RLock() calls = mock.calls.Validate @@ -234,13 +234,13 @@ var _ Sources = &SourcesMock{} // // // make and configure a mocked Sources // mockedSources := &SourcesMock{ -// ChangedFunc: func(contextMoqParam context.Context, vi *virtv2.VirtualDisk) bool { +// ChangedFunc: func(contextMoqParam context.Context, vi *v1alpha2.VirtualDisk) bool { // panic("mock out the Changed method") // }, -// CleanUpFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +// CleanUpFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { // panic("mock out the CleanUp method") // }, -// GetFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) { +// GetFunc: func(dsType v1alpha2.DataSourceType) (source.Handler, bool) { // panic("mock out the Get method") // }, // } @@ -251,13 +251,13 @@ var _ Sources = &SourcesMock{} // } type SourcesMock struct { // ChangedFunc mocks the Changed method. - ChangedFunc func(contextMoqParam context.Context, vi *virtv2.VirtualDisk) bool + ChangedFunc func(contextMoqParam context.Context, vi *v1alpha2.VirtualDisk) bool // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + CleanUpFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) // GetFunc mocks the Get method. - GetFunc func(dsType virtv2.DataSourceType) (source.Handler, bool) + GetFunc func(dsType v1alpha2.DataSourceType) (source.Handler, bool) // calls tracks calls to the methods. calls struct { @@ -266,19 +266,19 @@ type SourcesMock struct { // ContextMoqParam is the contextMoqParam argument value. ContextMoqParam context.Context // Vi is the vi argument value. - Vi *virtv2.VirtualDisk + Vi *v1alpha2.VirtualDisk } // CleanUp holds details about calls to the CleanUp method. CleanUp []struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } // Get holds details about calls to the Get method. Get []struct { // DsType is the dsType argument value. - DsType virtv2.DataSourceType + DsType v1alpha2.DataSourceType } } lockChanged sync.RWMutex @@ -287,13 +287,13 @@ type SourcesMock struct { } // Changed calls ChangedFunc. 
-func (mock *SourcesMock) Changed(contextMoqParam context.Context, vi *virtv2.VirtualDisk) bool { +func (mock *SourcesMock) Changed(contextMoqParam context.Context, vi *v1alpha2.VirtualDisk) bool { if mock.ChangedFunc == nil { panic("SourcesMock.ChangedFunc: method is nil but Sources.Changed was just called") } callInfo := struct { ContextMoqParam context.Context - Vi *virtv2.VirtualDisk + Vi *v1alpha2.VirtualDisk }{ ContextMoqParam: contextMoqParam, Vi: vi, @@ -310,11 +310,11 @@ func (mock *SourcesMock) Changed(contextMoqParam context.Context, vi *virtv2.Vir // len(mockedSources.ChangedCalls()) func (mock *SourcesMock) ChangedCalls() []struct { ContextMoqParam context.Context - Vi *virtv2.VirtualDisk + Vi *v1alpha2.VirtualDisk } { var calls []struct { ContextMoqParam context.Context - Vi *virtv2.VirtualDisk + Vi *v1alpha2.VirtualDisk } mock.lockChanged.RLock() calls = mock.calls.Changed @@ -323,13 +323,13 @@ func (mock *SourcesMock) ChangedCalls() []struct { } // CleanUp calls CleanUpFunc. -func (mock *SourcesMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (mock *SourcesMock) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { if mock.CleanUpFunc == nil { panic("SourcesMock.CleanUpFunc: method is nil but Sources.CleanUp was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -346,11 +346,11 @@ func (mock *SourcesMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (b // len(mockedSources.CleanUpCalls()) func (mock *SourcesMock) CleanUpCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -359,12 +359,12 @@ func (mock *SourcesMock) CleanUpCalls() []struct { } // Get calls GetFunc. 
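The protection.go hunk a little further down mirrors the deletion handler: the vd-protection finalizer is dropped once no VM mounts the disk (or the disk never left Pending), and re-added while the disk is alive and in use. Condensed from that hunk:

    if unmounted || vd.Status.Phase == v1alpha2.DiskPending {
        log.Debug("Allow virtual disk deletion")
        controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDProtection)
        return reconcile.Result{}, nil
    }
    if vd.DeletionTimestamp == nil {
        log.Debug("Protect virtual disk from deletion")
        controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDProtection)
    }
    return reconcile.Result{}, nil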
-func (mock *SourcesMock) Get(dsType virtv2.DataSourceType) (source.Handler, bool) { +func (mock *SourcesMock) Get(dsType v1alpha2.DataSourceType) (source.Handler, bool) { if mock.GetFunc == nil { panic("SourcesMock.GetFunc: method is nil but Sources.Get was just called") } callInfo := struct { - DsType virtv2.DataSourceType + DsType v1alpha2.DataSourceType }{ DsType: dsType, } @@ -379,10 +379,10 @@ func (mock *SourcesMock) Get(dsType virtv2.DataSourceType) (source.Handler, bool // // len(mockedSources.GetCalls()) func (mock *SourcesMock) GetCalls() []struct { - DsType virtv2.DataSourceType + DsType v1alpha2.DataSourceType } { var calls []struct { - DsType virtv2.DataSourceType + DsType v1alpha2.DataSourceType } mock.lockGet.RLock() calls = mock.calls.Get diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/protection.go b/images/virtualization-artifact/pkg/controller/vd/internal/protection.go index ba8b41ce37..eec9ebf437 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/protection.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/protection.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ProtectionHandler struct{} @@ -32,7 +32,7 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h ProtectionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h ProtectionHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("protection")) if len(vd.Status.AttachedToVirtualMachines) > 1 { @@ -47,15 +47,15 @@ func (h ProtectionHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) ( } } - if unmounted || vd.Status.Phase == virtv2.DiskPending { + if unmounted || vd.Status.Phase == v1alpha2.DiskPending { log.Debug("Allow virtual disk deletion") - controllerutil.RemoveFinalizer(vd, virtv2.FinalizerVDProtection) + controllerutil.RemoveFinalizer(vd, v1alpha2.FinalizerVDProtection) return reconcile.Result{}, nil } if vd.DeletionTimestamp == nil { log.Debug("Protect virtual disk from deletion") - controllerutil.AddFinalizer(vd, virtv2.FinalizerVDProtection) + controllerutil.AddFinalizer(vd, v1alpha2.FinalizerVDProtection) } return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go index 5d68c190ea..49a7cd3352 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/protection_test.go @@ -26,7 +26,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("The protection handler test", func() { @@ -41,7 +41,7 @@ var _ = Describe("The protection handler test", func() { BeforeEach(func() { schema = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(schema)).To(Succeed()) - Expect(virtv2.AddToScheme(schema)).To(Succeed()) + Expect(v1alpha2.AddToScheme(schema)).To(Succeed()) Expect(virtv1.AddToScheme(schema)).To(Succeed()) ctx = context.TODO() @@ -50,7 
+50,7 @@ var _ = Describe("The protection handler test", func() { Context("`VirtualDisk`", func() { When("has the `AttachedToVirtualMachines` status with the `Mounted` false value", func() { It("should remove the `vd-protection` finalizer from the `VirtualDisk`", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-virtual-disk", Namespace: "default", @@ -58,9 +58,9 @@ var _ = Describe("The protection handler test", func() { vdProtection, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-virtual-machine", Mounted: false, @@ -79,7 +79,7 @@ var _ = Describe("The protection handler test", func() { When("has the `AttachedToVirtualMachines` status with the `Mounted` true value", func() { It("should not remove the `vd-protection` finalizer from the `VirtualDisk`", func() { - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "test-virtual-disk", Namespace: "default", @@ -87,9 +87,9 @@ var _ = Describe("The protection handler test", func() { vdProtection, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "test-virtual-machine", Mounted: true, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go index db789d6e83..ece1e1090f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing.go @@ -35,7 +35,7 @@ import ( vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,7 +51,7 @@ func NewResizingHandler(recorder eventrecord.EventRecorderLogger, diskService Di } } -func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h ResizingHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("resizing")) resizingCondition, _ := conditions.GetCondition(vdcondition.ResizingType, vd.Status.Conditions) @@ -106,7 +106,7 @@ func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re if pvcResizing != nil && pvcResizing.Status == corev1.ConditionTrue { log.Info("Resizing is in progress", "msg", pvcResizing.Message) - vd.Status.Phase = virtv2.DiskResizing + vd.Status.Phase = v1alpha2.DiskResizing cb. Status(metav1.ConditionTrue). Reason(vdcondition.InProgress). 
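To make the resize flow in the hunks above concrete, here is a minimal self-contained sketch, not part of this patch: the core decision reduces to comparing the requested spec size against the PVC's current storage request with resource.Quantity.Cmp. The helper name resizeNeeded and the grow-only assumption are illustrative.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// resizeNeeded reports whether the requested size is strictly larger than the
// storage currently requested by the PVC. Equal or smaller requests are treated
// as "nothing to do", mirroring the ResizeNotNeeded branch seen in this file.
func resizeNeeded(requested resource.Quantity, pvc *corev1.PersistentVolumeClaim) bool {
	current := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
	return requested.Cmp(current) > 0
}

func main() {
	pvc := &corev1.PersistentVolumeClaim{}
	pvc.Spec.Resources.Requests = corev1.ResourceList{
		corev1.ResourceStorage: resource.MustParse("10G"),
	}

	fmt.Println(resizeNeeded(resource.MustParse("20G"), pvc)) // true: disk must grow
	fmt.Println(resizeNeeded(resource.MustParse("10G"), pvc)) // false: size unchanged
}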
@@ -126,7 +126,7 @@ func (h ResizingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (re func (h ResizingHandler) ResizeNeeded( ctx context.Context, - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, log *slog.Logger, @@ -138,7 +138,7 @@ func (h ResizingHandler) ResizeNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingNotAvailable, + v1alpha2.ReasonVDResizingNotAvailable, "The virtual disk cannot be selected for resizing as it is currently snapshotting.", ) @@ -156,7 +156,7 @@ func (h ResizingHandler) ResizeNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingNotAvailable, + v1alpha2.ReasonVDResizingNotAvailable, "The virtual disk cannot be selected for resizing as it is currently being migrated.", ) @@ -198,13 +198,13 @@ func (h ResizingHandler) ResizeNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingStarted, + v1alpha2.ReasonVDResizingStarted, "The virtual disk resizing has started", ) log.Info("The virtual disk resizing has started") - vd.Status.Phase = virtv2.DiskResizing + vd.Status.Phase = v1alpha2.DiskResizing cb. Status(metav1.ConditionTrue). Reason(vdcondition.InProgress). @@ -222,7 +222,7 @@ func (h ResizingHandler) ResizeNeeded( } func (h ResizingHandler) ResizeNotNeeded( - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, resizingCondition metav1.Condition, cb *conditions.ConditionBuilder, ) (reconcile.Result, error) { @@ -230,7 +230,7 @@ func (h ResizingHandler) ResizeNotNeeded( h.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonVDResizingCompleted, + v1alpha2.ReasonVDResizingCompleted, "The virtual disk resizing has completed", ) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go index 5dbc875236..94606e4c79 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/resizing_test.go @@ -34,25 +34,25 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) var _ = Describe("Resizing handler Run", func() { - var vd *virtv2.VirtualDisk + var vd *v1alpha2.VirtualDisk var pvc *corev1.PersistentVolumeClaim var diskService *DiskServiceMock size := resource.MustParse("10G") BeforeEach(func() { - vd = &virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + vd = &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: &size, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -177,9 +177,9 @@ var _ = Describe("Resizing handler Run", func() { }) DescribeTable("Resizing handler Handle", func(args handleTestArgs) { - vd := &virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{}, - Status: virtv2.VirtualDiskStatus{ + vd := &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{}, + Status: v1alpha2.VirtualDiskStatus{ Conditions: 
[]metav1.Condition{ { Type: vdcondition.ResizingType.String(), @@ -237,7 +237,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionUnknown, - expectedVdPhase: virtv2.DiskTerminating, + expectedVdPhase: v1alpha2.DiskTerminating, }), Entry("Virtual Disk is not ready", handleTestArgs{ isDiskDeleting: false, @@ -245,7 +245,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionFalse, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC get error", handleTestArgs{ isDiskDeleting: false, @@ -253,7 +253,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: false, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC is nil", handleTestArgs{ isDiskDeleting: false, @@ -261,7 +261,7 @@ var _ = Describe("Resizing handler Run", func() { pvc: nil, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("PVC is not bound", handleTestArgs{ isDiskDeleting: false, @@ -273,7 +273,7 @@ var _ = Describe("Resizing handler Run", func() { }, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), Entry("Everything is fine", handleTestArgs{ isDiskDeleting: false, @@ -285,18 +285,18 @@ var _ = Describe("Resizing handler Run", func() { }, isErrorNil: true, expectedReadyConditionStatus: metav1.ConditionTrue, - expectedVdPhase: virtv2.DiskPending, + expectedVdPhase: v1alpha2.DiskPending, }), ) DescribeTable("Resizing handler ResizeNeeded", func(args resizeNeededArgs) { - vd := &virtv2.VirtualDisk{ - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + vd := &v1alpha2.VirtualDisk{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: ptr.To(resource.Quantity{}), }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ { Type: vdcondition.ResizingType.String(), @@ -314,7 +314,7 @@ var _ = Describe("Resizing handler Run", func() { Reason: vdcondition.StorageClassReady.String(), }, }, - Phase: virtv2.DiskPending, + Phase: v1alpha2.DiskPending, }, } @@ -357,7 +357,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -367,7 +367,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -377,7 +377,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: false, expectedHaveError: false, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionFalse, expectedReason: vdcondition.ResizingNotAvailable.String(), }), @@ -387,7 +387,7 @@ var _ = Describe("Resizing handler 
Run", func() { isResizeReturnErr: true, expectedResizeCalled: true, expectedHaveError: true, - expectedPhase: virtv2.DiskPending, + expectedPhase: v1alpha2.DiskPending, expectedStatus: metav1.ConditionUnknown, expectedReason: conditions.ReasonUnknown.String(), }), @@ -397,7 +397,7 @@ var _ = Describe("Resizing handler Run", func() { isResizeReturnErr: false, expectedResizeCalled: true, expectedHaveError: false, - expectedPhase: virtv2.DiskResizing, + expectedPhase: v1alpha2.DiskResizing, expectedStatus: metav1.ConditionTrue, expectedReason: vdcondition.InProgress.String(), }), @@ -414,7 +414,7 @@ type handleTestArgs struct { isErrorNil bool pvc *corev1.PersistentVolumeClaim expectedReadyConditionStatus metav1.ConditionStatus - expectedVdPhase virtv2.DiskPhase + expectedVdPhase v1alpha2.DiskPhase } type resizeNeededArgs struct { @@ -423,7 +423,7 @@ type resizeNeededArgs struct { isResizeReturnErr bool expectedResizeCalled bool expectedHaveError bool - expectedPhase virtv2.DiskPhase + expectedPhase v1alpha2.DiskPhase expectedStatus metav1.ConditionStatus expectedReason string } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go index 0fedf55d6c..d248e05dcf 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/snapshoting_test.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -37,13 +37,13 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand diskService := service.NewDiskService(fakeClient, nil, nil, "test") snapshottingHandler := NewSnapshottingHandler(diskService) - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: args.DeletionTimestamp, Name: "test-vd", Namespace: "test-namespace", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{ args.ReadyCondition, args.ResizingCondition, @@ -91,12 +91,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ReadyType.String(), Status: metav1.ConditionTrue, }, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vdd", }, }, @@ -112,12 +112,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ResizingType.String(), Status: metav1.ConditionTrue, }, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vd", }, }, @@ -130,12 +130,12 @@ var _ = DescribeTable("Test Handle cases", func(args snapshottingHandlerTestHand Type: vdcondition.ReadyType.String(), Status: metav1.ConditionTrue, 
}, - Snapshot: virtv2.VirtualDiskSnapshot{ + Snapshot: v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "test-snapshot", Namespace: "test-namespace", }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: "test-vd", }, }, @@ -154,7 +154,7 @@ type snapshottingHandlerTestHandlerArgs struct { DeletionTimestamp *metav1.Time ReadyCondition metav1.Condition ResizingCondition metav1.Condition - Snapshot virtv2.VirtualDiskSnapshot + Snapshot v1alpha2.VirtualDiskSnapshot IsExpectCondition bool ExpectConditionStatus metav1.ConditionStatus } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go index 4f2fcc24fd..4a64cc93fa 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/snapshotting.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -38,7 +38,7 @@ func NewSnapshottingHandler(diskService *service.DiskService) *SnapshottingHandl } } -func (h SnapshottingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h SnapshottingHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vdcondition.SnapshottingType).Generation(vd.Generation) defer func() { @@ -68,7 +68,7 @@ func (h SnapshottingHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) continue } - if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady || vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseTerminating { + if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady || vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseTerminating { continue } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go index a4b1e319df..a1e722f7d4 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank.go @@ -31,7 +31,7 @@ import ( vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,7 +51,7 @@ func NewBlankDataSource(recorder eventrecord.EventRecorderLogger, diskService Bl } } -func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds BlankDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log, ctx := logger.GetHandlerContext(ctx, blankDataSource) supgen := vdsupplements.NewGenerator(vd) @@ -68,7 +68,7 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec ctx = logger.ToContext(ctx, log.With("pvc.name", pvc.Name, 
"pvc.status.phase", pvc.Status.Phase)) } - return steptaker.NewStepTakers[*virtv2.VirtualDisk]( + return steptaker.NewStepTakers[*v1alpha2.VirtualDisk]( step.NewReadyStep(ds.diskService, pvc, cb), step.NewTerminatingStep(pvc), step.NewCreateBlankPVCStep(pvc, ds.diskService, ds.client, cb), @@ -76,11 +76,11 @@ func (ds BlankDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (rec ).Run(ctx, vd) } -func (ds BlankDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { +func (ds BlankDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error { return nil } -func (ds BlankDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (ds BlankDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { supgen := vdsupplements.NewGenerator(vd) requeue, err := ds.diskService.CleanUp(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go index c78627d166..acb7f18525 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go @@ -38,7 +38,7 @@ import ( vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -46,7 +46,7 @@ var _ = Describe("Blank", func() { var ( ctx context.Context scheme *runtime.Scheme - vd *virtv2.VirtualDisk + vd *v1alpha2.VirtualDisk sc *storagev1.StorageClass pvc *corev1.PersistentVolumeClaim recorder eventrecord.EventRecorderLogger @@ -57,7 +57,7 @@ var _ = Describe("Blank", func() { ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(vsv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ -87,20 +87,20 @@ var _ = Describe("Blank", func() { }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd", Generation: 1, UID: "11111111-1111-1111-1111-111111111111", }, - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: ptr.To(resource.MustParse("10Mi")), }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: sc.Name, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, @@ -152,7 +152,7 @@ var _ = Describe("Blank", func() { Expect(pvcCreated).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Progress).NotTo(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -189,7 +189,7 @@ var _ = Describe("Blank", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, 
metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer)) }) It("is in provisioning", func() { @@ -204,7 +204,7 @@ var _ = Describe("Blank", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) }) }) @@ -221,7 +221,7 @@ var _ = Describe("Blank", func() { ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false) ExpectStats(vd) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady)) }) }) @@ -248,7 +248,7 @@ var _ = Describe("Blank", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -264,7 +264,7 @@ var _ = Describe("Blank", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go index 088565e474..8d66c7b725 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go @@ -45,7 +45,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -78,7 +78,7 @@ func NewHTTPDataSource( } } -func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds HTTPDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, httpDataSource) condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) @@ -143,7 +143,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The HTTP DataSource import to DVCR has started", ) @@ -163,14 +163,14 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco case err == nil: // OK. 
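// The cases below classify the provisioning error: a quota violation is surfaced
// as a warning event plus a quota-exceeded phase condition, while any other error
// fails the phase condition and is returned so the reconcile gets retried.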
case common.ErrQuotaExceeded(err): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -190,7 +190,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -202,17 +202,17 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The HTTP DataSource import to PVC has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -257,7 +257,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -265,9 +265,9 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). @@ -275,9 +275,9 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). @@ -286,7 +286,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse).
Reason(vdcondition.Provisioning). @@ -298,11 +298,11 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The HTTP DataSource import has completed", ) - vd.Status.Phase = virtv2.DiskReady + vd.Status.Phase = v1alpha2.DiskReady cb. Status(metav1.ConditionTrue). Reason(vdcondition.Ready). @@ -342,7 +342,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -358,11 +358,11 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (b return importerRequeue || diskRequeue, nil } -func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { +func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error { return nil } -func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) @@ -386,7 +386,7 @@ func (ds HTTPDataSource) Name() string { return httpDataSource } -func (ds HTTPDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *importer.Settings { +func (ds HTTPDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyHTTPSourceSettings(&settings, vd.Spec.DataSource.HTTP, supgen) @@ -417,7 +417,7 @@ func (ds HTTPDataSource) getSource(sup supplements.Generator, dvcrSourceImageNam } } -func (ds HTTPDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { +func (ds HTTPDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { // Get size from the importer Pod to detect if specified PVC size is enough. unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go index cfc7e7fcdf..cd26fd373e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/interfaces.go @@ -23,16 +23,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
Handler BlankDataSourceDiskService ObjectRefVirtualImageDiskService ObjectRefClusterVirtualImageDiskService ObjectRefVirtualDiskSnapshotDiskService type Handler interface { Name() string - Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) - CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) - Validate(ctx context.Context, vd *virtv2.VirtualDisk) error + Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) + CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) + Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error } type BlankDataSourceDiskService interface { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go index 0a5df8f975..eefb839265 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go @@ -7,7 +7,7 @@ import ( "context" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -27,16 +27,16 @@ var _ Handler = &HandlerMock{} // // // make and configure a mocked Handler // mockedHandler := &HandlerMock{ -// CleanUpFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +// CleanUpFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { // panic("mock out the CleanUp method") // }, // NameFunc: func() string { // panic("mock out the Name method") // }, -// SyncFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +// SyncFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { // panic("mock out the Sync method") // }, -// ValidateFunc: func(ctx context.Context, vd *virtv2.VirtualDisk) error { +// ValidateFunc: func(ctx context.Context, vd *v1alpha2.VirtualDisk) error { // panic("mock out the Validate method") // }, // } @@ -47,16 +47,16 @@ var _ Handler = &HandlerMock{} // } type HandlerMock struct { // CleanUpFunc mocks the CleanUp method. - CleanUpFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) + CleanUpFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) // NameFunc mocks the Name method. NameFunc func() string // SyncFunc mocks the Sync method. - SyncFunc func(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) + SyncFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) // ValidateFunc mocks the Validate method. - ValidateFunc func(ctx context.Context, vd *virtv2.VirtualDisk) error + ValidateFunc func(ctx context.Context, vd *v1alpha2.VirtualDisk) error // calls tracks calls to the methods. calls struct { @@ -65,7 +65,7 @@ type HandlerMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } // Name holds details about calls to the Name method. Name []struct { @@ -75,14 +75,14 @@ type HandlerMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. 
- Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } // Validate holds details about calls to the Validate method. Validate []struct { // Ctx is the ctx argument value. Ctx context.Context // Vd is the vd argument value. - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } } lockCleanUp sync.RWMutex @@ -92,13 +92,13 @@ type HandlerMock struct { } // CleanUp calls CleanUpFunc. -func (mock *HandlerMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (mock *HandlerMock) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { if mock.CleanUpFunc == nil { panic("HandlerMock.CleanUpFunc: method is nil but Handler.CleanUp was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -115,11 +115,11 @@ func (mock *HandlerMock) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (b // len(mockedHandler.CleanUpCalls()) func (mock *HandlerMock) CleanUpCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockCleanUp.RLock() calls = mock.calls.CleanUp @@ -155,13 +155,13 @@ func (mock *HandlerMock) NameCalls() []struct { } // Sync calls SyncFunc. -func (mock *HandlerMock) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (mock *HandlerMock) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if mock.SyncFunc == nil { panic("HandlerMock.SyncFunc: method is nil but Handler.Sync was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -178,11 +178,11 @@ func (mock *HandlerMock) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reco // len(mockedHandler.SyncCalls()) func (mock *HandlerMock) SyncCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockSync.RLock() calls = mock.calls.Sync @@ -191,13 +191,13 @@ func (mock *HandlerMock) SyncCalls() []struct { } // Validate calls ValidateFunc. 
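// Editorial note: the *Calls accessors in these hunks read the recorded slice
// under the read lock before returning it, so assertions such as
// len(mockedHandler.ValidateCalls()) do not race concurrent recordings.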
-func (mock *HandlerMock) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (mock *HandlerMock) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if mock.ValidateFunc == nil { panic("HandlerMock.ValidateFunc: method is nil but Handler.Validate was just called") } callInfo := struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk }{ Ctx: ctx, Vd: vd, @@ -214,11 +214,11 @@ func (mock *HandlerMock) Validate(ctx context.Context, vd *virtv2.VirtualDisk) e // len(mockedHandler.ValidateCalls()) func (mock *HandlerMock) ValidateCalls() []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } { var calls []struct { Ctx context.Context - Vd *virtv2.VirtualDisk + Vd *v1alpha2.VirtualDisk } mock.lockValidate.RLock() calls = mock.calls.Validate diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go index 79c7618717..a3586326cd 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const objectRefDataSource = "objectref" @@ -51,24 +51,24 @@ func NewObjectRefDataSource( } } -func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds ObjectRefDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return reconcile.Result{}, fmt.Errorf("not object ref data source, please report a bug") } switch vd.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot: + case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot: return ds.vdSnapshotSyncer.Sync(ctx, vd) - case virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: return ds.cviSyncer.Sync(ctx, vd) - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: return ds.viSyncer.Sync(ctx, vd) } return reconcile.Result{}, fmt.Errorf("unexpected object ref kind %s, please report a bug", vd.Spec.DataSource.ObjectRef.Kind) } -func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { supgen := vdsupplements.NewGenerator(vd) requeue, err := ds.diskService.CleanUp(ctx, supgen) @@ -79,17 +79,17 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDis return requeue, nil } -func (ds ObjectRefDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (ds ObjectRefDataSource) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return fmt.Errorf("not object ref data source, please report a bug") } switch vd.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot: + case 
v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot: return ds.vdSnapshotSyncer.Validate(ctx, vd) - case virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: return ds.cviSyncer.Validate(ctx, vd) - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: return ds.viSyncer.Validate(ctx, vd) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go index d78f990136..bc4975dcef 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,7 +51,7 @@ func NewObjectRefClusterVirtualImage( } } -func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return reconcile.Result{}, errors.New("object ref missed for data source") } @@ -71,7 +71,7 @@ func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.Virt return reconcile.Result{}, fmt.Errorf("fetch dv: %w", err) } - return steptaker.NewStepTakers[*virtv2.VirtualDisk]( + return steptaker.NewStepTakers[*v1alpha2.VirtualDisk]( step.NewReadyStep(ds.diskService, pvc, cb), step.NewTerminatingStep(pvc), step.NewCreateDataVolumeFromClusterVirtualImageStep(pvc, dv, ds.diskService, ds.client, cb), @@ -81,13 +81,13 @@ func (ds ObjectRefClusterVirtualImage) Sync(ctx context.Context, vd *virtv2.Virt ).Run(ctx, vd) } -func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return errors.New("object ref missed for data source") } cviRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name} - cviRef, err := object.FetchObject(ctx, cviRefKey, ds.client, &virtv2.ClusterVirtualImage{}) + cviRef, err := object.FetchObject(ctx, cviRefKey, ds.client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return fmt.Errorf("fetch vi %q: %w", cviRefKey, err) } @@ -96,7 +96,7 @@ func (ds ObjectRefClusterVirtualImage) Validate(ctx context.Context, vd *virtv2. 
return NewClusterImageNotFoundError(vd.Spec.DataSource.ObjectRef.Name) } - if cviRef.Status.Phase != virtv2.ImageReady || cviRef.Status.Target.RegistryURL == "" { + if cviRef.Status.Phase != v1alpha2.ImageReady || cviRef.Status.Target.RegistryURL == "" { return NewClusterImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go index ea196e4310..a301316902 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -44,8 +44,8 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { var ( ctx context.Context scheme *runtime.Scheme - cvi *virtv2.ClusterVirtualImage - vd *virtv2.VirtualDisk + cvi *v1alpha2.ClusterVirtualImage + vd *v1alpha2.VirtualDisk sc *storagev1.StorageClass pvc *corev1.PersistentVolumeClaim dv *cdiv1.DataVolume @@ -56,7 +56,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(cdiv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ -82,37 +82,37 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { }, } - cvi = &virtv2.ClusterVirtualImage{ + cvi = &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "vi", Generation: 1, UID: "11111111-1111-1111-1111-111111111111", }, - Status: virtv2.ClusterVirtualImageStatus{ - Size: virtv2.ImageStatusSize{ + Status: v1alpha2.ClusterVirtualImageStatus{ + Size: v1alpha2.ImageStatusSize{ UnpackedBytes: "100Mi", }, }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd", Generation: 1, UID: "22222222-2222-2222-2222-222222222222", }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualDiskObjectRef{ - Kind: virtv2.VirtualDiskObjectRefKindClusterVirtualImage, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualDiskObjectRef{ + Kind: v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage, Name: cvi.Name, }, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: sc.Name, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, @@ -149,8 +149,8 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Context("VirtualDisk has just been created", func() { It("must create DataVolume", func() { var dvCreated bool - vd.Status = virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + vd.Status = v1alpha2.VirtualDiskStatus{ + Target: 
v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, } @@ -169,7 +169,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(dvCreated).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -194,7 +194,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -211,7 +211,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -230,7 +230,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady)) ExpectStats(vd) }) }) @@ -258,7 +258,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -274,7 +274,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go index 9bbd038dee..66f210f75e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -50,7 +50,7 @@ func NewObjectRefVirtualDiskSnapshot(recorder eventrecord.EventRecorderLogger, d } } -func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx 
context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return reconcile.Result{}, errors.New("object ref missed for data source") } @@ -66,7 +66,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.Virt return reconcile.Result{}, err } - return steptaker.NewStepTakers[*virtv2.VirtualDisk]( + return steptaker.NewStepTakers[*v1alpha2.VirtualDisk]( step.NewReadyStep(ds.diskService, pvc, cb), step.NewTerminatingStep(pvc), step.NewCreatePVCFromVDSnapshotStep(pvc, ds.recorder, ds.client, cb), @@ -74,7 +74,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, vd *virtv2.Virt ).Run(ctx, vd) } -func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return errors.New("object ref missed for data source") } @@ -82,12 +82,12 @@ func (ds ObjectRefVirtualDiskSnapshot) Validate(ctx context.Context, vd *virtv2. vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{ Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace, - }, ds.client, &virtv2.VirtualDiskSnapshot{}) + }, ds.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return err } - if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady { return NewVirtualDiskSnapshotNotReadyError(vd.Spec.DataSource.ObjectRef.Name) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go index 9fd2265131..18ece6c3c9 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,10 +51,10 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { var ( ctx context.Context scheme *runtime.Scheme - vd *virtv2.VirtualDisk + vd *v1alpha2.VirtualDisk vs *vsv1.VolumeSnapshot sc *storagev1.StorageClass - vdSnapshot *virtv2.VirtualDiskSnapshot + vdSnapshot *v1alpha2.VirtualDiskSnapshot pvc *corev1.PersistentVolumeClaim recorder eventrecord.EventRecorderLogger svc *ObjectRefVirtualDiskSnapshotDiskServiceMock @@ -64,7 +64,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(vsv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ -112,35 +112,35 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = 
&v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vd-snapshot", UID: "11111111-1111-1111-1111-111111111111", }, - Spec: virtv2.VirtualDiskSnapshotSpec{}, - Status: virtv2.VirtualDiskSnapshotStatus{ - Phase: virtv2.VirtualDiskSnapshotPhaseReady, + Spec: v1alpha2.VirtualDiskSnapshotSpec{}, + Status: v1alpha2.VirtualDiskSnapshotStatus{ + Phase: v1alpha2.VirtualDiskSnapshotPhaseReady, VolumeSnapshotName: vs.Name, }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd", Generation: 1, UID: "22222222-2222-2222-2222-222222222222", }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualDiskObjectRef{ - Kind: virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualDiskObjectRef{ + Kind: v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot, Name: vdSnapshot.Name, }, }, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, @@ -150,8 +150,8 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { Context("VirtualDisk has just been created", func() { It("must create PVC", func() { var pvcCreated bool - vd.Status = virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + vd.Status = v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, } @@ -176,7 +176,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) Expect(vd.Status.SourceUID).ToNot(BeNil()) Expect(*vd.Status.SourceUID).ToNot(BeEmpty()) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) @@ -194,7 +194,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer)) }) It("is in provisioning", func() { @@ -209,7 +209,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) }) }) @@ -226,7 +226,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false) ExpectStats(vd) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady)) }) }) @@ -253,7 +253,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -269,20 +269,20 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, 
vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) }) -func ExpectStats(vd *virtv2.VirtualDisk) { +func ExpectStats(vd *v1alpha2.VirtualDisk) { Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) Expect(vd.Status.Capacity).ToNot(BeEmpty()) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Phase).ToNot(BeEmpty()) } -func ExpectCondition(vd *virtv2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.ReadyReason, msgExists bool) { +func ExpectCondition(vd *v1alpha2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.ReadyReason, msgExists bool) { ready, _ := conditions.GetCondition(vdcondition.Ready, vd.Status.Conditions) Expect(ready.Status).To(Equal(status)) Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go index 5925a37cb4..ecf5891a25 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,7 +51,7 @@ func NewObjectRefVirtualImage( } } -func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return reconcile.Result{}, errors.New("object ref missed for data source") } @@ -71,7 +71,7 @@ func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk return reconcile.Result{}, fmt.Errorf("fetch dv: %w", err) } - return steptaker.NewStepTakers[*virtv2.VirtualDisk]( + return steptaker.NewStepTakers[*v1alpha2.VirtualDisk]( step.NewReadyStep(ds.diskService, pvc, cb), step.NewTerminatingStep(pvc), step.NewCreateDataVolumeFromVirtualImageStep(pvc, dv, ds.diskService, ds.client, cb), @@ -81,13 +81,13 @@ func (ds ObjectRefVirtualImage) Sync(ctx context.Context, vd *virtv2.VirtualDisk ).Run(ctx, vd) } -func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ObjectRef == nil { return errors.New("object ref missed for data source") } viRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace} - viRef, err := object.FetchObject(ctx, viRefKey, ds.client, &virtv2.VirtualImage{}) + viRef, err := object.FetchObject(ctx, viRefKey, ds.client, &v1alpha2.VirtualImage{}) if err != nil { return fmt.Errorf("fetch vi %q: %w", viRefKey, err) } @@ -96,16 +96,16 @@ func (ds ObjectRefVirtualImage) Validate(ctx context.Context, vd *virtv2.Virtual return 
NewImageNotFoundError(vd.Spec.DataSource.ObjectRef.Name) } - if viRef.Status.Phase != virtv2.ImageReady { + if viRef.Status.Phase != v1alpha2.ImageReady { return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) } switch viRef.Spec.Storage { - case virtv2.StoragePersistentVolumeClaim, virtv2.StorageKubernetes: + case v1alpha2.StoragePersistentVolumeClaim, v1alpha2.StorageKubernetes: if viRef.Status.Target.PersistentVolumeClaim == "" { return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) } - case virtv2.StorageContainerRegistry, "": + case v1alpha2.StorageContainerRegistry, "": if viRef.Status.Target.RegistryURL == "" { return NewImageNotReadyError(vd.Spec.DataSource.ObjectRef.Name) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go index 8790e08830..cd640d4161 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -44,8 +44,8 @@ var _ = Describe("ObjectRef VirtualImage", func() { var ( ctx context.Context scheme *runtime.Scheme - vi *virtv2.VirtualImage - vd *virtv2.VirtualDisk + vi *v1alpha2.VirtualImage + vd *v1alpha2.VirtualDisk sc *storagev1.StorageClass pvc *corev1.PersistentVolumeClaim dv *cdiv1.DataVolume @@ -56,7 +56,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { ctx = logger.ToContext(context.TODO(), slog.Default()) scheme = runtime.NewScheme() - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) Expect(cdiv1.AddToScheme(scheme)).To(Succeed()) Expect(storagev1.AddToScheme(scheme)).To(Succeed()) @@ -82,37 +82,37 @@ var _ = Describe("ObjectRef VirtualImage", func() { }, } - vi = &virtv2.VirtualImage{ + vi = &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: "vi", Generation: 1, UID: "11111111-1111-1111-1111-111111111111", }, - Status: virtv2.VirtualImageStatus{ - Size: virtv2.ImageStatusSize{ + Status: v1alpha2.VirtualImageStatus{ + Size: v1alpha2.ImageStatusSize{ UnpackedBytes: "100Mi", }, }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd", Generation: 1, UID: "22222222-2222-2222-2222-222222222222", }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualDiskObjectRef{ - Kind: virtv2.VirtualDiskObjectRefKindVirtualImage, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: &v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualDiskObjectRef{ + Kind: v1alpha2.VirtualDiskObjectRefKindVirtualImage, Name: vi.Name, }, }, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: sc.Name, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, @@ -149,8 +149,8 
@@ var _ = Describe("ObjectRef VirtualImage", func() { Context("VirtualDisk has just been created", func() { It("must create DataVolume", func() { var dvCreated bool - vd.Status = virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + vd.Status = v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, } @@ -169,7 +169,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(dvCreated).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -194,7 +194,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.WaitingForFirstConsumer, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskWaitForFirstConsumer)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskWaitForFirstConsumer)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -211,7 +211,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Provisioning, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskProvisioning)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskProvisioning)) Expect(vd.Status.Progress).ToNot(BeEmpty()) Expect(vd.Status.Target.PersistentVolumeClaim).ToNot(BeEmpty()) }) @@ -230,7 +230,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionTrue, vdcondition.Ready, false) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskReady)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskReady)) ExpectStats(vd) }) }) @@ -258,7 +258,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) @@ -274,7 +274,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { Expect(res.IsZero()).To(BeTrue()) ExpectCondition(vd, metav1.ConditionFalse, vdcondition.Lost, true) - Expect(vd.Status.Phase).To(Equal(virtv2.DiskLost)) + Expect(vd.Status.Phase).To(Equal(v1alpha2.DiskLost)) Expect(vd.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty()) }) }) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go index 8adbdaa6f1..227ade7032 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go @@ -46,7 +46,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -79,7 +79,7 @@ func NewRegistryDataSource( } } -func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) 
{ +func (ds RegistryDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, registryDataSource) condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) @@ -145,7 +145,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Registry DataSource import to DVCR has started", ) @@ -165,14 +165,14 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitForUserUpload). @@ -187,7 +187,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( return reconcile.Result{}, setPhaseConditionFromPodError(ctx, err, pod, vd, cb, ds.client) } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -203,17 +203,17 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Registry DataSource import to PVC has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -262,7 +262,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( if updated, err := setPhaseConditionFromStorageError(err, vd, cb); err != nil || updated { return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -270,9 +270,9 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). 
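// Illustrative sketch, not part of the patch: the dominant mechanical change in
// these hunks is dropping the repo-local "virtv2" import alias in favor of the
// package's own name, v1alpha2. A minimal example of the resulting convention,
// assuming only the VirtualDisk type and phase constants visible above:
package sketch

import (
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// markPending is a hypothetical helper; the point is that the identifier used
// in code now matches the final path element of the import, so readers and
// goimports no longer need to know about a repo-specific alias.
func markPending(vd *v1alpha2.VirtualDisk) {
	vd.Status.Phase = v1alpha2.DiskPending
}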
@@ -280,9 +280,9 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). @@ -291,7 +291,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -303,11 +303,11 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Registry DataSource import has completed", ) - vd.Status.Phase = virtv2.DiskReady + vd.Status.Phase = v1alpha2.DiskReady cb. Status(metav1.ConditionTrue). Reason(vdcondition.Ready). @@ -348,7 +348,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) ( return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUp(ctx, supgen) @@ -364,7 +364,7 @@ func (ds RegistryDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk return importerRequeue || diskRequeue, nil } -func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { supgen := vdsupplements.NewGenerator(vd) importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen) @@ -384,7 +384,7 @@ func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2. 
} } -func (ds RegistryDataSource) Validate(ctx context.Context, vd *virtv2.VirtualDisk) error { +func (ds RegistryDataSource) Validate(ctx context.Context, vd *v1alpha2.VirtualDisk) error { if vd.Spec.DataSource == nil || vd.Spec.DataSource.ContainerImage == nil { return errors.New("container image missed for data source") } @@ -411,7 +411,7 @@ func (ds RegistryDataSource) Name() string { return registryDataSource } -func (ds RegistryDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *importer.Settings { +func (ds RegistryDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen supplements.Generator) *importer.Settings { var settings importer.Settings containerImage := &datasource.ContainerRegistry{ @@ -449,7 +449,7 @@ func (ds RegistryDataSource) getSource(sup supplements.Generator, dvcrSourceImag } } -func (ds RegistryDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { +func (ds RegistryDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { // Get size from the importer Pod to detect if specified PVC size is enough. unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go index bd1ed24f20..aeb6ecc3ee 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go @@ -23,7 +23,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" - storev1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,30 +37,30 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) type Sources struct { - sources map[virtv2.DataSourceType]Handler + sources map[v1alpha2.DataSourceType]Handler } func NewSources() *Sources { return &Sources{ - sources: make(map[virtv2.DataSourceType]Handler), + sources: make(map[v1alpha2.DataSourceType]Handler), } } -func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) { +func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) { s.sources[dsType] = h } -func (s Sources) Get(dsType virtv2.DataSourceType) (Handler, bool) { +func (s Sources) Get(dsType v1alpha2.DataSourceType) (Handler, bool) { source, ok := s.sources[dsType] return source, ok } -func (s Sources) Changed(_ context.Context, vd *virtv2.VirtualDisk) bool { +func (s Sources) Changed(_ context.Context, vd *v1alpha2.VirtualDisk) bool { if vd.Generation == 1 { return false } @@ -68,7 +68,7 @@ func (s Sources) Changed(_ context.Context, vd *virtv2.VirtualDisk) bool { return vd.Generation != vd.Status.ObservedGeneration } -func (s Sources) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (s Sources) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { var requeue 
bool for _, source := range s.sources { @@ -84,10 +84,10 @@ func (s Sources) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, err } type SupplementsCleaner interface { - CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) + CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) } -func CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk, c SupplementsCleaner) (reconcile.Result, error) { +func CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk, c SupplementsCleaner) (reconcile.Result, error) { if object.ShouldCleanupSubResources(vd) { return c.CleanUpSupplements(ctx, vd) } @@ -102,13 +102,13 @@ func IsDiskProvisioningFinished(c metav1.Condition) bool { func setPhaseConditionForFinishedDisk( pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder, - phase *virtv2.DiskPhase, + phase *v1alpha2.DiskPhase, supgen supplements.Generator, ) { - var newPhase virtv2.DiskPhase + var newPhase v1alpha2.DiskPhase switch { case pvc == nil: - newPhase = virtv2.DiskLost + newPhase = v1alpha2.DiskLost cb. Status(metav1.ConditionFalse). Reason(vdcondition.Lost). @@ -116,14 +116,14 @@ func setPhaseConditionForFinishedDisk( case pvc.Status.Phase == corev1.ClaimLost: cb.Status(metav1.ConditionFalse) if pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true" { - newPhase = virtv2.DiskExporting + newPhase = v1alpha2.DiskExporting cb.Reason(vdcondition.Exporting).Message("PV is being exported") } else { - newPhase = virtv2.DiskLost + newPhase = v1alpha2.DiskLost cb.Reason(vdcondition.Lost).Message(fmt.Sprintf("PV %s not found.", pvc.Spec.VolumeName)) } default: - newPhase = virtv2.DiskReady + newPhase = v1alpha2.DiskReady cb. Status(metav1.ConditionTrue). Reason(vdcondition.Ready). @@ -138,19 +138,19 @@ type CheckImportProcess interface { CheckImportProcess(ctx context.Context, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error } -func setPhaseConditionFromStorageError(err error, vd *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (bool, error) { +func setPhaseConditionFromStorageError(err error, vd *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (bool, error) { switch { case err == nil: return false, nil case errors.Is(err, service.ErrStorageProfileNotFound): - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). Message("StorageProfile not found in the cluster: Please check a StorageClass name in the cluster or set a default StorageClass.") return true, nil case errors.Is(err, service.ErrDefaultStorageClassNotFound): - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -164,9 +164,9 @@ func setPhaseConditionFromStorageError(err error, vd *virtv2.VirtualDisk, cb *co func setPhaseConditionForPVCProvisioningDisk( ctx context.Context, dv *cdiv1.DataVolume, - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, - sc *storev1.StorageClass, + sc *storagev1.StorageClass, cb *conditions.ConditionBuilder, checker CheckImportProcess, ) error { @@ -174,7 +174,7 @@ func setPhaseConditionForPVCProvisioningDisk( switch { case err == nil: if dv == nil { - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). 
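// Illustrative sketch, not part of the patch: how the Sources registry defined
// above is typically consumed. Handler is reduced to the single method this
// sketch needs; the real interface also carries Validate and CleanUp.
package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

type Handler interface {
	Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error)
}

type Sources struct {
	sources map[v1alpha2.DataSourceType]Handler
}

// dispatch routes the disk to the handler registered for its data source type.
func (s Sources) dispatch(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) {
	if vd.Spec.DataSource == nil {
		return reconcile.Result{}, nil // blank disk: no data source to sync from
	}
	h, ok := s.sources[vd.Spec.DataSource.Type]
	if !ok {
		return reconcile.Result{}, nil
	}
	return h.Sync(ctx, vd)
}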
@@ -182,7 +182,7 @@ func setPhaseConditionForPVCProvisioningDisk( return nil } if isStorageClassWFFC(sc) && (dv.Status.Phase == cdiv1.PendingPopulation || dv.Status.Phase == cdiv1.WaitForFirstConsumer) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitingForFirstConsumer). @@ -190,14 +190,14 @@ func setPhaseConditionForPVCProvisioningDisk( return nil } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). Message("Import is in the process of provisioning to PVC.") return nil case errors.Is(err, service.ErrDataVolumeNotRunning): - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -212,20 +212,20 @@ func setPhaseConditionFromPodError( ctx context.Context, podErr error, pod *corev1.Pod, - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder, c client.Client, ) error { switch { case errors.Is(podErr, service.ErrNotInitialized): - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningNotStarted). Message(service.CapitalizeFirstLetter(podErr.Error()) + ".") return nil case errors.Is(podErr, service.ErrNotScheduled): - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending nodePlacement, err := getNodePlacement(ctx, c, vd) if err != nil { @@ -276,7 +276,7 @@ func setPhaseConditionFromProvisioningError( ctx context.Context, provisioningErr error, cb *conditions.ConditionBuilder, - vd *virtv2.VirtualDisk, + vd *v1alpha2.VirtualDisk, dv *cdiv1.DataVolume, cleaner Cleaner, c client.Client, @@ -297,7 +297,7 @@ func setPhaseConditionFromProvisioningError( return err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning if isChanged { supgen := vdsupplements.NewGenerator(vd) @@ -328,14 +328,14 @@ func setPhaseConditionFromProvisioningError( } // Deprecated. -func getNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDisk) (*provisioner.NodePlacement, error) { +func getNodePlacement(ctx context.Context, c client.Client, vd *v1alpha2.VirtualDisk) (*provisioner.NodePlacement, error) { return step.GetNodePlacement(ctx, c, vd) } const retryPeriod = 1 -func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.DiskPhase, err error, creationTimestamp metav1.Time) reconcile.Result { - *phase = virtv2.DiskFailed +func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.DiskPhase, err error, creationTimestamp metav1.Time) reconcile.Result { + *phase = v1alpha2.DiskFailed cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed) @@ -349,16 +349,16 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt return reconcile.Result{RequeueAfter: retryPeriod * time.Minute} } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.DiskPhase, err error) { - *phase = virtv2.DiskFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.DiskPhase, err error) { + *phase = v1alpha2.DiskFailed cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). 
Message(service.CapitalizeFirstLetter(err.Error()) + ".") } -func isStorageClassWFFC(sc *storev1.StorageClass) bool { - return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storev1.VolumeBindingWaitForFirstConsumer +func isStorageClassWFFC(sc *storagev1.StorageClass) bool { + return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer } const ( diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go index b1fb823595..0d56c6b194 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_blank_pvc_step.go @@ -23,7 +23,7 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - storev1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -37,14 +37,14 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) const createStep = "create" type VolumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) + GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) } type CreateBlankPVCStep struct { @@ -68,7 +68,7 @@ func NewCreateBlankPVCStep( } } -func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreateBlankPVCStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc != nil { return nil, nil } @@ -81,7 +81,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* return nil, errors.New("spec.persistentVolumeClaim.size should be set for blank virtual disk") } - sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storev1.StorageClass{}) + sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storagev1.StorageClass{}) if err != nil { return nil, fmt.Errorf("get storage class: %w", err) } @@ -101,7 +101,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* Name: key.Name, Namespace: key.Namespace, Finalizers: []string{ - virtv2.FinalizerVDProtection, + v1alpha2.FinalizerVDProtection, }, OwnerReferences: []metav1.OwnerReference{ service.MakeOwnerReference(vd), @@ -121,7 +121,7 @@ func (s CreateBlankPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (* if strings.Contains(err.Error(), "exceeded quota") { log.Debug("Quota exceeded during PVC creation") - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.QuotaExceeded). 
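// Illustrative sketch, not part of the patch: the WaitForFirstConsumer helper
// renamed above (the storev1 alias becomes storagev1), exercised standalone.
// Both the StorageClass pointer and its VolumeBindingMode may be nil, hence
// the chain of guards before the dereference.
package sketch

import (
	storagev1 "k8s.io/api/storage/v1"
)

func isStorageClassWFFC(sc *storagev1.StorageClass) bool {
	return sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
}

func example() bool {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := &storagev1.StorageClass{VolumeBindingMode: &mode}
	return isStorageClassWFFC(sc) // true: the PVC binds only once a consumer pod is scheduled
}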
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go index d2a137ed1b..da290ff1f7 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_cvi_step.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -64,13 +64,13 @@ func NewCreateDataVolumeFromClusterVirtualImageStep( } } -func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc != nil || s.dv != nil { return nil, nil } cviRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name} - cviRef, err := object.FetchObject(ctx, cviRefKey, s.client, &virtv2.ClusterVirtualImage{}) + cviRef, err := object.FetchObject(ctx, cviRefKey, s.client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return nil, fmt.Errorf("fetch cvi %q: %w", cviRefKey, err) } @@ -82,7 +82,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v vd.Status.SourceUID = ptr.To(cviRef.UID) if imageformat.IsISO(cviRef.Status.Format) { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -95,7 +95,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v size, err := s.getPVCSize(vd, cviRef) if err != nil { if errors.Is(err, service.ErrInsufficientPVCSize) { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). 
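// Illustrative sketch, not part of the patch: the contract that getPVCSize
// above appears to rely on. service.GetValidatedPVCSize lives elsewhere in the
// repo; this is a guess at its behavior, assuming the requested size (a
// *resource.Quantity from spec.persistentVolumeClaim.size) must not be smaller
// than the image's unpacked size.
package sketch

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

var errInsufficientPVCSize = errors.New("insufficient PVC size")

func getValidatedPVCSize(requested *resource.Quantity, unpacked resource.Quantity) (resource.Quantity, error) {
	if unpacked.IsZero() {
		return resource.Quantity{}, fmt.Errorf("unpacked image size is zero")
	}
	if requested == nil {
		return unpacked, nil // no explicit request: fall back to the unpacked size
	}
	if requested.Cmp(unpacked) < 0 {
		return resource.Quantity{}, errInsufficientPVCSize
	}
	return *requested, nil
}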
@@ -109,7 +109,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) Take(ctx context.Context, v return NewCreateDataVolumeStep(s.dv, s.disk, s.client, source, size, s.cb).Take(ctx, vd) } -func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk, cviRef *virtv2.ClusterVirtualImage) (resource.Quantity, error) { +func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *v1alpha2.VirtualDisk, cviRef *v1alpha2.ClusterVirtualImage) (resource.Quantity, error) { unpackedSize, err := resource.ParseQuantity(cviRef.Status.Size.UnpackedBytes) if err != nil { return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", cviRef.Status.Size.UnpackedBytes, err) @@ -122,7 +122,7 @@ func (s CreateDataVolumeFromClusterVirtualImageStep) getPVCSize(vd *virtv2.Virtu return service.GetValidatedPVCSize(vd.Spec.PersistentVolumeClaim.Size, unpackedSize) } -func (s CreateDataVolumeFromClusterVirtualImageStep) getSource(vd *virtv2.VirtualDisk, cviRef *virtv2.ClusterVirtualImage) *cdiv1.DataVolumeSource { +func (s CreateDataVolumeFromClusterVirtualImageStep) getSource(vd *v1alpha2.VirtualDisk, cviRef *v1alpha2.ClusterVirtualImage) *cdiv1.DataVolumeSource { supgen := vdsupplements.NewGenerator(vd) url := common.DockerRegistrySchemePrefix + cviRef.Status.Target.RegistryURL diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go index 2e23d29cda..959f65ecfd 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_from_vi_step.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -64,13 +64,13 @@ func NewCreateDataVolumeFromVirtualImageStep( } } -func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc != nil || s.dv != nil { return nil, nil } viRefKey := types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace} - viRef, err := object.FetchObject(ctx, viRefKey, s.client, &virtv2.VirtualImage{}) + viRef, err := object.FetchObject(ctx, viRefKey, s.client, &v1alpha2.VirtualImage{}) if err != nil { return nil, fmt.Errorf("fetch vi %q: %w", viRefKey, err) } @@ -82,7 +82,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt vd.Status.SourceUID = ptr.To(viRef.UID) if imageformat.IsISO(viRef.Status.Format) { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). 
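// Illustrative sketch, not part of the patch: the two DataVolumeSource shapes
// produced by the getSource methods in the surrounding hunks. An image stored
// on a PVC is cloned PVC-to-PVC; one stored in the container registry (DVCR)
// is imported from a docker:// URL. Field names follow the CDI v1beta1 API;
// the certConfigMap and secret supplements wired by the real code are omitted.
package sketch

import (
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

func pvcSource(name, namespace string) *cdiv1.DataVolumeSource {
	return &cdiv1.DataVolumeSource{
		PVC: &cdiv1.DataVolumeSourcePVC{Name: name, Namespace: namespace},
	}
}

func registrySource(url string) *cdiv1.DataVolumeSource {
	return &cdiv1.DataVolumeSource{
		Registry: &cdiv1.DataVolumeSourceRegistry{URL: &url},
	}
}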
@@ -98,7 +98,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt size, err := s.getPVCSize(vd, viRef) if err != nil { if errors.Is(err, service.ErrInsufficientPVCSize) { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -112,7 +112,7 @@ func (s CreateDataVolumeFromVirtualImageStep) Take(ctx context.Context, vd *virt return NewCreateDataVolumeStep(s.dv, s.disk, s.client, source, size, s.cb).Take(ctx, vd) } -func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk, viRef *virtv2.VirtualImage) (resource.Quantity, error) { +func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *v1alpha2.VirtualDisk, viRef *v1alpha2.VirtualImage) (resource.Quantity, error) { unpackedSize, err := resource.ParseQuantity(viRef.Status.Size.UnpackedBytes) if err != nil { return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", viRef.Status.Size.UnpackedBytes, err) @@ -125,16 +125,16 @@ func (s CreateDataVolumeFromVirtualImageStep) getPVCSize(vd *virtv2.VirtualDisk, return service.GetValidatedPVCSize(vd.Spec.PersistentVolumeClaim.Size, unpackedSize) } -func (s CreateDataVolumeFromVirtualImageStep) getSource(vd *virtv2.VirtualDisk, viRef *virtv2.VirtualImage) (*cdiv1.DataVolumeSource, error) { +func (s CreateDataVolumeFromVirtualImageStep) getSource(vd *v1alpha2.VirtualDisk, viRef *v1alpha2.VirtualImage) (*cdiv1.DataVolumeSource, error) { switch viRef.Spec.Storage { - case virtv2.StoragePersistentVolumeClaim, virtv2.StorageKubernetes: + case v1alpha2.StoragePersistentVolumeClaim, v1alpha2.StorageKubernetes: return &cdiv1.DataVolumeSource{ PVC: &cdiv1.DataVolumeSourcePVC{ Name: viRef.Status.Target.PersistentVolumeClaim, Namespace: viRef.Namespace, }, }, nil - case virtv2.StorageContainerRegistry, "": + case v1alpha2.StorageContainerRegistry, "": supgen := vdsupplements.NewGenerator(vd) url := common.DockerRegistrySchemePrefix + viRef.Status.Target.RegistryURL diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go index 82aac0c9fe..5099eb1c5e 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -70,7 +70,7 @@ func NewCreateDataVolumeStep( } } -func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreateDataVolumeStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.dv != nil { return nil, nil } @@ -95,7 +95,7 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) case err == nil: // OK. case errors.Is(err, service.ErrStorageProfileNotFound): - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). 
Reason(vicondition.ProvisioningFailed). @@ -108,13 +108,13 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) return nil, nil } -func GetNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDisk) (*provisioner.NodePlacement, error) { +func GetNodePlacement(ctx context.Context, c client.Client, vd *v1alpha2.VirtualDisk) (*provisioner.NodePlacement, error) { if len(vd.Status.AttachedToVirtualMachines) != 1 { return nil, nil } vmKey := types.NamespacedName{Name: vd.Status.AttachedToVirtualMachines[0].Name, Namespace: vd.Namespace} - vm, err := object.FetchObject(ctx, vmKey, c, &virtv2.VirtualMachine{}) + vm, err := object.FetchObject(ctx, vmKey, c, &v1alpha2.VirtualMachine{}) if err != nil { return nil, fmt.Errorf("unable to get the virtual machine %s: %w", vmKey, err) } @@ -127,7 +127,7 @@ func GetNodePlacement(ctx context.Context, c client.Client, vd *virtv2.VirtualDi nodePlacement.Tolerations = append(nodePlacement.Tolerations, vm.Spec.Tolerations...) vmClassKey := types.NamespacedName{Name: vm.Spec.VirtualMachineClassName} - vmClass, err := object.FetchObject(ctx, vmClassKey, c, &virtv2.VirtualMachineClass{}) + vmClass, err := object.FetchObject(ctx, vmClassKey, c, &v1alpha2.VirtualMachineClass{}) if err != nil { return nil, fmt.Errorf("unable to get the virtual machine class %s: %w", vmClassKey, err) } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go index 28e7befcb1..955c9fd662 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_pvc_from_vdsnapshot_step.go @@ -24,6 +24,7 @@ import ( vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -38,7 +39,8 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization-controller/pkg/logger" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -63,7 +65,7 @@ func NewCreatePVCFromVDSnapshotStep( } } -func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc != nil { return nil, nil } @@ -71,17 +73,17 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua s.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) - vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}, s.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: 
vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, fmt.Errorf("fetch virtual disk snapshot: %w", err) } if vdSnapshot == nil { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningNotStarted). @@ -94,8 +96,8 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua return nil, fmt.Errorf("fetch volume snapshot: %w", err) } - if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { - vd.Status.Phase = virtv2.DiskPending + if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningNotStarted). @@ -103,6 +105,21 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua return &reconcile.Result{}, nil } + if err := s.validateStorageClassCompatibility(ctx, vd, vdSnapshot, vs); err != nil { + vd.Status.Phase = v1alpha2.DiskFailed + s.cb. + Status(metav1.ConditionFalse). + Reason(vdcondition.ProvisioningFailed). + Message(err.Error()) + s.recorder.Event( + vd, + corev1.EventTypeWarning, + v1alpha2.ReasonDataSourceSyncFailed, + err.Error(), + ) + return &reconcile.Result{}, nil + } + pvc := s.buildPVC(vd, vs) err = s.client.Create(ctx, pvc) @@ -110,7 +127,7 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua return nil, fmt.Errorf("create pvc: %w", err) } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). 
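// Illustrative sketch, not part of the patch: the readiness gate applied above
// before a PVC is restored from a snapshot, isolated into a helper. Every
// pointer on the VolumeSnapshot status chain is checked before dereferencing
// ReadyToUse.
package sketch

import (
	vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
)

func snapshotReadyToUse(vs *vsv1.VolumeSnapshot) bool {
	return vs != nil && vs.Status != nil && vs.Status.ReadyToUse != nil && *vs.Status.ReadyToUse
}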
@@ -126,7 +143,7 @@ func (s CreatePVCFromVDSnapshotStep) Take(ctx context.Context, vd *virtv2.Virtua // AddOriginalMetadata adds original annotations and labels from VolumeSnapshot to VirtualDisk, // without overwriting existing values -func (s CreatePVCFromVDSnapshotStep) AddOriginalMetadata(vd *virtv2.VirtualDisk, vs *vsv1.VolumeSnapshot) { +func (s CreatePVCFromVDSnapshotStep) AddOriginalMetadata(vd *v1alpha2.VirtualDisk, vs *vsv1.VolumeSnapshot) { if vd.Annotations == nil { vd.Annotations = make(map[string]string) } @@ -157,11 +174,17 @@ func (s CreatePVCFromVDSnapshotStep) AddOriginalMetadata(vd *virtv2.VirtualDisk, } } -func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { - storageClassName := vs.Annotations[annotations.AnnStorageClassName] - if storageClassName == "" { - storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated] +func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *v1alpha2.VirtualDisk, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { + var storageClassName string + if vd.Spec.PersistentVolumeClaim.StorageClass != nil && *vd.Spec.PersistentVolumeClaim.StorageClass != "" { + storageClassName = *vd.Spec.PersistentVolumeClaim.StorageClass + } else { + storageClassName = vs.Annotations[annotations.AnnStorageClassName] + if storageClassName == "" { + storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated] + } } + volumeMode := vs.Annotations[annotations.AnnVolumeMode] if volumeMode == "" { volumeMode = vs.Annotations[annotations.AnnVolumeModeDeprecated] @@ -210,7 +233,7 @@ func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.V Name: pvcKey.Name, Namespace: pvcKey.Namespace, Finalizers: []string{ - virtv2.FinalizerVDProtection, + v1alpha2.FinalizerVDProtection, }, OwnerReferences: []metav1.OwnerReference{ service.MakeOwnerReference(vd), @@ -219,3 +242,52 @@ func (s CreatePVCFromVDSnapshotStep) buildPVC(vd *virtv2.VirtualDisk, vs *vsv1.V Spec: spec, } } + +func (s CreatePVCFromVDSnapshotStep) validateStorageClassCompatibility(ctx context.Context, vd *v1alpha2.VirtualDisk, vdSnapshot *v1alpha2.VirtualDiskSnapshot, vs *vsv1.VolumeSnapshot) error { + if vd.Spec.PersistentVolumeClaim.StorageClass == nil || *vd.Spec.PersistentVolumeClaim.StorageClass == "" { + return nil + } + + targetSCName := *vd.Spec.PersistentVolumeClaim.StorageClass + + var targetSC storagev1.StorageClass + err := s.client.Get(ctx, types.NamespacedName{Name: targetSCName}, &targetSC) + if err != nil { + return fmt.Errorf("cannot fetch target storage class %q: %w", targetSCName, err) + } + + log, _ := logger.GetDataSourceContext(ctx, "objectref") + if vs.Spec.Source.PersistentVolumeClaimName == nil || *vs.Spec.Source.PersistentVolumeClaimName == "" { + log.With("volumeSnapshot.name", vs.Name).Debug("Cannot determine original PVC from VolumeSnapshot, skipping storage class compatibility validation") + return nil + } + + pvcName := *vs.Spec.Source.PersistentVolumeClaimName + + var originalPVC corev1.PersistentVolumeClaim + err = s.client.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: vdSnapshot.Namespace}, &originalPVC) + if err != nil { + return fmt.Errorf("cannot fetch original PVC %q: %w", pvcName, err) + } + + originalProvisioner := originalPVC.Annotations[annotations.AnnStorageProvisioner] + if originalProvisioner == "" { + originalProvisioner = originalPVC.Annotations[annotations.AnnStorageProvisionerDeprecated] + } + + if originalProvisioner == "" { + 
log.With("pvc.name", pvcName).Debug("Cannot determine original provisioner from PVC annotations, skipping storage class compatibility validation") + return nil + } + + if targetSC.Provisioner != originalProvisioner { + return fmt.Errorf( + "cannot restore snapshot to storage class %q: incompatible storage providers. "+ + "Original snapshot was created by %q, target storage class uses %q. "+ + "Cross-provider snapshot restore is not supported", + targetSCName, originalProvisioner, targetSC.Provisioner, + ) + } + + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go index 12243c76d0..120505af0f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ensure_node_placement.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -66,7 +66,7 @@ func NewEnsureNodePlacementStep( } } -func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } @@ -92,7 +92,7 @@ func (s EnsureNodePlacementStep) Take(ctx context.Context, vd *virtv2.VirtualDis return nil, fmt.Errorf("is node placement changed: %w", err) } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning if !isChanged { s.cb. diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go index 5303c2e987..9825efaeb4 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -62,12 +62,12 @@ func NewReadyStep( } } -func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s ReadyStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogStep(readyStep)) if s.pvc == nil { if vd.Status.Progress == "100%" { - vd.Status.Phase = virtv2.DiskLost + vd.Status.Phase = v1alpha2.DiskLost s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Lost). 
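// Illustrative sketch, not part of the patch: the core of the cross-provisioner
// guard introduced by validateStorageClassCompatibility above. The literal
// annotation keys are the upstream Kubernetes ones that the repo's annotations
// package is assumed to wrap.
package sketch

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
)

const (
	annStorageProvisioner           = "volume.kubernetes.io/storage-provisioner"
	annStorageProvisionerDeprecated = "volume.beta.kubernetes.io/storage-provisioner"
)

func sameProvisioner(original *corev1.PersistentVolumeClaim, target *storagev1.StorageClass) error {
	p := original.Annotations[annStorageProvisioner]
	if p == "" {
		p = original.Annotations[annStorageProvisionerDeprecated]
	}
	if p == "" {
		return nil // unknown origin: the check is skipped rather than failed
	}
	if target.Provisioner != p {
		return fmt.Errorf("snapshot provisioned by %q cannot be restored via storage class using %q", p, target.Provisioner)
	}
	return nil
}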
@@ -85,10 +85,10 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile case corev1.ClaimLost: s.cb.Status(metav1.ConditionFalse) if s.pvc.GetAnnotations()[annotations.AnnDataExportRequest] == "true" { - vd.Status.Phase = virtv2.DiskExporting + vd.Status.Phase = v1alpha2.DiskExporting s.cb.Reason(vdcondition.Exporting).Message("PV is being exported") } else { - vd.Status.Phase = virtv2.DiskLost + vd.Status.Phase = v1alpha2.DiskLost s.cb. Reason(vdcondition.Lost). Message(fmt.Sprintf("The PersistentVolume %q not found.", s.pvc.Spec.VolumeName)) @@ -98,7 +98,7 @@ func (s ReadyStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile return &reconcile.Result{}, nil case corev1.ClaimBound: - vd.Status.Phase = virtv2.DiskReady + vd.Status.Phase = v1alpha2.DiskReady s.cb. Status(metav1.ConditionTrue). Reason(vdcondition.Ready). diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go index 4cb14b6668..910cf9f821 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/terminating_step.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const terminatingStep = "terminating" @@ -40,7 +40,7 @@ func NewTerminatingStep(pvc *corev1.PersistentVolumeClaim) *TerminatingStep { } } -func (s TerminatingStep) Take(ctx context.Context, _ *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s TerminatingStep) Take(ctx context.Context, _ *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go index 9b6e8999ea..656d60dd72 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_dv_step.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -66,9 +66,9 @@ func NewWaitForDVStep( } } -func (s WaitForDVStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s WaitForDVStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.dv == nil { - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). 
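// Illustrative sketch, not part of the patch: the PVC-phase-to-disk-phase
// mapping that ReadyStep applies above, with the export annotation taking
// precedence over Lost. The default branch is a guess; the hunks only show
// the ClaimLost and ClaimBound cases.
package sketch

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

func diskPhaseForPVC(pvc *corev1.PersistentVolumeClaim, exportAnnotation string) v1alpha2.DiskPhase {
	switch pvc.Status.Phase {
	case corev1.ClaimLost:
		if pvc.GetAnnotations()[exportAnnotation] == "true" {
			return v1alpha2.DiskExporting // PV is being exported, not lost
		}
		return v1alpha2.DiskLost
	case corev1.ClaimBound:
		return v1alpha2.DiskReady
	default:
		return v1alpha2.DiskProvisioning
	}
}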
@@ -112,9 +112,9 @@ func (s WaitForDVStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*recon return nil, nil } -func (s WaitForDVStep) setForProvisioning(vd *virtv2.VirtualDisk) (set bool) { +func (s WaitForDVStep) setForProvisioning(vd *v1alpha2.VirtualDisk) (set bool) { if s.dv.Status.Phase != cdiv1.Succeeded { - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -125,7 +125,7 @@ func (s WaitForDVStep) setForProvisioning(vd *virtv2.VirtualDisk) (set bool) { return false } -func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *virtv2.VirtualDisk) (set bool, err error) { +func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *v1alpha2.VirtualDisk) (set bool, err error) { sc, err := object.FetchObject(ctx, types.NamespacedName{Name: vd.Status.StorageClassName}, s.client, &storagev1.StorageClass{}) if err != nil { return false, fmt.Errorf("get sc: %w", err) @@ -133,7 +133,7 @@ func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *vir isWFFC := sc != nil && sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer if isWFFC && (s.dv.Status.Phase == cdiv1.PendingPopulation || s.dv.Status.Phase == cdiv1.WaitForFirstConsumer) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitingForFirstConsumer). @@ -144,12 +144,12 @@ func (s WaitForDVStep) setForFirstConsumerIsAwaited(ctx context.Context, vd *vir return false, nil } -func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *virtv2.VirtualDisk, inwffc bool) (ok bool) { +func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *v1alpha2.VirtualDisk, inwffc bool) (ok bool) { dvQuotaNotExceededCondition, _ := conditions.GetDataVolumeCondition(conditions.DVQoutaNotExceededConditionType, s.dv.Status.Conditions) if dvQuotaNotExceededCondition.Status == corev1.ConditionFalse { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if inwffc { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } s.cb. Status(metav1.ConditionFalse). @@ -161,18 +161,18 @@ func (s WaitForDVStep) checkQoutaNotExceededCondition(vd *virtv2.VirtualDisk, in return true } -func (s WaitForDVStep) checkRunningCondition(vd *virtv2.VirtualDisk) (ok bool) { +func (s WaitForDVStep) checkRunningCondition(vd *v1alpha2.VirtualDisk) (ok bool) { dvRunningCondition, _ := conditions.GetDataVolumeCondition(conditions.DVRunningConditionType, s.dv.Status.Conditions) switch { case dvRunningCondition.Reason == conditions.DVImagePullFailedReason: - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ImagePullFailed). Message(dvRunningCondition.Message) return false case strings.Contains(dvRunningCondition.Reason, "Error"): - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). 
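setForFirstConsumerIsAwaited above hinges on a single predicate: the StorageClass must explicitly opt into WaitForFirstConsumer binding, because a nil VolumeBindingMode defaults to Immediate. As a standalone helper:

```go
package sketch

import (
	storagev1 "k8s.io/api/storage/v1"
)

// isWaitForFirstConsumer reports whether a StorageClass defers volume binding
// until the first consumer pod is scheduled. A nil mode means Immediate, so
// only an explicit WaitForFirstConsumer value counts.
func isWaitForFirstConsumer(sc *storagev1.StorageClass) bool {
	return sc != nil &&
		sc.VolumeBindingMode != nil &&
		*sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
}
```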
@@ -183,7 +183,7 @@ func (s WaitForDVStep) checkRunningCondition(vd *virtv2.VirtualDisk) (ok bool) { } } -func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.VirtualDisk) (ok bool, err error) { +func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *v1alpha2.VirtualDisk) (ok bool, err error) { if s.pvc == nil { return true, nil } @@ -201,7 +201,7 @@ func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.Vir if cdiImporterPrime != nil { podInitializedCond, _ := conditions.GetPodCondition(corev1.PodInitialized, cdiImporterPrime.Status.Conditions) if podInitializedCond.Status == corev1.ConditionFalse && strings.Contains(podInitializedCond.Reason, "Error") { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ImagePullFailed). @@ -211,7 +211,7 @@ func (s WaitForDVStep) checkImporterPrimePod(ctx context.Context, vd *virtv2.Vir podScheduledCond, _ := conditions.GetPodCondition(corev1.PodScheduled, cdiImporterPrime.Status.Conditions) if podScheduledCond.Status == corev1.ConditionFalse && strings.Contains(podScheduledCond.Reason, "Error") { - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.ImagePullFailed). diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go index 1ccbf71542..538b7f1e15 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/wait_for_pvc_step.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -51,9 +51,9 @@ func NewWaitForPVCStep( } } -func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reconcile.Result, error) { +func (s WaitForPVCStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk) (*reconcile.Result, error) { if s.pvc == nil { - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -71,7 +71,7 @@ func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reco } if wffc { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitingForFirstConsumer). @@ -79,7 +79,7 @@ func (s WaitForPVCStep) Take(ctx context.Context, vd *virtv2.VirtualDisk) (*reco return &reconcile.Result{}, nil } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). 
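checkImporterPrimePod above applies the same test to two pod conditions, PodInitialized and PodScheduled: explicitly False with an Error-ish reason. A self-contained version of that test, assuming nothing beyond the core/v1 types:

```go
package sketch

import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// podConditionFailed reports whether condType on the pod is ConditionFalse
// with a reason containing "Error", mirroring the importer-prime checks above.
// It returns the condition message so the caller can surface it to the user.
func podConditionFailed(pod *corev1.Pod, condType corev1.PodConditionType) (bool, string) {
	for _, cond := range pod.Status.Conditions {
		if cond.Type != condType {
			continue
		}
		if cond.Status == corev1.ConditionFalse && strings.Contains(cond.Reason, "Error") {
			return true, cond.Message
		}
	}
	return false, ""
}
```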
diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go index 5f7c5ff8d8..c6790a7fa0 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go @@ -45,7 +45,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -78,7 +78,7 @@ func NewUploadDataSource( } } -func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, uploadDataSource) condition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) @@ -152,7 +152,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to DVCR has started", ) @@ -172,14 +172,14 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceeded") return setQuotaExceededPhaseCondition(cb, &vd.Status.Phase, err, vd.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vd.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitForUserUpload). @@ -196,19 +196,19 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re if ds.statService.IsUploaderReady(pod, svc, ing) { log.Info("Waiting for the user upload", "pod.phase", pod.Status.Phase) - vd.Status.Phase = virtv2.DiskWaitForUserUpload + vd.Status.Phase = v1alpha2.DiskWaitForUserUpload cb. Status(metav1.ConditionFalse). Reason(vdcondition.WaitForUserUpload). Message("Waiting for the user upload.") - vd.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + vd.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } } else { log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningNotStarted). @@ -220,7 +220,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re log.Info("Provisioning to DVCR is in progress", "podPhase", pod.Status.Phase) - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning).
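Further down in this file's diff, getPVCSize derives the claim size from the unpacked size reported by the importer pod. The arithmetic amounts to the following sketch; parameter names are illustrative, and unpackedBytes stands in for statService.GetSize(pod).UnpackedBytes:

```go
package sketch

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// pvcSizeFor keeps the requested size when it covers the unpacked image and
// otherwise falls back to the unpacked size itself, so the claim is never
// created smaller than the data it must hold.
func pvcSizeFor(requested *resource.Quantity, unpackedBytes string) (resource.Quantity, error) {
	unpacked, err := resource.ParseQuantity(unpackedBytes)
	if err != nil {
		return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %q: %w", unpackedBytes, err)
	}
	if requested != nil && requested.Cmp(unpacked) >= 0 {
		return *requested, nil
	}
	return unpacked, nil
}
```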
@@ -237,17 +237,17 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to PVC has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vd.Status.Phase = virtv2.DiskFailed + vd.Status.Phase = v1alpha2.DiskFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vd, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vd, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vdcondition.ProvisioningFailed). @@ -290,7 +290,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re return reconcile.Result{}, err } - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -298,9 +298,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). @@ -308,9 +308,9 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vd.Status.Phase = virtv2.DiskPending + vd.Status.Phase = v1alpha2.DiskPending if dv.Status.ClaimName != "" && isStorageClassWFFC(sc) { - vd.Status.Phase = virtv2.DiskWaitForFirstConsumer + vd.Status.Phase = v1alpha2.DiskWaitForFirstConsumer } cb. Status(metav1.ConditionFalse). @@ -319,7 +319,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re ds.recorder.Event(vd, corev1.EventTypeWarning, vdcondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vd.Status.Phase = virtv2.DiskProvisioning + vd.Status.Phase = v1alpha2.DiskProvisioning cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). @@ -331,11 +331,11 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re ds.recorder.Event( vd, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Upload DataSource import has completed", ) - vd.Status.Phase = virtv2.DiskReady + vd.Status.Phase = v1alpha2.DiskReady cb. Status(metav1.ConditionTrue). Reason(vdcondition.Ready). 
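The hunk above branches on two DataVolume conditions. The lookup itself is a linear scan over dv.Status.Conditions, and the quota branch parks the disk in Pending, or in WaitForFirstConsumer once a claim already exists on a WFFC class. A sketch against the CDI v1beta1 types, with string phases standing in for the v1alpha2 constants:

```go
package sketch

import (
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// findDVCondition is a stand-in for conditions.GetDataVolumeCondition:
// return the condition of the given type, or nil if it is not reported.
func findDVCondition(conds []cdiv1.DataVolumeCondition, t cdiv1.DataVolumeConditionType) *cdiv1.DataVolumeCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

// pendingPhaseFor mirrors the quota-exceeded fallback above.
func pendingPhaseFor(claimCreated, wffc bool) string {
	if claimCreated && wffc {
		return "WaitForFirstConsumer"
	}
	return "Pending"
}
```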
@@ -378,7 +378,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *virtv2.VirtualDisk) (re return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) (bool, error) { +func (ds UploadDataSource) CleanUp(ctx context.Context, vd *v1alpha2.VirtualDisk) (bool, error) { supgen := vdsupplements.NewGenerator(vd) uploaderRequeue, err := ds.uploaderService.CleanUp(ctx, supgen) @@ -394,7 +394,7 @@ func (ds UploadDataSource) CleanUp(ctx context.Context, vd *virtv2.VirtualDisk) return uploaderRequeue || diskRequeue, nil } -func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { supgen := vdsupplements.NewGenerator(vd) uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen) @@ -414,7 +414,7 @@ func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vd *virtv2.Vi } } -func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualDisk) error { +func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualDisk) error { return nil } @@ -422,7 +422,7 @@ func (ds UploadDataSource) Name() string { return uploadDataSource } -func (ds UploadDataSource) getEnvSettings(vd *virtv2.VirtualDisk, supgen supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(vd *v1alpha2.VirtualDisk, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( @@ -452,7 +452,7 @@ func (ds UploadDataSource) getSource(sup supplements.Generator, dvcrSourceImageN } } -func (ds UploadDataSource) getPVCSize(vd *virtv2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { +func (ds UploadDataSource) getPVCSize(vd *v1alpha2.VirtualDisk, pod *corev1.Pod) (resource.Quantity, error) { // Get size from the importer Pod to detect if specified PVC size is enough. 
unpackedSize, err := resource.ParseQuantity(ds.statService.GetSize(pod).UnpackedBytes) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go index 5b7d069b16..b2532f2bff 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/stats.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/stats.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -46,7 +46,7 @@ func NewStatsHandler(stat *service.StatService, importer *service.ImporterServic } } -func (h StatsHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h StatsHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { sinceCreation := time.Since(vd.CreationTimestamp.Time).Truncate(time.Second) readyCondition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) @@ -89,7 +89,7 @@ func (h StatsHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (recon var err error switch vd.Spec.DataSource.Type { - case virtv2.DataSourceTypeUpload: + case v1alpha2.DataSourceTypeUpload: pod, err = h.uploader.GetPod(ctx, supgen) if err != nil { return reconcile.Result{}, err diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go index 6e0cacf862..33e593b915 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -43,7 +43,7 @@ func NewStorageClassReadyHandler(svc StorageClassService) *StorageClassReadyHand } } -func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) { +func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vdcondition.StorageClassReadyType).Generation(vd.Generation) if vd.DeletionTimestamp != nil { @@ -115,7 +115,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vd *virtv2.Virtual return reconcile.Result{}, nil } -func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *virtv2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *v1alpha2.VirtualDisk, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { if pvc.Spec.StorageClassName == nil || 
*pvc.Spec.StorageClassName == "" { return fmt.Errorf("pvc does not have storage class") } @@ -153,7 +153,7 @@ func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vd *vi return nil } -func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) error { vd.Status.StorageClassName = *vd.Spec.PersistentVolumeClaim.StorageClass sc, err := h.svc.GetStorageClass(ctx, *vd.Spec.PersistentVolumeClaim.StorageClass) @@ -205,7 +205,7 @@ func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vd *virtv2.Vi return nil } -func (h StorageClassReadyHandler) setFromModuleSettings(vd *virtv2.VirtualDisk, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromModuleSettings(vd *v1alpha2.VirtualDisk, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vd.Status.StorageClassName = moduleStorageClass.Name if h.svc.IsStorageClassDeprecated(moduleStorageClass) { @@ -234,7 +234,7 @@ func (h StorageClassReadyHandler) setFromModuleSettings(vd *virtv2.VirtualDisk, } } -func (h StorageClassReadyHandler) setFromDefault(vd *virtv2.VirtualDisk, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromDefault(vd *v1alpha2.VirtualDisk, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vd.Status.StorageClassName = defaultStorageClass.Name if h.svc.IsStorageClassDeprecated(defaultStorageClass) { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go index 9af903280f..9470799cef 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/storageclass_ready_test.go @@ -29,14 +29,14 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) var _ = Describe("StorageClassReadyHandler Run", func() { var ( ctx context.Context - vd *virtv2.VirtualDisk + vd *v1alpha2.VirtualDisk pvc *corev1.PersistentVolumeClaim svc *StorageClassServiceMock sc *storagev1.StorageClass @@ -57,15 +57,15 @@ var _ = Describe("StorageClassReadyHandler Run", func() { }, } - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd", Generation: 1, UID: "11111111-1111-1111-1111-111111111111", }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ StorageClassName: sc.Name, - Target: virtv2.DiskTarget{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "test-pvc", }, }, @@ -313,7 +313,7 @@ var _ = Describe("StorageClassReadyHandler Run", func() { }) }) -func ExpectStorageClassReadyCondition(vd *virtv2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.StorageClassReadyReason, msgExists bool) { +func ExpectStorageClassReadyCondition(vd 
*v1alpha2.VirtualDisk, status metav1.ConditionStatus, reason vdcondition.StorageClassReadyReason, msgExists bool) { ready, _ := conditions.GetCondition(vdcondition.StorageClassReadyType, vd.Status.Conditions) Expect(ready.Status).To(Equal(status)) Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go index 71d814dd1a..4c43300f69 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/supplements/supplements.go @@ -21,7 +21,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ supplements.Generator = &VirtualDiskGenerator{} @@ -31,7 +31,7 @@ type VirtualDiskGenerator struct { claimName string } -func NewGenerator(vd *virtv2.VirtualDisk) *VirtualDiskGenerator { +func NewGenerator(vd *v1alpha2.VirtualDisk) *VirtualDiskGenerator { return &VirtualDiskGenerator{ Generator: supplements.NewGenerator(annotations.VDShortName, vd.Name, vd.Namespace, vd.UID), claimName: vd.Status.Target.PersistentVolumeClaim, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go index 692a1f7d8c..a8059648a1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/iso_source_validator.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ISOSourceValidator struct { @@ -37,18 +37,18 @@ func NewISOSourceValidator(client client.Client) *ISOSourceValidator { return &ISOSourceValidator{client: client} } -func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) { if vd.Spec.DataSource == nil { return nil, nil } - if vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil { + if vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil { return nil, nil } switch vd.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage, - virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage, + v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, v.client) if err != nil { return nil, err @@ -68,18 +68,18 @@ func (v *ISOSourceValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virt return nil, nil } -func (v *ISOSourceValidator) ValidateUpdate(ctx context.Context, _, newVD *virtv2.VirtualDisk) (admission.Warnings, 
error) { +func (v *ISOSourceValidator) ValidateUpdate(ctx context.Context, _, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) { if newVD.Spec.DataSource == nil { return nil, nil } - if newVD.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil { + if newVD.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil { return nil, nil } switch newVD.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage, - virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage, + v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, newVD.Spec.DataSource, newVD, v.client) if err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go index d467260580..53f8ea41a0 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/name_validator.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/deckhouse/virtualization-controller/pkg/common/validate" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type NameValidator struct{} @@ -33,7 +33,7 @@ func NewNameValidator() *NameValidator { return &NameValidator{} } -func (v *NameValidator) ValidateCreate(_ context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *NameValidator) ValidateCreate(_ context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) { if strings.Contains(vd.Name, ".") { return nil, fmt.Errorf("the VirtualDisk name %q is invalid: '.' 
is forbidden, allowed name symbols are [0-9a-zA-Z-]", vd.Name) } @@ -45,7 +45,7 @@ func (v *NameValidator) ValidateCreate(_ context.Context, vd *virtv2.VirtualDisk return nil, nil } -func (v *NameValidator) ValidateUpdate(_ context.Context, _, newVD *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *NameValidator) ValidateUpdate(_ context.Context, _, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) { var warnings admission.Warnings if strings.Contains(newVD.Name, ".") { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go index 516a0305be..7f18cbec08 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/pvc_size_validator.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -46,7 +46,7 @@ func NewPVCSizeValidator(client client.Client) *PVCSizeValidator { return &PVCSizeValidator{client: client} } -func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *v1alpha2.VirtualDisk) (admission.Warnings, error) { if vd.Spec.PersistentVolumeClaim.Size != nil && vd.Spec.PersistentVolumeClaim.Size.IsZero() { return nil, fmt.Errorf("virtual disk size must be greater than 0") } @@ -55,15 +55,15 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua return nil, fmt.Errorf("if the data source is not specified, it's necessary to set spec.PersistentVolumeClaim.size to create blank virtual disk") } - if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vd.Spec.DataSource.ObjectRef == nil { return nil, nil } var unpackedSize resource.Quantity switch vd.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage, - virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage, + v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, vd.Spec.DataSource, vd, v.client) if err != nil { return nil, err @@ -78,16 +78,16 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua return nil, fmt.Errorf("failed to parse unpacked bytes %s: %w", unpackedSize.String(), err) } - case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot: + case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot: vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{ Name: vd.Spec.DataSource.ObjectRef.Name, Namespace: vd.Namespace, - }, v.client, &virtv2.VirtualDiskSnapshot{}) + }, v.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, err } - if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot == nil || vdSnapshot.Status.Phase != 
v1alpha2.VirtualDiskSnapshotPhaseReady { return nil, nil } @@ -121,9 +121,9 @@ func (v *PVCSizeValidator) ValidateCreate(ctx context.Context, vd *virtv2.Virtua } } -func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) { sizeEqual := equality.Semantic.DeepEqual(oldVD.Spec.PersistentVolumeClaim.Size, newVD.Spec.PersistentVolumeClaim.Size) - if oldVD.Status.Phase == virtv2.DiskMigrating && !sizeEqual { + if oldVD.Status.Phase == v1alpha2.DiskMigrating && !sizeEqual { return nil, errors.New("spec.persistentVolumeClaim.size cannot be changed during migration. Please wait for the migration to finish") } @@ -143,16 +143,16 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir if s := newVD.Spec.PersistentVolumeClaim.Size; s != nil { newSize = *s } else if ready.Status == metav1.ConditionTrue || - newVD.Status.Phase != virtv2.DiskPending && - newVD.Status.Phase != virtv2.DiskProvisioning && - newVD.Status.Phase != virtv2.DiskWaitForFirstConsumer { + newVD.Status.Phase != v1alpha2.DiskPending && + newVD.Status.Phase != v1alpha2.DiskProvisioning && + newVD.Status.Phase != v1alpha2.DiskWaitForFirstConsumer { return nil, errors.New("spec.persistentVolumeClaim.size cannot be omitted once set") } if ready.Status == metav1.ConditionTrue || - newVD.Status.Phase != virtv2.DiskPending && - newVD.Status.Phase != virtv2.DiskProvisioning && - newVD.Status.Phase != virtv2.DiskWaitForFirstConsumer { + newVD.Status.Phase != v1alpha2.DiskPending && + newVD.Status.Phase != v1alpha2.DiskProvisioning && + newVD.Status.Phase != v1alpha2.DiskWaitForFirstConsumer { if newSize.Cmp(oldSize) == common.CmpLesser { return nil, fmt.Errorf( "spec.persistentVolumeClaim.size value (%s) should be greater than or equal to the current value (%s)", @@ -162,15 +162,15 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir } } - if newVD.Spec.DataSource == nil || newVD.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil { + if newVD.Spec.DataSource == nil || newVD.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || newVD.Spec.DataSource.ObjectRef == nil { return nil, nil } var unpackedSize resource.Quantity switch newVD.Spec.DataSource.ObjectRef.Kind { - case virtv2.VirtualDiskObjectRefKindVirtualImage, - virtv2.VirtualDiskObjectRefKindClusterVirtualImage: + case v1alpha2.VirtualDiskObjectRefKindVirtualImage, + v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage: dvcrDataSource, err := controller.NewDVCRDataSourcesForVMD(ctx, newVD.Spec.DataSource, newVD, v.client) if err != nil { return nil, err @@ -185,16 +185,16 @@ func (v *PVCSizeValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *vir return nil, fmt.Errorf("failed to parse unpacked bytes %s: %w", unpackedSize.String(), err) } - case virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot: + case v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot: vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{ Name: newVD.Spec.DataSource.ObjectRef.Name, Namespace: newVD.Namespace, - }, v.client, &virtv2.VirtualDiskSnapshot{}) + }, v.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, err } - if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot == nil || vdSnapshot.Status.Phase != 
v1alpha2.VirtualDiskSnapshotPhaseReady { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go index c8ee9eec83..424f8686a7 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service" "github.com/deckhouse/virtualization-controller/pkg/featuregates" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -46,7 +46,7 @@ func NewSpecChangesValidator(client client.Client, scService *intsvc.VirtualDisk } } -func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) { if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass != "" { sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass) if err != nil { @@ -63,14 +63,14 @@ func (v *SpecChangesValidator) ValidateCreate(ctx context.Context, newVD *virtv2 return nil, nil } -func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *virtv2.VirtualDisk) (admission.Warnings, error) { +func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD *v1alpha2.VirtualDisk) (admission.Warnings, error) { if oldVD.Generation == newVD.Generation { return nil, nil } ready, _ := conditions.GetCondition(vdcondition.ReadyType, newVD.Status.Conditions) switch { - case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == virtv2.DiskReady, newVD.Status.Phase == virtv2.DiskLost: + case ready.Status == metav1.ConditionTrue, newVD.Status.Phase == v1alpha2.DiskReady, newVD.Status.Phase == v1alpha2.DiskLost: if !reflect.DeepEqual(oldVD.Spec.DataSource, newVD.Spec.DataSource) { return nil, errors.New("data source cannot be changed if the VirtualDisk has already been provisioned") } @@ -82,30 +82,30 @@ func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD return nil, errors.New("storage class cannot be changed if the VirtualDisk not mounted to virtual machine") } - vm := &virtv2.VirtualMachine{} + vm := &v1alpha2.VirtualMachine{} err := v.client.Get(ctx, client.ObjectKey{Name: vmName, Namespace: newVD.Namespace}, vm) if err != nil { return nil, err } - if !(vm.Status.Phase == virtv2.MachineRunning || vm.Status.Phase == virtv2.MachineMigrating) { + if !(vm.Status.Phase == v1alpha2.MachineRunning || vm.Status.Phase == v1alpha2.MachineMigrating) { return nil, errors.New("storage class cannot be changed unless the VirtualDisk is mounted to a running virtual machine") } for _, bd := range vm.Status.BlockDeviceRefs { - if bd.Kind == virtv2.DiskDevice && bd.Name == oldVD.Name && bd.Hotplugged { - return nil, errors.New("storage class cannot be changed if the VirtualDisk is hotplugged to a running virtual machine") + if bd.Hotplugged { + return nil, errors.New("for now, changing the storage class is not 
allowed if the virtual machine has hot-plugged block devices") } } } else { return nil, errors.New("storage class cannot be changed if the VirtualDisk has already been provisioned") } } - case newVD.Status.Phase == virtv2.DiskTerminating: + case newVD.Status.Phase == v1alpha2.DiskTerminating: if !reflect.DeepEqual(oldVD.Spec, newVD.Spec) { return nil, errors.New("spec cannot be changed if the VirtualDisk is the process of termination") } - case newVD.Status.Phase == virtv2.DiskPending: + case newVD.Status.Phase == v1alpha2.DiskPending: if newVD.Spec.PersistentVolumeClaim.StorageClass != nil && *newVD.Spec.PersistentVolumeClaim.StorageClass != "" { sc, err := v.scService.GetStorageClass(ctx, *newVD.Spec.PersistentVolumeClaim.StorageClass) if err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go index ddd9d50668..ffbaccf88d 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/datavolume_watcher.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DataVolumeWatcher struct{} @@ -44,7 +44,7 @@ func (w *DataVolumeWatcher) Watch(mgr manager.Manager, ctr controller.Controller handler.TypedEnqueueRequestForOwner[*cdiv1.DataVolume]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualDisk{}, + &v1alpha2.VirtualDisk{}, handler.OnlyControllerOwner(), ), predicate.TypedFuncs[*cdiv1.DataVolume]{ diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go index 2fee36c3ed..d07b409879 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/pvc_watcher.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/datavolume" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PersistentVolumeClaimWatcher struct { @@ -72,7 +72,7 @@ func (w PersistentVolumeClaimWatcher) Watch(mgr manager.Manager, ctr controller. 
func (w PersistentVolumeClaimWatcher) enqueueRequestsFromOwnerRefsRecursively(ctx context.Context, obj client.Object) (requests []reconcile.Request) { for _, ownerRef := range obj.GetOwnerReferences() { switch ownerRef.Kind { - case virtv2.VirtualDiskKind: + case v1alpha2.VirtualDiskKind: requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ownerRef.Name, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go index 4b40c0ede6..c99dc923ae 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/resource_quota_watcher.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -69,7 +69,7 @@ func (w ResourceQuotaWatcher) Watch(mgr manager.Manager, ctr controller.Controll } func (w ResourceQuotaWatcher) enqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { - var vds virtv2.VirtualDiskList + var vds v1alpha2.VirtualDiskList err := w.client.List(ctx, &vds, client.InNamespace(obj.GetNamespace())) if err != nil { w.logger.Error(fmt.Sprintf("failed to get virtual disks: %s", err)) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go index 230daacf53..9bb5263719 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/storageclass_watcher.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type StorageClassWatcher struct { @@ -47,7 +47,7 @@ type StorageClassWatcher struct { func NewStorageClassWatcher(client client.Client) *StorageClassWatcher { return &StorageClassWatcher{ client: client, - logger: slog.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskKind)), + logger: slog.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskKind)), } } @@ -87,7 +87,7 @@ func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1. 
fieldSelector := fields.OneTermEqualSelector(indexer.IndexFieldVDByStorageClass, selectorValue) - var vds virtv2.VirtualDiskList + var vds v1alpha2.VirtualDiskList err := w.client.List(ctx, &vds, &client.ListOptions{FieldSelector: fieldSelector}) if err != nil { w.logger.Error(fmt.Sprintf("failed to list virtual disks: %v", err)) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go index c7f8d65764..189b2b6f63 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/vdsnapshot_watcher.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -45,17 +45,17 @@ type VirtualDiskSnapshotWatcher struct { func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher { return &VirtualDiskSnapshotWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)), client: client, } } func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -66,12 +66,12 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { // 1. Need to reconcile the virtual disk from which the snapshot was taken. vd, err := object.FetchObject(ctx, types.NamespacedName{ Name: vdSnapshot.Spec.VirtualDiskName, Namespace: vdSnapshot.Namespace, - }, w.client, &virtv2.VirtualDisk{}) + }, w.client, &v1alpha2.VirtualDisk{}) if err != nil { w.logger.Error(fmt.Sprintf("failed to get virtual disk: %s", err)) return @@ -89,7 +89,7 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps } // Need to reconcile the virtual disk with the snapshot data source. 
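The List call in the hunk just below is the same index-backed fan-out the storage-class watcher above uses: list only the VirtualDisks whose pre-registered field index matches, then enqueue each hit. As a generic helper, where indexField stands for a key such as indexer.IndexFieldVDByVDSnapshot and is assumed to be registered with the manager's cache:

```go
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// enqueueByIndex lists VirtualDisks whose indexed field equals value and
// converts each into a reconcile.Request, avoiding a full namespace scan.
func enqueueByIndex(ctx context.Context, c client.Client, indexField, value string) ([]reconcile.Request, error) {
	var vds v1alpha2.VirtualDiskList
	if err := c.List(ctx, &vds, &client.ListOptions{
		FieldSelector: fields.OneTermEqualSelector(indexField, value),
	}); err != nil {
		return nil, err
	}
	requests := make([]reconcile.Request, 0, len(vds.Items))
	for _, vd := range vds.Items {
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{Name: vd.Name, Namespace: vd.Namespace},
		})
	}
	return requests, nil
}
```

The import of the v1alpha2 package without an alias follows the convention this patch introduces across the controller.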
- var vds virtv2.VirtualDiskList + var vds v1alpha2.VirtualDiskList err = w.client.List(ctx, &vds, &client.ListOptions{ Namespace: vdSnapshot.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVDByVDSnapshot, vdSnapshot.Name), @@ -116,12 +116,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps return } -func isSnapshotDataSource(ds *virtv2.VirtualDiskDataSource, vdSnapshotName string) bool { - if ds == nil || ds.Type != virtv2.DataSourceTypeObjectRef { +func isSnapshotDataSource(ds *v1alpha2.VirtualDiskDataSource, vdSnapshotName string) bool { + if ds == nil || ds.Type != v1alpha2.DataSourceTypeObjectRef { return false } - if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot { + if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot { return false } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go index d59a1aff86..c177fbd19f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/watcher/virtualmachine_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct{} @@ -40,16 +40,16 @@ func NewVirtualMachineWatcher() *VirtualMachineWatcher { func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueDisksAttachedToVM), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedDisks(e.Object) }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedDisks(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return w.vmHasAttachedDisks(e.ObjectOld) || w.vmHasAttachedDisks(e.ObjectNew) }, }, @@ -60,11 +60,11 @@ func (w *VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Contro return nil } -func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { +func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { var requests []reconcile.Request for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind != virtv2.DiskDevice { + if bdr.Kind != v1alpha2.DiskDevice { continue } @@ -77,9 +77,9 @@ func (w *VirtualMachineWatcher) enqueueDisksAttachedToVM(_ context.Context, vm * return requests } -func (w *VirtualMachineWatcher) vmHasAttachedDisks(vm *virtv2.VirtualMachine) bool { +func (w *VirtualMachineWatcher) vmHasAttachedDisks(vm *v1alpha2.VirtualMachine) bool { for _, bda 
:= range vm.Status.BlockDeviceRefs { - if bda.Kind == virtv2.DiskDevice { + if bda.Kind == v1alpha2.DiskDevice { return true } } diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go index 010f080e51..03081eed00 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_controller.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_controller.go @@ -38,7 +38,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization-controller/pkg/logger" vdcolelctor "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vd" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -49,7 +49,7 @@ const ( ) type Condition interface { - Handle(ctx context.Context, vd *virtv2.VirtualDisk) error + Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) error } func NewController( @@ -63,7 +63,7 @@ func NewController( storageClassSettings config.VirtualDiskStorageClassSettings, ) (controller.Controller, error) { stat := service.NewStatService(log) - protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVDProtection) + protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVDProtection) importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection) disk := service.NewDiskService(mgr.GetClient(), dvcr, protection, ControllerName) @@ -73,10 +73,10 @@ func NewController( blank := source.NewBlankDataSource(recorder, disk, mgr.GetClient()) sources := source.NewSources() - sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient())) - sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient())) - sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, disk, mgr.GetClient())) - sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, disk, dvcr, mgr.GetClient())) + sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient())) + sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, disk, dvcr, mgr.GetClient())) + sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, disk, mgr.GetClient())) + sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, disk, dvcr, mgr.GetClient())) reconciler := NewReconciler( mgr.GetClient(), @@ -109,7 +109,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.VirtualDisk{}). + For(&v1alpha2.VirtualDisk{}). WithValidator(NewValidator(mgr.GetClient(), scService)). 
Complete(); err != nil { return nil, err diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go index 529e83bb36..654caf4b0f 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_reconciler.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/controller/watchers" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Watcher interface { @@ -41,7 +41,7 @@ type Watcher interface { } type Handler interface { - Handle(ctx context.Context, vd *virtv2.VirtualDisk) (reconcile.Result, error) + Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (reconcile.Result, error) } type Reconciler struct { @@ -88,20 +88,20 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualDisk]{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualDisk]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualDisk: %w", err) } - vdFromVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.VirtualDiskObjectRefKindVirtualImage) + vdFromVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.VirtualDiskObjectRefKindVirtualImage) viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), vdFromVIEnqueuer) if err := viWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on VIs: %w", err) } - vdFromCVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.VirtualDiskObjectRefKindClusterVirtualImage) + vdFromCVIEnqueuer := watchers.NewVirtualDiskRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage) cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), vdFromCVIEnqueuer) if err := cviWatcher.Run(mgr, ctr); err != nil { return fmt.Errorf("error setting watch on CVIs: %w", err) @@ -125,10 +125,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr return nil } -func (r *Reconciler) factory() *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{} +func (r *Reconciler) factory() *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualDisk) virtv2.VirtualDiskStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualDisk) v1alpha2.VirtualDiskStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go index 3aa8f7530c..1b6ea816f6 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go @@ -27,12 +27,12 @@ import ( intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service" 
"github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/validator" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskValidator interface { - ValidateCreate(ctx context.Context, vm *virtv2.VirtualDisk) (admission.Warnings, error) - ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualDisk) (admission.Warnings, error) + ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualDisk) (admission.Warnings, error) + ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualDisk) (admission.Warnings, error) } type Validator struct { @@ -51,7 +51,7 @@ func NewValidator(client client.Client, scService *intsvc.VirtualDiskStorageClas } func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - vd, ok := obj.(*virtv2.VirtualDisk) + vd, ok := obj.(*v1alpha2.VirtualDisk) if !ok { return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", obj) } @@ -72,12 +72,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm } func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - newVD, ok := newObj.(*virtv2.VirtualDisk) + newVD, ok := newObj.(*v1alpha2.VirtualDisk) if !ok { return nil, fmt.Errorf("expected a new VirtualDisk but got a %T", newObj) } - oldVD, ok := oldObj.(*virtv2.VirtualDisk) + oldVD, ok := oldObj.(*v1alpha2.VirtualDisk) if !ok { return nil, fmt.Errorf("expected an old VirtualDisk but got a %T", oldObj) } diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go index ee7e69b8eb..9731011350 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/deletion.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -39,7 +39,7 @@ func NewDeletionHandler(snapshotter *service.SnapshotService) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if vdSnapshot.DeletionTimestamp != nil { @@ -53,7 +53,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualD return reconcile.Result{}, err } - var vm *virtv2.VirtualMachine + var vm *v1alpha2.VirtualMachine if vd != nil { vm, err = getVirtualMachine(ctx, vd, h.snapshotter) if err != nil { @@ -85,10 +85,10 @@ func (h DeletionHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualD log.Info("Deletion observed: remove cleanup finalizer from VirtualDiskSnapshot") - controllerutil.RemoveFinalizer(vdSnapshot, virtv2.FinalizerVDSnapshotCleanup) + controllerutil.RemoveFinalizer(vdSnapshot, v1alpha2.FinalizerVDSnapshotCleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vdSnapshot, 
virtv2.FinalizerVDSnapshotCleanup) + controllerutil.AddFinalizer(vdSnapshot, v1alpha2.FinalizerVDSnapshotCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go index 92018fd243..845deafc10 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/interfaces.go @@ -22,24 +22,24 @@ import ( vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . VirtualDiskReadySnapshotter LifeCycleSnapshotter type VirtualDiskReadySnapshotter interface { - GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) + GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) } type LifeCycleSnapshotter interface { Freeze(ctx context.Context, name, namespace string) error - IsFrozen(vm *virtv2.VirtualMachine) bool - CanFreeze(vm *virtv2.VirtualMachine) bool - CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) + IsFrozen(vm *v1alpha2.VirtualMachine) bool + CanFreeze(vm *v1alpha2.VirtualMachine) bool + CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) Unfreeze(ctx context.Context, name, namespace string) error CreateVolumeSnapshot(ctx context.Context, vs *vsv1.VolumeSnapshot) (*vsv1.VolumeSnapshot, error) GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error) - GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) - GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) + GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) + GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) GetVolumeSnapshot(ctx context.Context, name, namespace string) (*vsv1.VolumeSnapshot, error) } diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go index 82e376cf22..94c6c55e98 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -47,7 +47,7 @@ func NewLifeCycleHandler(snapshotter LifeCycleSnapshotter) *LifeCycleHandler { } } -func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot 
*v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle")) cb := conditions.NewConditionBuilder(vdscondition.VirtualDiskSnapshotReadyType).Generation(vdSnapshot.Generation) @@ -78,7 +78,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual } if vdSnapshot.DeletionTimestamp != nil { - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseTerminating + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseTerminating cb. Status(metav1.ConditionUnknown). Reason(conditions.ReasonUnknown). @@ -89,17 +89,17 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual switch vdSnapshot.Status.Phase { case "": - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending - case virtv2.VirtualDiskSnapshotPhaseFailed: + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending + case v1alpha2.VirtualDiskSnapshotPhaseFailed: readyCondition, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) cb. Status(metav1.ConditionFalse). Reason(conditions.CommonReason(readyCondition.Reason)). Message(readyCondition.Message) return reconcile.Result{}, nil - case virtv2.VirtualDiskSnapshotPhaseReady: + case v1alpha2.VirtualDiskSnapshotPhaseReady: if vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed cb. Status(metav1.ConditionFalse). Reason(vdscondition.VolumeSnapshotLost). @@ -107,7 +107,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual return reconcile.Result{Requeue: true}, nil } - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseReady + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseReady vdSnapshot.Status.VolumeSnapshotName = vs.Name cb. Status(metav1.ConditionTrue). @@ -119,7 +119,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual virtualDiskReadyCondition, _ := conditions.GetCondition(vdscondition.VirtualDiskReadyType, vdSnapshot.Status.Conditions) if vd == nil || virtualDiskReadyCondition.Status != metav1.ConditionTrue { - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending cb. Status(metav1.ConditionFalse). Reason(vdscondition.WaitingForTheVirtualDisk). @@ -137,7 +137,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual } if pvc == nil || pvc.Status.Phase != corev1.ClaimBound { - vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending + vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending cb. Status(metav1.ConditionFalse). Reason(vdscondition.WaitingForTheVirtualDisk). 
@@ -153,12 +153,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 
 	switch {
 	case vs == nil:
-		if vm != nil && vm.Status.Phase != virtv2.MachineStopped && !h.snapshotter.IsFrozen(vm) {
+		if vm != nil && vm.Status.Phase != v1alpha2.MachineStopped && !h.snapshotter.IsFrozen(vm) {
 			if h.snapshotter.CanFreeze(vm) {
 				log.Debug("Freeze the virtual machine to take a snapshot")
 
-				if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhasePending {
-					vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+				if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhasePending {
+					vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 					cb.
 						Status(metav1.ConditionFalse).
 						Reason(vdscondition.Snapshotting).
@@ -172,7 +172,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 					return reconcile.Result{}, err
 				}
 
-				vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+				vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdscondition.FileSystemFreezing).
@@ -184,7 +184,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 
 			if vdSnapshot.Spec.RequiredConsistency {
-				vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhasePending
+				vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhasePending
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vdscondition.PotentiallyInconsistent)
@@ -204,7 +204,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 					"The virtual machine %q with an attached virtual disk %q is %s: "+
 						"the snapshotting of virtual disk might result in an inconsistent snapshot: "+
 						"waiting for the virtual machine to be %s or the disk to be detached",
-					vm.Name, vd.Name, vm.Status.Phase, virtv2.MachineStopped,
+					vm.Name, vd.Name, vm.Status.Phase, v1alpha2.MachineStopped,
 				))
 			}
 
@@ -212,8 +212,8 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 		}
 
-		if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhasePending {
-			vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhasePending {
+			vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vdscondition.Snapshotting).
@@ -279,7 +279,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			return reconcile.Result{}, err
 		}
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionFalse).
@@ -289,7 +289,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	case vs.Status != nil && vs.Status.Error != nil && vs.Status.Error.Message != nil:
 		log.Debug("The volume snapshot has an error")
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vdscondition.VirtualDiskSnapshotFailed).
@@ -298,7 +298,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	case vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse:
 		log.Debug("Waiting for the volume snapshot to be ready to use")
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionFalse).
@@ -309,7 +309,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 		log.Debug("The volume snapshot is ready to use")
 
 		switch {
-		case vm == nil, vm.Status.Phase == virtv2.MachineStopped:
+		case vm == nil, vm.Status.Phase == v1alpha2.MachineStopped:
 			vdSnapshot.Status.Consistent = ptr.To(true)
 		case h.snapshotter.IsFrozen(vm):
 			vdSnapshot.Status.Consistent = ptr.To(true)
@@ -332,7 +332,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 			}
 		}
 
-		vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseReady
+		vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseReady
 		vdSnapshot.Status.VolumeSnapshotName = vs.Name
 		cb.
 			Status(metav1.ConditionTrue).
@@ -343,7 +343,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vdSnapshot *virtv2.Virtual
 	}
 }
 
-func getVirtualMachine(ctx context.Context, vd *virtv2.VirtualDisk, snapshotter LifeCycleSnapshotter) (*virtv2.VirtualMachine, error) {
+func getVirtualMachine(ctx context.Context, vd *v1alpha2.VirtualDisk, snapshotter LifeCycleSnapshotter) (*v1alpha2.VirtualMachine, error) {
 	if vd == nil {
 		return nil, nil
 	}
@@ -364,16 +364,16 @@ func getVirtualMachine(ctx context.Context, vd *virtv2.VirtualDisk, snapshotter
 	}
 }
 
-func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.VirtualDiskSnapshotPhase, err error) {
-	*phase = virtv2.VirtualDiskSnapshotPhaseFailed
+func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualDiskSnapshotPhase, err error) {
+	*phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vdscondition.VirtualDiskSnapshotFailed).
 		Message(service.CapitalizeFirstLetter(err.Error()))
 }
 
-func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) error {
-	if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseFailed {
+func (h LifeCycleHandler) unfreezeFilesystemIfFailed(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) error {
+	if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseFailed {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go
index c92ec47a40..56200e6019 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/life_cycle_test.go
@@ -27,7 +27,7 @@ import (
 	"k8s.io/utils/ptr"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -35,9 +35,9 @@
 var _ = Describe("LifeCycle handler", func() {
 	var snapshotter *LifeCycleSnapshotterMock
 	var pvc *corev1.PersistentVolumeClaim
-	var vd *virtv2.VirtualDisk
+	var vd *v1alpha2.VirtualDisk
 	var vs *vsv1.VolumeSnapshot
-	var vdSnapshot *virtv2.VirtualDiskSnapshot
+	var vdSnapshot *v1alpha2.VirtualDiskSnapshot
 
 	BeforeEach(func() {
 		pvc = &corev1.PersistentVolumeClaim{
@@ -47,10 +47,10 @@ var _ = Describe("LifeCycle handler", func() {
 			},
 		}
 
-		vd = &virtv2.VirtualDisk{
+		vd = &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{Name: "vd-01"},
-			Status: virtv2.VirtualDiskStatus{
-				Target: virtv2.DiskTarget{
+			Status: v1alpha2.VirtualDiskStatus{
+				Target: v1alpha2.DiskTarget{
 					PersistentVolumeClaim: pvc.Name,
 				},
 			},
@@ -60,10 +60,10 @@ var _ = Describe("LifeCycle handler", func() {
 			ObjectMeta: metav1.ObjectMeta{Name: "vs-01"},
 		}
 
-		vdSnapshot = &virtv2.VirtualDiskSnapshot{
+		vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
 			ObjectMeta: metav1.ObjectMeta{Name: "vdsnapshot"},
-			Spec:       virtv2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name},
-			Status: virtv2.VirtualDiskSnapshotStatus{
+			Spec:       v1alpha2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name},
+			Status: v1alpha2.VirtualDiskSnapshotStatus{
 				Conditions: []metav1.Condition{
 					{
 						Type:   vdscondition.VirtualDiskReadyType.String(),
@@ -80,10 +80,10 @@ var _ = Describe("LifeCycle handler", func() {
 			GetPersistentVolumeClaimFunc: func(_ context.Context, _, _ string) (*corev1.PersistentVolumeClaim, error) {
 				return pvc, nil
 			},
-			GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
+			GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
 				return vd, nil
 			},
-			GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) {
+			GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) {
 				return nil, nil
 			},
 			GetVolumeSnapshotFunc: func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) {
@@ -98,7 +98,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String()))
@@ -119,7 +119,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseFailed))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseFailed))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotFailed.String()))
@@ -135,7 +135,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String()))
@@ -154,7 +154,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseReady))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseReady))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionTrue))
 			Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotReady.String()))
@@ -163,32 +163,32 @@ var _ = Describe("LifeCycle handler", func() {
 	})
 
 	Context("The virtual disk snapshot with virtual machine", func() {
-		var vm *virtv2.VirtualMachine
+		var vm *v1alpha2.VirtualMachine
 		BeforeEach(func() {
-			vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseInProgress
+			vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseInProgress
 
-			vm = &virtv2.VirtualMachine{
+			vm = &v1alpha2.VirtualMachine{
 				ObjectMeta: metav1.ObjectMeta{Name: "vm"},
-				Status: virtv2.VirtualMachineStatus{
-					Phase: virtv2.MachineRunning,
+				Status: v1alpha2.VirtualMachineStatus{
+					Phase: v1alpha2.MachineRunning,
 				},
 			}
 
-			vd.Status.AttachedToVirtualMachines = []virtv2.AttachedVirtualMachine{{Name: vm.Name}}
+			vd.Status.AttachedToVirtualMachines = []v1alpha2.AttachedVirtualMachine{{Name: vm.Name}}
 
-			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) {
+			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) {
 				return vm, nil
 			}
-			snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return false
 			}
-			snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return true
 			}
 			snapshotter.FreezeFunc = func(_ context.Context, _, _ string) error {
 				return nil
 			}
-			snapshotter.CanUnfreezeWithVirtualDiskSnapshotFunc = func(_ context.Context, _ string, _ *virtv2.VirtualMachine) (bool, error) {
+			snapshotter.CanUnfreezeWithVirtualDiskSnapshotFunc = func(_ context.Context, _ string, _ *v1alpha2.VirtualMachine) (bool, error) {
 				return true, nil
 			}
 			snapshotter.UnfreezeFunc = func(_ context.Context, _, _ string) error {
@@ -201,7 +201,7 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.FileSystemFreezing.String()))
@@ -209,15 +209,15 @@ var _ = Describe("LifeCycle handler", func() {
 		})
 
 		It("No need to freeze virtual machine", func() {
-			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) {
-				vm.Status.Phase = virtv2.MachineStopped
+			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) {
+				vm.Status.Phase = v1alpha2.MachineStopped
 				return vm, nil
 			}
 			h := NewLifeCycleHandler(snapshotter)
 
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String()))
@@ -226,14 +226,14 @@ var _ = Describe("LifeCycle handler", func() {
 		It("Cannot freeze virtual machine: deny potentially inconsistent", func() {
 			vdSnapshot.Spec.RequiredConsistency = true
-			snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return false
 			}
 			h := NewLifeCycleHandler(snapshotter)
 
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhasePending))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhasePending))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.PotentiallyInconsistent.String()))
@@ -242,14 +242,14 @@ var _ = Describe("LifeCycle handler", func() {
 		It("Cannot freeze virtual machine: allow potentially inconsistent", func() {
 			vdSnapshot.Spec.RequiredConsistency = false
-			snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return false
 			}
 			h := NewLifeCycleHandler(snapshotter)
 
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseInProgress))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseInProgress))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionFalse))
 			Expect(ready.Reason).To(Equal(vdscondition.Snapshotting.String()))
@@ -257,7 +257,7 @@ var _ = Describe("LifeCycle handler", func() {
 		})
 
 		It("Unfreeze virtual machine", func() {
-			snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return true
 			}
 			snapshotter.GetVolumeSnapshotFunc = func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) {
@@ -270,17 +270,17 @@ var _ = Describe("LifeCycle handler", func() {
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseReady))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseReady))
 			ready, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions)
 			Expect(ready.Status).To(Equal(metav1.ConditionTrue))
 			Expect(ready.Reason).To(Equal(vdscondition.VirtualDiskSnapshotReady.String()))
 			Expect(ready.Message).To(BeEmpty())
 		})
 
-		DescribeTable("Check unfreeze if failed", func(vm *virtv2.VirtualMachine, expectUnfreezing bool) {
+		DescribeTable("Check unfreeze if failed", func(vm *v1alpha2.VirtualMachine, expectUnfreezing bool) {
 			unFreezeCalled := false
-			snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool {
+			snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool {
 				return true
 			}
 			snapshotter.GetVolumeSnapshotFunc = func(_ context.Context, _, _ string) (*vsv1.VolumeSnapshot, error) {
@@ -293,22 +293,22 @@ var _ = Describe("LifeCycle handler", func() {
 				unFreezeCalled = true
 				return nil
 			}
-			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) {
+			snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) {
 				return vm, nil
 			}
 			h := NewLifeCycleHandler(snapshotter)
 
-			vdSnapshot.Status.Phase = virtv2.VirtualDiskSnapshotPhaseFailed
+			vdSnapshot.Status.Phase = v1alpha2.VirtualDiskSnapshotPhaseFailed
 			_, err := h.Handle(testContext(), vdSnapshot)
 			Expect(err).To(BeNil())
 
-			Expect(vdSnapshot.Status.Phase).To(Equal(virtv2.VirtualDiskSnapshotPhaseFailed))
+			Expect(vdSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualDiskSnapshotPhaseFailed))
 			Expect(unFreezeCalled).To(Equal(expectUnfreezing))
 		},
 			Entry("Has VM with frozen filesystem",
-				&virtv2.VirtualMachine{
-					Status: virtv2.VirtualMachineStatus{
+				&v1alpha2.VirtualMachine{
+					Status: v1alpha2.VirtualMachineStatus{
 						Conditions: []metav1.Condition{
 							{
 								Type:   vmcondition.TypeFilesystemFrozen.String(),
@@ -319,7 +319,7 @@ var _ = Describe("LifeCycle handler", func() {
 				},
 				true,
 			),
-			Entry("Has VM with unfrozen filesystem", &virtv2.VirtualMachine{}, false),
+			Entry("Has VM with unfrozen filesystem", &v1alpha2.VirtualMachine{}, false),
 			Entry("Has no VM", nil, false),
 		)
 	})
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/mock.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/mock.go
index b5e9a0e217..a063e0df10 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/mock.go
@@ -5,7 +5,7 @@
 import (
 	"context"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	corev1 "k8s.io/api/core/v1"
 	"sync"
@@ -21,7 +21,7 @@ var _ VirtualDiskReadySnapshotter = &VirtualDiskReadySnapshotterMock{}
//
//	// make and configure a mocked VirtualDiskReadySnapshotter
//	mockedVirtualDiskReadySnapshotter := &VirtualDiskReadySnapshotterMock{
-//		GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) {
+//		GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) {
//			panic("mock out the GetVirtualDisk method")
//		},
//	}
@@ -32,7 +32,7 @@ var _ VirtualDiskReadySnapshotter = &VirtualDiskReadySnapshotterMock{}
// }
type VirtualDiskReadySnapshotterMock struct {
 	// GetVirtualDiskFunc mocks the GetVirtualDisk method.
-	GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error)
+	GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error)
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -50,7 +50,7 @@ type VirtualDiskReadySnapshotterMock struct {
 }
 
 // GetVirtualDisk calls GetVirtualDiskFunc.
-func (mock *VirtualDiskReadySnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) {
+func (mock *VirtualDiskReadySnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) {
 	if mock.GetVirtualDiskFunc == nil {
 		panic("VirtualDiskReadySnapshotterMock.GetVirtualDiskFunc: method is nil but VirtualDiskReadySnapshotter.GetVirtualDisk was just called")
 	}
@@ -99,10 +99,10 @@ var _ LifeCycleSnapshotter = &LifeCycleSnapshotterMock{}
//
//	// make and configure a mocked LifeCycleSnapshotter
//	mockedLifeCycleSnapshotter := &LifeCycleSnapshotterMock{
-//		CanFreezeFunc: func(vm *virtv2.VirtualMachine) bool {
+//		CanFreezeFunc: func(vm *v1alpha2.VirtualMachine) bool {
//			panic("mock out the CanFreeze method")
//		},
-//		CanUnfreezeWithVirtualDiskSnapshotFunc: func(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) {
+//		CanUnfreezeWithVirtualDiskSnapshotFunc: func(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) {
//			panic("mock out the CanUnfreezeWithVirtualDiskSnapshot method")
//		},
//		CreateVolumeSnapshotFunc: func(ctx context.Context, vs *vsv1.VolumeSnapshot) (*vsv1.VolumeSnapshot, error) {
@@ -114,16 +114,16 @@ var _ LifeCycleSnapshotter = &LifeCycleSnapshotterMock{}
//		GetPersistentVolumeClaimFunc: func(ctx context.Context, name string, namespace string) (*corev1.PersistentVolumeClaim, error) {
//			panic("mock out the GetPersistentVolumeClaim method")
//		},
-//		GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) {
+//		GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) {
//			panic("mock out the GetVirtualDisk method")
//		},
-//		GetVirtualMachineFunc: func(ctx context.Context, name string, namespace string) (*virtv2.VirtualMachine, error) {
+//		GetVirtualMachineFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error) {
//			panic("mock out the GetVirtualMachine method")
//		},
//		GetVolumeSnapshotFunc: func(ctx context.Context, name string, namespace string) (*vsv1.VolumeSnapshot, error) {
//			panic("mock out the GetVolumeSnapshot method")
//		},
-//		IsFrozenFunc: func(vm *virtv2.VirtualMachine) bool {
+//		IsFrozenFunc: func(vm *v1alpha2.VirtualMachine) bool {
//			panic("mock out the IsFrozen method")
//		},
//		UnfreezeFunc: func(ctx context.Context, name string, namespace string) error {
@@ -137,10 +137,10 @@ var _ LifeCycleSnapshotter = &LifeCycleSnapshotterMock{}
// }
type LifeCycleSnapshotterMock struct {
 	// CanFreezeFunc mocks the CanFreeze method.
-	CanFreezeFunc func(vm *virtv2.VirtualMachine) bool
+	CanFreezeFunc func(vm *v1alpha2.VirtualMachine) bool
 
 	// CanUnfreezeWithVirtualDiskSnapshotFunc mocks the CanUnfreezeWithVirtualDiskSnapshot method.
-	CanUnfreezeWithVirtualDiskSnapshotFunc func(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error)
+	CanUnfreezeWithVirtualDiskSnapshotFunc func(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error)
 
 	// CreateVolumeSnapshotFunc mocks the CreateVolumeSnapshot method.
 	CreateVolumeSnapshotFunc func(ctx context.Context, vs *vsv1.VolumeSnapshot) (*vsv1.VolumeSnapshot, error)
@@ -152,16 +152,16 @@ type LifeCycleSnapshotterMock struct {
 	GetPersistentVolumeClaimFunc func(ctx context.Context, name string, namespace string) (*corev1.PersistentVolumeClaim, error)
 
 	// GetVirtualDiskFunc mocks the GetVirtualDisk method.
-	GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error)
+	GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error)
 
 	// GetVirtualMachineFunc mocks the GetVirtualMachine method.
-	GetVirtualMachineFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualMachine, error)
+	GetVirtualMachineFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error)
 
 	// GetVolumeSnapshotFunc mocks the GetVolumeSnapshot method.
 	GetVolumeSnapshotFunc func(ctx context.Context, name string, namespace string) (*vsv1.VolumeSnapshot, error)
 
 	// IsFrozenFunc mocks the IsFrozen method.
-	IsFrozenFunc func(vm *virtv2.VirtualMachine) bool
+	IsFrozenFunc func(vm *v1alpha2.VirtualMachine) bool
 
 	// UnfreezeFunc mocks the Unfreeze method.
 	UnfreezeFunc func(ctx context.Context, name string, namespace string) error
@@ -171,7 +171,7 @@ type LifeCycleSnapshotterMock struct {
 		// CanFreeze holds details about calls to the CanFreeze method.
 		CanFreeze []struct {
 			// VM is the vm argument value.
-			VM *virtv2.VirtualMachine
+			VM *v1alpha2.VirtualMachine
 		}
 		// CanUnfreezeWithVirtualDiskSnapshot holds details about calls to the CanUnfreezeWithVirtualDiskSnapshot method.
 		CanUnfreezeWithVirtualDiskSnapshot []struct {
@@ -180,7 +180,7 @@ type LifeCycleSnapshotterMock struct {
 			// VdSnapshotName is the vdSnapshotName argument value.
 			VdSnapshotName string
 			// VM is the vm argument value.
-			VM *virtv2.VirtualMachine
+			VM *v1alpha2.VirtualMachine
 		}
 		// CreateVolumeSnapshot holds details about calls to the CreateVolumeSnapshot method.
 		CreateVolumeSnapshot []struct {
@@ -237,7 +237,7 @@ type LifeCycleSnapshotterMock struct {
 		// IsFrozen holds details about calls to the IsFrozen method.
 		IsFrozen []struct {
 			// VM is the vm argument value.
-			VM *virtv2.VirtualMachine
+			VM *v1alpha2.VirtualMachine
 		}
 		// Unfreeze holds details about calls to the Unfreeze method.
 		Unfreeze []struct {
@@ -262,12 +262,12 @@
 }
 
 // CanFreeze calls CanFreezeFunc.
-func (mock *LifeCycleSnapshotterMock) CanFreeze(vm *virtv2.VirtualMachine) bool {
+func (mock *LifeCycleSnapshotterMock) CanFreeze(vm *v1alpha2.VirtualMachine) bool {
 	if mock.CanFreezeFunc == nil {
 		panic("LifeCycleSnapshotterMock.CanFreezeFunc: method is nil but LifeCycleSnapshotter.CanFreeze was just called")
 	}
 	callInfo := struct {
-		VM *virtv2.VirtualMachine
+		VM *v1alpha2.VirtualMachine
 	}{
 		VM: vm,
 	}
@@ -282,10 +282,10 @@ func (mock *LifeCycleSnapshotterMock) CanFreeze(vm *virtv2.VirtualMachine) bool
 //
 //	len(mockedLifeCycleSnapshotter.CanFreezeCalls())
 func (mock *LifeCycleSnapshotterMock) CanFreezeCalls() []struct {
-	VM *virtv2.VirtualMachine
+	VM *v1alpha2.VirtualMachine
 } {
 	var calls []struct {
-		VM *virtv2.VirtualMachine
+		VM *v1alpha2.VirtualMachine
 	}
 	mock.lockCanFreeze.RLock()
 	calls = mock.calls.CanFreeze
@@ -294,14 +294,14 @@ func (mock *LifeCycleSnapshotterMock) CanFreezeCalls() []struct {
 }
 
 // CanUnfreezeWithVirtualDiskSnapshot calls CanUnfreezeWithVirtualDiskSnapshotFunc.
-func (mock *LifeCycleSnapshotterMock) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) {
+func (mock *LifeCycleSnapshotterMock) CanUnfreezeWithVirtualDiskSnapshot(ctx context.Context, vdSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) {
 	if mock.CanUnfreezeWithVirtualDiskSnapshotFunc == nil {
 		panic("LifeCycleSnapshotterMock.CanUnfreezeWithVirtualDiskSnapshotFunc: method is nil but LifeCycleSnapshotter.CanUnfreezeWithVirtualDiskSnapshot was just called")
 	}
 	callInfo := struct {
 		Ctx            context.Context
 		VdSnapshotName string
-		VM             *virtv2.VirtualMachine
+		VM             *v1alpha2.VirtualMachine
 	}{
 		Ctx:            ctx,
 		VdSnapshotName: vdSnapshotName,
@@ -320,12 +320,12 @@
 func (mock *LifeCycleSnapshotterMock) CanUnfreezeWithVirtualDiskSnapshotCalls() []struct {
 	Ctx            context.Context
 	VdSnapshotName string
-	VM             *virtv2.VirtualMachine
+	VM             *v1alpha2.VirtualMachine
 } {
 	var calls []struct {
 		Ctx            context.Context
 		VdSnapshotName string
-		VM             *virtv2.VirtualMachine
+		VM             *v1alpha2.VirtualMachine
 	}
 	mock.lockCanUnfreezeWithVirtualDiskSnapshot.RLock()
 	calls = mock.calls.CanUnfreezeWithVirtualDiskSnapshot
@@ -450,7 +450,7 @@ func (mock *LifeCycleSnapshotterMock) GetPersistentVolumeClaimCalls() []struct {
 }
 
 // GetVirtualDisk calls GetVirtualDiskFunc.
-func (mock *LifeCycleSnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) {
+func (mock *LifeCycleSnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) {
 	if mock.GetVirtualDiskFunc == nil {
 		panic("LifeCycleSnapshotterMock.GetVirtualDiskFunc: method is nil but LifeCycleSnapshotter.GetVirtualDisk was just called")
 	}
@@ -490,7 +490,7 @@ func (mock *LifeCycleSnapshotterMock) GetVirtualDiskCalls() []struct {
 }
 
 // GetVirtualMachine calls GetVirtualMachineFunc.
-func (mock *LifeCycleSnapshotterMock) GetVirtualMachine(ctx context.Context, name string, namespace string) (*virtv2.VirtualMachine, error) {
+func (mock *LifeCycleSnapshotterMock) GetVirtualMachine(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error) {
 	if mock.GetVirtualMachineFunc == nil {
 		panic("LifeCycleSnapshotterMock.GetVirtualMachineFunc: method is nil but LifeCycleSnapshotter.GetVirtualMachine was just called")
 	}
@@ -570,12 +570,12 @@ func (mock *LifeCycleSnapshotterMock) GetVolumeSnapshotCalls() []struct {
 }
 
 // IsFrozen calls IsFrozenFunc.
-func (mock *LifeCycleSnapshotterMock) IsFrozen(vm *virtv2.VirtualMachine) bool {
+func (mock *LifeCycleSnapshotterMock) IsFrozen(vm *v1alpha2.VirtualMachine) bool {
 	if mock.IsFrozenFunc == nil {
 		panic("LifeCycleSnapshotterMock.IsFrozenFunc: method is nil but LifeCycleSnapshotter.IsFrozen was just called")
 	}
 	callInfo := struct {
-		VM *virtv2.VirtualMachine
+		VM *v1alpha2.VirtualMachine
 	}{
 		VM: vm,
 	}
@@ -590,10 +590,10 @@ func (mock *LifeCycleSnapshotterMock) IsFrozen(vm *virtv2.VirtualMachine) bool {
 //
 //	len(mockedLifeCycleSnapshotter.IsFrozenCalls())
 func (mock *LifeCycleSnapshotterMock) IsFrozenCalls() []struct {
-	VM *virtv2.VirtualMachine
+	VM *v1alpha2.VirtualMachine
 } {
 	var calls []struct {
-		VM *virtv2.VirtualMachine
+		VM *v1alpha2.VirtualMachine
 	}
 	mock.lockIsFrozen.RLock()
 	calls = mock.calls.IsFrozen
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go
index 6cadf94ba3..519d79786a 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition"
 )
@@ -39,7 +39,7 @@ func NewVirtualDiskReadyHandler(snapshotter VirtualDiskReadySnapshotter) *Virtua
 	}
 }
 
-func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error) {
+func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vdscondition.VirtualDiskReadyType).Generation(vdSnapshot.Generation)
 	defer func() { conditions.SetCondition(cb, &vdSnapshot.Status.Conditions) }()
@@ -52,7 +52,7 @@ func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2.
 		return reconcile.Result{}, nil
 	}
 
-	if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady {
+	if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady {
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vdscondition.VirtualDiskReady).
@@ -82,7 +82,7 @@ func (h VirtualDiskReadyHandler) Handle(ctx context.Context, vdSnapshot *virtv2.
 	}
 
 	switch vd.Status.Phase {
-	case virtv2.DiskReady:
+	case v1alpha2.DiskReady:
 		snapshotting, _ := conditions.GetCondition(vdcondition.SnapshottingType, vd.Status.Conditions)
 		if snapshotting.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(snapshotting, vd) {
 			cb.
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go
index 7874397d55..410692cad1 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/virtual_disk_ready_test.go
@@ -25,21 +25,21 @@ import (
 	"k8s.io/utils/ptr"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition"
 )
 
 var _ = Describe("VirtualDiskReady handler", func() {
 	var snapshotter *VirtualDiskReadySnapshotterMock
-	var vd *virtv2.VirtualDisk
-	var vdSnapshot *virtv2.VirtualDiskSnapshot
+	var vd *v1alpha2.VirtualDisk
+	var vdSnapshot *v1alpha2.VirtualDiskSnapshot
 
 	BeforeEach(func() {
-		vd = &virtv2.VirtualDisk{
+		vd = &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{Name: "vd-01"},
-			Status: virtv2.VirtualDiskStatus{
-				Phase: virtv2.DiskReady,
+			Status: v1alpha2.VirtualDiskStatus{
+				Phase: v1alpha2.DiskReady,
 				Conditions: []metav1.Condition{
 					{
 						Type:   vdcondition.SnapshottingType.String(),
@@ -49,13 +49,13 @@ var _ = Describe("VirtualDiskReady handler", func() {
 			},
 		}
 
-		vdSnapshot = &virtv2.VirtualDiskSnapshot{
+		vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
 			ObjectMeta: metav1.ObjectMeta{Name: "vdsnapshot"},
-			Spec:       virtv2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name},
+			Spec:       v1alpha2.VirtualDiskSnapshotSpec{VirtualDiskName: vd.Name},
 		}
 
 		snapshotter = &VirtualDiskReadySnapshotterMock{
-			GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
+			GetVirtualDiskFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
 				return vd, nil
 			},
 		}
@@ -76,7 +76,7 @@ var _ = Describe("VirtualDiskReady handler", func() {
 
 	Context("condition VirtualDiskReady is False", func() {
 		It("The virtual disk not found", func() {
-			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
+			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
 				return nil, nil
 			}
 			h := NewVirtualDiskReadyHandler(snapshotter)
@@ -90,7 +90,7 @@ var _ = Describe("VirtualDiskReady handler", func() {
 		})
 
 		It("The virtual disk is in process of deletion", func() {
-			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
+			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
 				vd.DeletionTimestamp = ptr.To(metav1.Now())
 				return vd, nil
 			}
@@ -105,8 +105,8 @@ var _ = Describe("VirtualDiskReady handler", func() {
 		})
 
 		It("The virtual disk is not Ready", func() {
-			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
-				vd.Status.Phase = virtv2.DiskPending
+			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
+				vd.Status.Phase = v1alpha2.DiskPending
 				return vd, nil
 			}
 			h := NewVirtualDiskReadyHandler(snapshotter)
@@ -120,7 +120,7 @@ var _ = Describe("VirtualDiskReady handler", func() {
 		})
 
 		It("The virtual disk is not ready for snapshot taking yet", func() {
-			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) {
+			snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) {
 				vd.Status.Conditions = nil
 				vd.Status.Conditions = append(vd.Status.Conditions, metav1.Condition{
 					Type: vdcondition.SnapshottingType.String(),
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go
index ddaf29dde5..75debfec1e 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vd_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 )
 
@@ -48,9 +48,9 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher {
 
 func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualDisk]{
+			predicate.TypedFuncs[*v1alpha2.VirtualDisk]{
 				UpdateFunc: w.filterUpdateEvents,
 			},
 		),
@@ -60,8 +60,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller
 	return nil
 }
 
-func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) {
-	var vdSnapshots virtv2.VirtualDiskSnapshotList
+func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) {
+	var vdSnapshots v1alpha2.VirtualDiskSnapshotList
 	err := w.client.List(ctx, &vdSnapshots, &client.ListOptions{
 		Namespace: vd.GetNamespace(),
 	})
@@ -86,7 +86,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt
 	return
 }
 
-func (w VirtualDiskWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool {
+func (w VirtualDiskWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool {
 	if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 		return true
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
index d10853f8b6..0d93acf6d3 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vdsnapshot_watcher.go
@@ -27,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualDiskSnapshotWatcher struct {
@@ -42,10 +42,10 @@ func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWat
 
 func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualDiskSnapshot]{},
-			predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualDiskSnapshot]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
index 6f32b8df96..fd717873a6 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vm_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -48,9 +48,9 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
 				UpdateFunc: w.filterUpdateEvents,
 			},
 		),
@@ -60,10 +60,10 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
 
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
 	vdByName := make(map[string]struct{})
 
 	for _, bdr := range vm.Status.BlockDeviceRefs {
-		if bdr.Kind != virtv2.DiskDevice {
+		if bdr.Kind != v1alpha2.DiskDevice {
 			continue
 		}
@@ -74,7 +74,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 		return
 	}
 
-	var vdSnapshots virtv2.VirtualDiskSnapshotList
+	var vdSnapshots v1alpha2.VirtualDiskSnapshotList
 	err := w.client.List(ctx, &vdSnapshots, &client.ListOptions{
 		Namespace: vm.GetNamespace(),
 	})
@@ -100,7 +100,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
 
-func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 	if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 		return true
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
index 7ce7892a9b..46e6dbaa6c 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/internal/watcher/vs_watcher.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VolumeSnapshotWatcher struct{} @@ -40,7 +40,7 @@ func (w VolumeSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Control handler.TypedEnqueueRequestForOwner[*vsv1.VolumeSnapshot]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualDiskSnapshot{}, + &v1alpha2.VirtualDiskSnapshot{}, ), ), ); err != nil { diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go index 95c438ea08..7e671818f4 100644 --- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go +++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_controller.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/logger" vdsnapshotcollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vdsnapshot" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ControllerName = "vdsnapshot-controller" @@ -43,7 +43,7 @@ func NewController( log *log.Logger, virtClient kubeclient.Client, ) (controller.Controller, error) { - protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVDSnapshotProtection) + protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVDSnapshotProtection) freezer := service.NewSnapshotService(virtClient, mgr.GetClient(), protection) reconciler := NewReconciler( @@ -69,7 +69,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.VirtualDiskSnapshot{}). + For(&v1alpha2.VirtualDiskSnapshot{}). WithValidator(NewValidator(log)). 
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
index 60083e1e23..f73f8492f6 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_reconciler.go
@@ -28,11 +28,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vdsnapshot/internal/watcher"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
-	Handle(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (reconcile.Result, error)
+	Handle(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (reconcile.Result, error)
 }
 
 type Watcher interface {
@@ -92,10 +92,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualDiskSnapshot {
-	return &virtv2.VirtualDiskSnapshot{}
+func (r *Reconciler) factory() *v1alpha2.VirtualDiskSnapshot {
+	return &v1alpha2.VirtualDiskSnapshot{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualDiskSnapshot) virtv2.VirtualDiskSnapshotStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualDiskSnapshot) v1alpha2.VirtualDiskSnapshotStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
index 2763050960..bb3290762e 100644
--- a/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vdsnapshot/vdsnapshot_webhook.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/deckhouse/deckhouse/pkg/log"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Validator struct {
@@ -44,12 +44,12 @@
 }
 
 func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVDS, ok := oldObj.(*virtv2.VirtualDiskSnapshot)
+	oldVDS, ok := oldObj.(*v1alpha2.VirtualDiskSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualDiskSnapshot but got a %T", newObj)
 	}
 
-	newVDS, ok := newObj.(*virtv2.VirtualDiskSnapshot)
+	newVDS, ok := newObj.(*v1alpha2.VirtualDiskSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualDiskSnapshot but got a %T", newObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go b/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
index f62972a6ee..7157b23338 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/attachee.go
@@ -25,7 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type AttacheeHandler struct {
@@ -38,7 +38,7 @@ func NewAttacheeHandler(client client.Client) *AttacheeHandler {
 	}
 }
 
-func (h AttacheeHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (h AttacheeHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("attachee"))
 
 	hasAttachedVM, err := h.hasAttachedVM(ctx, vi)
@@ -49,10 +49,10 @@ func (h AttacheeHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (r
 	switch {
 	case !hasAttachedVM:
 		log.Debug("Allow virtual image deletion")
-		controllerutil.RemoveFinalizer(vi, virtv2.FinalizerVIProtection)
+		controllerutil.RemoveFinalizer(vi, v1alpha2.FinalizerVIProtection)
 	case vi.DeletionTimestamp == nil:
 		log.Debug("Protect virtual image from deletion")
-		controllerutil.AddFinalizer(vi, virtv2.FinalizerVIProtection)
+		controllerutil.AddFinalizer(vi, v1alpha2.FinalizerVIProtection)
 	default:
 		log.Debug("Virtual image deletion is delayed: it's protected by virtual machines")
 	}
@@ -65,7 +65,7 @@ func (h AttacheeHandler) Name() string {
 }
 
 func (h AttacheeHandler) hasAttachedVM(ctx context.Context, vi client.Object) (bool, error) {
-	var vms virtv2.VirtualMachineList
+	var vms v1alpha2.VirtualMachineList
 	err := h.client.List(ctx, &vms, &client.ListOptions{
 		Namespace: vi.GetNamespace(),
 	})
@@ -82,9 +82,9 @@ func (h AttacheeHandler) hasAttachedVM(ctx context.Context, vi client.Object) (b
 	return false, nil
 }
 
-func (h AttacheeHandler) isVIAttachedToVM(viName string, vm virtv2.VirtualMachine) bool {
+func (h AttacheeHandler) isVIAttachedToVM(viName string, vm v1alpha2.VirtualMachine) bool {
 	for _, bda := range vm.Status.BlockDeviceRefs {
-		if bda.Kind == virtv2.ImageDevice && bda.Name == viName {
+		if bda.Kind == v1alpha2.ImageDevice && bda.Name == viName {
 			return true
 		}
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go b/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
index ccc08ad00a..60c54de04e 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/datasource_ready.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -41,7 +41,7 @@ func NewDatasourceReadyHandler(sources *source.Sources) *DatasourceReadyHandler
 	}
 }
 
-func (h DatasourceReadyHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (h DatasourceReadyHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vicondition.DatasourceReadyType).Generation(vi.Generation)
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vi/internal/deletion.go
index 030e94d986..b9240254f2 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/deletion.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
"github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -40,7 +40,7 @@ func NewDeletionHandler(sources *source.Sources) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName)) if vi.DeletionTimestamp != nil { @@ -54,11 +54,11 @@ func (h DeletionHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (r } log.Info("Deletion observed: remove cleanup finalizer from VirtualImage") - controllerutil.RemoveFinalizer(vi, virtv2.FinalizerVICleanup) + controllerutil.RemoveFinalizer(vi, v1alpha2.FinalizerVICleanup) return reconcile.Result{}, nil } - controllerutil.AddFinalizer(vi, virtv2.FinalizerVICleanup) + controllerutil.AddFinalizer(vi, v1alpha2.FinalizerVICleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go index b626d75a99..59fe2e0934 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/interfaces.go @@ -25,15 +25,15 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
DiskService Sources StorageClassService type Sources interface { - Changed(ctx context.Context, vi *virtv2.VirtualImage) bool - For(dsType virtv2.DataSourceType) (source.Handler, bool) - CleanUp(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) + Changed(ctx context.Context, vi *v1alpha2.VirtualImage) bool + For(dsType v1alpha2.DataSourceType) (source.Handler, bool) + CleanUp(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) } type DiskService interface { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go index 8048ef0338..ab7fddf9a0 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -46,7 +46,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, sources Sourc } } -func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { readyCondition, ok := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) if !ok { @@ -59,25 +59,25 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) ( } if vi.DeletionTimestamp != nil { - vi.Status.Phase = virtv2.ImageTerminating + vi.Status.Phase = v1alpha2.ImageTerminating return reconcile.Result{}, nil } if vi.Status.Phase == "" { - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending } if readyCondition.Status != metav1.ConditionTrue && h.sources.Changed(ctx, vi) { h.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonVISpecHasBeenChanged, + v1alpha2.ReasonVISpecHasBeenChanged, "Spec changes are detected: import process is restarted by controller", ) // Reset status and start import again. - vi.Status = virtv2.VirtualImageStatus{ - Phase: virtv2.ImagePending, + vi.Status = v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImagePending, } _, err := h.sources.CleanUp(ctx, vi) @@ -102,7 +102,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) ( return reconcile.Result{}, nil } - if !source.IsImageProvisioningFinished(readyCondition) && (vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim) { + if !source.IsImageProvisioningFinished(readyCondition) && (vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim) { storageClassReady, _ := conditions.GetCondition(vicondition.StorageClassReadyType, vi.Status.Conditions) if storageClassReady.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(storageClassReady, vi) { cb. 
@@ -125,9 +125,9 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vi *virtv2.VirtualImage) (
 	}
 
 	switch vi.Spec.Storage {
-	case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+	case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 		return ds.StoreToPVC(ctx, vi)
-	case virtv2.StorageContainerRegistry:
+	case v1alpha2.StorageContainerRegistry:
 		return ds.StoreToDVCR(ctx, vi)
 	default:
 		return reconcile.Result{}, fmt.Errorf("unknown spec storage: %s", vi.Spec.Storage)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
index f2ca0daf0c..f6d185dd23 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/life_cycle_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -38,8 +38,8 @@ var _ = Describe("LifeCycleHandler Run", func() {
 			args.ReadyCondition.Type = vicondition.ReadyType.String()
 			var sourcesMock SourcesMock
 			cleanUpCalled := false
-			vi := virtv2.VirtualImage{
-				Status: virtv2.VirtualImageStatus{
+			vi := v1alpha2.VirtualImage{
+				Status: v1alpha2.VirtualImageStatus{
 					Conditions: []metav1.Condition{
 						args.ReadyCondition,
 						{
@@ -54,19 +54,19 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				},
 			}
 
-			sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+			sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 				cleanUpCalled = true
 				return false, nil
 			}
 
-			sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *virtv2.VirtualImage) bool {
+			sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *v1alpha2.VirtualImage) bool {
 				return args.SpecChanged
 			}
 
-			sourcesMock.ForFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) {
+			sourcesMock.ForFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) {
 				var handler source.HandlerMock
 
-				handler.StoreToPVCFunc = func(_ context.Context, _ *virtv2.VirtualImage) (reconcile.Result, error) {
+				handler.StoreToPVCFunc = func(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
 					return reconcile.Result{}, nil
 				}
 
@@ -122,11 +122,11 @@ var _ = Describe("LifeCycleHandler Run", func() {
 			args.StorageClassReadyCondition.Type = vicondition.StorageClassReadyType.String()
 			var sourcesMock SourcesMock
 			cleanUpCalled := false
-			vi := virtv2.VirtualImage{
-				Spec: virtv2.VirtualImageSpec{
+			vi := v1alpha2.VirtualImage{
+				Spec: v1alpha2.VirtualImageSpec{
 					Storage: args.StorageType,
 				},
-				Status: virtv2.VirtualImageStatus{
+				Status: v1alpha2.VirtualImageStatus{
 					Conditions: []metav1.Condition{
 						args.ReadyCondition,
 						args.StorageClassReadyCondition,
@@ -139,19 +139,19 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				},
 			}
 
-			sourcesMock.CleanUpFunc = func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+			sourcesMock.CleanUpFunc = func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 				cleanUpCalled = true
 				return false, nil
 			}
 
-			sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *virtv2.VirtualImage) bool {
+			sourcesMock.ChangedFunc = func(contextMoqParam context.Context, vi *v1alpha2.VirtualImage) bool {
 				return false
 			}
 
-			sourcesMock.ForFunc = func(_ virtv2.DataSourceType) (source.Handler, bool) {
+			sourcesMock.ForFunc = func(_ v1alpha2.DataSourceType) (source.Handler, bool) {
 				var handler source.HandlerMock
 
-				handler.StoreToPVCFunc = func(_ context.Context, _ *virtv2.VirtualImage) (reconcile.Result, error) {
+				handler.StoreToPVCFunc = func(_ context.Context, _ *v1alpha2.VirtualImage) (reconcile.Result, error) {
 					return reconcile.Result{}, nil
 				}
 
@@ -174,7 +174,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "sc",
-			StorageType:          virtv2.StorageContainerRegistry,
+			StorageType:          v1alpha2.StorageContainerRegistry,
 			ExpectCleanup:        false,
 		},
 	),
@@ -188,7 +188,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "",
-			StorageType:          virtv2.StoragePersistentVolumeClaim,
+			StorageType:          v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup:        false,
 		},
 	),
@@ -202,7 +202,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionFalse,
 			},
 			StorageClassInStatus: "sc",
-			StorageType:          virtv2.StoragePersistentVolumeClaim,
+			StorageType:          v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup:        false,
 		},
 	),
@@ -216,7 +216,7 @@ var _ = Describe("LifeCycleHandler Run", func() {
 				Status: metav1.ConditionTrue,
 			},
 			StorageClassInStatus: "sc",
-			StorageType:          virtv2.StoragePersistentVolumeClaim,
+			StorageType:          v1alpha2.StoragePersistentVolumeClaim,
 			ExpectCleanup:        false,
 		},
 	),
@@ -233,6 +233,6 @@ type cleanupAfterScNotReadyTestArgs struct {
 	ReadyCondition             metav1.Condition
 	StorageClassReadyCondition metav1.Condition
 	StorageClassInStatus       string
-	StorageType                virtv2.StorageType
+	StorageType                v1alpha2.StorageType
 	ExpectCleanup              bool
 }
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/mock.go
index 9c5d52f5eb..4f8ef5285e 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/mock.go
@@ -7,7 +7,7 @@ import (
 	"context"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
@@ -146,13 +146,13 @@ var _ Sources = &SourcesMock{}
 //
 //		// make and configure a mocked Sources
 //		mockedSources := &SourcesMock{
-//			ChangedFunc: func(ctx context.Context, vi *virtv2.VirtualImage) bool {
+//			ChangedFunc: func(ctx context.Context, vi *v1alpha2.VirtualImage) bool {
 //				panic("mock out the Changed method")
 //			},
-//			CleanUpFunc: func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+//			CleanUpFunc: func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 //				panic("mock out the CleanUp method")
 //			},
-//			ForFunc: func(dsType virtv2.DataSourceType) (source.Handler, bool) {
+//			ForFunc: func(dsType v1alpha2.DataSourceType) (source.Handler, bool) {
 //				panic("mock out the For method")
 //			},
 //		}
@@ -163,13 +163,13 @@ var _ Sources = &SourcesMock{}
 //	}
 type SourcesMock struct {
 	// ChangedFunc mocks the Changed method.
-	ChangedFunc func(ctx context.Context, vi *virtv2.VirtualImage) bool
+	ChangedFunc func(ctx context.Context, vi *v1alpha2.VirtualImage) bool
 
 	// CleanUpFunc mocks the CleanUp method.
-	CleanUpFunc func(ctx context.Context, vd *virtv2.VirtualImage) (bool, error)
+	CleanUpFunc func(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error)
 
 	// ForFunc mocks the For method.
-	ForFunc func(dsType virtv2.DataSourceType) (source.Handler, bool)
+	ForFunc func(dsType v1alpha2.DataSourceType) (source.Handler, bool)
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -178,19 +178,19 @@ type SourcesMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vi is the vi argument value.
-			Vi *virtv2.VirtualImage
+			Vi *v1alpha2.VirtualImage
 		}
 		// CleanUp holds details about calls to the CleanUp method.
 		CleanUp []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vd is the vd argument value.
-			Vd *virtv2.VirtualImage
+			Vd *v1alpha2.VirtualImage
 		}
 		// For holds details about calls to the For method.
 		For []struct {
 			// DsType is the dsType argument value.
-			DsType virtv2.DataSourceType
+			DsType v1alpha2.DataSourceType
 		}
 	}
 	lockChanged sync.RWMutex
@@ -199,13 +199,13 @@ type SourcesMock struct {
 }
 
 // Changed calls ChangedFunc.
-func (mock *SourcesMock) Changed(ctx context.Context, vi *virtv2.VirtualImage) bool {
+func (mock *SourcesMock) Changed(ctx context.Context, vi *v1alpha2.VirtualImage) bool {
 	if mock.ChangedFunc == nil {
 		panic("SourcesMock.ChangedFunc: method is nil but Sources.Changed was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vi:  vi,
@@ -222,11 +222,11 @@ func (mock *SourcesMock) Changed(ctx context.Context, vi *virtv2.VirtualImage) b
 // len(mockedSources.ChangedCalls())
 func (mock *SourcesMock) ChangedCalls() []struct {
 	Ctx context.Context
-	Vi  *virtv2.VirtualImage
+	Vi  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}
 	mock.lockChanged.RLock()
 	calls = mock.calls.Changed
@@ -235,13 +235,13 @@ func (mock *SourcesMock) ChangedCalls() []struct {
 }
 
 // CleanUp calls CleanUpFunc.
-func (mock *SourcesMock) CleanUp(ctx context.Context, vd *virtv2.VirtualImage) (bool, error) {
+func (mock *SourcesMock) CleanUp(ctx context.Context, vd *v1alpha2.VirtualImage) (bool, error) {
 	if mock.CleanUpFunc == nil {
 		panic("SourcesMock.CleanUpFunc: method is nil but Sources.CleanUp was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vd  *virtv2.VirtualImage
+		Vd  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vd:  vd,
@@ -258,11 +258,11 @@ func (mock *SourcesMock) CleanUp(ctx context.Context, vd *virtv2.VirtualImage) (
 // len(mockedSources.CleanUpCalls())
 func (mock *SourcesMock) CleanUpCalls() []struct {
 	Ctx context.Context
-	Vd  *virtv2.VirtualImage
+	Vd  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vd  *virtv2.VirtualImage
+		Vd  *v1alpha2.VirtualImage
 	}
 	mock.lockCleanUp.RLock()
 	calls = mock.calls.CleanUp
@@ -271,12 +271,12 @@ func (mock *SourcesMock) CleanUpCalls() []struct {
 }
 
 // For calls ForFunc.
-func (mock *SourcesMock) For(dsType virtv2.DataSourceType) (source.Handler, bool) {
+func (mock *SourcesMock) For(dsType v1alpha2.DataSourceType) (source.Handler, bool) {
 	if mock.ForFunc == nil {
 		panic("SourcesMock.ForFunc: method is nil but Sources.For was just called")
 	}
 	callInfo := struct {
-		DsType virtv2.DataSourceType
+		DsType v1alpha2.DataSourceType
 	}{
 		DsType: dsType,
 	}
@@ -291,10 +291,10 @@ func (mock *SourcesMock) For(dsType virtv2.DataSourceType) (source.Handler, bool
 //
 //	len(mockedSources.ForCalls())
 func (mock *SourcesMock) ForCalls() []struct {
-	DsType virtv2.DataSourceType
+	DsType v1alpha2.DataSourceType
 } {
 	var calls []struct {
-		DsType virtv2.DataSourceType
+		DsType v1alpha2.DataSourceType
 	}
 	mock.lockFor.RLock()
 	calls = mock.calls.For
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
index 86bc5d38a9..854e1eedc1 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service.go
@@ -23,7 +23,7 @@ import (
 	"slices"
 
 	corev1 "k8s.io/api/core/v1"
-	storev1 "k8s.io/api/storage/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 
 	"github.com/deckhouse/virtualization-controller/pkg/config"
@@ -61,7 +61,7 @@ func NewVirtualImageStorageClassService(svc *service.BaseStorageClassService, se
 // Errors:
 // 1. Return error if no storage class is specified.
 // 2. Return error if specified non-empty class is not allowed.
-func (svc *VirtualImageStorageClassService) GetValidatedStorageClass(storageClassFromSpec *string, clusterDefaultStorageClass *storev1.StorageClass) (*string, error) {
+func (svc *VirtualImageStorageClassService) GetValidatedStorageClass(storageClassFromSpec *string, clusterDefaultStorageClass *storagev1.StorageClass) (*string, error) {
 	if svc.storageClassSettings.DefaultStorageClassName == "" && len(svc.storageClassSettings.AllowedStorageClassNames) == 0 {
 		if svc.storageClassSettings.StorageClassName == "" {
 			return storageClassFromSpec, nil
@@ -117,7 +117,7 @@ func (svc *VirtualImageStorageClassService) IsStorageClassAllowed(scName string)
 	return false
 }
 
-func (svc *VirtualImageStorageClassService) GetModuleStorageClass(ctx context.Context) (*storev1.StorageClass, error) {
+func (svc *VirtualImageStorageClassService) GetModuleStorageClass(ctx context.Context) (*storagev1.StorageClass, error) {
 	return svc.GetStorageClass(ctx, svc.storageClassSettings.DefaultStorageClassName)
 }
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
index e6020ba7d1..a4146734f7 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/service/vi_storage_class_service_test.go
@@ -21,7 +21,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
@@ -215,8 +215,8 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 				Status: cdiv1.StorageProfileStatus{
 					ClaimPropertySets: []cdiv1.ClaimPropertySet{
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-							VolumeMode:  ptr.To(v1.PersistentVolumeFilesystem),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeFilesystem),
 						},
 					},
 				},
@@ -235,16 +235,16 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 				Status: cdiv1.StorageProfileStatus{
 					ClaimPropertySets: []cdiv1.ClaimPropertySet{
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-							VolumeMode:  ptr.To(v1.PersistentVolumeFilesystem),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeFilesystem),
 						},
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-							VolumeMode:  ptr.To(v1.PersistentVolumeBlock),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeBlock),
 						},
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-							VolumeMode:  ptr.To(v1.PersistentVolumeBlock),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeBlock),
 						},
 					},
 				},
@@ -263,12 +263,12 @@ var _ = Describe("VirtualImageStorageClassService", func() {
 				Status: cdiv1.StorageProfileStatus{
 					ClaimPropertySets: []cdiv1.ClaimPropertySet{
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-							VolumeMode:  ptr.To(v1.PersistentVolumeBlock),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeBlock),
 						},
 						{
-							AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-							VolumeMode:  ptr.To(v1.PersistentVolumeFilesystem),
+							AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+							VolumeMode:  ptr.To(corev1.PersistentVolumeFilesystem),
 						},
 					},
 				},
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
index 5339324230..68e16610a5 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go
@@ -41,7 +41,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -69,7 +69,7 @@ func NewHTTPDataSource(
 	}
 }
 
-func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "http")
 
 	condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions)
@@ -91,7 +91,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 			Reason(vicondition.Ready).
Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady err = ds.importerService.Unprotect(ctx, pod) if err != nil { @@ -100,7 +100,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag return CleanUpSupplements(ctx, vi, ds) case object.IsTerminating(pod): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil: @@ -113,14 +113,14 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -132,11 +132,11 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -152,7 +152,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = ds.statService.GetSize(pod) vi.Status.CDROM = ds.statService.GetCDROM(pod) vi.Status.Format = ds.statService.GetFormat(pod) @@ -177,7 +177,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) @@ -188,7 +188,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "http") condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -252,14 +252,14 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage case err == nil: // OK. 
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -281,7 +281,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -293,17 +293,17 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The HTTP DataSource import has started",
 		)
 
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -341,7 +341,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -349,14 +349,14 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -364,7 +364,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -375,11 +375,11 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The HTTP DataSource import has completed",
 		)
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -410,7 +410,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -426,7 +426,7 @@ func (ds HTTPDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (
 	return importerRequeue || diskRequeue, nil
 }
 
-func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -446,11 +446,11 @@ func (ds HTTPDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.Virt
 	}
 }
 
-func (ds HTTPDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) error {
+func (ds HTTPDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualImage) error {
 	return nil
 }
 
-func (ds HTTPDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *importer.Settings {
+func (ds HTTPDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyHTTPSourceSettings(&settings, vi.Spec.DataSource.HTTP, supgen)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
index 34e16b178c..3e4e7373fa 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go
@@ -30,7 +30,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/uploader"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 //go:generate go tool moq -rm -out mock.go . Importer Uploader Stat Bounder Handler Disk
@@ -65,7 +65,7 @@ type Stat interface {
 	step.ReadyContainerRegistryStepStat
 	IsUploadStarted(ownerUID types.UID, pod *corev1.Pod) bool
 	IsUploaderReady(pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) bool
-	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed
+	GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed
 }
 
 type Bounder interface {
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
index 5eed57e48b..e792231e7b 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go
@@ -10,7 +10,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/uploader"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	corev1 "k8s.io/api/core/v1"
 	netv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1085,7 +1085,7 @@ var _ Stat = &StatMock{}
 //			GetDVCRImageNameFunc: func(pod *corev1.Pod) string {
 //				panic("mock out the GetDVCRImageName method")
 //			},
-//			GetDownloadSpeedFunc: func(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed {
+//			GetDownloadSpeedFunc: func(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed {
 //				panic("mock out the GetDownloadSpeed method")
 //			},
 //			GetFormatFunc: func(pod *corev1.Pod) string {
@@ -1094,7 +1094,7 @@ var _ Stat = &StatMock{}
 //			GetProgressFunc: func(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string {
 //				panic("mock out the GetProgress method")
 //			},
-//			GetSizeFunc: func(pod *corev1.Pod) virtv2.ImageStatusSize {
+//			GetSizeFunc: func(pod *corev1.Pod) v1alpha2.ImageStatusSize {
 //				panic("mock out the GetSize method")
 //			},
 //			IsUploadStartedFunc: func(ownerUID types.UID, pod *corev1.Pod) bool {
@@ -1120,7 +1120,7 @@ type StatMock struct {
 	GetDVCRImageNameFunc func(pod *corev1.Pod) string
 
 	// GetDownloadSpeedFunc mocks the GetDownloadSpeed method.
-	GetDownloadSpeedFunc func(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed
+	GetDownloadSpeedFunc func(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed
 
 	// GetFormatFunc mocks the GetFormat method.
 	GetFormatFunc func(pod *corev1.Pod) string
@@ -1129,7 +1129,7 @@ type StatMock struct {
 	GetProgressFunc func(ownerUID types.UID, pod *corev1.Pod, prevProgress string, opts ...service.GetProgressOption) string
 
 	// GetSizeFunc mocks the GetSize method.
-	GetSizeFunc func(pod *corev1.Pod) virtv2.ImageStatusSize
+	GetSizeFunc func(pod *corev1.Pod) v1alpha2.ImageStatusSize
 
 	// IsUploadStartedFunc mocks the IsUploadStarted method.
 	IsUploadStartedFunc func(ownerUID types.UID, pod *corev1.Pod) bool
@@ -1307,7 +1307,7 @@ func (mock *StatMock) GetDVCRImageNameCalls() []struct {
 }
 
 // GetDownloadSpeed calls GetDownloadSpeedFunc.
-func (mock *StatMock) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *virtv2.StatusSpeed {
+func (mock *StatMock) GetDownloadSpeed(ownerUID types.UID, pod *corev1.Pod) *v1alpha2.StatusSpeed {
 	if mock.GetDownloadSpeedFunc == nil {
 		panic("StatMock.GetDownloadSpeedFunc: method is nil but Stat.GetDownloadSpeed was just called")
 	}
@@ -1419,7 +1419,7 @@ func (mock *StatMock) GetProgressCalls() []struct {
 }
 
 // GetSize calls GetSizeFunc.
-func (mock *StatMock) GetSize(pod *corev1.Pod) virtv2.ImageStatusSize {
+func (mock *StatMock) GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize {
 	if mock.GetSizeFunc == nil {
 		panic("StatMock.GetSizeFunc: method is nil but Stat.GetSize was just called")
 	}
@@ -1720,16 +1720,16 @@ var _ Handler = &HandlerMock{}
 //
 //		// make and configure a mocked Handler
 //		mockedHandler := &HandlerMock{
-//			CleanUpFunc: func(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+//			CleanUpFunc: func(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 //				panic("mock out the CleanUp method")
 //			},
-//			StoreToDVCRFunc: func(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+//			StoreToDVCRFunc: func(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 //				panic("mock out the StoreToDVCR method")
 //			},
-//			StoreToPVCFunc: func(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+//			StoreToPVCFunc: func(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 //				panic("mock out the StoreToPVC method")
 //			},
-//			ValidateFunc: func(ctx context.Context, vi *virtv2.VirtualImage) error {
+//			ValidateFunc: func(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 //				panic("mock out the Validate method")
 //			},
 //		}
@@ -1740,16 +1740,16 @@ var _ Handler = &HandlerMock{}
 //	}
 type HandlerMock struct {
 	// CleanUpFunc mocks the CleanUp method.
-	CleanUpFunc func(ctx context.Context, vi *virtv2.VirtualImage) (bool, error)
+	CleanUpFunc func(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error)
 
 	// StoreToDVCRFunc mocks the StoreToDVCR method.
-	StoreToDVCRFunc func(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
+	StoreToDVCRFunc func(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
 
 	// StoreToPVCFunc mocks the StoreToPVC method.
-	StoreToPVCFunc func(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
+	StoreToPVCFunc func(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
 
 	// ValidateFunc mocks the Validate method.
-	ValidateFunc func(ctx context.Context, vi *virtv2.VirtualImage) error
+	ValidateFunc func(ctx context.Context, vi *v1alpha2.VirtualImage) error
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -1758,28 +1758,28 @@ type HandlerMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vi is the vi argument value.
-			Vi *virtv2.VirtualImage
+			Vi *v1alpha2.VirtualImage
 		}
 		// StoreToDVCR holds details about calls to the StoreToDVCR method.
 		StoreToDVCR []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vi is the vi argument value.
-			Vi *virtv2.VirtualImage
+			Vi *v1alpha2.VirtualImage
 		}
 		// StoreToPVC holds details about calls to the StoreToPVC method.
 		StoreToPVC []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vi is the vi argument value.
-			Vi *virtv2.VirtualImage
+			Vi *v1alpha2.VirtualImage
 		}
 		// Validate holds details about calls to the Validate method.
 		Validate []struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// Vi is the vi argument value.
-			Vi *virtv2.VirtualImage
+			Vi *v1alpha2.VirtualImage
 		}
 	}
 	lockCleanUp sync.RWMutex
@@ -1789,13 +1789,13 @@ type HandlerMock struct {
 }
 
 // CleanUp calls CleanUpFunc.
-func (mock *HandlerMock) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (mock *HandlerMock) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	if mock.CleanUpFunc == nil {
 		panic("HandlerMock.CleanUpFunc: method is nil but Handler.CleanUp was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vi:  vi,
@@ -1812,11 +1812,11 @@ func (mock *HandlerMock) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (
 // len(mockedHandler.CleanUpCalls())
 func (mock *HandlerMock) CleanUpCalls() []struct {
 	Ctx context.Context
-	Vi  *virtv2.VirtualImage
+	Vi  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}
 	mock.lockCleanUp.RLock()
 	calls = mock.calls.CleanUp
@@ -1825,13 +1825,13 @@ func (mock *HandlerMock) CleanUpCalls() []struct {
 }
 
 // StoreToDVCR calls StoreToDVCRFunc.
-func (mock *HandlerMock) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (mock *HandlerMock) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	if mock.StoreToDVCRFunc == nil {
 		panic("HandlerMock.StoreToDVCRFunc: method is nil but Handler.StoreToDVCR was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vi:  vi,
@@ -1848,11 +1848,11 @@ func (mock *HandlerMock) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImag
 // len(mockedHandler.StoreToDVCRCalls())
 func (mock *HandlerMock) StoreToDVCRCalls() []struct {
 	Ctx context.Context
-	Vi  *virtv2.VirtualImage
+	Vi  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}
 	mock.lockStoreToDVCR.RLock()
 	calls = mock.calls.StoreToDVCR
@@ -1861,13 +1861,13 @@ func (mock *HandlerMock) StoreToDVCRCalls() []struct {
 }
 
 // StoreToPVC calls StoreToPVCFunc.
-func (mock *HandlerMock) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (mock *HandlerMock) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	if mock.StoreToPVCFunc == nil {
 		panic("HandlerMock.StoreToPVCFunc: method is nil but Handler.StoreToPVC was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vi:  vi,
@@ -1884,11 +1884,11 @@ func (mock *HandlerMock) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage
 // len(mockedHandler.StoreToPVCCalls())
 func (mock *HandlerMock) StoreToPVCCalls() []struct {
 	Ctx context.Context
-	Vi  *virtv2.VirtualImage
+	Vi  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}
 	mock.lockStoreToPVC.RLock()
 	calls = mock.calls.StoreToPVC
@@ -1897,13 +1897,13 @@ func (mock *HandlerMock) StoreToPVCCalls() []struct {
 }
 
 // Validate calls ValidateFunc.
-func (mock *HandlerMock) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (mock *HandlerMock) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	if mock.ValidateFunc == nil {
 		panic("HandlerMock.ValidateFunc: method is nil but Handler.Validate was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}{
 		Ctx: ctx,
 		Vi:  vi,
@@ -1920,11 +1920,11 @@ func (mock *HandlerMock) Validate(ctx context.Context, vi *virtv2.VirtualImage)
 // len(mockedHandler.ValidateCalls())
 func (mock *HandlerMock) ValidateCalls() []struct {
 	Ctx context.Context
-	Vi  *virtv2.VirtualImage
+	Vi  *v1alpha2.VirtualImage
 } {
 	var calls []struct {
 		Ctx context.Context
-		Vi  *virtv2.VirtualImage
+		Vi  *v1alpha2.VirtualImage
 	}
 	mock.lockValidate.RLock()
 	calls = mock.calls.Validate
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
index 1f43767409..3017574fb1 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go
@@ -45,7 +45,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -90,8 +90,8 @@ func NewObjectRefDataSource(
 	}
 }
 
-func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskSnapshotKind {
+func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskSnapshotKind {
 		return ds.vdSnapshotPVCSyncer.Sync(ctx, vi)
 	}
 
@@ -102,9 +102,9 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
 
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -113,12 +113,12 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 			return reconcile.Result{}, fmt.Errorf("VI object ref %s is nil", viKey)
 		}
 
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viObjectRefOnPvc.StoreToPVC(ctx, vi, viRef, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		vdKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		vd, err := object.FetchObject(ctx, vdKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, vdKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", vdKey, err)
 		}
@@ -171,7 +171,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 
@@ -220,7 +220,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -228,14 +228,14 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -243,7 +243,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -254,11 +254,11 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The ObjectRef DataSource import has completed",
 		)
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -297,8 +297,8 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *virtv2.Virtual
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskSnapshotKind {
+func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskSnapshotKind {
 		return ds.vdSnapshotCRSyncer.Sync(ctx, vi)
 	}
 
@@ -309,9 +309,9 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }()
 
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageKind:
+	case v1alpha2.VirtualImageKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -320,12 +320,12 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			return reconcile.Result{}, fmt.Errorf("VI object ref source %s is nil", vi.Spec.DataSource.ObjectRef.Name)
 		}
 
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			return ds.viObjectRefOnPvc.StoreToDVCR(ctx, vi, viRef, cb)
 		}
-	case virtv2.VirtualDiskKind:
+	case v1alpha2.VirtualDiskKind:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		vd, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualDisk{})
+		vd, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualDisk{})
 		if err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to get VD %s: %w", viKey, err)
 		}
@@ -352,7 +352,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -361,7 +361,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -386,14 +386,14 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -405,11 +405,11 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -438,7 +438,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = dvcrDataSource.GetSize()
 		vi.Status.CDROM = dvcrDataSource.IsCDROM()
 		vi.Status.Format = dvcrDataSource.GetFormat()
@@ -457,7 +457,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
 
 		log.Info("Ready", "progress", vi.Status.Progress, "pod.phase", pod.Status.Phase)
@@ -466,7 +466,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtua
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -487,15 +487,15 @@ func (ds ObjectRefDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualIma
 	return importerRequeue || bounderRequeue || diskRequeue, nil
 }
 
-func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	if vi.Spec.DataSource.ObjectRef == nil {
 		return fmt.Errorf("nil object ref: %s", vi.Spec.DataSource.Type)
 	}
 
 	switch vi.Spec.DataSource.ObjectRef.Kind {
-	case virtv2.VirtualImageObjectRefKindVirtualImage:
+	case v1alpha2.VirtualImageObjectRefKindVirtualImage:
 		viKey := types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}
-		viRef, err := object.FetchObject(ctx, viKey, ds.client, &virtv2.VirtualImage{})
+		viRef, err := object.FetchObject(ctx, viKey, ds.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			return fmt.Errorf("unable to get VI %s: %w", viKey, err)
 		}
@@ -504,8 +504,8 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 			return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 		}
 
-		if viRef.Spec.Storage == virtv2.StorageKubernetes || viRef.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
-			if viRef.Status.Phase != virtv2.ImageReady {
+		if viRef.Spec.Storage == v1alpha2.StorageKubernetes || viRef.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
+			if viRef.Status.Phase != v1alpha2.ImageReady {
 				return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 			}
 			return nil
@@ -521,7 +521,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 		}
 
 		return NewImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.VirtualImageObjectRefKindClusterVirtualImage:
+	case v1alpha2.VirtualImageObjectRefKindClusterVirtualImage:
 		dvcrDataSource, err := controller.NewDVCRDataSourcesForVMI(ctx, vi.Spec.DataSource, vi, ds.client)
 		if err != nil {
 			return err
@@ -532,13 +532,13 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 		}
 
 		return NewClusterImageNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
-	case virtv2.VirtualImageObjectRefKindVirtualDisk:
+	case v1alpha2.VirtualImageObjectRefKindVirtualDisk:
 		return ds.vdSyncer.Validate(ctx, vi)
-	case virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot:
+	case v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot:
 		switch vi.Spec.Storage {
-		case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+		case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 			return ds.vdSnapshotPVCSyncer.Validate(ctx, vi)
-		case virtv2.StorageContainerRegistry:
+		case v1alpha2.StorageContainerRegistry:
 			return ds.vdSnapshotCRSyncer.Validate(ctx, vi)
 		}
 
@@ -548,7 +548,7 @@ func (ds ObjectRefDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIm
 	}
 }
 
-func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
+func (ds ObjectRefDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, sup supplements.Generator, dvcrDataSource controller.DVCRDataSource) (*importer.Settings, error) {
 	if !dvcrDataSource.IsReady() {
 		return nil, errors.New("dvcr data source is not ready")
 	}
@@ -565,7 +565,7 @@ func (ds ObjectRefDataSource) getEnvSettings(vi *virtv2.VirtualImage, sup supple
 	return &settings, nil
 }
 
-func (ds ObjectRefDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
index c71e37b0bd..585a3de86f 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go
@@ -46,7 +46,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -78,7 +78,7 @@ func NewObjectRefVirtualDisk(
 	}
 }
 
-func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
@@ -97,7 +97,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -106,7 +106,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -122,14 +122,14 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -141,11 +141,11 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -161,7 +161,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = ds.statService.GetSize(pod)
 		vi.Status.CDROM = ds.statService.GetCDROM(pod)
 		vi.Status.Format = ds.statService.GetFormat(pod)
@@ -172,7 +172,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 	default:
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -182,7 +182,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 					Message(service.CapitalizeFirstLetter(err.Error() + "."))
 				return reconcile.Result{}, nil
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -203,7 +203,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress)
 		vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -213,7 +213,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *virtv2.Virtu
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage, vdRef *virtv2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage, vdRef *v1alpha2.VirtualDisk, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, objectRefDataSource)
 
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
@@ -259,7 +259,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 
@@ -289,7 +289,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -297,14 +297,14 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -312,7 +312,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -323,11 +323,11 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The ObjectRef DataSource import has completed",
 		)
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -343,7 +343,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 			return reconcile.Result{}, errors.New("fail to convert quantity to int64")
 		}
 
-		vi.Status.Size = virtv2.ImageStatusSize{
+		vi.Status.Size = v1alpha2.ImageStatusSize{
 			Stored:        vdRef.Status.Capacity,
 			StoredBytes:   strconv.FormatInt(intQ, 10),
 			Unpacked:      vdRef.Status.Capacity,
@@ -375,7 +375,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *virtv2.Virtua
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -395,7 +395,7 @@ func (ds ObjectRefVirtualDisk) CleanUpSupplements(ctx context.Context, vi *virtv
 	}
 }
 
-func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -411,7 +411,7 @@ func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, vi *virtv2.VirtualIm
 	return importerRequeue || diskRequeue, nil
 }
 
-func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings {
+func (ds ObjectRefVirtualDisk) getEnvSettings(vi *v1alpha2.VirtualImage, sup supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyBlockDeviceSourceSettings(&settings)
 	importer.ApplyDVCRDestinationSettings(
@@ -424,20 +424,20 @@ func (ds ObjectRefVirtualDisk) getEnvSettings(vi *virtv2.VirtualImage, sup suppl
 	return &settings
 }
 
-func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
-	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDisk {
-		return fmt.Errorf("not a %s data source", virtv2.VirtualImageObjectRefKindVirtualDisk)
+func (ds ObjectRefVirtualDisk) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
+	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDisk {
+		return fmt.Errorf("not a %s data source", v1alpha2.VirtualImageObjectRefKindVirtualDisk)
 	}
 
-	vd, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, ds.client, &virtv2.VirtualDisk{})
+	vd, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, ds.client, &v1alpha2.VirtualDisk{})
 	if err != nil {
 		return err
 	}
 
-	if vd == nil || vd.Status.Phase != virtv2.DiskReady {
+	if vd == nil || vd.Status.Phase != v1alpha2.DiskReady {
 		return NewVirtualDiskNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 	}
 
-	if vi.Status.Phase != virtv2.ImageReady {
+	if vi.Status.Phase != v1alpha2.ImageReady {
 		inUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, vd.Status.Conditions)
 		if inUseCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(inUseCondition, vd) {
 			return NewVirtualDiskNotReadyForUseError(vd.Name)
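
Note: every file in this patch makes the same mechanical change — the aliased import virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" becomes a plain import, so all identifiers use the package's own name and behavior is unchanged. A minimal Go sketch of the pattern follows; it is illustrative only and not part of the patch, and the newImage helper is hypothetical:

	package example

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

		// Previously imported as: virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
		"github.com/deckhouse/virtualization/api/core/v1alpha2"
	)

	// newImage constructs a VirtualImage using the package's real name,
	// v1alpha2, instead of the former virtv2 alias; only identifiers change.
	func newImage(name, namespace string) *v1alpha2.VirtualImage {
		return &v1alpha2.VirtualImage{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
			Spec:       v1alpha2.VirtualImageSpec{},
		}
	}
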
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go
index 5519e6060e..adf931a806 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go
@@ -36,7 +36,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step"
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -67,8 +67,8 @@ func NewObjectRefVirtualDiskSnapshotCR(
 	}
 }
 
-func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot {
+func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot {
 		return reconcile.Result{}, errors.New("object ref missed for data source")
 	}
 
@@ -87,7 +87,7 @@ func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.Vi
 		return reconcile.Result{}, fmt.Errorf("fetch pod: %w", err)
 	}
 
-	return steptaker.NewStepTakers[*virtv2.VirtualImage](
+	return steptaker.NewStepTakers[*v1alpha2.VirtualImage](
 		step.NewReadyContainerRegistryStep(pod, ds.importer, ds.diskService, ds.stat, ds.recorder, cb),
 		step.NewTerminatingStep(pvc),
 		step.NewCreatePersistentVolumeClaimStep(pvc, ds.recorder, ds.client, cb),
@@ -96,21 +96,21 @@ func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *virtv2.Vi
 	).Run(ctx, vi)
 }
 
-func (ds ObjectRefVirtualDiskSnapshotCR) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (ds ObjectRefVirtualDiskSnapshotCR) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	return validateVirtualDiskSnapshot(ctx, vi, ds.client)
 }
 
-func validateVirtualDiskSnapshot(ctx context.Context, vi *virtv2.VirtualImage, client client.Client) error {
-	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot {
+func validateVirtualDiskSnapshot(ctx context.Context, vi *v1alpha2.VirtualImage, client client.Client) error {
+	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot {
 		return errors.New("object ref missed for data source")
 	}
 
-	vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, client, &virtv2.VirtualDiskSnapshot{})
+	vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, client, &v1alpha2.VirtualDiskSnapshot{})
 	if err != nil {
 		return fmt.Errorf("fetch virtual disk snapshot: %w", err)
 	}
 
-	if vdSnapshot == nil || vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady {
+	if vdSnapshot == nil || vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady {
 		return NewVirtualDiskSnapshotNotReadyError(vi.Spec.DataSource.ObjectRef.Name)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go
index fc489b612d..0e406f0088 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go
@@ -43,7 +43,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -56,10 +56,10 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 	var (
 		ctx        context.Context
 		scheme     *runtime.Scheme
-		vi         *virtv2.VirtualImage
+		vi         *v1alpha2.VirtualImage
 		vs         *vsv1.VolumeSnapshot
 		sc         *storagev1.StorageClass
-		vdSnapshot *virtv2.VirtualDiskSnapshot
+		vdSnapshot *v1alpha2.VirtualDiskSnapshot
 		pvc        *corev1.PersistentVolumeClaim
 		pod        *corev1.Pod
 		settings   *dvcr.Settings
@@ -73,7 +73,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 		ctx = logger.ToContext(context.TODO(), slog.Default())
 
 		scheme = runtime.NewScheme()
-		Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+		Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
 		Expect(corev1.AddToScheme(scheme)).To(Succeed())
 		Expect(vsv1.AddToScheme(scheme)).To(Succeed())
 		Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -94,8 +94,8 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			CheckPodFunc: func(_ *corev1.Pod) error {
 				return nil
 			},
-			GetSizeFunc: func(_ *corev1.Pod) virtv2.ImageStatusSize {
-				return virtv2.ImageStatusSize{}
+			GetSizeFunc: func(_ *corev1.Pod) v1alpha2.ImageStatusSize {
+				return v1alpha2.ImageStatusSize{}
 			},
 			GetCDROMFunc: func(_ *corev1.Pod) bool {
 				return false
@@ -131,29 +131,29 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			},
 		}
 
-		vdSnapshot = &virtv2.VirtualDiskSnapshot{
+		vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "vd-snapshot",
 				UID:  "11111111-1111-1111-1111-111111111111",
 			},
-			Spec: virtv2.VirtualDiskSnapshotSpec{},
-			Status: virtv2.VirtualDiskSnapshotStatus{
-				Phase:              virtv2.VirtualDiskSnapshotPhaseReady,
+			Spec: v1alpha2.VirtualDiskSnapshotSpec{},
+			Status: v1alpha2.VirtualDiskSnapshotStatus{
+				Phase:              v1alpha2.VirtualDiskSnapshotPhaseReady,
 				VolumeSnapshotName: vs.Name,
 			},
 		}
 
-		vi = &virtv2.VirtualImage{
+		vi = &v1alpha2.VirtualImage{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:       "vi",
 				Generation: 1,
 				UID:        "22222222-2222-2222-2222-222222222222",
 			},
-			Spec: virtv2.VirtualImageSpec{
-				DataSource: virtv2.VirtualImageDataSource{
-					Type: virtv2.DataSourceTypeObjectRef,
-					ObjectRef: &virtv2.VirtualImageObjectRef{
-						Kind: virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot,
+			Spec: v1alpha2.VirtualImageSpec{
+				DataSource: v1alpha2.VirtualImageDataSource{
+					Type: v1alpha2.DataSourceTypeObjectRef,
+					ObjectRef: &v1alpha2.VirtualImageObjectRef{
+						Kind: v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot,
 						Name: vdSnapshot.Name,
 					},
 				},
@@ -194,7 +194,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 				return nil
 			}
 
-			vi.Status = virtv2.VirtualImageStatus{}
+			vi.Status = v1alpha2.VirtualImageStatus{}
 			client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs).
 				WithInterceptorFuncs(interceptor.Funcs{
 					Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
@@ -219,7 +219,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true)
 			Expect(vi.Status.SourceUID).ToNot(BeNil())
 			Expect(*vi.Status.SourceUID).ToNot(BeEmpty())
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning))
 			Expect(vi.Status.Target.PersistentVolumeClaim).To(BeEmpty())
 		})
 	})
@@ -237,7 +237,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning))
 		})
 
 		It("waits for the Pod to be Running", func() {
@@ -251,7 +251,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning))
 		})
 
 		It("waits for the Pod to be Succeeded", func() {
@@ -265,7 +265,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			Expect(res.RequeueAfter).ToNot(BeZero())
 
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning))
 		})
 	})
@@ -281,7 +281,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady))
 		})
 
 		It("does not have Pod", func() {
@@ -302,12 +302,12 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() {
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady))
 		})
 	})
 })
 
-func ExpectCondition(vi *virtv2.VirtualImage, status metav1.ConditionStatus, reason vicondition.ReadyReason, msgExists bool) {
+func ExpectCondition(vi *v1alpha2.VirtualImage, status metav1.ConditionStatus, reason vicondition.ReadyReason, msgExists bool) {
 	ready, _ := conditions.GetCondition(vicondition.Ready, vi.Status.Conditions)
 	Expect(ready.Status).To(Equal(status))
 	Expect(ready.Reason).To(Equal(reason.String()))
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go
index db866dccfb..189cffa1c8 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step"
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -64,8 +64,8 @@ func NewObjectRefVirtualDiskSnapshotPVC(
 	}
 }
 
-func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
-	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot {
+func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
+	if vi.Spec.DataSource.ObjectRef == nil || vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot {
 		return reconcile.Result{}, errors.New("object ref missed for data source")
 	}
 
@@ -79,7 +79,7 @@ func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.V
 		return reconcile.Result{}, fmt.Errorf("fetch pvc: %w", err)
 	}
 
-	return steptaker.NewStepTakers[*virtv2.VirtualImage](
+	return steptaker.NewStepTakers[*v1alpha2.VirtualImage](
 		step.NewReadyPersistentVolumeClaimStep(pvc, ds.bounder, ds.recorder, cb),
 		step.NewTerminatingStep(pvc),
 		step.NewCreatePersistentVolumeClaimStep(pvc, ds.recorder, ds.client, cb),
@@ -88,6 +88,6 @@ func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *virtv2.V
 	).Run(ctx, vi)
 }
 
-func (ds ObjectRefVirtualDiskSnapshotPVC) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (ds ObjectRefVirtualDiskSnapshotPVC) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	return validateVirtualDiskSnapshot(ctx, vi, ds.client)
 }
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go
index f7f22bf297..a28aa9aabd 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go
@@ -39,7 +39,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -47,10 +47,10 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 	var (
 		ctx        context.Context
 		scheme     *runtime.Scheme
-		vi         *virtv2.VirtualImage
+		vi         *v1alpha2.VirtualImage
 		vs         *vsv1.VolumeSnapshot
 		sc         *storagev1.StorageClass
-		vdSnapshot *virtv2.VirtualDiskSnapshot
+		vdSnapshot *v1alpha2.VirtualDiskSnapshot
 		pvc        *corev1.PersistentVolumeClaim
 		settings   *dvcr.Settings
 		recorder   eventrecord.EventRecorderLogger
@@ -63,7 +63,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 		ctx = logger.ToContext(context.TODO(), slog.Default())
 
 		scheme = runtime.NewScheme()
-		Expect(virtv2.AddToScheme(scheme)).To(Succeed())
+		Expect(v1alpha2.AddToScheme(scheme)).To(Succeed())
 		Expect(corev1.AddToScheme(scheme)).To(Succeed())
 		Expect(vsv1.AddToScheme(scheme)).To(Succeed())
 		Expect(storagev1.AddToScheme(scheme)).To(Succeed())
@@ -89,8 +89,8 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			CheckPodFunc: func(_ *corev1.Pod) error {
 				return nil
 			},
-			GetSizeFunc: func(_ *corev1.Pod) virtv2.ImageStatusSize {
-				return virtv2.ImageStatusSize{}
+			GetSizeFunc: func(_ *corev1.Pod) v1alpha2.ImageStatusSize {
+				return v1alpha2.ImageStatusSize{}
 			},
 			GetCDROMFunc: func(_ *corev1.Pod) bool {
 				return false
@@ -119,30 +119,30 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			},
 		}
 
-		vdSnapshot = &virtv2.VirtualDiskSnapshot{
+		vdSnapshot = &v1alpha2.VirtualDiskSnapshot{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "vd-snapshot",
 				UID:  "11111111-1111-1111-1111-111111111111",
 			},
-			Spec: virtv2.VirtualDiskSnapshotSpec{},
-			Status: virtv2.VirtualDiskSnapshotStatus{
-				Phase:              virtv2.VirtualDiskSnapshotPhaseReady,
+			Spec: v1alpha2.VirtualDiskSnapshotSpec{},
+			Status: v1alpha2.VirtualDiskSnapshotStatus{
+				Phase:              v1alpha2.VirtualDiskSnapshotPhaseReady,
 				VolumeSnapshotName: vs.Name,
 			},
 		}
 
-		vi = &virtv2.VirtualImage{
+		vi = &v1alpha2.VirtualImage{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:       "vi",
 				Generation: 1,
 				UID:        "22222222-2222-2222-2222-222222222222",
 			},
-			Spec: virtv2.VirtualImageSpec{
-				Storage: virtv2.StoragePersistentVolumeClaim,
-				DataSource: virtv2.VirtualImageDataSource{
-					Type: virtv2.DataSourceTypeObjectRef,
-					ObjectRef: &virtv2.VirtualImageObjectRef{
-						Kind: virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot,
+			Spec: v1alpha2.VirtualImageSpec{
+				Storage: v1alpha2.StoragePersistentVolumeClaim,
+				DataSource: v1alpha2.VirtualImageDataSource{
+					Type: v1alpha2.DataSourceTypeObjectRef,
+					ObjectRef: &v1alpha2.VirtualImageObjectRef{
+						Kind: v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot,
 						Name: vdSnapshot.Name,
 					},
 				},
@@ -168,7 +168,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 		It("must create PVC", func() {
 			var pvcCreated bool
 
-			vi.Status = virtv2.VirtualImageStatus{}
+			vi.Status = v1alpha2.VirtualImageStatus{}
 
 			client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs).
 				WithInterceptorFuncs(interceptor.Funcs{
 					Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
@@ -192,7 +192,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Provisioning, true)
 			Expect(vi.Status.SourceUID).ToNot(BeNil())
 			Expect(*vi.Status.SourceUID).ToNot(BeEmpty())
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageProvisioning))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageProvisioning))
 			Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
 		})
 	})
@@ -209,7 +209,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionTrue, vicondition.Ready, false)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageReady))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageReady))
 		})
 	})
@@ -232,7 +232,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Lost, true)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageLost))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageLost))
 			Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
 		})
 
@@ -248,7 +248,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func()
 			Expect(res.IsZero()).To(BeTrue())
 
 			ExpectCondition(vi, metav1.ConditionFalse, vicondition.Lost, true)
-			Expect(vi.Status.Phase).To(Equal(virtv2.ImageLost))
+			Expect(vi.Status.Phase).To(Equal(v1alpha2.ImageLost))
 			Expect(vi.Status.Target.PersistentVolumeClaim).NotTo(BeEmpty())
 		})
 	})
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go
index 1b38e4ddd8..478109ad31 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go
@@ -43,7 +43,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -74,7 +74,7 @@ func NewObjectRefDataVirtualImageOnPVC(
 	}
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "objectref")
 
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
@@ -93,7 +93,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -102,7 +102,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -118,14 +118,14 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -137,11 +137,11 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -157,7 +157,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = viRef.Status.Size
 		vi.Status.CDROM = viRef.Status.CDROM
 		vi.Status.Format = viRef.Status.Format
@@ -181,7 +181,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress)
 		vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -191,7 +191,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viRef *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
+func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viRef *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (reconcile.Result, error) {
 	log, _ := logger.GetDataSourceContext(ctx, objectRefDataSource)
 
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
@@ -236,7 +236,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncStarted,
+			v1alpha2.ReasonDataSourceSyncStarted,
 			"The ObjectRef DataSource import has started",
 		)
 
@@ -272,7 +272,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -280,14 +280,14 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -295,7 +295,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -306,11 +306,11 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The ObjectRef DataSource import has completed",
 		)
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -342,7 +342,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -358,7 +358,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) CleanUp(ctx context.Context, vi *virtv2
 	return importerRequeue || diskRequeue, nil
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings {
+func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *v1alpha2.VirtualImage, sup supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 	importer.ApplyBlockDeviceSourceSettings(&settings)
 	importer.ApplyDVCRDestinationSettings(
@@ -371,7 +371,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) getEnvSettings(vi *virtv2.VirtualImage,
 	return &settings
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
@@ -391,7 +391,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) CleanUpSupplements(ctx context.Context,
 	}
 }
 
-func (ds ObjectRefDataVirtualImageOnPVC) getPVCSize(refSize virtv2.ImageStatusSize) (resource.Quantity, error) {
+func (ds ObjectRefDataVirtualImageOnPVC) getPVCSize(refSize v1alpha2.ImageStatusSize) (resource.Quantity, error) {
 	unpackedSize, err := resource.ParseQuantity(refSize.UnpackedBytes)
 	if err != nil {
 		return resource.Quantity{}, fmt.Errorf("failed to parse unpacked bytes %s: %w", refSize.UnpackedBytes, err)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go
index f990637bc2..87a63b9c2e 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go
@@ -43,7 +43,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/dvcr"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -76,7 +76,7 @@ func NewRegistryDataSource(
 	}
 }
 
-func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, registryDataSource)
 
 	condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions)
@@ -141,14 +141,14 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 	case err == nil:
 		// OK.
 	case common.ErrQuotaExceeded(err):
-		ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+		ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 		return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 	default:
 		setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 		return reconcile.Result{}, err
 	}
 
-	vi.Status.Phase = virtv2.ImageProvisioning
+	vi.Status.Phase = v1alpha2.ImageProvisioning
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vicondition.Provisioning).
@@ -163,7 +163,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 		return reconcile.Result{}, setPhaseConditionFromPodError(cb, vi, err)
 	}
 
-	vi.Status.Phase = virtv2.ImageProvisioning
+	vi.Status.Phase = v1alpha2.ImageProvisioning
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vicondition.Provisioning).
@@ -180,11 +180,11 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 
 	err = ds.statService.CheckPod(pod)
 	if err != nil {
-		vi.Status.Phase = virtv2.ImageFailed
+		vi.Status.Phase = v1alpha2.ImageFailed
 
 		switch {
 		case errors.Is(err, service.ErrProvisioningFailed):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vicondition.ProvisioningFailed).
@@ -221,7 +221,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 		return reconcile.Result{}, err
 	}
 
-	vi.Status.Phase = virtv2.ImageProvisioning
+	vi.Status.Phase = v1alpha2.ImageProvisioning
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vicondition.Provisioning).
@@ -229,14 +229,14 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 		return reconcile.Result{RequeueAfter: time.Second}, nil
 	case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.QuotaExceeded).
 			Message(dvQuotaNotExceededCondition.Message)
 		return reconcile.Result{}, nil
 	case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason:
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ImagePullFailed).
@@ -244,7 +244,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 		ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message)
 		return reconcile.Result{}, nil
 	case pvc == nil:
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -253,7 +253,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 	case ds.diskService.IsImportDone(dv, pvc):
 		log.Info("Import has completed", "dvProgress", dv.Status.Progress, "dvPhase", dv.Status.Phase, "pvcPhase", pvc.Status.Phase)
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -286,7 +286,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualI
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	log, ctx := logger.GetDataSourceContext(ctx, "registry")
 
 	condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions)
@@ -308,7 +308,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 
 		err = ds.importerService.Unprotect(ctx, pod)
 		if err != nil {
@@ -317,7 +317,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 
 		return CleanUpSupplements(ctx, vi, ds)
 	case object.IsTerminating(pod):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 
 		log.Info("Cleaning up...")
 	case pod == nil:
@@ -329,14 +329,14 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 		case err == nil:
 			// OK.
 		case common.ErrQuotaExceeded(err):
-			ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
+			ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed")
 			return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil
 		default:
 			setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err))
 			return reconcile.Result{}, err
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
@@ -348,11 +348,11 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 	case podutil.IsPodComplete(pod):
 		err = ds.statService.CheckPod(pod)
 		if err != nil {
-			vi.Status.Phase = virtv2.ImageFailed
+			vi.Status.Phase = v1alpha2.ImageFailed
 
 			switch {
 			case errors.Is(err, service.ErrProvisioningFailed):
-				ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
+				ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed")
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vicondition.ProvisioningFailed).
@@ -366,7 +366,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 		ds.recorder.Event(
 			vi,
 			corev1.EventTypeNormal,
-			virtv2.ReasonDataSourceSyncCompleted,
+			v1alpha2.ReasonDataSourceSyncCompleted,
 			"The Registry DataSource import has completed",
 		)
 
@@ -375,7 +375,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 			Reason(vicondition.Ready).
 			Message("")
 
-		vi.Status.Phase = virtv2.ImageReady
+		vi.Status.Phase = v1alpha2.ImageReady
 		vi.Status.Size = ds.statService.GetSize(pod)
 		vi.Status.CDROM = ds.statService.GetCDROM(pod)
 		vi.Status.Format = ds.statService.GetFormat(pod)
@@ -394,7 +394,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to DVCR.")
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		vi.Status.Progress = "0%"
 		vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod)
@@ -404,7 +404,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.Virtual
 	return reconcile.Result{RequeueAfter: time.Second}, nil
 }
 
-func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUp(ctx, supgen)
@@ -420,7 +420,7 @@ func (ds RegistryDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImag
 	return importerRequeue || diskRequeue, nil
 }
 
-func (ds RegistryDataSource) Validate(ctx context.Context, vi *virtv2.VirtualImage) error {
+func (ds RegistryDataSource) Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error {
 	if vi.Spec.DataSource.ContainerImage.ImagePullSecret.Name != "" {
 		secretName := types.NamespacedName{
 			Namespace: vi.GetNamespace(),
@@ -439,7 +439,7 @@ func (ds RegistryDataSource) Validate(ctx context.Context, vi *virtv2.VirtualIma
 	return nil
 }
 
-func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *importer.Settings {
+func (ds RegistryDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen supplements.Generator) *importer.Settings {
 	var settings importer.Settings
 
 	containerImage := &datasource.ContainerRegistry{
@@ -461,7 +461,7 @@ func (ds RegistryDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supp
 	return &settings
 }
 
-func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) {
+func (ds RegistryDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) {
 	supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID)
 
 	importerRequeue, err := ds.importerService.CleanUpSupplements(ctx, supgen)
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go
index b3a9a3de0a..8075929648 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go
@@ -31,41 +31,41 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
 type Handler interface {
-	StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
-	StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
-	CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error)
-	Validate(ctx context.Context, vi *virtv2.VirtualImage) error
+	StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
+	StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
+	CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error)
+	Validate(ctx context.Context, vi *v1alpha2.VirtualImage) error
 }
 
 type Sources struct {
-	sources map[virtv2.DataSourceType]Handler
+	sources map[v1alpha2.DataSourceType]Handler
 }
 
 func NewSources() *Sources {
 	return &Sources{
-		sources: make(map[virtv2.DataSourceType]Handler),
+		sources: make(map[v1alpha2.DataSourceType]Handler),
 	}
 }
 
-func (s Sources) Set(dsType virtv2.DataSourceType, h Handler) {
+func (s Sources) Set(dsType v1alpha2.DataSourceType, h Handler) {
 	s.sources[dsType] = h
 }
 
-func (s Sources) For(dsType virtv2.DataSourceType) (Handler, bool) {
+func (s Sources) For(dsType v1alpha2.DataSourceType) (Handler, bool) {
 	source, ok := s.sources[dsType]
 	return source, ok
 }
 
-func (s Sources) Changed(_ context.Context, vi *virtv2.VirtualImage) bool {
+func (s Sources) Changed(_ context.Context, vi *v1alpha2.VirtualImage) bool {
 	return vi.Generation != vi.Status.ObservedGeneration
 }
 
-func (s Sources) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) {
+func (s Sources) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) {
 	var requeue bool
 
 	for _, source := range s.sources {
@@ -81,11 +81,11 @@ func (s Sources) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, er
 }
 
 type Cleaner interface {
-	CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error)
-	CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
+	CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error)
+	CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
 }
 
-func CleanUp(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (bool, error) {
+func CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage, c Cleaner) (bool, error) {
 	if object.ShouldCleanupSubResources(vi) {
 		return c.CleanUp(ctx, vi)
 	}
@@ -93,7 +93,7 @@ func CleanUp(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (bool, err
 	return false, nil
 }
 
-func CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage, c Cleaner) (reconcile.Result, error) {
+func CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage, c Cleaner) (reconcile.Result, error) {
 	if object.ShouldCleanupSubResources(vi) {
 		return c.CleanUpSupplements(ctx, vi)
 	}
@@ -112,18 +112,18 @@ type CheckImportProcess interface {
 func setPhaseConditionForFinishedImage(
 	pvc *corev1.PersistentVolumeClaim,
 	cb *conditions.ConditionBuilder,
-	phase *virtv2.ImagePhase,
+	phase *v1alpha2.ImagePhase,
 	supgen supplements.Generator,
 ) {
 	switch {
 	case pvc == nil:
-		*phase = virtv2.ImageLost
+		*phase = v1alpha2.ImageLost
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Lost).
 			Message(fmt.Sprintf("PVC %s not found.", supgen.PersistentVolumeClaim().String()))
 	default:
-		*phase = virtv2.ImageReady
+		*phase = v1alpha2.ImageReady
 		cb.
 			Status(metav1.ConditionTrue).
 			Reason(vicondition.Ready).
@@ -131,8 +131,8 @@ func setPhaseConditionForFinishedImage(
 	}
 }
 
-func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) {
-	*phase = virtv2.ImageFailed
+func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) {
+	*phase = v1alpha2.ImageFailed
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vicondition.ProvisioningFailed).
@@ -142,7 +142,7 @@ func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.Im
 func setPhaseConditionForPVCProvisioningImage(
 	ctx context.Context,
 	dv *cdiv1.DataVolume,
-	vi *virtv2.VirtualImage,
+	vi *v1alpha2.VirtualImage,
 	pvc *corev1.PersistentVolumeClaim,
 	cb *conditions.ConditionBuilder,
 	checker CheckImportProcess,
@@ -151,7 +151,7 @@ func setPhaseConditionForPVCProvisioningImage(
 	switch {
 	case err == nil:
 		if dv == nil {
-			vi.Status.Phase = virtv2.ImageProvisioning
+			vi.Status.Phase = v1alpha2.ImageProvisioning
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vicondition.Provisioning).
@@ -159,21 +159,21 @@ func setPhaseConditionForPVCProvisioningImage(
 			return nil
 		}
 
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.Provisioning).
 			Message("Import is in the process of provisioning to PVC.")
 		return nil
 	case errors.Is(err, service.ErrDataVolumeNotRunning):
-		vi.Status.Phase = virtv2.ImageProvisioning
+		vi.Status.Phase = v1alpha2.ImageProvisioning
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ProvisioningFailed).
 			Message(service.CapitalizeFirstLetter(err.Error()))
 		return nil
 	case errors.Is(err, service.ErrDefaultStorageClassNotFound):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ProvisioningFailed).
@@ -184,8 +184,8 @@ func setPhaseConditionForPVCProvisioningImage(
 	}
 }
 
-func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *virtv2.VirtualImage, err error) error {
-	vi.Status.Phase = virtv2.ImageFailed
+func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *v1alpha2.VirtualImage, err error) error {
+	vi.Status.Phase = v1alpha2.ImageFailed
 
 	switch {
 	case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled):
@@ -205,19 +205,19 @@ func setPhaseConditionFromPodError(cb *conditions.ConditionBuilder, vi *virtv2.V
 	}
 }
 
-func setPhaseConditionFromStorageError(err error, vi *virtv2.VirtualImage, cb *conditions.ConditionBuilder) (bool, error) {
+func setPhaseConditionFromStorageError(err error, vi *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) (bool, error) {
 	switch {
 	case err == nil:
 		return false, nil
 	case errors.Is(err, service.ErrStorageProfileNotFound):
-		vi.Status.Phase = virtv2.ImageFailed
+		vi.Status.Phase = v1alpha2.ImageFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ProvisioningFailed).
 			Message("StorageProfile not found in the cluster: Please check a StorageClass name in the cluster or set a default StorageClass.")
 		return true, nil
 	case errors.Is(err, service.ErrDefaultStorageClassNotFound):
-		vi.Status.Phase = virtv2.ImagePending
+		vi.Status.Phase = v1alpha2.ImagePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vicondition.ProvisioningFailed).
@@ -230,8 +230,8 @@ func setPhaseConditionFromStorageError(err error, vi *virtv2.VirtualImage, cb *c
 
 const retryPeriod = 1
 
-func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result {
-	*phase = virtv2.ImageFailed
+func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) reconcile.Result {
+	*phase = v1alpha2.ImageFailed
 	cb.
 		Status(metav1.ConditionFalse).
Reason(vicondition.ProvisioningFailed) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go index 1d39927859..cd836499ff 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_bounder_pod_step.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -66,7 +66,7 @@ func NewCreateBounderPodStep( } } -func (s CreateBounderPodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreateBounderPodStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } @@ -89,7 +89,7 @@ func (s CreateBounderPodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) case err == nil: // OK. case common.ErrQuotaExceeded(err): - s.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + s.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(s.cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(s.cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go index 66a7a88272..9b28b8558e 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pod_step.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,7 +45,7 @@ type CreatePodStepImporter interface { } type CreatePodStepStat interface { - GetSize(pod *corev1.Pod) virtv2.ImageStatusSize + GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize GetDVCRImageName(pod *corev1.Pod) string GetFormat(pod *corev1.Pod) string GetCDROM(pod *corev1.Pod) bool @@ -78,7 +78,7 @@ func NewCreatePodStep( } } -func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreatePodStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pod != nil { return nil, nil } @@ -95,7 +95,7 @@ func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reco case err == nil: // OK. 
case common.ErrQuotaExceeded(err): - s.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + s.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(s.cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(s.cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) @@ -111,7 +111,7 @@ func (s CreatePodStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reco return nil, nil } -func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.Generator) *importer.Settings { +func (s CreatePodStep) getEnvSettings(vi *v1alpha2.VirtualImage, sup supplements.Generator) *importer.Settings { var settings importer.Settings importer.ApplyBlockDeviceSourceSettings(&settings) importer.ApplyDVCRDestinationSettings( @@ -126,8 +126,8 @@ func (s CreatePodStep) getEnvSettings(vi *virtv2.VirtualImage, sup supplements.G const retryPeriod = 1 -func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error, creationTimestamp metav1.Time) *reconcile.Result { - *phase = virtv2.ImageFailed +func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error, creationTimestamp metav1.Time) *reconcile.Result { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed) @@ -141,8 +141,8 @@ func setQuotaExceededPhaseCondition(cb *conditions.ConditionBuilder, phase *virt return &reconcile.Result{RequeueAfter: retryPeriod * time.Minute} } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.ImagePhase, err error) { - *phase = virtv2.ImageFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.ImagePhase, err error) { + *phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). 
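The next file, create_pvc_step.go, carries the main behavioral change of this patch: before restoring a VirtualDiskSnapshot into a user-specified storage class, the step compares the CSI provisioner of the original PVC with the provisioner of the target StorageClass and rejects cross-provisioner restores. A minimal sketch of that comparison, assuming only the client-go storage types (the provisioner names below are made up for illustration):

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

// compatible reports whether a snapshot taken on a PVC provisioned by
// originalProvisioner can be restored into targetSC. Cross-provisioner
// restores are rejected, mirroring the validation added below.
func compatible(originalProvisioner string, targetSC storagev1.StorageClass) error {
	if targetSC.Provisioner != originalProvisioner {
		return fmt.Errorf(
			"cannot restore snapshot to storage class %q: original provisioner %q, target provisioner %q",
			targetSC.Name, originalProvisioner, targetSC.Provisioner,
		)
	}
	return nil
}

func main() {
	sc := storagev1.StorageClass{Provisioner: "replicated.csi.example.io"}
	sc.Name = "replicated-r2"
	fmt.Println(compatible("ceph.csi.example.io", sc)) // error: provisioners differ
}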
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go index 92288f24b9..02d4008806 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/create_pvc_step.go @@ -23,6 +23,7 @@ import ( vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -38,7 +39,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -63,7 +64,7 @@ func NewCreatePersistentVolumeClaimStep( } } -func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc != nil { return nil, nil } @@ -71,17 +72,17 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The ObjectRef DataSource import has started", ) - vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, s.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, types.NamespacedName{Name: vi.Spec.DataSource.ObjectRef.Name, Namespace: vi.Namespace}, s.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, fmt.Errorf("fetch virtual disk snapshot: %w", err) } if vdSnapshot == nil { - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). @@ -94,8 +95,8 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi return nil, fmt.Errorf("fetch volume snapshot: %w", err) } - if vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { - vi.Status.Phase = virtv2.ImagePending + if vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady || vs == nil || vs.Status == nil || vs.Status.ReadyToUse == nil || !*vs.Status.ReadyToUse { + vi.Status.Phase = v1alpha2.ImagePending s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). @@ -103,6 +104,21 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi return &reconcile.Result{}, nil } + if err := s.validateStorageClassCompatibility(ctx, vi, vdSnapshot, vs); err != nil { + vi.Status.Phase = v1alpha2.ImageFailed + s.cb. + Status(metav1.ConditionFalse). + Reason(vicondition.ProvisioningFailed). 
+ Message(err.Error()) + s.recorder.Event( + vi, + corev1.EventTypeWarning, + v1alpha2.ReasonDataSourceSyncFailed, + err.Error(), + ) + return &reconcile.Result{}, nil + } + pvc := s.buildPVC(vi, vs) err = s.client.Create(ctx, pvc) @@ -113,7 +129,7 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi log, _ := logger.GetDataSourceContext(ctx, "objectref") log.With("pvc.name", pvc.Name).Debug("The underlying PVC has just been created.") - if vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim || vi.Spec.Storage == virtv2.StorageKubernetes { + if vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim || vi.Spec.Storage == v1alpha2.StorageKubernetes { vi.Status.Target.PersistentVolumeClaim = pvc.Name } @@ -123,10 +139,15 @@ func (s CreatePersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vi return nil, nil } -func (s CreatePersistentVolumeClaimStep) buildPVC(vi *virtv2.VirtualImage, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { - storageClassName := vs.Annotations[annotations.AnnStorageClassName] - if storageClassName == "" { - storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated] +func (s CreatePersistentVolumeClaimStep) buildPVC(vi *v1alpha2.VirtualImage, vs *vsv1.VolumeSnapshot) *corev1.PersistentVolumeClaim { + var storageClassName string + if vi.Spec.PersistentVolumeClaim.StorageClass != nil && *vi.Spec.PersistentVolumeClaim.StorageClass != "" { + storageClassName = *vi.Spec.PersistentVolumeClaim.StorageClass + } else { + storageClassName = vs.Annotations[annotations.AnnStorageClassName] + if storageClassName == "" { + storageClassName = vs.Annotations[annotations.AnnStorageClassNameDeprecated] + } } volumeMode := vs.Annotations[annotations.AnnVolumeMode] if volumeMode == "" { @@ -179,9 +200,58 @@ func (s CreatePersistentVolumeClaimStep) buildPVC(vi *virtv2.VirtualImage, vs *v service.MakeOwnerReference(vi), }, Finalizers: []string{ - virtv2.FinalizerVIProtection, + v1alpha2.FinalizerVIProtection, }, }, Spec: spec, } } + +func (s CreatePersistentVolumeClaimStep) validateStorageClassCompatibility(ctx context.Context, vi *v1alpha2.VirtualImage, vdSnapshot *v1alpha2.VirtualDiskSnapshot, vs *vsv1.VolumeSnapshot) error { + if vi.Spec.PersistentVolumeClaim.StorageClass == nil || *vi.Spec.PersistentVolumeClaim.StorageClass == "" { + return nil + } + + targetSCName := *vi.Spec.PersistentVolumeClaim.StorageClass + + var targetSC storagev1.StorageClass + err := s.client.Get(ctx, types.NamespacedName{Name: targetSCName}, &targetSC) + if err != nil { + return fmt.Errorf("cannot fetch target storage class %q: %w", targetSCName, err) + } + + log, _ := logger.GetDataSourceContext(ctx, "objectref") + if vs.Spec.Source.PersistentVolumeClaimName == nil || *vs.Spec.Source.PersistentVolumeClaimName == "" { + log.With("volumeSnapshot.name", vs.Name).Debug("Cannot determine original PVC from VolumeSnapshot, skipping storage class compatibility validation") + return nil + } + + pvcName := *vs.Spec.Source.PersistentVolumeClaimName + + var originalPVC corev1.PersistentVolumeClaim + err = s.client.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: vdSnapshot.Namespace}, &originalPVC) + if err != nil { + return fmt.Errorf("cannot fetch original PVC %q: %w", pvcName, err) + } + + originalProvisioner := originalPVC.Annotations[annotations.AnnStorageProvisioner] + if originalProvisioner == "" { + originalProvisioner = originalPVC.Annotations[annotations.AnnStorageProvisionerDeprecated] + } + + if originalProvisioner == "" { + 
log.With("pvc.name", pvcName).Debug("Cannot determine original provisioner from PVC annotations, skipping storage class compatibility validation") + return nil + } + + if targetSC.Provisioner != originalProvisioner { + return fmt.Errorf( + "cannot restore snapshot to storage class %q: incompatible storage providers. "+ + "Original snapshot was created by %q, target storage class uses %q. "+ + "Cross-provider snapshot restore is not supported", + targetSCName, originalProvisioner, targetSC.Provisioner, + ) + } + + return nil +} diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go index 204a4e7a49..e1a1ed8bd4 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_cr_step.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,7 +45,7 @@ type ReadyContainerRegistryStepImporter interface { } type ReadyContainerRegistryStepStat interface { - GetSize(pod *corev1.Pod) virtv2.ImageStatusSize + GetSize(pod *corev1.Pod) v1alpha2.ImageStatusSize GetDVCRImageName(pod *corev1.Pod) string GetFormat(pod *corev1.Pod) string CheckPod(pod *corev1.Pod) error @@ -79,14 +79,14 @@ func NewReadyContainerRegistryStep( } } -func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { log, _ := logger.GetDataSourceContext(ctx, "objectref") ready, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) if ready.Status == metav1.ConditionTrue { log.Debug("Image is Ready") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady s.cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). @@ -101,7 +101,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual err := s.stat.CheckPod(s.pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): @@ -127,7 +127,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) @@ -136,7 +136,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual Reason(vicondition.Ready). 
Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = s.stat.GetSize(s.pod) vi.Status.CDROM = s.stat.GetCDROM(s.pod) vi.Status.Format = s.stat.GetFormat(s.pod) @@ -146,7 +146,7 @@ func (s ReadyContainerRegistryStep) Take(ctx context.Context, vi *virtv2.Virtual return &reconcile.Result{}, nil } -func (s ReadyContainerRegistryStep) cleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) error { +func (s ReadyContainerRegistryStep) cleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) error { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) _, err := s.importer.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go index de3dc78248..b66ade793b 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/ready_pvc_step.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -61,7 +61,7 @@ func NewReadyPersistentVolumeClaimStep( } } -func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { log, _ := logger.GetDataSourceContext(ctx, "objectref") if s.pvc == nil { @@ -69,7 +69,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir if ready.Status == metav1.ConditionTrue { log.Debug("PVC is lost", ".status.target.pvc", vi.Status.Target.PersistentVolumeClaim) - vi.Status.Phase = virtv2.ImageLost + vi.Status.Phase = v1alpha2.ImageLost s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Lost). @@ -86,7 +86,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir case corev1.ClaimLost: log.Warn("Image is Lost: underlying PVC is Lost") - vi.Status.Phase = virtv2.ImageLost + vi.Status.Phase = v1alpha2.ImageLost s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Lost). @@ -101,11 +101,11 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir return nil, fmt.Errorf("clean up supplements: %w", err) } - if vi.Status.Phase != virtv2.ImageReady { + if vi.Status.Phase != v1alpha2.ImageReady { s.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The ObjectRef DataSource import has completed", ) } @@ -115,7 +115,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir Reason(vdcondition.Ready). 
Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Progress = "100%" res := s.pvc.Status.Capacity[corev1.ResourceStorage] @@ -125,7 +125,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir return nil, errors.New("failed to convert quantity to int64") } - vi.Status.Size = virtv2.ImageStatusSize{ + vi.Status.Size = v1alpha2.ImageStatusSize{ Stored: res.String(), StoredBytes: strconv.FormatInt(intQ, 10), Unpacked: res.String(), @@ -138,7 +138,7 @@ func (s ReadyPersistentVolumeClaimStep) Take(ctx context.Context, vi *virtv2.Vir } } -func (s ReadyPersistentVolumeClaimStep) cleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) error { +func (s ReadyPersistentVolumeClaimStep) cleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) error { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) _, err := s.bounder.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go index 6d4efbb907..91caec0dbb 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/terminating_step.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type TerminatingStep struct { @@ -37,7 +37,7 @@ func NewTerminatingStep(pvc *corev1.PersistentVolumeClaim) *TerminatingStep { } } -func (s TerminatingStep) Take(ctx context.Context, _ *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s TerminatingStep) Take(ctx context.Context, _ *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { return nil, nil } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go index 432d2a2ff6..bc8ee88471 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pod_step.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -60,9 +60,9 @@ func NewWaitForPodStep( } } -func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s WaitForPodStep) Take(_ context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pod == nil { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). 
@@ -76,7 +76,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon switch { case errors.Is(err, service.ErrNotInitialized), errors.Is(err, service.ErrNotScheduled): if strings.Contains(err.Error(), "pod has unbound immediate PersistentVolumeClaims") { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -85,21 +85,21 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon return &reconcile.Result{Requeue: true}, nil } - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). Message(service.CapitalizeFirstLetter(err.Error() + ".")) return &reconcile.Result{}, nil case errors.Is(err, service.ErrProvisioningFailed): - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). Message(service.CapitalizeFirstLetter(err.Error() + ".")) return &reconcile.Result{}, nil default: - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed s.cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -114,7 +114,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon Reason(vicondition.Provisioning). Message("Preparing to start import to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Target.RegistryURL = s.stat.GetDVCRImageName(s.pod) return &reconcile.Result{}, nil @@ -125,7 +125,7 @@ func (s WaitForPodStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = s.stat.GetProgress(vi.GetUID(), s.pod, vi.Status.Progress) vi.Status.Target.RegistryURL = s.stat.GetDVCRImageName(s.pod) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go index 83702504a4..5dc2e51e66 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/step/wait_for_pvc_step.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -45,9 +45,9 @@ func NewWaitForPVCStep( } } -func (s WaitForPVCStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*reconcile.Result, error) { +func (s WaitForPVCStep) Take(_ context.Context, vi *v1alpha2.VirtualImage) (*reconcile.Result, error) { if s.pvc == nil { - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). 
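The error handling in WaitForPodStep relies on sentinel errors wrapped with %w, so errors.Is still matches them through any number of wrapping layers. A compact sketch of the pattern (ErrNotScheduled here is a stand-in for service.ErrNotScheduled):

package main

import (
	"errors"
	"fmt"
)

// Sentinel error, mirroring service.ErrNotScheduled and friends.
var ErrNotScheduled = errors.New("pod is not scheduled")

func checkPod() error {
	// %w keeps the sentinel detectable through errors.Is.
	return fmt.Errorf("check pod: %w", ErrNotScheduled)
}

func main() {
	err := checkPod()
	switch {
	case errors.Is(err, ErrNotScheduled):
		fmt.Println("transient: keep phase Provisioning and requeue")
	default:
		fmt.Println("unexpected: mark phase Failed")
	}
}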
@@ -60,7 +60,7 @@ func (s WaitForPVCStep) Take(_ context.Context, vi *virtv2.VirtualImage) (*recon return nil, nil } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning s.cb. Status(metav1.ConditionFalse). Reason(vdcondition.Provisioning). diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go index ced8c74ab8..e22425a219 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go @@ -41,7 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -71,7 +71,7 @@ func NewUploadDataSource( } } -func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, uploadDataSource) condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -137,7 +137,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to DVCR has started", ) @@ -149,14 +149,14 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -175,20 +175,20 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma if ds.statService.IsUploaderReady(pod, svc, ing) { log.Info("Waiting for the user upload", "pod.phase", pod.Status.Phase) - vi.Status.Phase = virtv2.ImageWaitForUserUpload + vi.Status.Phase = v1alpha2.ImageWaitForUserUpload cb. Status(metav1.ConditionFalse). Reason(vicondition.WaitForUserUpload). Message("Waiting for the user upload.") - vi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + vi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } } else { log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningNotStarted). 
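Throughout these data sources, status conditions are assembled with the fluent cb.Status(...).Reason(...).Message(...) chain. A simplified sketch of such a builder over metav1.Condition (not the real conditions.ConditionBuilder, which also tracks the condition type and observed generation):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionBuilder accumulates fields of a metav1.Condition fluently.
type conditionBuilder struct{ c metav1.Condition }

func (b *conditionBuilder) Status(s metav1.ConditionStatus) *conditionBuilder {
	b.c.Status = s
	return b
}

func (b *conditionBuilder) Reason(r string) *conditionBuilder {
	b.c.Reason = r
	return b
}

func (b *conditionBuilder) Message(m string) *conditionBuilder {
	b.c.Message = m
	return b
}

func main() {
	var b conditionBuilder
	b.Status(metav1.ConditionFalse).
		Reason("WaitForUserUpload").
		Message("Waiting for the user upload.")
	fmt.Printf("%+v\n", b.c)
}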
@@ -198,7 +198,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -215,17 +215,17 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncStarted, + v1alpha2.ReasonDataSourceSyncStarted, "The Upload DataSource import to PVC has started", ) err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceDiskProvisioningFailed, "Disk provisioning failed") cb. Status(metav1.ConditionFalse). Reason(vicondition.ProvisioningFailed). @@ -263,7 +263,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -271,14 +271,14 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil case dvQuotaNotExceededCondition != nil && dvQuotaNotExceededCondition.Status == corev1.ConditionFalse: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.QuotaExceeded). Message(dvQuotaNotExceededCondition.Message) return reconcile.Result{}, nil case dvRunningCondition != nil && dvRunningCondition.Status != corev1.ConditionTrue && dvRunningCondition.Reason == DVImagePullFailedReason: - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending cb. Status(metav1.ConditionFalse). Reason(vicondition.ImagePullFailed). @@ -286,7 +286,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event(vi, corev1.EventTypeWarning, vicondition.ImagePullFailed.String(), dvRunningCondition.Message) return reconcile.Result{}, nil case pvc == nil: - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -297,11 +297,11 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma ds.recorder.Event( vi, corev1.EventTypeNormal, - virtv2.ReasonDataSourceSyncCompleted, + v1alpha2.ReasonDataSourceSyncCompleted, "The Upload DataSource import has completed", ) - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady cb. Status(metav1.ConditionTrue). Reason(vicondition.Ready). 
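The recorder.Event calls above follow the standard client-go event-recorder contract: object, event type, reason, message. A runnable sketch using client-go's FakeRecorder, which buffers formatted events on a channel (the module's eventrecord.EventRecorderLogger wraps the same interface):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// FakeRecorder buffers events on a channel; handy for tests.
	rec := record.NewFakeRecorder(1)
	pod := &corev1.Pod{}

	rec.Event(pod, corev1.EventTypeWarning, "DataSourceQuotaExceeded", "DataSource quota exceeded")

	fmt.Println(<-rec.Events) // "Warning DataSourceQuotaExceeded DataSource quota exceeded"
}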
@@ -336,7 +336,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *virtv2.VirtualIma return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { log, ctx := logger.GetDataSourceContext(ctx, "upload") condition, _ := conditions.GetCondition(vicondition.ReadyType, vi.Status.Conditions) @@ -366,7 +366,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) if err != nil { @@ -375,7 +375,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm return CleanUpSupplements(ctx, vi, ds) case object.AnyTerminating(pod, svc, ing): - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Cleaning up...") case pod == nil || svc == nil || ing == nil: @@ -385,14 +385,14 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm case err == nil: // OK. case common.ErrQuotaExceeded(err): - ds.recorder.Event(vi, corev1.EventTypeWarning, virtv2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") + ds.recorder.Event(vi, corev1.EventTypeWarning, v1alpha2.ReasonDataSourceQuotaExceeded, "DataSource quota exceed") return setQuotaExceededPhaseCondition(cb, &vi.Status.Phase, err, vi.CreationTimestamp), nil default: setPhaseConditionToFailed(cb, &vi.Status.Phase, fmt.Errorf("unexpected error: %w", err)) return reconcile.Result{}, err } - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning cb. Status(metav1.ConditionFalse). Reason(vicondition.Provisioning). @@ -404,7 +404,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm case podutil.IsPodComplete(pod): err = ds.statService.CheckPod(pod) if err != nil { - vi.Status.Phase = virtv2.ImageFailed + vi.Status.Phase = v1alpha2.ImageFailed switch { case errors.Is(err, service.ErrProvisioningFailed): @@ -423,7 +423,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Ready). Message("") - vi.Status.Phase = virtv2.ImageReady + vi.Status.Phase = v1alpha2.ImageReady vi.Status.Size = ds.statService.GetSize(pod) vi.Status.CDROM = ds.statService.GetCDROM(pod) vi.Status.Format = ds.statService.GetFormat(pod) @@ -443,7 +443,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.Provisioning). Message("Import is in the process of provisioning to DVCR.") - vi.Status.Phase = virtv2.ImageProvisioning + vi.Status.Phase = v1alpha2.ImageProvisioning vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress) vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) @@ -460,9 +460,9 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.WaitForUserUpload). 
Message("Waiting for the user upload.") - vi.Status.Phase = virtv2.ImageWaitForUserUpload + vi.Status.Phase = v1alpha2.ImageWaitForUserUpload vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) - vi.Status.ImageUploadURLs = &virtv2.ImageUploadURLs{ + vi.Status.ImageUploadURLs = &v1alpha2.ImageUploadURLs{ External: ds.uploaderService.GetExternalURL(ctx, ing), InCluster: ds.uploaderService.GetInClusterURL(ctx, svc), } @@ -474,7 +474,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm Reason(vicondition.ProvisioningNotStarted). Message(fmt.Sprintf("Waiting for the uploader %q to be ready to process the user's upload.", pod.Name)) - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending log.Info("Waiting for the uploader to be ready to process the user's upload", "pod.phase", pod.Status.Phase) } @@ -482,7 +482,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *virtv2.VirtualIm return reconcile.Result{RequeueAfter: time.Second}, nil } -func (ds UploadDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) (bool, error) { +func (ds UploadDataSource) CleanUp(ctx context.Context, vi *v1alpha2.VirtualImage) (bool, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) importerRequeue, err := ds.uploaderService.CleanUp(ctx, supgen) @@ -498,11 +498,11 @@ func (ds UploadDataSource) CleanUp(ctx context.Context, vi *virtv2.VirtualImage) return importerRequeue || diskRequeue, nil } -func (ds UploadDataSource) Validate(_ context.Context, _ *virtv2.VirtualImage) error { +func (ds UploadDataSource) Validate(_ context.Context, _ *v1alpha2.VirtualImage) error { return nil } -func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supplements.Generator) *uploader.Settings { +func (ds UploadDataSource) getEnvSettings(vi *v1alpha2.VirtualImage, supgen supplements.Generator) *uploader.Settings { var settings uploader.Settings uploader.ApplyDVCRDestinationSettings( @@ -515,7 +515,7 @@ func (ds UploadDataSource) getEnvSettings(vi *virtv2.VirtualImage, supgen supple return &settings } -func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error) { +func (ds UploadDataSource) CleanUpSupplements(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { supgen := supplements.NewGenerator(annotations.VIShortName, vi.Name, vi.Namespace, vi.UID) uploaderRequeue, err := ds.uploaderService.CleanUpSupplements(ctx, supgen) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go index 471d4da4e6..995105d539 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) @@ -51,7 +51,7 @@ func NewStorageClassReadyHandler(recorder eventrecord.EventRecorderLogger, svc S } } -func (h StorageClassReadyHandler) Handle(ctx context.Context, vi 
*virtv2.VirtualImage) (reconcile.Result, error) { +func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vicondition.StorageClassReadyType).Generation(vi.Generation) if vi.DeletionTimestamp != nil { @@ -59,7 +59,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual return reconcile.Result{}, nil } - if vi.Spec.Storage == virtv2.StorageContainerRegistry { + if vi.Spec.Storage == v1alpha2.StorageContainerRegistry { conditions.RemoveCondition(cb.GetType(), &vi.Status.Conditions) return reconcile.Result{}, nil } @@ -137,7 +137,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual h.recorder.Event( vi, corev1.EventTypeWarning, - virtv2.ReasonVIStorageClassNotFound, + v1alpha2.ReasonVIStorageClassNotFound, msg, ) cb. @@ -149,7 +149,7 @@ func (h StorageClassReadyHandler) Handle(ctx context.Context, vi *virtv2.Virtual return reconcile.Result{}, nil } -func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *virtv2.VirtualImage, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *v1alpha2.VirtualImage, cb *conditions.ConditionBuilder) error { vi.Status.StorageClassName = *vi.Spec.PersistentVolumeClaim.StorageClass sc, err := h.svc.GetStorageClass(ctx, *vi.Spec.PersistentVolumeClaim.StorageClass) @@ -218,7 +218,7 @@ func (h StorageClassReadyHandler) setFromSpec(ctx context.Context, vi *virtv2.Vi return nil } -func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *virtv2.VirtualImage, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { +func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *v1alpha2.VirtualImage, pvc *corev1.PersistentVolumeClaim, cb *conditions.ConditionBuilder) error { if pvc.Spec.StorageClassName == nil || *pvc.Spec.StorageClassName == "" { return fmt.Errorf("pvc does not have storage class") } @@ -256,7 +256,7 @@ func (h StorageClassReadyHandler) setFromExistingPVC(ctx context.Context, vi *vi return nil } -func (h StorageClassReadyHandler) setFromModuleSettings(vi *virtv2.VirtualImage, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromModuleSettings(vi *v1alpha2.VirtualImage, moduleStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vi.Status.StorageClassName = moduleStorageClass.Name if h.svc.IsStorageClassDeprecated(moduleStorageClass) { @@ -285,7 +285,7 @@ func (h StorageClassReadyHandler) setFromModuleSettings(vi *virtv2.VirtualImage, } } -func (h StorageClassReadyHandler) setFromDefault(vi *virtv2.VirtualImage, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { +func (h StorageClassReadyHandler) setFromDefault(vi *v1alpha2.VirtualImage, defaultStorageClass *storagev1.StorageClass, cb *conditions.ConditionBuilder) { vi.Status.StorageClassName = defaultStorageClass.Name if h.svc.IsStorageClassDeprecated(defaultStorageClass) { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go index 1563527278..c12ad5fcfc 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/storageclass_ready_test.go @@ -33,16 +33,16 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) var _ = Describe("StorageClassHandler Run", func() { Describe("Check for the storage ContainerRegistry", func() { - var vi *virtv2.VirtualImage + var vi *v1alpha2.VirtualImage BeforeEach(func() { - vi = newVI(nil, virtv2.StorageContainerRegistry) + vi = newVI(nil, v1alpha2.StorageContainerRegistry) }) It("doest not have StorageClass", func() { @@ -76,7 +76,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be false because no storage class can be return", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(nil, false), - VI: newVI(nil, virtv2.StoragePersistentVolumeClaim), + VI: newVI(nil, v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionFalse, Reason: vicondition.StorageClassNotFound.String(), @@ -87,7 +87,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be true because storage class from spec found", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), false), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionTrue, Reason: vicondition.StorageClassReady.String(), @@ -98,7 +98,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be true because default storage class found", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), false), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionTrue, Reason: vicondition.StorageClassReady.String(), @@ -109,7 +109,7 @@ var _ = Describe("StorageClassHandler Run", func() { "StorageClassReady must be false because storage class is not supported", handlerTestArgs{ StorageClassServiceMock: newStorageClassServiceMock(ptr.To("sc"), true), - VI: newVI(ptr.To("sc"), virtv2.StoragePersistentVolumeClaim), + VI: newVI(ptr.To("sc"), v1alpha2.StoragePersistentVolumeClaim), ExpectedCondition: metav1.Condition{ Status: metav1.ConditionFalse, Reason: vicondition.StorageClassNotReady.String(), @@ -121,7 +121,7 @@ var _ = Describe("StorageClassHandler Run", func() { type handlerTestArgs struct { StorageClassServiceMock *StorageClassServiceMock - VI *virtv2.VirtualImage + VI *v1alpha2.VirtualImage ExpectedCondition metav1.Condition } @@ -186,15 +186,15 @@ func newStorageClassServiceMock(existedStorageClass *string, unsupportedStorageC return &storageClassServiceMock } -func newVI(specSC *string, storageType virtv2.StorageType) *virtv2.VirtualImage { - return &virtv2.VirtualImage{ - Spec: virtv2.VirtualImageSpec{ - PersistentVolumeClaim: virtv2.VirtualImagePersistentVolumeClaim{ +func newVI(specSC *string, storageType v1alpha2.StorageType) *v1alpha2.VirtualImage { + return &v1alpha2.VirtualImage{ + Spec: v1alpha2.VirtualImageSpec{ + PersistentVolumeClaim: v1alpha2.VirtualImagePersistentVolumeClaim{ StorageClass: specSC, }, Storage: storageType, }, - Status: 
virtv2.VirtualImageStatus{ + Status: v1alpha2.VirtualImageStatus{ StorageClassName: "", }, } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go index a1b561b9d6..07a3dfb78e 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/datavolume_watcher.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type DataVolumeWatcher struct{} @@ -44,7 +44,7 @@ func (w *DataVolumeWatcher) Watch(mgr manager.Manager, ctr controller.Controller handler.TypedEnqueueRequestForOwner[*cdiv1.DataVolume]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, handler.OnlyControllerOwner(), ), predicate.TypedFuncs[*cdiv1.DataVolume]{ diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go index fdde123608..f1b8f2d8e0 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pod_watcher.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PodWatcher struct { @@ -49,7 +49,7 @@ func (w PodWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error handler.TypedEnqueueRequestForOwner[*corev1.Pod]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, ), predicate.TypedFuncs[*corev1.Pod]{ DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Pod]) bool { return false }, diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go index c2ed4a7acb..b8038155c1 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/pvc_watcher.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type PersistentVolumeClaimWatcher struct{} @@ -43,7 +43,7 @@ func (w *PersistentVolumeClaimWatcher) Watch(mgr manager.Manager, ctr controller handler.TypedEnqueueRequestForOwner[*corev1.PersistentVolumeClaim]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualImage{}, + &v1alpha2.VirtualImage{}, ), predicate.TypedFuncs[*corev1.PersistentVolumeClaim]{ UpdateFunc: func(e event.TypedUpdateEvent[*corev1.PersistentVolumeClaim]) bool { if e.ObjectOld.Status.Capacity[corev1.ResourceStorage] != e.ObjectNew.Status.Capacity[corev1.ResourceStorage] { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go 
b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go index fb3316cf18..e92b6afa13 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/storageclass_watcher.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type StorageClassWatcher struct { @@ -47,7 +47,7 @@ type StorageClassWatcher struct { func NewStorageClassWatcher(client client.Client) *StorageClassWatcher { return &StorageClassWatcher{ client: client, - logger: slog.Default().With("watcher", strings.ToLower(virtv2.VirtualImageKind)), + logger: slog.Default().With("watcher", strings.ToLower(v1alpha2.VirtualImageKind)), } } @@ -81,7 +81,7 @@ func (w StorageClassWatcher) Watch(mgr manager.Manager, ctr controller.Controlle } func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1.StorageClass) []reconcile.Request { - var vis virtv2.VirtualImageList + var vis v1alpha2.VirtualImageList err := w.client.List(ctx, &vis, &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVIByStorageClass, sc.Name), }) @@ -90,12 +90,12 @@ func (w StorageClassWatcher) enqueueRequests(ctx context.Context, sc *storagev1. return []reconcile.Request{} } - viMap := make(map[string]virtv2.VirtualImage, len(vis.Items)) + viMap := make(map[string]v1alpha2.VirtualImage, len(vis.Items)) for _, vi := range vis.Items { viMap[vi.Name] = vi } - vis.Items = []virtv2.VirtualImage{} + vis.Items = []v1alpha2.VirtualImage{} isDefault, ok := sc.Annotations[annotations.AnnDefaultStorageClass] if ok && isDefault == "true" { diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go index 3a4d974b83..174b925de1 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vdsnapshot_watcher.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -44,7 +44,7 @@ type VirtualDiskSnapshotWatcher struct { func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWatcher { return &VirtualDiskSnapshotWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualDiskSnapshotKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualDiskSnapshotKind)), client: client, } } @@ -53,10 +53,10 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualDiskSnapshot{}, + &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + UpdateFunc: func(e 
event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -67,8 +67,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { - var vis virtv2.VirtualImageList +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { + var vis v1alpha2.VirtualImageList err := w.client.List(ctx, &vis, &client.ListOptions{ Namespace: vdSnapshot.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVIByVDSnapshot, vdSnapshot.Name), @@ -95,12 +95,12 @@ func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnaps return } -func isSnapshotDataSource(ds virtv2.VirtualImageDataSource, vdSnapshotName string) bool { - if ds.Type != virtv2.DataSourceTypeObjectRef { +func isSnapshotDataSource(ds v1alpha2.VirtualImageDataSource, vdSnapshotName string) bool { + if ds.Type != v1alpha2.DataSourceTypeObjectRef { return false } - if ds.ObjectRef == nil || ds.ObjectRef.Kind != virtv2.VirtualImageObjectRefKindVirtualDiskSnapshot { + if ds.ObjectRef == nil || ds.ObjectRef.Kind != v1alpha2.VirtualImageObjectRefKindVirtualDiskSnapshot { return false } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go index 7d3fad3f8f..6ff61abd78 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/virdualdisk_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -48,10 +48,10 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequestsFromVDs), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { oldInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectOld.Status.Conditions) newInUseCondition, _ := conditions.GetCondition(vdcondition.InUseType, e.ObjectNew.Status.Conditions) @@ -69,8 +69,8 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle return nil } -func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var viList virtv2.VirtualImageList +func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var viList v1alpha2.VirtualImageList err := w.client.List(ctx, &viList, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -80,11 
+80,11 @@ func (w *VirtualDiskWatcher) enqueueRequestsFromVDs(ctx context.Context, vd *vir } for _, vi := range viList.Items { - if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef || vi.Spec.DataSource.ObjectRef == nil { + if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef || vi.Spec.DataSource.ObjectRef == nil { continue } - if vi.Spec.DataSource.ObjectRef.Kind != virtv2.VirtualDiskKind || vi.Spec.DataSource.ObjectRef.Name != vd.GetName() { + if vi.Spec.DataSource.ObjectRef.Kind != v1alpha2.VirtualDiskKind || vi.Spec.DataSource.ObjectRef.Name != vd.GetName() { continue } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go index 66a8055667..4126bc9197 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -50,16 +50,16 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.Object) }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return w.hasVirtualImageRef(e.ObjectOld) || w.hasVirtualImageRef(e.ObjectNew) }, }, @@ -70,16 +70,16 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) { +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) { for _, ref := range vm.Status.BlockDeviceRefs { - if ref.Kind != virtv2.ImageDevice { + if ref.Kind != v1alpha2.ImageDevice { continue } vi, err := object.FetchObject(ctx, types.NamespacedName{ Namespace: vm.Namespace, Name: ref.Name, - }, w.client, &virtv2.VirtualImage{}) + }, w.client, &v1alpha2.VirtualImage{}) if err != nil { w.logger.Error("Failed to fetch vi to reconcile", logger.SlogErr(err)) continue @@ -100,15 +100,15 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V return } -func (w VirtualMachineWatcher) hasVirtualImageRef(vm *virtv2.VirtualMachine) bool { +func (w VirtualMachineWatcher) hasVirtualImageRef(vm *v1alpha2.VirtualMachine) bool { for _, ref := range 
diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go
index 66a8055667..4126bc9197 100644
--- a/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vi/internal/watcher/vm_watcher.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineWatcher struct {
@@ -50,16 +50,16 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.hasVirtualImageRef(e.Object)
 				},
-				DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool {
+				DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.hasVirtualImageRef(e.Object)
 				},
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					return w.hasVirtualImageRef(e.ObjectOld) || w.hasVirtualImageRef(e.ObjectNew)
 				},
 			},
@@ -70,16 +70,16 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
 
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
 	for _, ref := range vm.Status.BlockDeviceRefs {
-		if ref.Kind != virtv2.ImageDevice {
+		if ref.Kind != v1alpha2.ImageDevice {
 			continue
 		}
 
 		vi, err := object.FetchObject(ctx, types.NamespacedName{
 			Namespace: vm.Namespace,
 			Name:      ref.Name,
-		}, w.client, &virtv2.VirtualImage{})
+		}, w.client, &v1alpha2.VirtualImage{})
 		if err != nil {
 			w.logger.Error("Failed to fetch vi to reconcile", logger.SlogErr(err))
 			continue
@@ -100,15 +100,15 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
 
-func (w VirtualMachineWatcher) hasVirtualImageRef(vm *virtv2.VirtualMachine) bool {
+func (w VirtualMachineWatcher) hasVirtualImageRef(vm *v1alpha2.VirtualMachine) bool {
 	for _, ref := range vm.Spec.BlockDeviceRefs {
-		if ref.Kind == virtv2.ImageDevice {
+		if ref.Kind == v1alpha2.ImageDevice {
 			return true
 		}
 	}
 
 	for _, ref := range vm.Status.BlockDeviceRefs {
-		if ref.Kind == virtv2.ImageDevice {
+		if ref.Kind == v1alpha2.ImageDevice {
 			return true
 		}
 	}
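For readers unfamiliar with the typed watch API these watchers use, the sketch below mirrors the call shape from this PR: a typed source, a map function that enqueues a request per object, and typed predicates that drop uninteresting Update events. The function and filtering logic here are illustrative, not part of this change:

package watcher

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// watchVirtualMachines wires a typed watch: every matching event enqueues the
// VM itself, and the predicate lets Update events through only when the spec
// generation or the phase actually changed, so the reconciler is not woken
// up needlessly.
func watchVirtualMachines(mgr manager.Manager, ctr controller.Controller) error {
	return ctr.Watch(
		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
			handler.TypedEnqueueRequestsFromMapFunc(
				func(_ context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request {
					return []reconcile.Request{{NamespacedName: client.ObjectKeyFromObject(vm)}}
				}),
			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() ||
						e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase
				},
			},
		),
	)
}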
diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_controller.go b/images/virtualization-artifact/pkg/controller/vi/vi_controller.go
index ed69d604b9..8a37796a9f 100644
--- a/images/virtualization-artifact/pkg/controller/vi/vi_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vi/vi_controller.go
@@ -37,7 +37,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vicollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vi"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const (
@@ -48,7 +48,7 @@ const (
 )
 
 type Condition interface {
-	Handle(ctx context.Context, vi *virtv2.VirtualImage) error
+	Handle(ctx context.Context, vi *v1alpha2.VirtualImage) error
 }
 
 func NewController(
@@ -63,7 +63,7 @@ func NewController(
 	storageClassSettings config.VirtualImageStorageClassSettings,
 ) (controller.Controller, error) {
 	stat := service.NewStatService(log)
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVIProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVIProtection)
 	importer := service.NewImporterService(dvcr, mgr.GetClient(), importerImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	uploader := service.NewUploaderService(dvcr, mgr.GetClient(), uploaderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
 	bounder := service.NewBounderPodService(dvcr, mgr.GetClient(), bounderImage, requirements, PodPullPolicy, PodVerbose, ControllerName, protection)
@@ -72,10 +72,10 @@ func NewController(
 	recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName)
 
 	sources := source.NewSources()
-	sources.Set(virtv2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, disk))
-	sources.Set(virtv2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), disk))
-	sources.Set(virtv2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, bounder, dvcr, mgr.GetClient(), disk))
-	sources.Set(virtv2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, disk))
+	sources.Set(v1alpha2.DataSourceTypeHTTP, source.NewHTTPDataSource(recorder, stat, importer, dvcr, disk))
+	sources.Set(v1alpha2.DataSourceTypeContainerImage, source.NewRegistryDataSource(recorder, stat, importer, dvcr, mgr.GetClient(), disk))
+	sources.Set(v1alpha2.DataSourceTypeObjectRef, source.NewObjectRefDataSource(recorder, stat, importer, bounder, dvcr, mgr.GetClient(), disk))
+	sources.Set(v1alpha2.DataSourceTypeUpload, source.NewUploadDataSource(recorder, stat, uploader, dvcr, disk))
 
 	reconciler := NewReconciler(
 		mgr.GetClient(),
@@ -102,7 +102,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualImage{}).
+		For(&v1alpha2.VirtualImage{}).
 		WithValidator(NewValidator(log, mgr.GetClient(), scService)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go b/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go
index 0220c5f636..ed4ae610e4 100644
--- a/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vi/vi_reconciler.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/watcher"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/watchers"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Watcher interface {
@@ -41,7 +41,7 @@ type Watcher interface {
 }
 
 type Handler interface {
-	Handle(ctx context.Context, vi *virtv2.VirtualImage) (reconcile.Result, error)
+	Handle(ctx context.Context, vi *v1alpha2.VirtualImage) (reconcile.Result, error)
 	Name() string
 }
 
@@ -84,10 +84,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
 
 func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualImage{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualImage]{},
-			predicate.TypedFuncs[*virtv2.VirtualImage]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualImage{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualImage]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualImage]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
@@ -96,13 +96,13 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 		return fmt.Errorf("error setting watch on VirtualImage: %w", err)
 	}
 
-	viFromVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.VirtualImage{}, virtv2.VirtualImageObjectRefKindVirtualImage)
+	viFromVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.VirtualImage{}, v1alpha2.VirtualImageObjectRefKindVirtualImage)
 	viWatcher := watchers.NewObjectRefWatcher(watchers.NewVirtualImageFilter(), viFromVIEnqueuer)
 	if err := viWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on VIs: %w", err)
 	}
 
-	viFromCVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &virtv2.ClusterVirtualImage{}, virtv2.VirtualImageObjectRefKindClusterVirtualImage)
+	viFromCVIEnqueuer := watchers.NewVirtualImageRequestEnqueuer(mgr.GetClient(), &v1alpha2.ClusterVirtualImage{}, v1alpha2.VirtualImageObjectRefKindClusterVirtualImage)
 	cviWatcher := watchers.NewObjectRefWatcher(watchers.NewClusterVirtualImageFilter(), viFromCVIEnqueuer)
 	if err := cviWatcher.Run(mgr, ctr); err != nil {
 		return fmt.Errorf("error setting watch on CVIs: %w", err)
@@ -127,10 +127,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualImage {
-	return &virtv2.VirtualImage{}
+func (r *Reconciler) factory() *v1alpha2.VirtualImage {
+	return &v1alpha2.VirtualImage{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualImage) virtv2.VirtualImageStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualImage) v1alpha2.VirtualImageStatus {
 	return obj.Status
 }
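The sources value populated in NewController is essentially a registry from data-source type to its handler. A stripped-down sketch of that dispatch pattern, assuming the repository's DataSourceType string type; the Sources and Handler shapes here are illustrative, not the actual implementation:

package vi

import (
	"context"
	"fmt"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// Handler processes one data-source type for a VirtualImage.
type Handler interface {
	Sync(ctx context.Context, vi *v1alpha2.VirtualImage) error
}

// Sources dispatches to the handler registered for vi.Spec.DataSource.Type.
type Sources struct {
	byType map[v1alpha2.DataSourceType]Handler
}

func NewSources() *Sources {
	return &Sources{byType: make(map[v1alpha2.DataSourceType]Handler)}
}

// Set registers a handler for one data-source type.
func (s *Sources) Set(t v1alpha2.DataSourceType, h Handler) { s.byType[t] = h }

// For looks up the handler for a type, failing loudly on unknown types.
func (s *Sources) For(t v1alpha2.DataSourceType) (Handler, error) {
	h, ok := s.byType[t]
	if !ok {
		return nil, fmt.Errorf("no source handler registered for %q", t)
	}
	return h, nil
}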
diff --git a/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go b/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
index feae28f1b5..63702c1de8 100644
--- a/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vi/vi_webhook.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/validate"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
 
@@ -52,7 +52,7 @@ func NewValidator(logger *log.Logger, client client.Client, scService *intsvc.Vi
 }
 
 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vi, ok := obj.(*virtv2.VirtualImage)
+	vi, ok := obj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", obj)
 	}
@@ -65,15 +65,15 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 		return nil, fmt.Errorf("the VirtualImage name %q is too long: it must be no more than %d characters", vi.Name, validate.MaxVirtualImageNameLen)
 	}
 
-	if vi.Spec.Storage == virtv2.StorageKubernetes {
+	if vi.Spec.Storage == v1alpha2.StorageKubernetes {
 		warnings := admission.Warnings{
 			fmt.Sprintf("Using the `%s` storage type is deprecated. It is recommended to use `%s` instead.",
-				virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim),
+				v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim),
 		}
 		return warnings, nil
 	}
 
-	if vi.Spec.Storage == virtv2.StorageKubernetes || vi.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+	if vi.Spec.Storage == v1alpha2.StorageKubernetes || vi.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 		if vi.Spec.PersistentVolumeClaim.StorageClass != nil && *vi.Spec.PersistentVolumeClaim.StorageClass != "" {
 			sc, err := v.scService.GetStorageClass(ctx, *vi.Spec.PersistentVolumeClaim.StorageClass)
 			if err != nil {
@@ -111,12 +111,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVI, ok := oldObj.(*virtv2.VirtualImage)
+	oldVI, ok := oldObj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualImage but got a %T", newObj)
 	}
 
-	newVI, ok := newObj.(*virtv2.VirtualImage)
+	newVI, ok := newObj.(*v1alpha2.VirtualImage)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualImage but got a %T", newObj)
 	}
@@ -131,7 +131,7 @@ func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.O
 	ready, _ := conditions.GetCondition(vicondition.ReadyType, newVI.Status.Conditions)
 	switch {
-	case ready.Status == metav1.ConditionTrue, newVI.Status.Phase == virtv2.ImageReady, newVI.Status.Phase == virtv2.ImageLost:
+	case ready.Status == metav1.ConditionTrue, newVI.Status.Phase == v1alpha2.ImageReady, newVI.Status.Phase == v1alpha2.ImageLost:
 		if !reflect.DeepEqual(oldVI.Spec.DataSource, newVI.Spec.DataSource) {
 			return nil, errors.New("data source cannot be changed if the VirtualImage has already been provisioned")
 		}
@@ -139,12 +139,12 @@ func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.O
 		if !reflect.DeepEqual(oldVI.Spec.PersistentVolumeClaim.StorageClass, newVI.Spec.PersistentVolumeClaim.StorageClass) {
 			return nil, errors.New("storage class cannot be changed if the VirtualImage has already been provisioned")
 		}
-	case newVI.Status.Phase == virtv2.ImageTerminating:
+	case newVI.Status.Phase == v1alpha2.ImageTerminating:
 		if !reflect.DeepEqual(oldVI.Spec, newVI.Spec) {
 			return nil, errors.New("spec cannot be changed if the VirtualImage is the process of termination")
 		}
-	case newVI.Status.Phase == virtv2.ImagePending:
-		if newVI.Spec.Storage == virtv2.StorageKubernetes || newVI.Spec.Storage == virtv2.StoragePersistentVolumeClaim {
+	case newVI.Status.Phase == v1alpha2.ImagePending:
+		if newVI.Spec.Storage == v1alpha2.StorageKubernetes || newVI.Spec.Storage == v1alpha2.StoragePersistentVolumeClaim {
 			if newVI.Spec.PersistentVolumeClaim.StorageClass != nil && *newVI.Spec.PersistentVolumeClaim.StorageClass != "" {
 				sc, err := v.scService.GetStorageClass(ctx, *newVI.Spec.PersistentVolumeClaim.StorageClass)
 				if err != nil {
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/agent.go b/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
index a45e0cbdb4..d7aa3c5595 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/agent.go
@@ -25,7 +25,7 @@ import (
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -64,7 +64,7 @@ func (h *AgentHandler) Name() string {
 	return nameAgentHandler
 }
 
-func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+func (h *AgentHandler) syncAgentReady(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
 	if vm == nil {
 		return
 	}
@@ -73,7 +73,7 @@ func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.V
 
 	defer func() {
 		phase := vm.Status.Phase
-		if phase == virtv2.MachinePending || phase == virtv2.MachineStarting || phase == virtv2.MachineStopped {
+		if phase == v1alpha2.MachinePending || phase == v1alpha2.MachineStarting || phase == v1alpha2.MachineStopped {
 			conditions.RemoveCondition(vmcondition.TypeAgentReady, &vm.Status.Conditions)
 		} else {
 			conditions.SetCondition(cb, &vm.Status.Conditions)
@@ -107,7 +107,7 @@ func (h *AgentHandler) syncAgentReady(vm *virtv2.VirtualMachine, kvvmi *virtv1.V
 		Message("Failed to connect to VM Agent.")
 }
 
-func (h *AgentHandler) syncAgentVersionNotSupport(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+func (h *AgentHandler) syncAgentVersionNotSupport(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
 	if vm == nil {
 		return
 	}
@@ -116,7 +116,7 @@ func (h *AgentHandler) syncAgentVersionNotSupport(vm *virtv2.VirtualMachine, kvv
 
 	defer func() {
 		switch vm.Status.Phase {
-		case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped:
+		case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
 			conditions.RemoveCondition(vmcondition.TypeAgentVersionNotSupported, &vm.Status.Conditions)
 		default:
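Both sync methods in agent.go follow the same gate: while the VM is in a phase where the guest agent cannot be running, the condition is removed outright; otherwise the prepared condition is applied. A minimal sketch of that pattern, using the upstream apimachinery condition helpers instead of this repository's conditions package:

package internal

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// syncPhaseGatedCondition mirrors the defer blocks above: while the VM is
// Pending, Starting, or Stopped the condition is dropped; in any other phase
// the computed condition is set.
func syncPhaseGatedCondition(vm *v1alpha2.VirtualMachine, cond metav1.Condition) {
	switch vm.Status.Phase {
	case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
		meta.RemoveStatusCondition(&vm.Status.Conditions, cond.Type)
	default:
		meta.SetStatusCondition(&vm.Status.Conditions, cond)
	}
}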
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
index 0846898d62..b55de9214f 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/agent_test.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -44,7 +44,7 @@ var _ = Describe("AgentHandler Tests", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)
 
@@ -54,7 +54,7 @@ var _ = Describe("AgentHandler Tests", func() {
 		vmState = nil
 	})
 
-	newVM := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine {
+	newVM := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine {
 		vm := vmbuilder.NewEmpty(name, namespace)
 		vm.Status.Phase = phase
 		return vm
@@ -94,14 +94,14 @@ var _ = Describe("AgentHandler Tests", func() {
 	}
 
 	DescribeTable("AgentReady Condition Tests",
-		func(phase virtv2.MachinePhase, agentConnected bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+		func(phase v1alpha2.MachinePhase, agentConnected bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
 			vm := newVM(phase)
 			kvvmi := newKVVMI(agentConnected, false)
 
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvmi)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
 
@@ -111,34 +111,34 @@ var _ = Describe("AgentHandler Tests", func() {
 				Expect(cond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineRunning, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineRunning, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineRunning, false, metav1.ConditionFalse, true),
 
-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineStopping, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineStopping, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineStopping, false, metav1.ConditionFalse, true),
 
-		Entry("Should add AgentReady as True if agent is connected", virtv2.MachineMigrating, true, metav1.ConditionTrue, true),
-		Entry("Should add AgentReady as False if agent is not connected", virtv2.MachineMigrating, false, metav1.ConditionFalse, true),
+		Entry("Should add AgentReady as True if agent is connected", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true),
+		Entry("Should add AgentReady as False if agent is not connected", v1alpha2.MachineMigrating, false, metav1.ConditionFalse, true),
 
-		Entry("Should not add AgentReady if VM is in Pending phase and the agent is connected", virtv2.MachinePending, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Pending phase and the agent is not connected", virtv2.MachinePending, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Pending phase and the agent is connected", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Pending phase and the agent is not connected", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false),
 
-		Entry("Should not add AgentReady if VM is in Starting phase and the agent is connected", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Starting phase and the agent is not connected", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Starting phase and the agent is connected", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Starting phase and the agent is not connected", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),
 
-		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is connected", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
-		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is not connected", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is connected", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Should not add AgentReady if VM is in Stopped phase and the agent is not connected", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
 	)
 
 	DescribeTable("AgentVersionNotSupported Condition Tests",
-		func(phase virtv2.MachinePhase, agentUnsupported bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
+		func(phase v1alpha2.MachinePhase, agentUnsupported bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) {
 			vm := newVM(phase)
 			vmi := newKVVMI(true, agentUnsupported)
 
 			fakeClient, resource, vmState = setupEnvironment(vm, vmi)
 			reconcile()
 
-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
 
@@ -148,22 +148,22 @@ var _ = Describe("AgentHandler Tests", func() {
 				Expect(cond.Status).To(Equal(expectedStatus))
 			}
 		},
-		Entry("Should set unsupported version condition as True in Running phase", virtv2.MachineRunning, true, metav1.ConditionTrue, true),
-		Entry("Should not set unsupported version condition as False in Running phase", virtv2.MachineRunning, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Running phase", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true),
+		Entry("Should not set unsupported version condition as False in Running phase", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false),
 
-		Entry("Should set unsupported version condition as True in Stopping phase", virtv2.MachineStopping, true, metav1.ConditionTrue, true),
-		Entry("Should set unsupported version condition as False in Stopping phase", virtv2.MachineStopping, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Stopping phase", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true),
+		Entry("Should set unsupported version condition as False in Stopping phase", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false),
 
-		Entry("Should set unsupported version condition as True in Migrating phase", virtv2.MachineMigrating, true, metav1.ConditionTrue, true),
-		Entry("Should set unsupported version condition as False in Migrating phase", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false),
+		Entry("Should set unsupported version condition as True in Migrating phase", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true),
+		Entry("Should set unsupported version condition as False in Migrating phase", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false),
 
-		Entry("Should not set unsupported version condition as True in Pending phase", virtv2.MachinePending, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Pending phase", virtv2.MachinePending, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Pending phase", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Pending phase", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false),
 
-		Entry("Should not set unsupported version condition as True in Starting phase", virtv2.MachineStarting, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Starting phase", virtv2.MachineStarting, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Starting phase", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Starting phase", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false),
 
-		Entry("Should not set unsupported version condition as True in Stopped phase", virtv2.MachineStopped, true, metav1.ConditionUnknown, false),
-		Entry("Should not set unsupported version condition as False in Stopped phase", virtv2.MachineStopped, false, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as True in Stopped phase", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false),
+		Entry("Should not set unsupported version condition as False in Stopped phase", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false),
 	)
 })
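These tests use Ginkgo's table-driven style: one assertion body, many Entry rows. A self-contained miniature of the same mechanism, independent of this module's helpers (the agentCanBeReported function is hypothetical, distilled from the phase gate the entries above exercise):

package internal_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// agentCanBeReported mirrors the phase gate exercised by the entries above.
func agentCanBeReported(phase v1alpha2.MachinePhase) bool {
	switch phase {
	case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped:
		return false
	default:
		return true
	}
}

var _ = DescribeTable("agent condition phase gate",
	func(phase v1alpha2.MachinePhase, expected bool) {
		Expect(agentCanBeReported(phase)).To(Equal(expected))
	},
	Entry("running VMs report the agent condition", v1alpha2.MachineRunning, true),
	Entry("stopped VMs do not", v1alpha2.MachineStopped, false),
)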
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
index 40b7e28eff..3adb173d1c 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_condition.go
@@ -26,7 +26,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -54,7 +54,7 @@ func (h *BlockDeviceHandler) checkVirtualDisksToBeWFFC(ctx context.Context, s st
 	}
 
 	for _, vd := range vds {
-		if vd.Status.Phase == virtv2.DiskWaitForFirstConsumer {
+		if vd.Status.Phase == v1alpha2.DiskWaitForFirstConsumer {
 			return true, nil
 		}
 	}
@@ -86,7 +86,7 @@ const (
 	UsageTypeAnotherVM string = "by another VM"
 )
 
-func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds map[string]*virtv2.VirtualDisk) string {
+func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds map[string]*v1alpha2.VirtualDisk) string {
 	summaryCount := len(vds)
 
 	var messages []string
@@ -129,7 +129,7 @@ func (h *BlockDeviceHandler) getStatusMessage(diskState virtualDisksState, vds m
 	return strings.Join(messages, "; ") + "."
 }
 
-func (h *BlockDeviceHandler) setConditionReady(vm *virtv2.VirtualMachine) {
+func (h *BlockDeviceHandler) setConditionReady(vm *v1alpha2.VirtualMachine) {
 	conditions.SetCondition(
 		conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady).
 			Generation(vm.Generation).
@@ -140,7 +140,7 @@ func (h *BlockDeviceHandler) setConditionReady(vm *virtv2.VirtualMachine) {
 	)
 }
 
-func (h *BlockDeviceHandler) setConditionNotReady(vm *virtv2.VirtualMachine, message string) {
+func (h *BlockDeviceHandler) setConditionNotReady(vm *v1alpha2.VirtualMachine, message string) {
 	conditions.SetCondition(
 		conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady).
 			Generation(vm.Generation).
@@ -151,7 +151,7 @@ func (h *BlockDeviceHandler) setConditionNotReady(vm *virtv2.VirtualMachine, mes
 	)
 }
 
-func (h *BlockDeviceHandler) getVirtualDisksState(vm *virtv2.VirtualMachine, vds map[string]*virtv2.VirtualDisk) virtualDisksState {
+func (h *BlockDeviceHandler) getVirtualDisksState(vm *v1alpha2.VirtualMachine, vds map[string]*v1alpha2.VirtualDisk) virtualDisksState {
 	vdsState := virtualDisksState{}
 
 	for _, vd := range vds {
@@ -169,7 +169,7 @@ func (h *BlockDeviceHandler) getVirtualDisksState(vm *virtv2.VirtualMachine, vds
 }
 
 func (h *BlockDeviceHandler) handleImageCreationDisk(
-	vd *virtv2.VirtualDisk,
+	vd *v1alpha2.VirtualDisk,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
@@ -180,8 +180,8 @@ func (h *BlockDeviceHandler) handleImageCreationDisk(
 }
 
 func (h *BlockDeviceHandler) handleAttachedDisk(
-	vd *virtv2.VirtualDisk,
-	vm *virtv2.VirtualMachine,
+	vd *v1alpha2.VirtualDisk,
+	vm *v1alpha2.VirtualMachine,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
@@ -196,19 +196,19 @@ func (h *BlockDeviceHandler) handleAttachedDisk(
 }
 
 func (h *BlockDeviceHandler) handleReadyForUseDisk(
-	vd *virtv2.VirtualDisk,
-	vm *virtv2.VirtualMachine,
+	vd *v1alpha2.VirtualDisk,
+	vm *v1alpha2.VirtualMachine,
 	condition metav1.Condition,
 	state *virtualDisksState,
 ) {
 	if condition.Status != metav1.ConditionTrue &&
-		vm.Status.Phase == virtv2.MachineStopped &&
+		vm.Status.Phase == v1alpha2.MachineStopped &&
 		h.checkVDToUseVM(vd, vm) {
 		state.counts.readyToUse++
 	}
}
 
-func (h *BlockDeviceHandler) checkVDToUseVM(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine) bool {
+func (h *BlockDeviceHandler) checkVDToUseVM(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine) bool {
 	attachedVMs := vd.Status.AttachedToVirtualMachines
 
 	for _, attachedVM := range attachedVMs {
@@ -220,7 +220,7 @@ func (h *BlockDeviceHandler) checkVDToUseVM(vd *virtv2.VirtualDisk, vm *virtv2.V
 	return false
 }
 
-func (h *BlockDeviceHandler) checkVMToMountVD(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine) bool {
+func (h *BlockDeviceHandler) checkVMToMountVD(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine) bool {
 	attachedVMs := vd.Status.AttachedToVirtualMachines
 
 	for _, attachedVM := range attachedVMs {
@@ -278,7 +278,7 @@ func (h *BlockDeviceHandler) handleBlockDevicesReady(ctx context.Context, s stat
 }
 
 // countReadyBlockDevices check if all attached images and disks are ready to use by the VM.
-func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s BlockDevicesState, wffc bool) (int, bool, []string) {
+func (h *BlockDeviceHandler) countReadyBlockDevices(vm *v1alpha2.VirtualMachine, s BlockDevicesState, wffc bool) (int, bool, []string) {
 	if vm == nil {
 		return 0, false, nil
 	}
@@ -288,19 +288,19 @@ func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s
 	canStartKVVM := true
 	for _, bd := range vm.Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ImageDevice:
-			if vi, hasKey := s.VIByName[bd.Name]; hasKey && vi.Status.Phase == virtv2.ImageReady {
+		case v1alpha2.ImageDevice:
+			if vi, hasKey := s.VIByName[bd.Name]; hasKey && vi.Status.Phase == v1alpha2.ImageReady {
 				ready++
 				continue
 			}
 			canStartKVVM = false
-		case virtv2.ClusterImageDevice:
-			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey && cvi.Status.Phase == virtv2.ImageReady {
+		case v1alpha2.ClusterImageDevice:
+			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey && cvi.Status.Phase == v1alpha2.ImageReady {
 				ready++
 				continue
 			}
 			canStartKVVM = false
-		case virtv2.DiskDevice:
+		case v1alpha2.DiskDevice:
 			vd, hasKey := s.VDByName[bd.Name]
 			if !hasKey {
 				canStartKVVM = false
@@ -323,7 +323,7 @@ func (h *BlockDeviceHandler) countReadyBlockDevices(vm *virtv2.VirtualMachine, s
 				ready++
 			} else {
 				var msg string
-				if wffc && vm.Status.Phase == virtv2.MachineStopped {
+				if wffc && vm.Status.Phase == v1alpha2.MachineStopped {
 					msg = fmt.Sprintf("Virtual disk %s is waiting for the virtual machine to be starting", vd.Name)
 				} else {
 					msg = fmt.Sprintf("Virtual disk %s is waiting for the underlying PVC to be bound", vd.Name)
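countReadyBlockDevices boils down to a per-kind readiness check over spec.blockDeviceRefs. The skeleton below shows that shape with the disk-specific branches reduced to a single lookup; the maps are hypothetical stand-ins for BlockDevicesState, so this is a sketch of the pattern, not the handler itself:

package internal

import (
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// countReady tallies refs whose backing image or disk is ready, and reports
// whether the underlying KubeVirt VM could be started at all.
func countReady(
	vm *v1alpha2.VirtualMachine,
	viReady, cviReady, vdReady map[string]bool,
) (ready int, canStart bool) {
	canStart = true
	for _, bd := range vm.Spec.BlockDeviceRefs {
		ok := false
		switch bd.Kind {
		case v1alpha2.ImageDevice:
			ok = viReady[bd.Name]
		case v1alpha2.ClusterImageDevice:
			ok = cviReady[bd.Name]
		case v1alpha2.DiskDevice:
			ok = vdReady[bd.Name]
		}
		if ok {
			ready++
		} else {
			canStart = false
		}
	}
	return ready, canStart
}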
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
index 875e89f49f..de3166476d 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -43,9 +43,9 @@ func NewBlockDeviceHandler(cl client.Client, blockDeviceService BlockDeviceServi
 		client:             cl,
 		blockDeviceService: blockDeviceService,
 
-		viProtection:  service.NewProtectionService(cl, virtv2.FinalizerVIProtection),
-		cviProtection: service.NewProtectionService(cl, virtv2.FinalizerCVIProtection),
-		vdProtection:  service.NewProtectionService(cl, virtv2.FinalizerVDProtection),
+		viProtection:  service.NewProtectionService(cl, v1alpha2.FinalizerVIProtection),
+		cviProtection: service.NewProtectionService(cl, v1alpha2.FinalizerCVIProtection),
+		vdProtection:  service.NewProtectionService(cl, v1alpha2.FinalizerVDProtection),
 	}
 }
 
@@ -169,7 +169,7 @@ func (h *BlockDeviceHandler) handleBlockDeviceConflicts(ctx context.Context, s s
 	return false, nil
 }
 
-func (h *BlockDeviceHandler) handleBlockDeviceLimit(ctx context.Context, vm *virtv2.VirtualMachine) (bool, error) {
+func (h *BlockDeviceHandler) handleBlockDeviceLimit(ctx context.Context, vm *v1alpha2.VirtualMachine) (bool, error) {
 	// Get number of connected block devices.
 	// If it's greater than the limit, then set the condition to false.
 	blockDeviceAttachedCount, err := h.blockDeviceService.CountBlockDevicesAttachedToVM(ctx, vm)
@@ -202,133 +202,65 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 		return "", err
 	}
 
-	hotplugsByName := make(map[string]struct{})
-
-	for _, vmbdas := range vmbdasByBlockDevice {
-		for _, vmbda := range vmbdas {
-			switch vmbda.Status.Phase {
-			case virtv2.BlockDeviceAttachmentPhaseInProgress,
-				virtv2.BlockDeviceAttachmentPhaseAttached:
-			default:
-				continue
-			}
-
-			var (
-				cvi         *virtv2.ClusterVirtualImage
-				vi          *virtv2.VirtualImage
-				vd          *virtv2.VirtualDisk
-				bdStatusRef virtv2.BlockDeviceStatusRef
-			)
-
-			switch vmbda.Spec.BlockDeviceRef.Kind {
-			case virtv2.VMBDAObjectRefKindVirtualDisk:
-				vd, err = s.VirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if vd == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = vd.Status.Capacity
-			case virtv2.VMBDAObjectRefKindVirtualImage:
-				vi, err = s.VirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if vi == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.ImageDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = vi.Status.Size.Unpacked
-
-			case virtv2.VMBDAObjectRefKindClusterVirtualImage:
-				cvi, err = s.ClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if cvi == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(virtv2.ClusterImageDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = cvi.Status.Size.Unpacked
-			default:
-				return "", fmt.Errorf("unacceptable `Kind` of `BlockDeviceRef`: %s", vmbda.Spec.BlockDeviceRef.Kind)
-			}
-
-			hotplugsByName[bdStatusRef.Name] = struct{}{}
-		}
-	}
-
 	var conflictedRefs []string
-	vm := s.VirtualMachine().Current()
 
-	for _, bdSpecRef := range vm.Spec.BlockDeviceRefs {
+	for _, bdSpecRef := range s.VirtualMachine().Current().Spec.BlockDeviceRefs {
 		// It is a precaution to not apply changes in spec.blockDeviceRefs if disk is already
 		// hotplugged using the VMBDA resource.
 		// spec check is done by VirtualDisk status
 		// the reverse check is done by the vmbda-controller.
-		if bdSpecRef.Kind == virtv2.DiskDevice {
-			if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
-				conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
-				continue
-			}
-		}
-
-		if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
-			conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
+		_, conflict := vmbdasByBlockDevice[v1alpha2.VMBDAObjectRef{
+			Kind: v1alpha2.VMBDAObjectRefKind(bdSpecRef.Kind),
+			Name: bdSpecRef.Name,
+		}]
+		if conflict {
+			conflictedRefs = append(conflictedRefs, fmt.Sprintf("%s/%s", strings.ToLower(string(bdSpecRef.Kind)), bdSpecRef.Name))
 			continue
 		}
 	}
 
 	var warning string
 	if len(conflictedRefs) > 0 {
-		warning = fmt.Sprintf("spec.blockDeviceRefs field contains hotplugged disks (%s): unplug or remove them from spec to continue.", strings.Join(conflictedRefs, ", "))
+		warning = fmt.Sprintf("spec.blockDeviceRefs field contains block devices to hotplug (%s): unplug or remove them from spec to continue.", strings.Join(conflictedRefs, ", "))
 	}
 
 	return warning, nil
 }
 
 // setFinalizersOnBlockDevices sets protection finalizers on CVMI and VMD attached to the VM.
-func (h *BlockDeviceHandler) setFinalizersOnBlockDevices(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState) error {
+func (h *BlockDeviceHandler) setFinalizersOnBlockDevices(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState) error {
 	return h.updateFinalizers(ctx, vm, s, func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
 		return p.AddProtection
 	})
 }
 
 // removeFinalizersOnBlockDevices remove protection finalizers on CVI,VI and VMD attached to the VM.
-func (h *BlockDeviceHandler) removeFinalizersOnBlockDevices(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState) error {
+func (h *BlockDeviceHandler) removeFinalizersOnBlockDevices(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState) error {
 	return h.updateFinalizers(ctx, vm, s, func(p *service.ProtectionService) func(ctx context.Context, objs ...client.Object) error {
 		return p.RemoveProtection
 	})
 }
 
 // updateFinalizers remove protection finalizers on CVI,VI and VD attached to the VM.
-func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *virtv2.VirtualMachine, s BlockDevicesState, update updaterProtection) error {
+func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *v1alpha2.VirtualMachine, s BlockDevicesState, update updaterProtection) error {
 	if vm == nil {
 		return fmt.Errorf("VM is empty")
 	}
 
 	for _, bd := range vm.Spec.BlockDeviceRefs {
 		switch bd.Kind {
-		case virtv2.ImageDevice:
+		case v1alpha2.ImageDevice:
 			if vi, hasKey := s.VIByName[bd.Name]; hasKey {
 				if err := update(h.viProtection)(ctx, vi); err != nil {
 					return err
 				}
 			}
-		case virtv2.ClusterImageDevice:
+		case v1alpha2.ClusterImageDevice:
 			if cvi, hasKey := s.CVIByName[bd.Name]; hasKey {
 				if err := update(h.cviProtection)(ctx, cvi); err != nil {
 					return err
 				}
 			}
-		case virtv2.DiskDevice:
+		case v1alpha2.DiskDevice:
 			if vd, hasKey := s.VDByName[bd.Name]; hasKey {
 				if err := update(h.vdProtection)(ctx, vd); err != nil {
 					return err
@@ -344,17 +276,17 @@ func (h *BlockDeviceHandler) updateFinalizers(ctx context.Context, vm *virtv2.Vi
 func NewBlockDeviceState(s state.VirtualMachineState) BlockDevicesState {
 	return BlockDevicesState{
 		s:         s,
-		VIByName:  make(map[string]*virtv2.VirtualImage),
-		CVIByName: make(map[string]*virtv2.ClusterVirtualImage),
-		VDByName:  make(map[string]*virtv2.VirtualDisk),
+		VIByName:  make(map[string]*v1alpha2.VirtualImage),
+		CVIByName: make(map[string]*v1alpha2.ClusterVirtualImage),
+		VDByName:  make(map[string]*v1alpha2.VirtualDisk),
 	}
 }
 
 type BlockDevicesState struct {
 	s         state.VirtualMachineState
-	VIByName  map[string]*virtv2.VirtualImage
-	CVIByName map[string]*virtv2.ClusterVirtualImage
-	VDByName  map[string]*virtv2.VirtualDisk
+	VIByName  map[string]*v1alpha2.VirtualImage
+	CVIByName map[string]*v1alpha2.ClusterVirtualImage
+	VDByName  map[string]*v1alpha2.VirtualDisk
 }
 
 func (s *BlockDevicesState) Reload(ctx context.Context) error {
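The getBlockDeviceWarnings rewrite above replaces the per-attachment fetch loop with a direct lookup into the map keyed by VMBDAObjectRef. The essential trick is that a comparable struct works as a Go map key, so conflict detection becomes a single lookup per spec ref. In isolation, under the assumption that the map is already grouped by ref as in the handler:

package internal

import (
	"fmt"
	"strings"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// conflictingRefs returns "kind/name" for every spec ref that already has a
// hotplug attachment. vmbdas is keyed by the comparable VMBDAObjectRef struct,
// so membership is one map lookup instead of a fetch per attachment.
func conflictingRefs(
	specRefs []v1alpha2.BlockDeviceSpecRef,
	vmbdas map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment,
) []string {
	var out []string
	for _, ref := range specRefs {
		key := v1alpha2.VMBDAObjectRef{
			Kind: v1alpha2.VMBDAObjectRefKind(ref.Kind),
			Name: ref.Name,
		}
		if _, ok := vmbdas[key]; ok {
			out = append(out, fmt.Sprintf("%s/%s", strings.ToLower(string(ref.Kind)), ref.Name))
		}
	}
	return out
}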
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_status.go b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_status.go
index 9e1683fece..b92db5c0ff 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/block_device_status.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/block_device_status.go
@@ -25,24 +25,24 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type nameKindKey struct {
-	kind virtv2.BlockDeviceKind
+	kind v1alpha2.BlockDeviceKind
 	name string
 }
 
 // getBlockDeviceStatusRefs returns block device refs to populate .status.blockDeviceRefs of the virtual machine.
 // If kvvm is present, this method will reflect all volumes with prefixes (vi,vd, or cvi) into the slice of `BlockDeviceStatusRef`.
 // Block devices from the virtual machine specification will be added to the resulting slice if they have not been included in the previous step.
-func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s state.VirtualMachineState) ([]virtv2.BlockDeviceStatusRef, error) {
+func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s state.VirtualMachineState) ([]v1alpha2.BlockDeviceStatusRef, error) {
 	kvvm, err := s.KVVM(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	var refs []virtv2.BlockDeviceStatusRef
+	var refs []v1alpha2.BlockDeviceStatusRef
 
 	// 1. There is no kvvm yet: populate block device refs with the spec.
 	if kvvm == nil {
@@ -92,10 +92,8 @@ func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s sta
 		if err != nil {
 			return nil, err
 		}
-		ref.Hotplugged, err = h.isHotplugged(ctx, volume, kvvmiVolumeStatusByName, s)
-		if err != nil {
-			return nil, err
-		}
+
+		ref.Hotplugged = h.isHotplugged(volume, kvvmiVolumeStatusByName)
 		if ref.Hotplugged {
 			ref.VirtualMachineBlockDeviceAttachmentName, err = h.getBlockDeviceAttachmentName(ctx, kind, bdName, s)
 			if err != nil {
@@ -131,22 +129,22 @@ func (h *BlockDeviceHandler) getBlockDeviceStatusRefs(ctx context.Context, s sta
 	return refs, nil
 }
 
-func (h *BlockDeviceHandler) getBlockDeviceStatusRef(kind virtv2.BlockDeviceKind, name string) virtv2.BlockDeviceStatusRef {
-	return virtv2.BlockDeviceStatusRef{
+func (h *BlockDeviceHandler) getBlockDeviceStatusRef(kind v1alpha2.BlockDeviceKind, name string) v1alpha2.BlockDeviceStatusRef {
+	return v1alpha2.BlockDeviceStatusRef{
 		Kind: kind,
 		Name: name,
 	}
 }
 
 type BlockDeviceGetter interface {
-	VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error)
-	VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error)
-	ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error)
+	VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error)
+	VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error)
+	ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error)
 }
 
-func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virtv2.BlockDeviceStatusRef, getter BlockDeviceGetter) (string, error) {
+func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref v1alpha2.BlockDeviceStatusRef, getter BlockDeviceGetter) (string, error) {
 	switch ref.Kind {
-	case virtv2.ImageDevice:
+	case v1alpha2.ImageDevice:
 		vi, err := getter.VirtualImage(ctx, ref.Name)
 		if err != nil {
 			return "", err
@@ -157,7 +155,7 @@ func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virt
 		}
 
 		return vi.Status.Size.Unpacked, nil
-	case virtv2.DiskDevice:
+	case v1alpha2.DiskDevice:
 		vd, err := getter.VirtualDisk(ctx, ref.Name)
 		if err != nil {
 			return "", err
@@ -168,7 +166,7 @@ func (h *BlockDeviceHandler) getBlockDeviceRefSize(ctx context.Context, ref virt
 		}
 
 		return vd.Status.Capacity, nil
-	case virtv2.ClusterImageDevice:
+	case v1alpha2.ClusterImageDevice:
 		cvi, err := getter.ClusterVirtualImage(ctx, ref.Name)
 		if err != nil {
 			return "", err
@@ -189,36 +187,26 @@ func (h *BlockDeviceHandler) getBlockDeviceTarget(volume virtv1.Volume, kvvmiVol
 	return vs.Target, ok
 }
 
-func (h *BlockDeviceHandler) isHotplugged(ctx context.Context, volume virtv1.Volume, kvvmiVolumeStatusByName map[string]virtv1.VolumeStatus, s state.VirtualMachineState) (bool, error) {
+func (h *BlockDeviceHandler) isHotplugged(volume virtv1.Volume, kvvmiVolumeStatusByName map[string]virtv1.VolumeStatus) bool {
 	switch {
 	// 1. If kvvmi has volume status with hotplugVolume reference then it's 100% hot-plugged volume.
 	case kvvmiVolumeStatusByName[volume.Name].HotplugVolume != nil:
-		return true, nil
+		return true
 
 	// 2. If kvvm has volume with hot-pluggable pvc reference then it's 100% hot-plugged volume.
 	case volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable:
-		return true, nil
+		return true
 
-	// 3. We cannot check volume.ContainerDisk.Hotpluggable, as this field was added in our patches and is not reflected in the api version of virtv1 used by us.
-	// Until we have a 3rd-party repository to import the modified virtv1, we have to make decisions based on indirect signs.
-	// If there was a previously hot-plugged block device and the VMBDA is still alive, then it's a hot-plugged block device.
-	// TODO: Use volume.ContainerDisk.Hotpluggable for decision-making when the 3rd-party repository is available.
-	case volume.ContainerDisk != nil:
-		bdName, kind := kvbuilder.GetOriginalDiskName(volume.Name)
-		if h.canBeHotPlugged(s.VirtualMachine().Current(), kind, bdName) {
-			vmbdaName, err := h.getBlockDeviceAttachmentName(ctx, kind, bdName, s)
-			if err != nil {
-				return false, err
-			}
-			return vmbdaName != "", nil
-		}
+	// 3. If kvvm has volume with hot-pluggable disk reference then it's 100% hot-plugged volume.
+	case volume.ContainerDisk != nil && volume.ContainerDisk.Hotpluggable:
+		return true
 	}
 
 	// 4. Is not hot-plugged.
-	return false, nil
+	return false
 }
 
-func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, kind virtv2.BlockDeviceKind, bdName string, s state.VirtualMachineState) (string, error) {
+func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, kind v1alpha2.BlockDeviceKind, bdName string, s state.VirtualMachineState) (string, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler(nameBlockDeviceHandler))
 
 	vmbdasByRef, err := s.VirtualMachineBlockDeviceAttachments(ctx)
@@ -226,8 +214,8 @@ func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, k
 		return "", err
 	}
 
-	vmbdas := vmbdasByRef[virtv2.VMBDAObjectRef{
-		Kind: virtv2.VMBDAObjectRefKind(kind),
+	vmbdas := vmbdasByRef[v1alpha2.VMBDAObjectRef{
+		Kind: v1alpha2.VMBDAObjectRefKind(kind),
 		Name: bdName,
 	}]
 
@@ -243,19 +231,3 @@ func (h *BlockDeviceHandler) getBlockDeviceAttachmentName(ctx context.Context, k
 	return vmbdas[0].Name, nil
 }
-
-func (h *BlockDeviceHandler) canBeHotPlugged(vm *virtv2.VirtualMachine, kind virtv2.BlockDeviceKind, bdName string) bool {
-	for _, bdRef := range vm.Status.BlockDeviceRefs {
-		if bdRef.Kind == kind && bdRef.Name == bdName {
-			return bdRef.Hotplugged
-		}
-	}
-
-	for _, bdRef := range vm.Spec.BlockDeviceRefs {
-		if bdRef.Kind == kind && bdRef.Name == bdName {
-			return false
-		}
-	}
-
-	return true
-}
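After this change, hotplug detection is a pure function of the KubeVirt volume and the VMI volume status, with no client round-trips and no canBeHotPlugged heuristic. Its decision table, lifted out for clarity (virtv1 is KubeVirt's core v1 API as imported in the file above; note the Hotpluggable field on ContainerDisk comes from this project's KubeVirt patches, not upstream):

package internal

import (
	virtv1 "kubevirt.io/api/core/v1"
)

// isHotplugged reports whether a volume was hot-plugged: either the VMI status
// marks it as a hotplug volume, or the volume source itself is flagged
// hot-pluggable (PVC via upstream KubeVirt; ContainerDisk via the patched
// Hotpluggable field).
func isHotplugged(volume virtv1.Volume, statusByName map[string]virtv1.VolumeStatus) bool {
	switch {
	case statusByName[volume.Name].HotplugVolume != nil:
		return true
	case volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable:
		return true
	case volume.ContainerDisk != nil && volume.ContainerDisk.Hotpluggable:
		return true
	}
	return false
}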
"github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -48,14 +48,14 @@ var _ = Describe("Test BlockDeviceReady condition", func() { }) okBlockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 1, nil }, } scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, corev1.AddToScheme, } { @@ -68,25 +68,25 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Name: "vm", } - getVMWithOneVD := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine { - return &virtv2.VirtualMachine{ + getVMWithOneVD := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine { + return &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ + Status: v1alpha2.VirtualMachineStatus{ Phase: phase, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, @@ -94,13 +94,13 @@ var _ = Describe("Test BlockDeviceReady condition", func() { } } - getNotReadyVD := func(name string, status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ + getNotReadyVD := func(name string, status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ + Status: v1alpha2.VirtualDiskStatus{ Conditions: []metav1.Condition{{ Type: vdcondition.InUseType.String(), Status: status, @@ -113,7 +113,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { nameVD1 := "vd1" nameVD2 := "vd2" - DescribeTable("One not ready disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("One not ready disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -132,21 +132,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd AttachedToVirtualMachine & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd AttachedToVirtualMachine & Running VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( 
"vd AttachedToVirtualMachine & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), @@ -154,21 +154,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd UsedForImageCreation & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd UsedForImageCreation & Running VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd UsedForImageCreation & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), @@ -176,35 +176,35 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Entry( "vd NotInUse & Pending VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachinePending), + getVMWithOneVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd NotInUse & Running VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineRunning), + getVMWithOneVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), Entry( "vd NotInUse & Stopped VM", getNotReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithOneVD(virtv2.MachineStopped), + getVMWithOneVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block device \"vd1\" to be ready.", ), ) - getWFFCVD := func(status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ + getWFFCVD := func(status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskWaitForFirstConsumer, - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskWaitForFirstConsumer, + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, Conditions: []metav1.Condition{{ @@ -212,7 +212,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Status: status, Reason: reason, }}, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: namespacedName.Name, Mounted: true, @@ -222,7 +222,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { } } - DescribeTable("One wffc disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("One wffc disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build() vmResource := 
 		vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter)
 
@@ -241,21 +241,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		Entry(
 			"vd AttachedToVirtualMachine & Pending VM",
 			getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachinePending),
+			getVMWithOneVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the underlying PVC to be bound.",
 		),
 		Entry(
 			"vd AttachedToVirtualMachine & Running VM",
 			getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachineRunning),
+			getVMWithOneVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the underlying PVC to be bound.",
 		),
 		Entry(
 			"vd AttachedToVirtualMachine & Stopped VM",
 			getWFFCVD(metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachineStopped),
+			getVMWithOneVD(v1alpha2.MachineStopped),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the virtual machine to be starting.",
 		),
@@ -263,37 +263,37 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		Entry(
 			"vd NotInUse & Pending VM",
 			getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachinePending),
+			getVMWithOneVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready to use.",
 		),
 		Entry(
 			"vd NotInUse & Running VM",
 			getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachineRunning),
+			getVMWithOneVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready to use.",
 		),
 		Entry(
 			"vd NotInUse & Stopped VM",
 			getWFFCVD(metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachineStopped),
+			getVMWithOneVD(v1alpha2.MachineStopped),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready; Virtual disk vd1 is waiting for the virtual machine to be starting.",
 		),
 	)
 
-	getReadyVD := func(name string, status metav1.ConditionStatus, reason string) *virtv2.VirtualDisk {
-		return &virtv2.VirtualDisk{
+	getReadyVD := func(name string, status metav1.ConditionStatus, reason string) *v1alpha2.VirtualDisk {
+		return &v1alpha2.VirtualDisk{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name,
 				Namespace: namespacedName.Namespace,
 			},
-			Status: virtv2.VirtualDiskStatus{
-				Target: virtv2.DiskTarget{
+			Status: v1alpha2.VirtualDiskStatus{
+				Target: v1alpha2.DiskTarget{
 					PersistentVolumeClaim: "testPvc",
 				},
-				Phase: virtv2.DiskReady,
+				Phase: v1alpha2.DiskReady,
 				Conditions: []metav1.Condition{
 					{
 						Type:   vdcondition.ReadyType.String(),
@@ -307,7 +307,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 						Reason: reason,
 					},
 				},
-				AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{
+				AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{
 					{
 						Name:    namespacedName.Name,
 						Mounted: true,
@@ -317,7 +317,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		}
 	}
 
-	DescribeTable("One ready disk", func(vd *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) {
+	DescribeTable("One ready disk", func(vd *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) {
 		fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd).Build()
 		vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter)
@@ -336,21 +336,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		Entry(
 			"vd AttachedToVirtualMachine & Pending VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachinePending),
+			getVMWithOneVD(v1alpha2.MachinePending),
 			metav1.ConditionTrue,
 			"",
 		),
 		Entry(
 			"vd AttachedToVirtualMachine & Running VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachineRunning),
+			getVMWithOneVD(v1alpha2.MachineRunning),
 			metav1.ConditionTrue,
 			"",
 		),
 		Entry(
 			"vd AttachedToVirtualMachine & Stopped VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithOneVD(virtv2.MachineStopped),
+			getVMWithOneVD(v1alpha2.MachineStopped),
 			metav1.ConditionTrue,
 			"",
 		),
@@ -358,21 +358,21 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		Entry(
 			"vd UsedForImageCreation & Pending VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithOneVD(virtv2.MachinePending),
+			getVMWithOneVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Virtual disk \"vd1\" is in use for image creation.",
 		),
 		Entry(
 			"vd UsedForImageCreation & Running VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithOneVD(virtv2.MachineRunning),
+			getVMWithOneVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Virtual disk \"vd1\" is in use for image creation.",
 		),
 		Entry(
 			"vd UsedForImageCreation & Stopped VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithOneVD(virtv2.MachineStopped),
+			getVMWithOneVD(v1alpha2.MachineStopped),
 			metav1.ConditionFalse,
 			"Virtual disk \"vd1\" is in use for image creation.",
 		),
@@ -380,53 +380,53 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		Entry(
 			"vd NotInUse & Pending VM",
 			getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachinePending),
+			getVMWithOneVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready to use.",
 		),
 		Entry(
 			"vd NotInUse & Running VM",
 			getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachineRunning),
+			getVMWithOneVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Waiting for block device \"vd1\" to be ready to use.",
 		),
 		Entry(
 			"vd NotInUse & Stopped VM",
 			getReadyVD(nameVD1, metav1.ConditionFalse, vdcondition.NotInUse.String()),
-			getVMWithOneVD(virtv2.MachineStopped),
+			getVMWithOneVD(v1alpha2.MachineStopped),
 			metav1.ConditionTrue,
 			"",
 		),
 	)
-	getVMWithTwoVD := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine {
-		return &virtv2.VirtualMachine{
+	getVMWithTwoVD := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine {
+		return &v1alpha2.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      namespacedName.Name,
 				Namespace: namespacedName.Namespace,
 			},
-			Spec: virtv2.VirtualMachineSpec{
-				BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{
+			Spec: v1alpha2.VirtualMachineSpec{
+				BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{
 					{
-						Kind: virtv2.DiskDevice,
+						Kind: v1alpha2.DiskDevice,
 						Name: "vd1",
 					},
 					{
-						Kind: virtv2.DiskDevice,
+						Kind: v1alpha2.DiskDevice,
 						Name: "vd2",
 					},
 				},
 			},
-			Status: virtv2.VirtualMachineStatus{
+			Status: v1alpha2.VirtualMachineStatus{
 				Phase: phase,
-				BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{
+				BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{
 					{
-						Kind: virtv2.DiskDevice,
+						Kind: v1alpha2.DiskDevice,
 						Name: "vd1",
 					},
 					{
-						Kind: virtv2.DiskDevice,
+						Kind: v1alpha2.DiskDevice,
 						Name: "vd2",
 					},
 				},
@@ -434,7 +434,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 		}
 	}
-	DescribeTable("two disks: not ready disk & ready disk", func(vd1, vd2 *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) {
+	DescribeTable("two disks: not ready disk & ready disk", func(vd1, vd2 *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) {
 		fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd1, vd2).Build()
 		vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter)
@@ -454,7 +454,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 AttachedToVirtualMachine & Pending VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithTwoVD(virtv2.MachinePending),
+			getVMWithTwoVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Waiting for block devices to be ready: 1/2.",
 		),
@@ -462,7 +462,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 AttachedToVirtualMachine & Running VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithTwoVD(virtv2.MachineRunning),
+			getVMWithTwoVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Waiting for block devices to be ready: 1/2.",
 		),
@@ -470,7 +470,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 AttachedToVirtualMachine & Stopped VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
-			getVMWithTwoVD(virtv2.MachineStopped),
+			getVMWithTwoVD(v1alpha2.MachineStopped),
 			metav1.ConditionFalse,
 			"Waiting for block devices to be ready: 1/2.",
 		),
@@ -479,7 +479,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 UsedForImageCreation & Pending VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithTwoVD(virtv2.MachinePending),
+			getVMWithTwoVD(v1alpha2.MachinePending),
 			metav1.ConditionFalse,
 			"Waiting for block devices to be ready: 1/2.",
 		),
@@ -487,7 +487,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 UsedForImageCreation & Running VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithTwoVD(virtv2.MachineRunning),
+			getVMWithTwoVD(v1alpha2.MachineRunning),
 			metav1.ConditionFalse,
 			"Waiting for block devices to be ready: 1/2.",
 		),
@@ -495,7 +495,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() {
 			"vd2 UsedForImageCreation & Stopped VM",
 			getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()),
 			getNotReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()),
-			getVMWithTwoVD(virtv2.MachineStopped),
+			getVMWithTwoVD(v1alpha2.MachineStopped),
metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -504,7 +504,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd NotInUse & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -512,7 +512,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), @@ -520,13 +520,13 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getNotReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready: 1/2.", ), ) - DescribeTable("two disks: two ready disks", func(vd1, vd2 *virtv2.VirtualDisk, vm *virtv2.VirtualMachine, status metav1.ConditionStatus, msg string) { + DescribeTable("two disks: two ready disks", func(vd1, vd2 *v1alpha2.VirtualDisk, vm *v1alpha2.VirtualMachine, status metav1.ConditionStatus, msg string) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vm, vd1, vd2).Build() vmResource := reconciler.NewResource(namespacedName, fakeClient, vmFactoryByVM(vm), vmStatusGetter) @@ -546,7 +546,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionTrue, "", ), @@ -554,7 +554,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionTrue, "", ), @@ -562,7 +562,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 AttachedToVirtualMachine & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), @@ -571,7 +571,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting 
for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -579,7 +579,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -587,7 +587,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 UsedForImageCreation & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2; Virtual disk \"vd2\" is in use for image creation.", ), @@ -596,7 +596,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd NotInUse & Pending VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachinePending), + getVMWithTwoVD(v1alpha2.MachinePending), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2.", ), @@ -604,7 +604,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Running VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineRunning), + getVMWithTwoVD(v1alpha2.MachineRunning), metav1.ConditionFalse, "Waiting for block devices to be ready to use: 1/2.", ), @@ -612,7 +612,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "vd2 NotInUse & Stopped VM", getReadyVD(nameVD1, metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()), getReadyVD(nameVD2, metav1.ConditionFalse, vdcondition.NotInUse.String()), - getVMWithTwoVD(virtv2.MachineStopped), + getVMWithTwoVD(v1alpha2.MachineStopped), metav1.ConditionTrue, "", ), @@ -620,40 +620,40 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("three not ready disks", func() { It("blockDeviceReady condition set Status = False and Message = Waiting for block devices to be ready: 0/3.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: 
v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, }, @@ -684,56 +684,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- one ready disk, "+ "- two disk using for create image", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -766,56 +766,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- one ready disk, one disk using for create image, "+ "- one disk attached to another vm", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -825,16 +825,16 @@ var _ = Describe("Test BlockDeviceReady condition", func() { vd2 := getReadyVD("vd2", metav1.ConditionFalse, vdcondition.NotInUse.String()) vd3 := getReadyVD("vd3", metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()) vd4 := 
getReadyVD("vd4", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) - vd5 := &virtv2.VirtualDisk{ + vd5 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd5", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -848,7 +848,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -878,56 +878,56 @@ var _ = Describe("Test BlockDeviceReady condition", func() { "- two disks using for create image, "+ "- two disks attached to another vm", func() { It("blockDeviceReady condition set Status = False and complex message.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd2", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd3", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd4", }, { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd5", }, }, @@ -936,16 +936,16 @@ var _ = Describe("Test BlockDeviceReady condition", func() { vd1 := getReadyVD("vd1", metav1.ConditionTrue, vdcondition.AttachedToVirtualMachine.String()) vd2 := getReadyVD("vd2", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) vd3 := getReadyVD("vd3", metav1.ConditionTrue, vdcondition.UsedForImageCreation.String()) - vd4 := &virtv2.VirtualDisk{ + vd4 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd4", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -959,7 +959,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "b-vm", Mounted: true, @@ -967,16 +967,16 @@ var _ = 
Describe("Test BlockDeviceReady condition", func() { }, }, } - vd5 := &virtv2.VirtualDisk{ + vd5 := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd5", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -990,7 +990,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1017,39 +1017,39 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("one disk attached to another vm", func() { It("blockDeviceReady condition set Status = False and Message = Virtual disk \"vd1\" is in use by another VM.", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, } - vd := &virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskReady, + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1063,7 +1063,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1090,39 +1090,39 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Context("one not ready disk attached to another vm", func() { It("return false and message = Waiting for block device \"vd1\" to be ready", func() { - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: "vd1", }, }, }, } - vd := 
&virtv2.VirtualDisk{ + vd := &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{ Name: "vd1", Namespace: namespacedName.Namespace, }, - Status: virtv2.VirtualDiskStatus{ - Target: virtv2.DiskTarget{ + Status: v1alpha2.VirtualDiskStatus{ + Target: v1alpha2.DiskTarget{ PersistentVolumeClaim: "testPvc", }, - Phase: virtv2.DiskProvisioning, + Phase: v1alpha2.DiskProvisioning, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1136,7 +1136,7 @@ var _ = Describe("Test BlockDeviceReady condition", func() { Reason: vdcondition.AttachedToVirtualMachine.String(), }, }, - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ { Name: "a-vm", Mounted: true, @@ -1165,40 +1165,40 @@ var _ = Describe("Test BlockDeviceReady condition", func() { var _ = Describe("BlockDeviceHandler", func() { var h *BlockDeviceHandler - var vm *virtv2.VirtualMachine - var vi *virtv2.VirtualImage - var cvi *virtv2.ClusterVirtualImage - var vdFoo *virtv2.VirtualDisk - var vdBar *virtv2.VirtualDisk + var vm *v1alpha2.VirtualMachine + var vi *v1alpha2.VirtualImage + var cvi *v1alpha2.ClusterVirtualImage + var vdFoo *v1alpha2.VirtualDisk + var vdBar *v1alpha2.VirtualDisk blockDeviceHandlerMock := &BlockDeviceServiceMock{} - blockDeviceHandlerMock.CountBlockDevicesAttachedToVMFunc = func(_ context.Context, vm *virtv2.VirtualMachine) (int, error) { + blockDeviceHandlerMock.CountBlockDevicesAttachedToVMFunc = func(_ context.Context, vm *v1alpha2.VirtualMachine) (int, error) { return 1, nil } - getBlockDevicesState := func(vi *virtv2.VirtualImage, cvi *virtv2.ClusterVirtualImage, vdFoo, vdBar *virtv2.VirtualDisk) BlockDevicesState { + getBlockDevicesState := func(vi *v1alpha2.VirtualImage, cvi *v1alpha2.ClusterVirtualImage, vdFoo, vdBar *v1alpha2.VirtualDisk) BlockDevicesState { return BlockDevicesState{ - VIByName: map[string]*virtv2.VirtualImage{vi.Name: vi}, - CVIByName: map[string]*virtv2.ClusterVirtualImage{cvi.Name: cvi}, - VDByName: map[string]*virtv2.VirtualDisk{vdFoo.Name: vdFoo, vdBar.Name: vdBar}, + VIByName: map[string]*v1alpha2.VirtualImage{vi.Name: vi}, + CVIByName: map[string]*v1alpha2.ClusterVirtualImage{cvi.Name: cvi}, + VDByName: map[string]*v1alpha2.VirtualDisk{vdFoo.Name: vdFoo, vdBar.Name: vdBar}, } } BeforeEach(func() { h = NewBlockDeviceHandler(nil, blockDeviceHandlerMock) - vi = &virtv2.VirtualImage{ + vi = &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{Name: "vi-01"}, - Status: virtv2.VirtualImageStatus{Phase: virtv2.ImageReady}, + Status: v1alpha2.VirtualImageStatus{Phase: v1alpha2.ImageReady}, } - cvi = &virtv2.ClusterVirtualImage{ + cvi = &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{Name: "cvi-01"}, - Status: virtv2.ClusterVirtualImageStatus{Phase: virtv2.ImageReady}, + Status: v1alpha2.ClusterVirtualImageStatus{Phase: v1alpha2.ImageReady}, } - vdFoo = &virtv2.VirtualDisk{ + vdFoo = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd1-foo"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, + Target: v1alpha2.DiskTarget{PersistentVolumeClaim: "pvc-foo"}, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1213,11 +1213,11 @@ var _ = Describe("BlockDeviceHandler", func() { }, }, } - vdBar = &virtv2.VirtualDisk{ + vdBar = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd1-bar"}, - Status: 
virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, - Target: virtv2.DiskTarget{PersistentVolumeClaim: "pvc-bar"}, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, + Target: v1alpha2.DiskTarget{PersistentVolumeClaim: "pvc-bar"}, Conditions: []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1232,13 +1232,13 @@ var _ = Describe("BlockDeviceHandler", func() { }, }, } - vm = &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ - {Name: vi.Name, Kind: virtv2.ImageDevice}, - {Name: cvi.Name, Kind: virtv2.ClusterImageDevice}, - {Name: vdFoo.Name, Kind: virtv2.DiskDevice}, - {Name: vdBar.Name, Kind: virtv2.DiskDevice}, + vm = &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ + {Name: vi.Name, Kind: v1alpha2.ImageDevice}, + {Name: cvi.Name, Kind: v1alpha2.ClusterImageDevice}, + {Name: vdFoo.Name, Kind: v1alpha2.DiskDevice}, + {Name: vdBar.Name, Kind: v1alpha2.DiskDevice}, }, }, } @@ -1265,7 +1265,7 @@ var _ = Describe("BlockDeviceHandler", func() { Context("Image is not ready", func() { It("VirtualImage not ready: cannot start, no warnings", func() { - vi.Status.Phase = virtv2.ImagePending + vi.Status.Phase = v1alpha2.ImagePending state := getBlockDevicesState(vi, cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) Expect(ready).To(Equal(3)) @@ -1274,7 +1274,7 @@ var _ = Describe("BlockDeviceHandler", func() { }) It("ClusterVirtualImage not ready: cannot start, no warnings", func() { - cvi.Status.Phase = virtv2.ImagePending + cvi.Status.Phase = v1alpha2.ImagePending state := getBlockDevicesState(vi, cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) Expect(ready).To(Equal(3)) @@ -1285,7 +1285,7 @@ var _ = Describe("BlockDeviceHandler", func() { Context("VirtualDisk is not ready", func() { It("VirtualDisk's target pvc is not yet created", func() { - vdFoo.Status.Phase = virtv2.DiskProvisioning + vdFoo.Status.Phase = v1alpha2.DiskProvisioning vdFoo.Status.Target.PersistentVolumeClaim = "" state := getBlockDevicesState(vi, cvi, vdFoo, vdBar) ready, canStart, warnings := h.countReadyBlockDevices(vm, state, false) @@ -1295,7 +1295,7 @@ var _ = Describe("BlockDeviceHandler", func() { }) It("VirtualDisk's target pvc is created", func() { - vdFoo.Status.Phase = virtv2.DiskProvisioning + vdFoo.Status.Phase = v1alpha2.DiskProvisioning vdFoo.Status.Conditions = []metav1.Condition{ { Type: vdcondition.ReadyType.String(), @@ -1327,7 +1327,7 @@ var _ = Describe("Capacity check", func() { Context("Handle call result based on the number of connected block devices", func() { scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, corev1.AddToScheme, } { @@ -1350,13 +1350,13 @@ var _ = Describe("Capacity check", func() { }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedName.Name, Namespace: namespacedName.Namespace, }, - Spec: virtv2.VirtualMachineSpec{}, - Status: virtv2.VirtualMachineStatus{ + Spec: v1alpha2.VirtualMachineSpec{}, + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1376,7 +1376,7 @@ var _ = Describe("Capacity check", func() { It("Should be ok because fewer than 16 devices are connected", func() { okBlockDeviceServiceMock := &BlockDeviceServiceMock{ - 
CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 1, nil }, } @@ -1392,7 +1392,7 @@ var _ = Describe("Capacity check", func() { }) It("There might be an issue since 16 or more devices are connected.", func() { erroredBlockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 17, nil }, } @@ -1411,14 +1411,14 @@ var _ = Describe("Capacity check", func() { Context("When images are hotplugged into a VirtualMachine", func() { It("checks that `VirtualMachine.Status.BlockDeviceRefs` contains the hotplugged images", func() { blockDeviceServiceMock := &BlockDeviceServiceMock{ - CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *virtv2.VirtualMachine) (int, error) { + CountBlockDevicesAttachedToVMFunc: func(_ context.Context, _ *v1alpha2.VirtualMachine) (int, error) { return 2, nil }, } scheme := apiruntime.NewScheme() for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, virtv1.AddToScheme, } { err := f(scheme) @@ -1439,40 +1439,40 @@ var _ = Describe("Capacity check", func() { Name: "cvi-hotplug", } - vi := &virtv2.VirtualImage{ + vi := &v1alpha2.VirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualImage.Name, Namespace: namespacedVirtualImage.Namespace, }, - Spec: virtv2.VirtualImageSpec{}, - Status: virtv2.VirtualImageStatus{ - Phase: virtv2.ImageReady, - Size: virtv2.ImageStatusSize{ + Spec: v1alpha2.VirtualImageSpec{}, + Status: v1alpha2.VirtualImageStatus{ + Phase: v1alpha2.ImageReady, + Size: v1alpha2.ImageStatusSize{ Unpacked: "200Mi", }, }, } - cvi := &virtv2.ClusterVirtualImage{ + cvi := &v1alpha2.ClusterVirtualImage{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedClusterVirtualImage.Name, }, - Spec: virtv2.ClusterVirtualImageSpec{}, - Status: virtv2.ClusterVirtualImageStatus{ - Phase: virtv2.ImageReady, - Size: virtv2.ImageStatusSize{ + Spec: v1alpha2.ClusterVirtualImageSpec{}, + Status: v1alpha2.ClusterVirtualImageStatus{ + Phase: v1alpha2.ImageReady, + Size: v1alpha2.ImageStatusSize{ Unpacked: "200Mi", }, }, } - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualMachine.Name, Namespace: namespacedVirtualMachine.Namespace, }, - Spec: virtv2.VirtualMachineSpec{}, - Status: virtv2.VirtualMachineStatus{ + Spec: v1alpha2.VirtualMachineSpec{}, + Status: v1alpha2.VirtualMachineStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionUnknown, @@ -1518,37 +1518,37 @@ var _ = Describe("Capacity check", func() { }, } - vmbdaVi := &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaVi := &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualImage.Name, Namespace: namespacedVirtualImage.Namespace, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: namespacedVirtualMachine.Name, - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindVirtualImage, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindVirtualImage, Name: namespacedVirtualImage.Name, }, }, - Status: virtv2.VirtualMachineBlockDeviceAttachmentStatus{ - Phase: 
virtv2.BlockDeviceAttachmentPhaseAttached, + Status: v1alpha2.VirtualMachineBlockDeviceAttachmentStatus{ + Phase: v1alpha2.BlockDeviceAttachmentPhaseAttached, }, } - vmbdaCvi := &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbdaCvi := &v1alpha2.VirtualMachineBlockDeviceAttachment{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedClusterVirtualImage.Name, Namespace: namespacedVirtualMachine.Namespace, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: namespacedVirtualMachine.Name, - BlockDeviceRef: virtv2.VMBDAObjectRef{ - Kind: virtv2.VMBDAObjectRefKindClusterVirtualImage, + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ + Kind: v1alpha2.VMBDAObjectRefKindClusterVirtualImage, Name: namespacedClusterVirtualImage.Name, }, }, - Status: virtv2.VirtualMachineBlockDeviceAttachmentStatus{ - Phase: virtv2.BlockDeviceAttachmentPhaseAttached, + Status: v1alpha2.VirtualMachineBlockDeviceAttachmentStatus{ + Phase: v1alpha2.BlockDeviceAttachmentPhaseAttached, }, } @@ -1566,12 +1566,12 @@ var _ = Describe("Capacity check", func() { Expect(bd.Attached).To(BeTrue(), "`attached` field should be `true`") Expect(bd.Hotplugged).To(BeTrue(), "`hotplugged` field should be `true`") switch bd.Kind { - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: Expect(bd.Name).To(Equal(namespacedClusterVirtualImage.Name), "`Name` should be %q", namespacedClusterVirtualImage.Name) Expect(bd.VirtualMachineBlockDeviceAttachmentName).To(Equal(namespacedClusterVirtualImage.Name), "`VirtualMachineBlockDeviceAttachmentName` should be %q", namespacedClusterVirtualImage.Name) Expect(bd.Size).To(Equal(cvi.Status.Size.Unpacked), "unpacked size of image should be %s", cvi.Status.Size.Unpacked) Expect(bd.Target).To(Equal(cviTarget), "`target` field should be %s", cviTarget) - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: Expect(bd.Name).To(Equal(namespacedVirtualImage.Name), "`Name` should be %q", namespacedVirtualImage.Name) Expect(bd.VirtualMachineBlockDeviceAttachmentName).To(Equal(namespacedVirtualImage.Name), "`VirtualMachineBlockDeviceAttachmentName` should be %q", namespacedVirtualImage.Name) Expect(bd.Size).To(Equal(vi.Status.Size.Unpacked), "unpacked size of image should be %s", vi.Status.Size.Unpacked) @@ -1582,12 +1582,12 @@ var _ = Describe("Capacity check", func() { }) }) -func vmFactoryByVM(vm *virtv2.VirtualMachine) func() *virtv2.VirtualMachine { - return func() *virtv2.VirtualMachine { +func vmFactoryByVM(vm *v1alpha2.VirtualMachine) func() *v1alpha2.VirtualMachine { + return func() *v1alpha2.VirtualMachine { return vm } } -func vmStatusGetter(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus { +func vmStatusGetter(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/class.go b/images/virtualization-artifact/pkg/controller/vm/internal/class.go index 5be674b0ba..a78c5d0cc3 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/class.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/class.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" 
"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -75,8 +75,8 @@ func (h *ClassHandler) Handle(ctx context.Context, s state.VirtualMachineState) cb := conditions.NewConditionBuilder(vmcondition.TypeClassReady). Generation(current.GetGeneration()) - if class != nil && class.Status.Phase == virtv2.ClassPhaseReady { - if (class.Spec.CPU.Type == virtv2.CPUTypeDiscovery || class.Spec.CPU.Type == virtv2.CPUTypeFeatures) && len(class.Status.CpuFeatures.Enabled) == 0 { + if class != nil && class.Status.Phase == v1alpha2.ClassPhaseReady { + if (class.Spec.CPU.Type == v1alpha2.CPUTypeDiscovery || class.Spec.CPU.Type == v1alpha2.CPUTypeFeatures) && len(class.Status.CpuFeatures.Enabled) == 0 { mgr.Update(cb. Message("No enabled processor features found"). Reason(vmcondition.ReasonClassNotReady). diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go b/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go index 2bcd405871..c163fc238f 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/deletion_handler.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -37,7 +37,7 @@ const deletionHandlerName = "DeletionHandler" func NewDeletionHandler(client client.Client) *DeletionHandler { return &DeletionHandler{ client: client, - protection: service.NewProtectionService(client, virtv2.FinalizerKVVMProtection), + protection: service.NewProtectionService(client, v1alpha2.FinalizerKVVMProtection), } } @@ -54,7 +54,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineStat } if s.VirtualMachine().Current().GetDeletionTimestamp().IsZero() { changed := s.VirtualMachine().Changed() - controllerutil.AddFinalizer(changed, virtv2.FinalizerVMCleanup) + controllerutil.AddFinalizer(changed, v1alpha2.FinalizerVMCleanup) return reconcile.Result{}, nil } log.Info("Deletion observed: remove protection from KVVM") @@ -82,7 +82,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineStat } log.Info("Deletion observed: remove cleanup finalizer from VirtualMachine") - controllerutil.RemoveFinalizer(s.VirtualMachine().Changed(), virtv2.FinalizerVMCleanup) + controllerutil.RemoveFinalizer(s.VirtualMachine().Changed(), v1alpha2.FinalizerVMCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go index c64a113842..6b9399787e 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/evict_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" 
"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -43,7 +43,7 @@ var _ = Describe("TestEvictHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -53,7 +53,7 @@ var _ = Describe("TestEvictHandler", func() { vmState = nil }) - newVM := func(withCond bool) *virtv2.VirtualMachine { + newVM := func(withCond bool) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) if withCond { vm.Status.Conditions = append(vm.Status.Conditions, metav1.Condition{ @@ -81,11 +81,11 @@ var _ = Describe("TestEvictHandler", func() { } DescribeTable("Condition NeedEvict should be in expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.NeedsEvictReason) { + func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, condShouldExists bool, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.NeedsEvictReason) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go b/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go index e35430533b..acba3d2dd4 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/firmware.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -60,7 +60,7 @@ func (h *FirmwareHandler) Handle(ctx context.Context, s state.VirtualMachineStat return reconcile.Result{}, nil } -func (h *FirmwareHandler) syncFirmwareUpToDate(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { +func (h *FirmwareHandler) syncFirmwareUpToDate(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { if vm == nil { return } @@ -70,7 +70,7 @@ func (h *FirmwareHandler) syncFirmwareUpToDate(vm *virtv2.VirtualMachine, kvvmi cb := conditions.NewConditionBuilder(vmcondition.TypeFirmwareUpToDate).Generation(vm.GetGeneration()) defer func() { switch vm.Status.Phase { - case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped: + case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped: conditions.RemoveCondition(vmcondition.TypeFirmwareUpToDate, &vm.Status.Conditions) default: diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go index 42d962a8cb..f2da792651 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/firmware_test.go @@ -30,7 +30,7 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -44,7 +44,7 @@ var _ = Describe("TestFirmwareHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -54,7 +54,7 @@ var _ = Describe("TestFirmwareHandler", func() { vmState = nil }) - newVM := func() *virtv2.VirtualMachine { + newVM := func() *v1alpha2.VirtualMachine { return vmbuilder.NewEmpty(name, namespace) } @@ -73,11 +73,11 @@ var _ = Describe("TestFirmwareHandler", func() { } DescribeTable("Condition TypeFirmwareUpToDate should be in expected state", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.FirmwareUpToDateReason, expectedExistence bool) { + func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedReason vmcondition.FirmwareUpToDateReason, expectedExistence bool) { fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -98,11 +98,11 @@ var _ = Describe("TestFirmwareHandler", func() { ) DescribeTable("Condition TypeFirmwareUpToDate should be in the expected state considering the VM phase", - func(vm *virtv2.VirtualMachine, phase virtv2.MachinePhase, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedExistence bool) { + func(vm *v1alpha2.VirtualMachine, phase v1alpha2.MachinePhase, kvvmi *virtv1.VirtualMachineInstance, expectedStatus metav1.ConditionStatus, expectedExistence bool) { vm.Status.Phase = phase fakeClient, resource, vmState = setupEnvironment(vm, kvvmi) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) upToDate, exists := conditions.GetCondition(vmcondition.TypeFirmwareUpToDate, newVM.Status.Conditions) @@ -111,22 +111,22 @@ var _ = Describe("TestFirmwareHandler", func() { Expect(upToDate.Status).To(Equal(expectedStatus)) } }, - Entry("Running phase, condition should not be set", newVM(), virtv2.MachineRunning, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Running phase, condition should be set", newVM(), virtv2.MachineRunning, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Running phase, condition should not be set", newVM(), v1alpha2.MachineRunning, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Running phase, condition should be set", newVM(), v1alpha2.MachineRunning, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Migrating phase, condition should not be set", newVM(), virtv2.MachineMigrating, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Migrating phase, condition should be set", 
newVM(), virtv2.MachineMigrating, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Migrating phase, condition should not be set", newVM(), v1alpha2.MachineMigrating, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Migrating phase, condition should be set", newVM(), v1alpha2.MachineMigrating, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Stopping phase, condition should not be set", newVM(), virtv2.MachineStopping, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Stopping phase, condition should be set", newVM(), virtv2.MachineStopping, newKVVMI("other-image-1"), metav1.ConditionFalse, true), + Entry("Stopping phase, condition should not be set", newVM(), v1alpha2.MachineStopping, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Stopping phase, condition should be set", newVM(), v1alpha2.MachineStopping, newKVVMI("other-image-1"), metav1.ConditionFalse, true), - Entry("Pending phase, condition should not be set", newVM(), virtv2.MachinePending, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Pending phase, condition should not be set", newVM(), virtv2.MachinePending, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Pending phase, condition should not be set", newVM(), v1alpha2.MachinePending, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Pending phase, condition should not be set", newVM(), v1alpha2.MachinePending, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), - Entry("Starting phase, condition should not be set", newVM(), virtv2.MachineStarting, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Starting phase, condition should not be set", newVM(), virtv2.MachineStarting, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Starting phase, condition should not be set", newVM(), v1alpha2.MachineStarting, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Starting phase, condition should not be set", newVM(), v1alpha2.MachineStarting, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), - Entry("Stopped phase, condition should not be set", newVM(), virtv2.MachineStopped, newKVVMI(expectedImage), metav1.ConditionUnknown, false), - Entry("Stopped phase, condition should not be set", newVM(), virtv2.MachineStopped, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), + Entry("Stopped phase, condition should not be set", newVM(), v1alpha2.MachineStopped, newKVVMI(expectedImage), metav1.ConditionUnknown, false), + Entry("Stopped phase, condition should not be set", newVM(), v1alpha2.MachineStopped, newKVVMI("other-image-1"), metav1.ConditionUnknown, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go index f7aa8c1fcf..b5cfbe8596 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/interfaces.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/tools/record" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
EventRecorder BlockDeviceService @@ -29,5 +29,5 @@ import ( type EventRecorder = record.EventRecorder type BlockDeviceService interface { - CountBlockDevicesAttachedToVM(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) + CountBlockDevicesAttachedToVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go b/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go index c4f673dcb2..79c7dacd86 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/ipam.go @@ -33,16 +33,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) const nameIpamHandler = "IPAMHandler" type IPAM interface { - IsBound(vmName string, vmip *virtv2.VirtualMachineIPAddress) bool - CheckIPAddressAvailableForBinding(vmName string, vmip *virtv2.VirtualMachineIPAddress) error - CreateIPAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client) error + IsBound(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) bool + CheckIPAddressAvailableForBinding(vmName string, vmip *v1alpha2.VirtualMachineIPAddress) error + CreateIPAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client) error } func NewIPAMHandler(ipam IPAM, cl client.Client, recorder eventrecord.EventRecorderLogger) *IPAMHandler { @@ -92,7 +92,7 @@ func (h *IPAMHandler) Handle(ctx context.Context, s state.VirtualMachineState) ( Reason(vmcondition.ReasonIPAddressReady). 
 				Condition())
 		changed.Status.VirtualMachineIPAddress = ipAddress.GetName()
-		if changed.Status.Phase != virtv2.MachineRunning && changed.Status.Phase != virtv2.MachineStopping {
+		if changed.Status.Phase != v1alpha2.MachineRunning && changed.Status.Phase != v1alpha2.MachineStopping {
 			changed.Status.IPAddress = ipAddress.Status.Address
 		}
 		kvvmi, err := s.KVVMI(ctx)
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go b/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go
index c33e233376..699ef35652 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/lifecycle.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -73,12 +73,12 @@ func (h *LifeCycleHandler) Handle(ctx context.Context, s state.VirtualMachineSta
 		changed.Status.ObservedGeneration = gen
 	}()
 	if isDeletion(current) {
-		changed.Status.Phase = virtv2.MachineTerminating
+		changed.Status.Phase = v1alpha2.MachineTerminating
 		return reconcile.Result{}, nil
 	}

 	if updated := addAllUnknown(changed, vmcondition.TypeRunning); updated || changed.Status.Phase == "" {
-		changed.Status.Phase = virtv2.MachinePending
+		changed.Status.Phase = v1alpha2.MachinePending
 		return reconcile.Result{Requeue: true}, nil
 	}
@@ -109,7 +109,7 @@ func (h *LifeCycleHandler) Name() string {
 	return nameLifeCycleHandler
 }

-func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, log *slog.Logger) {
+func (h *LifeCycleHandler) syncRunning(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, log *slog.Logger) {
 	cb := conditions.NewConditionBuilder(vmcondition.TypeRunning).Generation(vm.GetGeneration())

 	if pod != nil && pod.Status.Message != "" {
@@ -123,7 +123,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V
 	if kvvm != nil {
 		podScheduled := service.GetKVVMCondition(string(corev1.PodScheduled), kvvm.Status.Conditions)
 		if podScheduled != nil && podScheduled.Status == corev1.ConditionFalse {
-			vm.Status.Phase = virtv2.MachinePending
+			vm.Status.Phase = v1alpha2.MachinePending
 			if podScheduled.Message != "" {
 				cb.Status(metav1.ConditionFalse).
 					Reason(vmcondition.ReasonPodNotStarted).
@@ -174,7 +174,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V } } - if kvvmi != nil && vm.Status.Phase == virtv2.MachineRunning { + if kvvmi != nil && vm.Status.Phase == v1alpha2.MachineRunning { vm.Status.Versions.Libvirt = kvvmi.Annotations[annotations.AnnLibvirtVersion] vm.Status.Versions.Qemu = kvvmi.Annotations[annotations.AnnQemuVersion] } @@ -182,7 +182,7 @@ func (h *LifeCycleHandler) syncRunning(vm *virtv2.VirtualMachine, kvvm *virtv1.V if kvvmi != nil { vm.Status.Node = kvvmi.Status.NodeName - if vm.Status.Phase == virtv2.MachineRunning { + if vm.Status.Phase == v1alpha2.MachineRunning { cb.Reason(vmcondition.ReasonVmIsRunning).Status(metav1.ConditionTrue) conditions.SetCondition(cb, &vm.Status.Conditions) return diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/mac.go b/images/virtualization-artifact/pkg/controller/vm/internal/mac.go index ef3d6dc270..62841d21fd 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/mac.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/mac.go @@ -30,16 +30,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) const nameMACHandler = "MACHandler" type MACManager interface { - IsBound(vmName string, vmmac *virtv2.VirtualMachineMACAddress) bool - CheckMACAddressAvailableForBinding(vmmac *virtv2.VirtualMachineMACAddress) error - CreateMACAddress(ctx context.Context, vm *virtv2.VirtualMachine, client client.Client, macAddress string) error + IsBound(vmName string, vmmac *v1alpha2.VirtualMachineMACAddress) bool + CheckMACAddressAvailableForBinding(vmmac *v1alpha2.VirtualMachineMACAddress) error + CreateMACAddress(ctx context.Context, vm *v1alpha2.VirtualMachine, client client.Client, macAddress string) error } func NewMACHandler(mac MACManager, cl client.Client, recorder eventrecord.EventRecorderLogger) *MACHandler { @@ -168,7 +168,7 @@ func (h *MACHandler) Name() string { return nameMACHandler } -func countNetworksWithMACRequest(networkSpec []virtv2.NetworksSpec, vmmacs []*virtv2.VirtualMachineMACAddress) int { +func countNetworksWithMACRequest(networkSpec []v1alpha2.NetworksSpec, vmmacs []*v1alpha2.VirtualMachineMACAddress) int { existingMACNames := make(map[string]bool) for _, vmmac := range vmmacs { existingMACNames[vmmac.Name] = true @@ -176,7 +176,7 @@ func countNetworksWithMACRequest(networkSpec []virtv2.NetworksSpec, vmmacs []*vi count := 0 for _, ns := range networkSpec { - if ns.Type != virtv2.NetworksTypeMain { + if ns.Type != v1alpha2.NetworksTypeMain { continue } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/mac_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/mac_test.go index b3d9b98fa5..a97c76d3b1 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/mac_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/mac_test.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 
"github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -44,22 +44,22 @@ var _ = Describe("MACHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState - vm *virtv2.VirtualMachine + vm *v1alpha2.VirtualMachine recorder *eventrecord.EventRecorderLoggerMock ) BeforeEach(func() { - vm = &virtv2.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, UID: "test-uid", }, - Spec: virtv2.VirtualMachineSpec{}, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachinePending, + Spec: v1alpha2.VirtualMachineSpec{}, + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachinePending, }, } recorder = &eventrecord.EventRecorderLoggerMock{ @@ -77,8 +77,8 @@ var _ = Describe("MACHandler", func() { recorder = nil }) - newMACAddress := func(name, address string, phase virtv2.VirtualMachineMACAddressPhase, attachedVM string) *virtv2.VirtualMachineMACAddress { - mac := &virtv2.VirtualMachineMACAddress{ + newMACAddress := func(name, address string, phase v1alpha2.VirtualMachineMACAddressPhase, attachedVM string) *v1alpha2.VirtualMachineMACAddress { + mac := &v1alpha2.VirtualMachineMACAddress{ TypeMeta: metav1.TypeMeta{ Kind: "VirtualMachineMACAddress", APIVersion: "virtualization.deckhouse.io/v1alpha2", @@ -90,7 +90,7 @@ var _ = Describe("MACHandler", func() { annotations.LabelVirtualMachineUID: string(vm.UID), }, }, - Status: virtv2.VirtualMachineMACAddressStatus{ + Status: v1alpha2.VirtualMachineMACAddressStatus{ Address: address, }, } @@ -117,7 +117,7 @@ var _ = Describe("MACHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -131,16 +131,16 @@ var _ = Describe("MACHandler", func() { Describe("NetworkSpec have only 'Main' interface", func() { It("Condition 'MACAddressReady' should have status 'True'", func() { - networkSpec := []virtv2.NetworksSpec{ + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, } vm.Spec.Networks = networkSpec fakeClient, resource, vmState = setupEnvironment(vm) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -154,16 +154,16 @@ var _ = Describe("MACHandler", func() { Describe("NetworkSpec have many interfaces", func() { It("One macAddress exist - Condition 'MACAddressReady' should have status 'False'", func() { - networkSpec := []virtv2.NetworksSpec{ + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network2", }, } @@ -174,7 +174,7 @@ var _ = Describe("MACHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, macAddress1) reconcile() - newVM := &virtv2.VirtualMachine{} 
+ newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -187,28 +187,28 @@ var _ = Describe("MACHandler", func() { }) It("One ready macAddress - Condition 'MACAddressReady' should have status 'False'", func() { - networkSpec := []virtv2.NetworksSpec{ + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network2", }, } - macAddress1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", virtv2.VirtualMachineMACAddressPhaseAttached, name) + macAddress1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) macAddress2 := newMACAddress("test-mac-address2", "aa:bb:cc:dd:ee:ef", "", "") vm.Spec.Networks = networkSpec fakeClient, resource, vmState = setupEnvironment(vm, macAddress1, macAddress2) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -220,28 +220,28 @@ var _ = Describe("MACHandler", func() { }) It("two ready macAddresses - Condition 'MACAddressReady' should have status 'True'", func() { - networkSpec := []virtv2.NetworksSpec{ + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network1", }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network2", }, } - macAddress1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", virtv2.VirtualMachineMACAddressPhaseAttached, name) - macAddress2 := newMACAddress("test-mac-address2", "aa:bb:cc:dd:ee:ef", virtv2.VirtualMachineMACAddressPhaseAttached, name) + macAddress1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) + macAddress2 := newMACAddress("test-mac-address2", "aa:bb:cc:dd:ee:ef", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) vm.Spec.Networks = networkSpec fakeClient, resource, vmState = setupEnvironment(vm, macAddress1, macAddress2) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go index 093fc34ca7..d097bad772 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating.go @@ -17,8 +17,10 @@ limitations under the License. 
package internal import ( + "cmp" "context" "fmt" + "slices" "strings" corev1 "k8s.io/api/core/v1" @@ -32,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -91,7 +93,7 @@ func (h *MigratingHandler) Name() string { return nameMigratingHandler } -func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstance) *virtv2.VirtualMachineMigrationState { +func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstance) *v1alpha2.VirtualMachineMigrationState { if kvvmi == nil { return nil } @@ -102,35 +104,35 @@ func (h *MigratingHandler) wrapMigrationState(kvvmi *virtv1.VirtualMachineInstan return nil } - return &virtv2.VirtualMachineMigrationState{ + return &v1alpha2.VirtualMachineMigrationState{ StartTimestamp: migrationState.StartTimestamp, EndTimestamp: migrationState.EndTimestamp, - Target: virtv2.VirtualMachineLocation{ + Target: v1alpha2.VirtualMachineLocation{ Node: migrationState.TargetNode, Pod: migrationState.TargetPod, }, - Source: virtv2.VirtualMachineLocation{ + Source: v1alpha2.VirtualMachineLocation{ Node: migrationState.SourceNode, }, Result: h.getMigrationResult(migrationState), } } -func (h *MigratingHandler) getMigrationResult(state *virtv1.VirtualMachineInstanceMigrationState) virtv2.MigrationResult { +func (h *MigratingHandler) getMigrationResult(state *virtv1.VirtualMachineInstanceMigrationState) v1alpha2.MigrationResult { if state == nil { return "" } switch { case state.Completed && !state.Failed: - return virtv2.MigrationResultSucceeded + return v1alpha2.MigrationResultSucceeded case state.Failed: - return virtv2.MigrationResultFailed + return v1alpha2.MigrationResultFailed default: return "" } } -func (h *MigratingHandler) syncMigrating(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) error { +func (h *MigratingHandler) syncMigrating(ctx context.Context, s state.VirtualMachineState, vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) error { // 0. If KVVMI is nil, migration cannot be in progress. Remove Migrating condition, but keep if migration failed. 
 	if kvvmi == nil {
 		migrating, _ := conditions.GetCondition(vmcondition.TypeMigrating, vm.Status.Conditions)
@@ -208,20 +210,20 @@
 			conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions)
 			return nil
-		case virtv2.VMOPPhasePending:
+		case v1alpha2.VMOPPhasePending:
 			cb.Reason(vmcondition.ReasonMigratingPending).Message("Wait until operation is completed")
-		case virtv2.VMOPPhaseInProgress:
+		case v1alpha2.VMOPPhaseInProgress:
 			cb.Reason(vmcondition.ReasonMigratingInProgress).Message("Wait until operation is completed")
-		case virtv2.VMOPPhaseCompleted:
+		case v1alpha2.VMOPPhaseCompleted:
 			conditions.RemoveCondition(vmcondition.TypeMigrating, &vm.Status.Conditions)
 			return nil
-		case virtv2.VMOPPhaseFailed:
+		case v1alpha2.VMOPPhaseFailed:
 			cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Operation failed")
-		case virtv2.VMOPPhaseTerminating:
+		case v1alpha2.VMOPPhaseTerminating:
 			cb.Reason(vmcondition.ReasonLastMigrationFinishedWithError).Message("Operation terminated")
 		}
 	}
@@ -279,30 +281,39 @@ func (h *MigratingHandler) syncWaitingForVMToBeReadyMigrate(ctx context.Context,
 	return nil
 }

-func (h *MigratingHandler) getVMOPCandidate(ctx context.Context, s state.VirtualMachineState) (*virtv2.VirtualMachineOperation, error) {
+func (h *MigratingHandler) getVMOPCandidate(ctx context.Context, s state.VirtualMachineState) (*v1alpha2.VirtualMachineOperation, error) {
 	vmops, err := s.VMOPs(ctx)
 	if err != nil {
 		return nil, err
 	}

-	var candidate *virtv2.VirtualMachineOperation
-	if len(vmops) > 0 {
-		candidate = vmops[0]
+	if len(vmops) == 0 {
+		return nil, nil
+	}

-		for _, vmop := range vmops {
-			if !commonvmop.IsMigration(vmop) {
-				continue
-			}
-			if vmop.GetCreationTimestamp().Time.After(candidate.GetCreationTimestamp().Time) {
-				candidate = vmop
-			}
+	// sort vmops from the oldest to the newest
+	slices.SortFunc(vmops, func(a, b *v1alpha2.VirtualMachineOperation) int {
+		return cmp.Compare(a.GetCreationTimestamp().UnixNano(), b.GetCreationTimestamp().UnixNano())
+	})
+
+	migrations := slices.DeleteFunc(vmops, func(vmop *v1alpha2.VirtualMachineOperation) bool {
+		return !commonvmop.IsMigration(vmop)
+	})
+
+	for _, migration := range migrations {
+		if commonvmop.IsInProgressOrPending(migration) {
+			return migration, nil
 		}
 	}

-	return candidate, nil
+	if len(migrations) > 0 {
+		return migrations[len(migrations)-1], nil
+	}
+
+	return nil, nil
 }

-func (h *MigratingHandler) syncMigratable(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) error {
+func (h *MigratingHandler) syncMigratable(ctx context.Context, s state.VirtualMachineState, vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) error {
 	cb := conditions.NewConditionBuilder(vmcondition.TypeMigratable).Generation(vm.GetGeneration())

 	if kvvm != nil {
@@ -360,14 +371,14 @@
 	return nil
 }

-func liveMigrationInProgress(migrationState *virtv2.VirtualMachineMigrationState) bool {
+func liveMigrationInProgress(migrationState *v1alpha2.VirtualMachineMigrationState) bool {
 	return migrationState != nil && migrationState.StartTimestamp != nil && migrationState.EndTimestamp == nil
 }

-func liveMigrationFailed(migrationState *virtv2.VirtualMachineMigrationState) bool {
-	return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == virtv2.MigrationResultFailed
+func liveMigrationFailed(migrationState *v1alpha2.VirtualMachineMigrationState) bool {
+	return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == v1alpha2.MigrationResultFailed
 }

-func liveMigrationSucceeded(migrationState *virtv2.VirtualMachineMigrationState) bool {
-	return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == virtv2.MigrationResultSucceeded
+func liveMigrationSucceeded(migrationState *v1alpha2.VirtualMachineMigrationState) bool {
+	return migrationState != nil && migrationState.EndTimestamp != nil && migrationState.Result == v1alpha2.MigrationResultSucceeded
 }
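The rewritten getVMOPCandidate above changes the selection strategy: operations are sorted from oldest to newest, non-migrations are dropped, an in-progress or pending migration is preferred, and only then does the newest migration win. A minimal, self-contained sketch of that ordering under stated assumptions (vmop and candidate below are illustrative stand-ins, not the v1alpha2 types or the commonvmop helpers):

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

// vmop is an illustrative stand-in for *v1alpha2.VirtualMachineOperation.
type vmop struct {
	name        string
	created     time.Time
	isMigration bool
	isActive    bool // stands in for commonvmop.IsInProgressOrPending
}

// candidate mirrors the selection order of the rewritten getVMOPCandidate:
// sort oldest to newest, keep only migrations, prefer an active one,
// otherwise fall back to the newest migration.
func candidate(vmops []vmop) (vmop, bool) {
	if len(vmops) == 0 {
		return vmop{}, false
	}
	slices.SortFunc(vmops, func(a, b vmop) int {
		return cmp.Compare(a.created.UnixNano(), b.created.UnixNano())
	})
	migrations := slices.DeleteFunc(vmops, func(op vmop) bool {
		return !op.isMigration
	})
	for _, m := range migrations {
		if m.isActive {
			return m, true
		}
	}
	if len(migrations) > 0 {
		return migrations[len(migrations)-1], true
	}
	return vmop{}, false
}

func main() {
	now := time.Now()
	ops := []vmop{
		{name: "migrate-finished", created: now, isMigration: true},
		{name: "migrate-pending", created: now.Add(-time.Hour), isMigration: true, isActive: true},
		{name: "restart", created: now.Add(-time.Minute)},
	}
	got, _ := candidate(ops)
	// The pending migration wins even though a newer finished one exists.
	fmt.Println(got.name) // migrate-pending
}

With this ordering, an older migration that is still pending takes priority over a newer one that has already finished, which matches the loop-then-fallback structure in the diff.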
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go
index f3951a4258..e412ad68c2 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/migrating_test.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	vmservice "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition"
 )
@@ -47,7 +47,7 @@ var _ = Describe("MigratingHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
 	)
@@ -57,7 +57,7 @@
 		vmState = nil
 	})

-	newVM := func() *virtv2.VirtualMachine {
+	newVM := func() *v1alpha2.VirtualMachine {
 		return vmbuilder.NewEmpty(name, namespace)
 	}
@@ -67,12 +67,12 @@
 		return kvvmi
 	}

-	newVMOP := func(phase virtv2.VMOPPhase, reason string, isSignalSent bool) *virtv2.VirtualMachineOperation {
+	newVMOP := func(phase v1alpha2.VMOPPhase, reason string, isSignalSent bool) *v1alpha2.VirtualMachineOperation {
 		vmop := vmopbuilder.New(
 			vmopbuilder.WithGenerateName("test-vmop-"),
 			vmopbuilder.WithNamespace(namespace),
 			vmopbuilder.WithVirtualMachine(name),
-			vmopbuilder.WithType(virtv2.VMOPTypeMigrate),
+			vmopbuilder.WithType(v1alpha2.VMOPTypeMigrate),
 		)
 		vmop.Status.Phase = phase
 		vmop.Status.Conditions = []metav1.Condition{
@@ -110,7 +110,7 @@
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvmi)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -132,7 +132,7 @@
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvmi)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())
@@ -155,7 +155,7 @@
 			fakeClient, resource, vmState = setupEnvironment(vm, kvvmi)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+
newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -167,12 +167,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with pending reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationPending.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationPending.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -186,12 +186,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with target ready reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationTargetReady.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationTargetReady.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -205,12 +205,12 @@ var _ = Describe("MigratingHandler", func() { It("Should set condition when vmop is in progress with running reason", func() { vm := newVM() kvvmi := newKVVMI(nil) - vmop := newVMOP(virtv2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationRunning.String(), true) + vmop := newVMOP(v1alpha2.VMOPPhaseInProgress, vmopcondition.ReasonMigrationRunning.String(), true) fakeClient, resource, vmState = setupEnvironment(vm, kvvmi, vmop) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/mock.go b/images/virtualization-artifact/pkg/controller/vm/internal/mock.go index 2a3a5080f4..47bc1a209e 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/mock.go @@ -5,7 +5,7 @@ package internal import ( "context" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "k8s.io/apimachinery/pkg/runtime" "sync" ) @@ -246,7 +246,7 @@ var _ BlockDeviceService = &BlockDeviceServiceMock{} // // // make and configure a mocked BlockDeviceService // mockedBlockDeviceService := &BlockDeviceServiceMock{ -// CountBlockDevicesAttachedToVMFunc: func(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) { +// CountBlockDevicesAttachedToVMFunc: func(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) { // panic("mock out the CountBlockDevicesAttachedToVM method") // }, // } @@ -257,7 +257,7 @@ var _ BlockDeviceService = &BlockDeviceServiceMock{} // } type BlockDeviceServiceMock struct { // CountBlockDevicesAttachedToVMFunc mocks the CountBlockDevicesAttachedToVM method. - CountBlockDevicesAttachedToVMFunc func(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) + CountBlockDevicesAttachedToVMFunc func(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) // calls tracks calls to the methods. 
 	calls struct {
@@ -266,20 +266,20 @@ type BlockDeviceServiceMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// VM is the vm argument value.
-			VM *virtv2.VirtualMachine
+			VM *v1alpha2.VirtualMachine
 		}
 	}
 	lockCountBlockDevicesAttachedToVM sync.RWMutex
 }

 // CountBlockDevicesAttachedToVM calls CountBlockDevicesAttachedToVMFunc.
-func (mock *BlockDeviceServiceMock) CountBlockDevicesAttachedToVM(ctx context.Context, vm *virtv2.VirtualMachine) (int, error) {
+func (mock *BlockDeviceServiceMock) CountBlockDevicesAttachedToVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (int, error) {
 	if mock.CountBlockDevicesAttachedToVMFunc == nil {
 		panic("BlockDeviceServiceMock.CountBlockDevicesAttachedToVMFunc: method is nil but BlockDeviceService.CountBlockDevicesAttachedToVM was just called")
 	}
 	callInfo := struct {
 		Ctx context.Context
-		VM  *virtv2.VirtualMachine
+		VM  *v1alpha2.VirtualMachine
 	}{
 		Ctx: ctx,
 		VM:  vm,
@@ -296,11 +296,11 @@ func (mock *BlockDeviceServiceMock) CountBlockDevicesAttachedToVM(ctx context.Co
 //	len(mockedBlockDeviceService.CountBlockDevicesAttachedToVMCalls())
 func (mock *BlockDeviceServiceMock) CountBlockDevicesAttachedToVMCalls() []struct {
 	Ctx context.Context
-	VM  *virtv2.VirtualMachine
+	VM  *v1alpha2.VirtualMachine
 } {
 	var calls []struct {
 		Ctx context.Context
-		VM  *virtv2.VirtualMachine
+		VM  *v1alpha2.VirtualMachine
 	}
 	mock.lockCountBlockDevicesAttachedToVM.RLock()
 	calls = mock.calls.CountBlockDevicesAttachedToVM
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/network.go b/images/virtualization-artifact/pkg/controller/vm/internal/network.go
index e9e74ee5b2..e50ff5b0b0 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/network.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/network.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/featuregates"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -65,7 +65,11 @@ func (h *NetworkInterfaceHandler) Handle(ctx context.Context, s state.VirtualMac
 		Generation(vm.GetGeneration())

 	defer func() {
-		conditions.SetCondition(cb, &vm.Status.Conditions)
+		if cb.Condition().Status == metav1.ConditionUnknown {
+			conditions.RemoveCondition(vmcondition.TypeNetworkReady, &vm.Status.Conditions)
+		} else {
+			conditions.SetCondition(cb, &vm.Status.Conditions)
+		}
 	}()

 	if len(vm.Spec.Networks) > 1 {
@@ -98,10 +102,10 @@ func (h *NetworkInterfaceHandler) Name() string {
 	return nameNetworkHandler
 }

-func (h *NetworkInterfaceHandler) UpdateNetworkStatus(ctx context.Context, s state.VirtualMachineState, vm *virtv2.VirtualMachine) (reconcile.Result, error) {
+func (h *NetworkInterfaceHandler) UpdateNetworkStatus(ctx context.Context, s state.VirtualMachineState, vm *v1alpha2.VirtualMachine) (reconcile.Result, error) {
 	// check that vmmacName is not removed when deleting a network interface from the spec, as it is still in use
 	if len(vm.Status.Networks) > len(vm.Spec.Networks) {
-		if vm.Status.Phase != virtv2.MachinePending && vm.Status.Phase != virtv2.MachineStopped {
+		if vm.Status.Phase != v1alpha2.MachinePending && vm.Status.Phase != v1alpha2.MachineStopped {
 			return reconcile.Result{}, nil
 		}
 	}
@@ -130,15 +134,15 @@ func (h *NetworkInterfaceHandler) UpdateNetworkStatus(ctx context.Context, s sta
 		}
 	}

-	networksStatus := []virtv2.NetworksStatus{
+	networksStatus := []v1alpha2.NetworksStatus{
 		{
-			Type: virtv2.NetworksTypeMain,
+			Type: v1alpha2.NetworksTypeMain,
 			Name: "default",
 		},
 	}

 	for _, interfaceSpec := range network.CreateNetworkSpec(vm, vmmacs) {
-		networksStatus = append(networksStatus, virtv2.NetworksStatus{
+		networksStatus = append(networksStatus, v1alpha2.NetworksStatus{
 			Type: interfaceSpec.Type,
 			Name: interfaceSpec.Name,
 			MAC:  macAddressesByInterfaceName[interfaceSpec.InterfaceName],
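The new deferred block in the NetworkReady handler no longer publishes a condition with an Unknown status; it removes the condition instead. A compact sketch of that set-or-remove pattern, with local stand-ins for the condition helpers (condition and setOrRemove here are illustrative, not the controller's conditions package):

package main

import "fmt"

type conditionStatus string

const (
	conditionTrue    conditionStatus = "True"
	conditionFalse   conditionStatus = "False"
	conditionUnknown conditionStatus = "Unknown"
)

type condition struct {
	Type   string
	Status conditionStatus
}

// setOrRemove mirrors the deferred block: an Unknown status means the handler
// has nothing to report, so the condition is dropped from status instead of
// being stored as Unknown.
func setOrRemove(c condition, conds []condition) []condition {
	filtered := make([]condition, 0, len(conds))
	for _, existing := range conds {
		if existing.Type != c.Type {
			filtered = append(filtered, existing)
		}
	}
	if c.Status == conditionUnknown {
		return filtered // behaves like conditions.RemoveCondition
	}
	return append(filtered, c) // behaves like conditions.SetCondition
}

func main() {
	conds := []condition{{Type: "NetworkReady", Status: conditionFalse}}
	conds = setOrRemove(condition{Type: "NetworkReady", Status: conditionUnknown}, conds)
	fmt.Println(len(conds)) // 0: the stale NetworkReady condition is gone
}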
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
index 4f37be4ade..0ce49c45f7 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/network_test.go
@@ -33,7 +33,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/featuregates"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -49,9 +49,9 @@ var _ = Describe("NetworkInterfaceHandler", func() {
 	var (
 		ctx        = testutil.ContextBackgroundWithNoOpLogger()
 		fakeClient client.WithWatch
-		resource   *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]
+		resource   *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]
 		vmState    state.VirtualMachineState
-		vm         *virtv2.VirtualMachine
+		vm         *v1alpha2.VirtualMachine
 		vmPod      *corev1.Pod
 	)
@@ -67,14 +67,14 @@
 			Spec: corev1.PodSpec{},
 		}

-		vm = &virtv2.VirtualMachine{
+		vm = &v1alpha2.VirtualMachine{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      name,
 				Namespace: namespace,
 				UID:       "test-uid",
 			},
-			Spec:   virtv2.VirtualMachineSpec{},
-			Status: virtv2.VirtualMachineStatus{},
+			Spec:   v1alpha2.VirtualMachineSpec{},
+			Status: v1alpha2.VirtualMachineStatus{},
 		}
 	})
@@ -86,8 +86,8 @@
 		vmPod = nil
 	})

-	newMACAddress := func(name, address string, phase virtv2.VirtualMachineMACAddressPhase, attachedVM string) *virtv2.VirtualMachineMACAddress {
-		mac := &virtv2.VirtualMachineMACAddress{
+	newMACAddress := func(name, address string, phase v1alpha2.VirtualMachineMACAddressPhase, attachedVM string) *v1alpha2.VirtualMachineMACAddress {
+		mac := &v1alpha2.VirtualMachineMACAddress{
 			TypeMeta: metav1.TypeMeta{
 				Kind:       "VirtualMachineMACAddress",
 				APIVersion: "virtualization.deckhouse.io/v1alpha2",
@@ -99,7 +99,7 @@
 					annotations.LabelVirtualMachineUID: string(vm.UID),
 				},
 			},
-			Status: virtv2.VirtualMachineMACAddressStatus{
+			Status: v1alpha2.VirtualMachineMACAddressStatus{
 				Address: address,
 			},
 		}
@@ -130,50 +130,46 @@
 			fakeClient, resource, vmState = setupEnvironment(vm, vmPod)
 			reconcile()

-			newVM := &virtv2.VirtualMachine{}
+			newVM := &v1alpha2.VirtualMachine{}
 			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM)
 			Expect(err).NotTo(HaveOccurred())

-			cond, exists := conditions.GetCondition(vmcondition.TypeNetworkReady, newVM.Status.Conditions)
-			Expect(exists).To(BeTrue())
-			Expect(cond.Status).To(Equal(metav1.ConditionUnknown))
-
Expect(cond.Reason).To(Equal(conditions.ReasonUnknown.String())) + _, exists := conditions.GetCondition(vmcondition.TypeNetworkReady, newVM.Status.Conditions) + Expect(exists).To(BeFalse()) Expect(newVM.Status.Networks).NotTo(BeNil()) }) }) Describe("NetworkSpec have only 'Main' interface", func() { - It("Network status is not exist; Condition should have status 'False'", func() { - networkSpec := []virtv2.NetworksSpec{ + It("Condition should have status 'Unknown'", func() { + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, } vm.Spec.Networks = networkSpec fakeClient, resource, vmState = setupEnvironment(vm, vmPod) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) - cond, exists := conditions.GetCondition(vmcondition.TypeNetworkReady, newVM.Status.Conditions) - Expect(exists).To(BeTrue()) - Expect(cond.Status).To(Equal(metav1.ConditionUnknown)) - Expect(cond.Reason).To(Equal(conditions.ReasonUnknown.String())) + _, exists := conditions.GetCondition(vmcondition.TypeNetworkReady, newVM.Status.Conditions) + Expect(exists).To(BeFalse()) Expect(newVM.Status.Networks).NotTo(BeNil()) }) }) Describe("NetworkSpec have many interfaces", func() { It("Network status is not exist; Condition should have status 'False'", func() { - mac1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", virtv2.VirtualMachineMACAddressPhaseAttached, name) - networkSpec := []virtv2.NetworksSpec{ + mac1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network", }, } @@ -181,7 +177,7 @@ var _ = Describe("NetworkInterfaceHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, vmPod, mac1) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -193,13 +189,13 @@ var _ = Describe("NetworkInterfaceHandler", func() { }) It("Network status is exist; Condition should have status 'True'", func() { - mac1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", virtv2.VirtualMachineMACAddressPhaseAttached, name) - networkSpec := []virtv2.NetworksSpec{ + mac1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network", }, } @@ -232,7 +228,7 @@ var _ = Describe("NetworkInterfaceHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, vmPod, mac1) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -244,13 +240,13 @@ var _ = Describe("NetworkInterfaceHandler", func() { }) It("Network status is exist; Condition should have status 'False'", func() { - mac1 := newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", virtv2.VirtualMachineMACAddressPhaseAttached, name) - networkSpec := []virtv2.NetworksSpec{ + mac1 := 
newMACAddress("test-mac-address1", "aa:bb:cc:dd:ee:ff", v1alpha2.VirtualMachineMACAddressPhaseAttached, name) + networkSpec := []v1alpha2.NetworksSpec{ { - Type: virtv2.NetworksTypeMain, + Type: v1alpha2.NetworksTypeMain, }, { - Type: virtv2.NetworksTypeNetwork, + Type: v1alpha2.NetworksTypeNetwork, Name: "test-network", }, } @@ -284,7 +280,7 @@ var _ = Describe("NetworkInterfaceHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, vmPod, mac1) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/pod.go b/images/virtualization-artifact/pkg/controller/vm/internal/pod.go index 21623ce7b8..1edeb96d9b 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/pod.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/pod.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/powerstate" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const namePodHandler = "PodHandler" @@ -33,7 +33,7 @@ const namePodHandler = "PodHandler" func NewPodHandler(client client.Client) *PodHandler { return &PodHandler{ client: client, - protection: service.NewProtectionService(client, virtv2.FinalizerPodProtection), + protection: service.NewProtectionService(client, v1alpha2.FinalizerPodProtection), } } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go b/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go index 045aa50ad1..b3511b33a2 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/provisioning.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -72,7 +72,7 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine } p := current.Spec.Provisioning switch p.Type { - case virtv2.ProvisioningTypeUserData: + case v1alpha2.ProvisioningTypeUserData: if p.UserData != "" { cb.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonProvisioningReady) } else { @@ -80,11 +80,11 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine Reason(vmcondition.ReasonProvisioningNotReady). Message("Provisioning is defined but it is empty.") } - case virtv2.ProvisioningTypeUserDataRef: - if p.UserDataRef == nil || p.UserDataRef.Kind != virtv2.UserDataRefKindSecret { + case v1alpha2.ProvisioningTypeUserDataRef: + if p.UserDataRef == nil || p.UserDataRef.Kind != v1alpha2.UserDataRefKindSecret { cb.Status(metav1.ConditionFalse). Reason(vmcondition.ReasonProvisioningNotReady). 
- Message(fmt.Sprintf("userdataRef must be %q", virtv2.UserDataRefKindSecret)) + Message(fmt.Sprintf("userdataRef must be %q", v1alpha2.UserDataRefKindSecret)) } key := types.NamespacedName{Name: p.UserDataRef.Name, Namespace: current.GetNamespace()} err := h.genConditionFromSecret(ctx, cb, key) @@ -92,11 +92,11 @@ func (h *ProvisioningHandler) Handle(ctx context.Context, s state.VirtualMachine return reconcile.Result{}, err } - case virtv2.ProvisioningTypeSysprepRef: - if p.SysprepRef == nil || p.SysprepRef.Kind != virtv2.SysprepRefKindSecret { + case v1alpha2.ProvisioningTypeSysprepRef: + if p.SysprepRef == nil || p.SysprepRef.Kind != v1alpha2.SysprepRefKindSecret { cb.Status(metav1.ConditionFalse). Reason(vmcondition.ReasonProvisioningNotReady). - Message(fmt.Sprintf("sysprepRef must be %q", virtv2.SysprepRefKindSecret)) + Message(fmt.Sprintf("sysprepRef must be %q", v1alpha2.SysprepRefKindSecret)) } key := types.NamespacedName{Name: p.SysprepRef.Name, Namespace: current.GetNamespace()} err := h.genConditionFromSecret(ctx, cb, key) @@ -187,9 +187,9 @@ func (v provisioningValidator) Validate(ctx context.Context, key types.Namespace return err } switch secret.Type { - case virtv2.SecretTypeCloudInit: + case v1alpha2.SecretTypeCloudInit: return v.validateCloudInitSecret(secret) - case virtv2.SecretTypeSysprep: + case v1alpha2.SecretTypeSysprep: return v.validateSysprepSecret(secret) default: return unexpectedSecretTypeError(secret.Type) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go index 559233f7d5..bba2336fbc 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go @@ -17,9 +17,11 @@ limitations under the License. package service import ( + "cmp" "context" "fmt" "log/slog" + "slices" "time" corev1 "k8s.io/api/core/v1" @@ -35,12 +37,14 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/patch" commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" commonvm "github.com/deckhouse/virtualization-controller/pkg/common/vm" + commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) type MigrationVolumesService struct { @@ -79,6 +83,23 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state. 
 		return reconcile.Result{}, nil
 	}

+	if migrating.Reason == vmcondition.ReasonReadyToMigrate.String() {
+		return reconcile.Result{}, nil
+	}
+
+	vmop, err := s.getVMOPCandidate(ctx, vmState)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	if vmop != nil {
+		completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions)
+		switch completed.Reason {
+		case vmopcondition.ReasonMigrationPrepareTarget.String(), vmopcondition.ReasonMigrationTargetReady.String(), vmopcondition.ReasonMigrationRunning.String():
+			return reconcile.Result{}, nil
+		}
+	}
+
 	kvvmInCluster, builtKVVM, builtKVVMWithMigrationVolumes, kvvmiInCluster, err := s.getMachines(ctx, vmState)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -89,7 +110,13 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state.
 		return reconcile.Result{}, nil
 	}

-	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
+	// The pull policy for container disks is only set on the VMI spec and not on the VM spec.
+	// To compare the volume sets correctly, we need to set the pull policy on the VM spec as well.
+	kvvmInClusterCopy := kvvmInCluster.DeepCopy()
+	s.fillContainerDiskImagePullPolicies(kvvmInClusterCopy, kvvmiInCluster)
+	s.fillContainerDiskImagePullPolicies(builtKVVM, kvvmiInCluster)
+
+	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInClusterCopy.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
 	if !kvvmiSynced {
 		// kubevirt does not sync volumes with kvvmi yet
 		log.Info("kvvmi volumes are not synced yet, skip volume migration.")
@@ -101,33 +128,37 @@
 		return reconcile.Result{}, err
 	}

-	// Check disks in generated KVVM before running kvvmSynced check: detect non-migratable disks and disks with changed storage class.
-	if !s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks) {
-		log.Info("ReadWriteOnce disks are not synced yet, skip volume migration.")
-		return reconcile.Result{}, nil
-	}
-	if !s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks) {
-		log.Info("Storage class changed disks are not synced yet, skip volume migration.")
-		return reconcile.Result{}, nil
-	}
+	readWriteOnceDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, readWriteOnceDisks)
+	storageClassChangedDisksSynced := s.areDisksSynced(builtKVVMWithMigrationVolumes, storageClassChangedDisks)

 	kvvmSynced := equality.Semantic.DeepEqual(builtKVVMWithMigrationVolumes.Spec.Template.Spec.Volumes, kvvmInCluster.Spec.Template.Spec.Volumes)
 	if kvvmSynced {
+		if vmop != nil && !(readWriteOnceDisksSynced && storageClassChangedDisksSynced) {
+			return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+		}
 		// we already synced our vm with kvvm
 		log.Info("kvvm volumes are already synced, skip volume migration.")
 		return reconcile.Result{}, nil
 	}

+	if !equality.Semantic.DeepEqual(builtKVVM.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes) {
+		return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM)
+	}
+
 	migrationRequested := builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy != nil && *builtKVVMWithMigrationVolumes.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration
-	migrationInProgress := len(kvvmiInCluster.Status.MigratedVolumes) > 0
-	if !migrationRequested && !migrationInProgress {
-		log.Info("Migration is not requested and not in progress, skip volume migration.")
-		return reconcile.Result{}, nil
+	// Check disks in generated KVVM before running kvvmSynced check: detect non-migratable disks and disks with changed storage class.
+	if !readWriteOnceDisksSynced {
+		log.Info("ReadWriteOnce disks are not synced yet, skip volume migration.")
+		return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+	}
+	if !storageClassChangedDisksSynced {
+		log.Info("Storage class changed disks are not synced yet, skip volume migration.")
+		return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
 	}

-	if migrationRequested && !migrationInProgress {
-		// We should wait 10 seconds. This delay allows user to change storage class on other volumes
+	if migrationRequested {
+		// We should wait for delayDuration. This delay allows the user to change the storage class on other volumes.
+		if len(storageClassChangedDisks) > 0 {
+			delay, exists := s.delay[vm.UID]
+			if !exists {
@@ -167,6 +198,24 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state.
 	// migration in progress
 	// if some volumes are different, we should revert all and sync again in the next reconcile
+	if s.shouldRevert(kvvmiInCluster, readWriteOnceDisks, storageClassChangedDisks) {
+		return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM)
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func getVolumesByName(vmiSpec *virtv1.VirtualMachineInstanceSpec) map[string]*virtv1.Volume {
+	volumes := map[string]*virtv1.Volume{}
+	for _, vol := range vmiSpec.Volumes {
+		volumes[vol.Name] = vol.DeepCopy()
+	}
+	return volumes
+}
+
+// If any PersistentVolumeClaim volume in the kvvmi does not exist in readWriteOnceDisks or storageClassChangedDisks,
+// this indicates that the volume set has diverged from the requested migration and the volumes should be reverted.
+func (s MigrationVolumesService) shouldRevert(kvvmi *virtv1.VirtualMachineInstance, readWriteOnceDisks, storageClassChangedDisks map[string]*v1alpha2.VirtualDisk) bool {
 	migratedPVCNames := make(map[string]struct{})
 	for _, vd := range readWriteOnceDisks {
@@ -176,21 +225,14 @@
 		migratedPVCNames[vd.Status.MigrationState.TargetPVC] = struct{}{}
 	}

-	shouldRevert := false
-	for _, v := range kvvmiInCluster.Status.MigratedVolumes {
-		if v.DestinationPVCInfo != nil {
-			if _, ok := migratedPVCNames[v.DestinationPVCInfo.ClaimName]; !ok {
-				shouldRevert = true
-				break
+	for _, v := range kvvmi.Spec.Volumes {
+		if v.PersistentVolumeClaim != nil {
+			if _, ok := migratedPVCNames[v.PersistentVolumeClaim.ClaimName]; !ok {
+				return true
 			}
 		}
 	}
-
-	if shouldRevert {
-		return reconcile.Result{}, s.patchVolumes(ctx, builtKVVM)
-	}
-
-	return reconcile.Result{}, nil
+	return false
 }

 func (s MigrationVolumesService) patchVolumes(ctx context.Context, kvvm *virtv1.VirtualMachine) error {
@@ -202,7 +244,7 @@
 		return err
 	}

-	logger.FromContext(ctx).Debug("Patch kvvm with migration volumes.", slog.String("patch", string(patchBytes)))
+	logger.FromContext(ctx).Info("Volume migration detected: patching volumes", slog.String("patch", string(patchBytes)))

 	err = s.client.Patch(ctx, kvvm, client.RawPatch(types.JSONPatchType, patchBytes))
 	return err
@@ -233,10 +275,15 @@ func (s MigrationVolumesService) VolumesSynced(ctx context.Context, vmState stat
 		return false, nil
 	}

-	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
+	// The pull policy for container disks is only set on the VMI spec and not on the VM spec.
+	// To compare the volume sets correctly, we need to set the pull policy on the VM spec as well.
+	kvvmInClusterCopy := kvvmInCluster.DeepCopy()
+	s.fillContainerDiskImagePullPolicies(kvvmInClusterCopy, kvvmiInCluster)
+
+	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInClusterCopy.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
 	if !kvvmiSynced {
 		log.Info("kvvmi volumes are not synced yet")
-		log.Debug("", slog.Any("kvvmi", kvvmInCluster.Spec.Template.Spec.Volumes), slog.Any("kvvmi", kvvmiInCluster.Spec.Volumes))
+		log.Debug("", slog.Any("kvvm", kvvmInClusterCopy.Spec.Template.Spec.Volumes), slog.Any("kvvmi", kvvmiInCluster.Spec.Volumes))
 		return false, nil
 	}
@@ -412,21 +459,42 @@ func (s MigrationVolumesService) getReadyTargetPVCs(ctx context.Context, disks m
 	return targetPVCs, nil
 }

+func (s MigrationVolumesService) fillContainerDiskImagePullPolicies(kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+	volsVMI := getVolumesByName(&kvvmi.Spec)
+	for i, volume := range kvvm.Spec.Template.Spec.Volumes {
+		if volume.ContainerDisk == nil {
+			continue
+		}
+		vmiVol, ok := volsVMI[volume.Name]
+		if !ok {
+			continue
+		}
+		if vmiVol.ContainerDisk == nil {
+			continue
+		}
+		kvvm.Spec.Template.Spec.Volumes[i].ContainerDisk.ImagePullPolicy = vmiVol.ContainerDisk.ImagePullPolicy
+	}
+}
+
 func (s MigrationVolumesService) makeKVVMFromVirtualMachineSpec(ctx context.Context, vmState state.VirtualMachineState) (*virtv1.VirtualMachine, *virtv1.VirtualMachine, error) {
 	kvvm, err := s.makeKVVMFromSpec(ctx, vmState)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	kvvmBuilder := kvbuilder.NewKVVM(kvvm.DeepCopy(), kvbuilder.DefaultOptions(vmState.VirtualMachine().Current()))
 	vdByName, err := vmState.VirtualDisksByName(ctx)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	err = kvbuilder.ApplyMigrationVolumes(kvvmBuilder, vmState.VirtualMachine().Changed(), vdByName)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	kvvmWithMigrationVolumes := kvvmBuilder.GetResource()
+
 	return kvvm, kvvmWithMigrationVolumes, nil
 }
@@ -452,3 +520,30 @@ func (s MigrationVolumesService) areDisksSynced(kvvm *virtv1.VirtualMachine, dis
 	return true
 }
+
+func (s MigrationVolumesService) getVMOPCandidate(ctx context.Context, vmState state.VirtualMachineState) (*v1alpha2.VirtualMachineOperation, error) {
+	vmops, err := vmState.VMOPs(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(vmops) == 0 {
+		return nil, nil
+	}
+
+	slices.SortFunc(vmops, func(a, b *v1alpha2.VirtualMachineOperation) int {
+		return cmp.Compare(a.GetCreationTimestamp().UnixNano(), b.GetCreationTimestamp().UnixNano())
+	})
+
+	vmops = slices.DeleteFunc(vmops, func(vmop *v1alpha2.VirtualMachineOperation) bool {
+		return !commonvmop.IsMigration(vmop) || commonvmop.IsFinished(vmop)
+	})
+
+	for _, vmop := range vmops {
+		if commonvmop.IsInProgressOrPending(vmop) {
+			return vmop, nil
+		}
+	}
+
+	return nil, nil
+}
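The pull-policy handling above is needed because KubeVirt populates ImagePullPolicy for container disks on the VMI spec only, so a naive DeepEqual between VM and VMI volume lists would keep reporting a difference. A small, self-contained sketch of the normalization idea under that assumption (volume and normalize are illustrative stand-ins for the KubeVirt volume types):

package main

import (
	"fmt"
	"reflect"
)

// volume is an illustrative stand-in for a KubeVirt volume with a container disk.
type volume struct {
	Name       string
	PullPolicy string // defaulted by KubeVirt on the VMI side only
}

// normalize mirrors fillContainerDiskImagePullPolicies: copy the pull policy
// observed on the VMI volumes onto the matching VM volumes, so the comparison
// below is not tripped by a field that exists only on one side.
func normalize(vmVols, vmiVols []volume) []volume {
	byName := make(map[string]volume, len(vmiVols))
	for _, v := range vmiVols {
		byName[v.Name] = v
	}
	out := make([]volume, len(vmVols))
	copy(out, vmVols)
	for i := range out {
		if vmiVol, ok := byName[out[i].Name]; ok {
			out[i].PullPolicy = vmiVol.PullPolicy
		}
	}
	return out
}

func main() {
	vm := []volume{{Name: "root"}}
	vmi := []volume{{Name: "root", PullPolicy: "IfNotPresent"}}

	fmt.Println(reflect.DeepEqual(vm, vmi))                 // false: spurious difference
	fmt.Println(reflect.DeepEqual(normalize(vm, vmi), vmi)) // true: volumes actually match
}

Normalizing a deep copy keeps the in-cluster object untouched while making the sync check compare only the fields the controller actually manages.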
diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
index 1310a511a7..bdac2b3c8b 100644
--- a/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
+++ b/images/virtualization-artifact/pkg/controller/vm/internal/size_policy_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -43,7 +43,7 @@ var _ = Describe("SizePolicyHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -53,7 +53,7 @@ var _ = Describe("SizePolicyHandler", func() { vmState = nil }) - newVM := func(vmClassName string) *virtv2.VirtualMachine { + newVM := func(vmClassName string) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) if vmClassName != "" { vm.Spec.VirtualMachineClassName = vmClassName @@ -76,7 +76,7 @@ var _ = Describe("SizePolicyHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) cond, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions) @@ -93,7 +93,7 @@ var _ = Describe("SizePolicyHandler", func() { }, } - vmClass := &virtv2.VirtualMachineClass{ + vmClass := &v1alpha2.VirtualMachineClass{ ObjectMeta: metav1.ObjectMeta{ Name: vmClassName, }, @@ -101,7 +101,7 @@ var _ = Describe("SizePolicyHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, vmClass) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) _, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions) @@ -111,7 +111,7 @@ var _ = Describe("SizePolicyHandler", func() { It("Should not add condition if it was absent and size policy matches", func() { vm := newVM(vmClassName) - vmClass := &virtv2.VirtualMachineClass{ + vmClass := &v1alpha2.VirtualMachineClass{ ObjectMeta: metav1.ObjectMeta{ Name: vmClassName, }, @@ -119,7 +119,7 @@ var _ = Describe("SizePolicyHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, vmClass) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) _, exists := conditions.GetCondition(vmcondition.TypeSizingPolicyMatched, newVM.Status.Conditions) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go index 2ad42719c7..1f6bb25f7a 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -50,7 +50,7 @@ func (h *SnapshottingHandler) Handle(ctx context.Context, s state.VirtualMachine return reconcile.Result{}, nil } - var vmSnapshots virtv2.VirtualMachineSnapshotList + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err := h.client.List(ctx, &vmSnapshots, client.InNamespace(vm.Namespace)) if err 
!= nil { return reconcile.Result{}, err @@ -74,12 +74,12 @@ func (h *SnapshottingHandler) Handle(ctx context.Context, s state.VirtualMachine } switch vmSnapshot.Status.Phase { - case virtv2.VirtualMachineSnapshotPhasePending: + case v1alpha2.VirtualMachineSnapshotPhasePending: cb.Status(metav1.ConditionTrue). Message("The virtual machine is selected for taking a snapshot."). Reason(vmcondition.WaitingForTheSnapshotToStart) continue - case virtv2.VirtualMachineSnapshotPhaseInProgress: + case v1alpha2.VirtualMachineSnapshotPhaseInProgress: cb.Status(metav1.ConditionTrue). Message("The virtual machine is in the process of snapshotting."). Reason(vmcondition.ReasonSnapshottingInProgress) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go index a0c78e637c..7bdf3d33b8 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/snapshotting_test.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -42,7 +42,7 @@ var _ = Describe("SnapshottingHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -52,20 +52,20 @@ var _ = Describe("SnapshottingHandler", func() { vmState = nil }) - newVM := func() *virtv2.VirtualMachine { + newVM := func() *v1alpha2.VirtualMachine { return vmbuilder.NewEmpty(name, namespace) } - newVMSnapshot := func(vmName string, phase virtv2.VirtualMachineSnapshotPhase) *virtv2.VirtualMachineSnapshot { - return &virtv2.VirtualMachineSnapshot{ + newVMSnapshot := func(vmName string, phase v1alpha2.VirtualMachineSnapshotPhase) *v1alpha2.VirtualMachineSnapshot { + return &v1alpha2.VirtualMachineSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: vmName + "-snapshot", Namespace: namespace, }, - Spec: virtv2.VirtualMachineSnapshotSpec{ + Spec: v1alpha2.VirtualMachineSnapshotSpec{ VirtualMachineName: vmName, }, - Status: virtv2.VirtualMachineSnapshotStatus{ + Status: v1alpha2.VirtualMachineSnapshotStatus{ Phase: phase, }, } @@ -82,12 +82,12 @@ var _ = Describe("SnapshottingHandler", func() { Describe("Condition presence and absence scenarios", func() { It("Should add condition if snapshot is in progress", func() { vm := newVM() - snapshot := newVMSnapshot(vm.Name, virtv2.VirtualMachineSnapshotPhaseInProgress) + snapshot := newVMSnapshot(vm.Name, v1alpha2.VirtualMachineSnapshotPhaseInProgress) fakeClient, resource, vmState = setupEnvironment(vm, snapshot) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -98,11 +98,11 @@ var _ = Describe("SnapshottingHandler", func() { It("Should not add condition if snapshot is ready", func() { vm := newVM() - snapshot := newVMSnapshot(vm.Name, 
virtv2.VirtualMachineSnapshotPhaseReady) + snapshot := newVMSnapshot(vm.Name, v1alpha2.VirtualMachineSnapshotPhaseReady) fakeClient, resource, vmState = setupEnvironment(vm, snapshot) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -115,7 +115,7 @@ var _ = Describe("SnapshottingHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -134,7 +134,7 @@ var _ = Describe("SnapshottingHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go index 6338d2e4eb..6d24d8725c 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/state/state.go @@ -19,7 +19,6 @@ package state import ( "context" "fmt" - "sync" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -33,59 +32,49 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/powerstate" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineState interface { - VirtualMachine() *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + VirtualMachine() *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] KVVM(ctx context.Context) (*virtv1.VirtualMachine, error) KVVMI(ctx context.Context) (*virtv1.VirtualMachineInstance, error) Pods(ctx context.Context) (*corev1.PodList, error) Pod(ctx context.Context) (*corev1.Pod, error) - VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error) - VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error) - ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) - VirtualDisksByName(ctx context.Context) (map[string]*virtv2.VirtualDisk, error) - VirtualImagesByName(ctx context.Context) (map[string]*virtv2.VirtualImage, error) - ClusterVirtualImagesByName(ctx context.Context) (map[string]*virtv2.ClusterVirtualImage, error) - VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment, error) - IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, error) - VirtualMachineMACAddresses(ctx context.Context) ([]*virtv2.VirtualMachineMACAddress, error) - Class(ctx context.Context) (*virtv2.VirtualMachineClass, error) - VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error) + VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error) + VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error) + ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) + VirtualDisksByName(ctx 
context.Context) (map[string]*v1alpha2.VirtualDisk, error) + VirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.VirtualImage, error) + ClusterVirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.ClusterVirtualImage, error) + VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) + IPAddress(ctx context.Context) (*v1alpha2.VirtualMachineIPAddress, error) + VirtualMachineMACAddresses(ctx context.Context) ([]*v1alpha2.VirtualMachineMACAddress, error) + Class(ctx context.Context) (*v1alpha2.VirtualMachineClass, error) + VMOPs(ctx context.Context) ([]*v1alpha2.VirtualMachineOperation, error) Shared(fn func(s *Shared)) - ReadWriteOnceVirtualDisks(ctx context.Context) ([]*virtv2.VirtualDisk, error) + ReadWriteOnceVirtualDisks(ctx context.Context) ([]*v1alpha2.VirtualDisk, error) } -func New(c client.Client, vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus]) VirtualMachineState { +func New(c client.Client, vm *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus]) VirtualMachineState { return &state{client: c, vm: vm} } -type state struct { - client client.Client - mu sync.RWMutex - vm *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] - pods *corev1.PodList - pod *corev1.Pod - vdByName map[string]*virtv2.VirtualDisk - viByName map[string]*virtv2.VirtualImage - cviByName map[string]*virtv2.ClusterVirtualImage - vmbdasByRef map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment - ipAddress *virtv2.VirtualMachineIPAddress - vmmacs []*virtv2.VirtualMachineMACAddress - vmClass *virtv2.VirtualMachineClass - shared Shared -} - type Shared struct { ShutdownInfo powerstate.ShutdownInfo } +type state struct { + client client.Client + vm *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] + shared Shared +} + func (s *state) Shared(fn func(s *Shared)) { fn(&s.shared) } -func (s *state) VirtualMachine() *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] { +func (s *state) VirtualMachine() *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] { return s.vm } @@ -106,15 +95,6 @@ func (s *state) KVVMI(ctx context.Context) (*virtv1.VirtualMachineInstance, erro } func (s *state) Pods(ctx context.Context) (*corev1.PodList, error) { - if s.vm == nil { - return nil, nil - } - if s.pods != nil { - return s.pods, nil - } - s.mu.Lock() - defer s.mu.Unlock() - podList := corev1.PodList{} err := s.client.List(ctx, &podList, &client.ListOptions{ Namespace: s.vm.Current().GetNamespace(), @@ -123,17 +103,10 @@ func (s *state) Pods(ctx context.Context) (*corev1.PodList, error) { if err != nil && !k8serrors.IsNotFound(err) { return nil, fmt.Errorf("unable to list virt-launcher Pod for KubeVirt VM %q: %w", s.vm.Current().GetName(), err) } - s.pods = &podList - return s.pods, nil + return &podList, nil } func (s *state) Pod(ctx context.Context) (*corev1.Pod, error) { - if s.vm == nil { - return nil, nil - } - if s.pod != nil { - return s.pod, nil - } pods, err := s.Pods(ctx) if err != nil { return nil, fmt.Errorf("failed to fetch pod for VirtualMachine %q: %w", s.vm.Current().GetName(), err) @@ -146,23 +119,11 @@ func (s *state) Pod(ctx context.Context) (*corev1.Pod, error) { if len(pods.Items) > 0 { pod = kvvmutil.GetVMPod(kvvmi, pods) } - s.mu.Lock() - defer s.mu.Unlock() - s.pod = pod return pod, nil } -func (s *state) 
VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment, error) { - if s.vm == nil { - return nil, nil - } - if len(s.vmbdasByRef) > 0 { - return s.vmbdasByRef, nil - } - s.mu.Lock() - defer s.mu.Unlock() - - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList +func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { + var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList err := s.client.List(ctx, &vmbdas, &client.ListOptions{ Namespace: s.vm.Name().Namespace, }) @@ -170,13 +131,13 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v return nil, err } - vmbdasByRef := make(map[virtv2.VMBDAObjectRef][]*virtv2.VirtualMachineBlockDeviceAttachment) + vmbdasByRef := make(map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment) for _, vmbda := range vmbdas.Items { if vmbda.Spec.VirtualMachineName != s.vm.Name().Name { continue } - key := virtv2.VMBDAObjectRef{ + key := v1alpha2.VMBDAObjectRef{ Kind: vmbda.Spec.BlockDeviceRef.Kind, Name: vmbda.Spec.BlockDeviceRef.Name, } @@ -184,47 +145,38 @@ func (s *state) VirtualMachineBlockDeviceAttachments(ctx context.Context) (map[v vmbdasByRef[key] = append(vmbdasByRef[key], &vmbda) } - s.vmbdasByRef = vmbdasByRef return vmbdasByRef, nil } -func (s *state) VirtualDisk(ctx context.Context, name string) (*virtv2.VirtualDisk, error) { +func (s *state) VirtualDisk(ctx context.Context, name string) (*v1alpha2.VirtualDisk, error) { return object.FetchObject(ctx, types.NamespacedName{ Name: name, Namespace: s.vm.Current().GetNamespace(), - }, s.client, &virtv2.VirtualDisk{}) + }, s.client, &v1alpha2.VirtualDisk{}) } -func (s *state) VirtualImage(ctx context.Context, name string) (*virtv2.VirtualImage, error) { +func (s *state) VirtualImage(ctx context.Context, name string) (*v1alpha2.VirtualImage, error) { return object.FetchObject(ctx, types.NamespacedName{ Name: name, Namespace: s.vm.Current().GetNamespace(), - }, s.client, &virtv2.VirtualImage{}) + }, s.client, &v1alpha2.VirtualImage{}) } -func (s *state) ClusterVirtualImage(ctx context.Context, name string) (*virtv2.ClusterVirtualImage, error) { +func (s *state) ClusterVirtualImage(ctx context.Context, name string) (*v1alpha2.ClusterVirtualImage, error) { return object.FetchObject(ctx, types.NamespacedName{ Name: name, - }, s.client, &virtv2.ClusterVirtualImage{}) + }, s.client, &v1alpha2.ClusterVirtualImage{}) } -func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.VirtualDisk, error) { - if s.vm == nil { - return nil, nil - } - if len(s.vdByName) > 0 { - return s.vdByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() - vdByName := make(map[string]*virtv2.VirtualDisk) +func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*v1alpha2.VirtualDisk, error) { + vdByName := make(map[string]*v1alpha2.VirtualDisk) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { - case virtv2.DiskDevice: + case v1alpha2.DiskDevice: vd, err := object.FetchObject(ctx, types.NamespacedName{ Name: bd.Name, Namespace: s.vm.Current().GetNamespace(), - }, s.client, &virtv2.VirtualDisk{}) + }, s.client, &v1alpha2.VirtualDisk{}) if err != nil { return nil, fmt.Errorf("unable to get virtual disk %q: %w", bd.Name, err) } @@ -236,27 +188,18 @@ func (s *state) VirtualDisksByName(ctx context.Context) (map[string]*virtv2.Virt continue } } - s.vdByName 
= vdByName return vdByName, nil } -func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.VirtualImage, error) { - if s.vm == nil { - return nil, nil - } - if len(s.viByName) > 0 { - return s.viByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() - viByName := make(map[string]*virtv2.VirtualImage) +func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.VirtualImage, error) { + viByName := make(map[string]*v1alpha2.VirtualImage) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { - case virtv2.ImageDevice: + case v1alpha2.ImageDevice: vi, err := object.FetchObject(ctx, types.NamespacedName{ Name: bd.Name, Namespace: s.vm.Current().GetNamespace(), - }, s.client, &virtv2.VirtualImage{}) + }, s.client, &v1alpha2.VirtualImage{}) if err != nil { return nil, fmt.Errorf("unable to get VI %q: %w", bd.Name, err) } @@ -268,27 +211,18 @@ func (s *state) VirtualImagesByName(ctx context.Context) (map[string]*virtv2.Vir continue } } - s.viByName = viByName return viByName, nil } -func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*virtv2.ClusterVirtualImage, error) { - if s.vm == nil { - return nil, nil - } - if len(s.cviByName) > 0 { - return s.cviByName, nil - } - s.mu.Lock() - defer s.mu.Unlock() - cviByName := make(map[string]*virtv2.ClusterVirtualImage) +func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*v1alpha2.ClusterVirtualImage, error) { + cviByName := make(map[string]*v1alpha2.ClusterVirtualImage) for _, bd := range s.vm.Current().Spec.BlockDeviceRefs { switch bd.Kind { - case virtv2.ClusterImageDevice: + case v1alpha2.ClusterImageDevice: cvi, err := object.FetchObject(ctx, types.NamespacedName{ Name: bd.Name, Namespace: s.vm.Current().GetNamespace(), - }, s.client, &virtv2.ClusterVirtualImage{}) + }, s.client, &v1alpha2.ClusterVirtualImage{}) if err != nil { return nil, fmt.Errorf("unable to get CVI %q: %w", bd.Name, err) } @@ -300,25 +234,14 @@ func (s *state) ClusterVirtualImagesByName(ctx context.Context) (map[string]*vir continue } } - s.cviByName = cviByName return cviByName, nil } -func (s *state) VirtualMachineMACAddresses(ctx context.Context) ([]*virtv2.VirtualMachineMACAddress, error) { - if s.vm == nil { - return nil, nil - } - - if s.vmmacs != nil { - return s.vmmacs, nil - } - s.mu.Lock() - defer s.mu.Unlock() - - var vmmacs []*virtv2.VirtualMachineMACAddress +func (s *state) VirtualMachineMACAddresses(ctx context.Context) ([]*v1alpha2.VirtualMachineMACAddress, error) { + var vmmacs []*v1alpha2.VirtualMachineMACAddress for _, ns := range s.vm.Current().Spec.Networks { vmmacKey := types.NamespacedName{Name: ns.VirtualMachineMACAddressName, Namespace: s.vm.Current().GetNamespace()} - vmmac, err := object.FetchObject(ctx, vmmacKey, s.client, &virtv2.VirtualMachineMACAddress{}) + vmmac, err := object.FetchObject(ctx, vmmacKey, s.client, &v1alpha2.VirtualMachineMACAddress{}) if err != nil { return nil, fmt.Errorf("failed to fetch VirtualMachineMACAddress: %w", err) } @@ -327,7 +250,7 @@ func (s *state) VirtualMachineMACAddresses(ctx context.Context) ([]*virtv2.Virtu } } - vmmacList := &virtv2.VirtualMachineMACAddressList{} + vmmacList := &v1alpha2.VirtualMachineMACAddressList{} err := s.client.List(ctx, vmmacList, &client.ListOptions{ Namespace: s.vm.Current().GetNamespace(), LabelSelector: labels.SelectorFromSet(map[string]string{annotations.LabelVirtualMachineUID: string(s.vm.Current().GetUID())}), @@ -340,24 +263,13 @@ func (s *state) VirtualMachineMACAddresses(ctx 
context.Context) ([]*virtv2.Virtu vmmacs = append(vmmacs, &vmmac) } - s.vmmacs = vmmacs - return s.vmmacs, nil + return vmmacs, nil } -func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, error) { - if s.vm == nil { - return nil, nil - } - - if s.ipAddress != nil { - return s.ipAddress, nil - } - s.mu.Lock() - defer s.mu.Unlock() - +func (s *state) IPAddress(ctx context.Context) (*v1alpha2.VirtualMachineIPAddress, error) { vmipName := s.vm.Current().Spec.VirtualMachineIPAddress if vmipName == "" { - vmipList := &virtv2.VirtualMachineIPAddressList{} + vmipList := &v1alpha2.VirtualMachineIPAddressList{} err := s.client.List(ctx, vmipList, &client.ListOptions{ Namespace: s.vm.Current().GetNamespace(), @@ -372,50 +284,38 @@ func (s *state) IPAddress(ctx context.Context) (*virtv2.VirtualMachineIPAddress, return nil, nil } - s.ipAddress = &vmipList.Items[0] - } else { - vmipKey := types.NamespacedName{Name: vmipName, Namespace: s.vm.Current().GetNamespace()} + return &vmipList.Items[0], nil + } - ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &virtv2.VirtualMachineIPAddress{}) - if err != nil { - return nil, fmt.Errorf("failed to fetch VirtualMachineIPAddress: %w", err) - } - s.ipAddress = ipAddress + vmipKey := types.NamespacedName{Name: vmipName, Namespace: s.vm.Current().GetNamespace()} + + ipAddress, err := object.FetchObject(ctx, vmipKey, s.client, &v1alpha2.VirtualMachineIPAddress{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch VirtualMachineIPAddress: %w", err) } - return s.ipAddress, nil + return ipAddress, nil } -func (s *state) Class(ctx context.Context) (*virtv2.VirtualMachineClass, error) { - if s.vm == nil { - return nil, nil - } - if s.vmClass != nil { - return s.vmClass, nil - } +func (s *state) Class(ctx context.Context) (*v1alpha2.VirtualMachineClass, error) { className := s.vm.Current().Spec.VirtualMachineClassName classKey := types.NamespacedName{Name: className} - class, err := object.FetchObject(ctx, classKey, s.client, &virtv2.VirtualMachineClass{}) + class, err := object.FetchObject(ctx, classKey, s.client, &v1alpha2.VirtualMachineClass{}) if err != nil { return nil, fmt.Errorf("failed to fetch VirtualMachineClass: %w", err) } - s.vmClass = class - return s.vmClass, nil + return class, nil } -func (s *state) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, error) { - if s.vm == nil { - return nil, nil - } - +func (s *state) VMOPs(ctx context.Context) ([]*v1alpha2.VirtualMachineOperation, error) { vm := s.vm.Current() - vmops := &virtv2.VirtualMachineOperationList{} + vmops := &v1alpha2.VirtualMachineOperationList{} err := s.client.List(ctx, vmops, client.InNamespace(vm.Namespace)) if err != nil { return nil, fmt.Errorf("failed to list VirtualMachineOperation: %w", err) } - var resultVMOPs []*virtv2.VirtualMachineOperation + var resultVMOPs []*v1alpha2.VirtualMachineOperation for _, vmop := range vmops.Items { if vmop.Spec.VirtualMachine == vm.Name { @@ -426,13 +326,13 @@ func (s *state) VMOPs(ctx context.Context) ([]*virtv2.VirtualMachineOperation, e return resultVMOPs, nil } -func (s *state) ReadWriteOnceVirtualDisks(ctx context.Context) ([]*virtv2.VirtualDisk, error) { +func (s *state) ReadWriteOnceVirtualDisks(ctx context.Context) ([]*v1alpha2.VirtualDisk, error) { vdByName, err := s.VirtualDisksByName(ctx) if err != nil { return nil, err } - var nonMigratableVirtualDisks []*virtv2.VirtualDisk + var nonMigratableVirtualDisks []*v1alpha2.VirtualDisk for _, vd := range vdByName { pvcKey := 
types.NamespacedName{Name: vd.Status.Target.PersistentVolumeClaim, Namespace: vd.Namespace} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go b/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go index 15d0e293ef..0b0f504e15 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/statistic.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/vm" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const nameStatisticHandler = "StatisticHandler" @@ -77,21 +77,21 @@ func (h *StatisticHandler) Name() string { return nameStatisticHandler } -func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine, +func (h *StatisticHandler) syncResources(changed *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, ) { if changed == nil { return } - var resources virtv2.ResourcesStatus + var resources v1alpha2.ResourcesStatus switch pod { case nil: var ( cpuKVVMIRequest resource.Quantity memorySize resource.Quantity cores int - topology virtv2.Topology + topology v1alpha2.Topology coreFraction string ) if kvvmi == nil { @@ -99,7 +99,7 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine, cores = changed.Spec.CPU.Cores coreFraction = changed.Spec.CPU.CoreFraction sockets, coresPerSocket := vm.CalculateCoresAndSockets(cores) - topology = virtv2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets} + topology = v1alpha2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets} } else { cpuKVVMIRequest = kvvmi.Spec.Domain.Resources.Requests[corev1.ResourceCPU] memorySize = kvvmi.Spec.Domain.Resources.Requests[corev1.ResourceMemory] @@ -108,14 +108,14 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine, coreFraction = h.getCoreFractionByKVVMI(kvvmi) topology = h.getCurrentTopologyByKVVMI(kvvmi) } - resources = virtv2.ResourcesStatus{ - CPU: virtv2.CPUStatus{ + resources = v1alpha2.ResourcesStatus{ + CPU: v1alpha2.CPUStatus{ Cores: cores, CoreFraction: coreFraction, RequestedCores: cpuKVVMIRequest, Topology: topology, }, - Memory: virtv2.MemoryStatus{ + Memory: v1alpha2.MemoryStatus{ Size: memorySize, }, } @@ -148,15 +148,15 @@ func (h *StatisticHandler) syncResources(changed *virtv2.VirtualMachine, mi := int64(1024 * 1024) memoryOverhead = *resource.NewQuantity(int64(math.Ceil(float64(memoryOverhead.Value())/float64(mi)))*mi, resource.BinarySI) - resources = virtv2.ResourcesStatus{ - CPU: virtv2.CPUStatus{ + resources = v1alpha2.ResourcesStatus{ + CPU: v1alpha2.CPUStatus{ Cores: cores, CoreFraction: coreFraction, RequestedCores: cpuKVVMIRequest, RuntimeOverhead: cpuOverhead, Topology: topology, }, - Memory: virtv2.MemoryStatus{ + Memory: v1alpha2.MemoryStatus{ Size: memoryKVVMIRequest, RuntimeOverhead: memoryOverhead, }, @@ -181,20 +181,20 @@ func (h *StatisticHandler) getCoreFractionByKVVMI(kvvmi *virtv1.VirtualMachineIn return strconv.Itoa(int(cpuKVVMIRequest.MilliValue())*100/(h.getCoresByKVVMI(kvvmi)*1000)) + "%" } -func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachineInstance) virtv2.Topology { +func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachineInstance) v1alpha2.Topology { if kvvmi == nil { - return virtv2.Topology{} + return 
v1alpha2.Topology{} } if kvvmi.Status.CurrentCPUTopology != nil { - return virtv2.Topology{ + return v1alpha2.Topology{ CoresPerSocket: int(kvvmi.Status.CurrentCPUTopology.Cores), Sockets: int(kvvmi.Status.CurrentCPUTopology.Sockets), } } if kvvmi.Spec.Domain.CPU != nil { - return virtv2.Topology{ + return v1alpha2.Topology{ CoresPerSocket: int(kvvmi.Spec.Domain.CPU.Cores), Sockets: int(kvvmi.Spec.Domain.CPU.Sockets), } @@ -202,10 +202,10 @@ func (h *StatisticHandler) getCurrentTopologyByKVVMI(kvvmi *virtv1.VirtualMachin cores := h.getCoresByKVVMI(kvvmi) sockets, coresPerSocket := vm.CalculateCoresAndSockets(cores) - return virtv2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets} + return v1alpha2.Topology{CoresPerSocket: coresPerSocket, Sockets: sockets} } -func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1.Pod, pods *corev1.PodList) { +func (h *StatisticHandler) syncPods(changed *v1alpha2.VirtualMachine, pod *corev1.Pod, pods *corev1.PodList) { if changed == nil { return } @@ -213,13 +213,13 @@ func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1. changed.Status.VirtualMachinePods = nil return } - virtualMachinePods := make([]virtv2.VirtualMachinePod, len(pods.Items)) + virtualMachinePods := make([]v1alpha2.VirtualMachinePod, len(pods.Items)) for i, p := range pods.Items { active := false if pod != nil && p.GetUID() == pod.GetUID() { active = true } - virtualMachinePods[i] = virtv2.VirtualMachinePod{ + virtualMachinePods[i] = v1alpha2.VirtualMachinePod{ Name: p.GetName(), Active: active, } @@ -227,13 +227,13 @@ func (h *StatisticHandler) syncPods(changed *virtv2.VirtualMachine, pod *corev1. changed.Status.VirtualMachinePods = virtualMachinePods } -func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { +func (h *StatisticHandler) syncStats(current, changed *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) { if current == nil || changed == nil { return } phaseChanged := current.Status.Phase != changed.Status.Phase - var stats virtv2.VirtualMachineStats + var stats v1alpha2.VirtualMachineStats if current.Status.Stats != nil { stats = *current.Status.Stats.DeepCopy() @@ -245,11 +245,11 @@ func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kv launchTimeDuration := stats.LaunchTimeDuration switch changed.Status.Phase { - case virtv2.MachinePending, virtv2.MachineStopped: + case v1alpha2.MachinePending, v1alpha2.MachineStopped: launchTimeDuration.WaitingForDependencies = nil launchTimeDuration.VirtualMachineStarting = nil launchTimeDuration.GuestOSAgentStarting = nil - case virtv2.MachineStarting: + case v1alpha2.MachineStarting: launchTimeDuration.VirtualMachineStarting = nil launchTimeDuration.GuestOSAgentStarting = nil @@ -257,13 +257,13 @@ func (h *StatisticHandler) syncStats(current, changed *virtv2.VirtualMachine, kv for i := len(pts) - 1; i > 0; i-- { pt := pts[i] ptPrev := pts[i-1] - if pt.Phase == virtv2.MachineStarting && ptPrev.Phase == virtv2.MachinePending { + if pt.Phase == v1alpha2.MachineStarting && ptPrev.Phase == v1alpha2.MachinePending { launchTimeDuration.WaitingForDependencies = &metav1.Duration{Duration: pt.Timestamp.Sub(pts[i-1].Timestamp.Time)} break } } } - case virtv2.MachineRunning: + case v1alpha2.MachineRunning: if kvvmi != nil && osInfoIsEmpty(kvvmi.Status.GuestOSInfo) { launchTimeDuration.GuestOSAgentStarting = nil } @@ -272,8 +272,8 @@ func (h *StatisticHandler) syncStats(current, changed 
*virtv2.VirtualMachine, kv pt := pts[i] ptPrev := pts[i-1] - if pt.Phase == virtv2.MachineRunning { - if phaseChanged && ptPrev.Phase == virtv2.MachineStarting { + if pt.Phase == v1alpha2.MachineRunning { + if phaseChanged && ptPrev.Phase == v1alpha2.MachineStarting { launchTimeDuration.VirtualMachineStarting = &metav1.Duration{Duration: pt.Timestamp.Sub(pts[i-1].Timestamp.Time)} } if kvvmi != nil && osInfoIsEmpty(current.Status.GuestOSInfo) && !osInfoIsEmpty(kvvmi.Status.GuestOSInfo) && !pt.Timestamp.IsZero() { @@ -293,11 +293,11 @@ func osInfoIsEmpty(info virtv1.VirtualMachineInstanceGuestOSInfo) bool { return emptyOSInfo == info } -func NewPhaseTransitions(phaseTransitions []virtv2.VirtualMachinePhaseTransitionTimestamp, oldPhase, newPhase virtv2.MachinePhase) []virtv2.VirtualMachinePhaseTransitionTimestamp { +func NewPhaseTransitions(phaseTransitions []v1alpha2.VirtualMachinePhaseTransitionTimestamp, oldPhase, newPhase v1alpha2.MachinePhase) []v1alpha2.VirtualMachinePhaseTransitionTimestamp { now := metav1.NewTime(time.Now().Truncate(time.Second)) if oldPhase != newPhase { - phaseTransitions = append(phaseTransitions, virtv2.VirtualMachinePhaseTransitionTimestamp{ + phaseTransitions = append(phaseTransitions, v1alpha2.VirtualMachinePhaseTransitionTimestamp{ Phase: newPhase, Timestamp: now, }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go index a68885634a..06a0cb659e 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/statistic_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("TestStatisticHandler", func() { @@ -44,15 +44,15 @@ var _ = Describe("TestStatisticHandler", func() { podUID types.UID = "test-pod-uid" ) - newVM := func(cores int, coreFraction *string, memorySize string) *virtv2.VirtualMachine { + newVM := func(cores int, coreFraction *string, memorySize string) *v1alpha2.VirtualMachine { vm := vmbuilder.New( vmbuilder.WithName(vmName), vmbuilder.WithNamespace(vmNamespace), vmbuilder.WithCPU(cores, coreFraction), vmbuilder.WithMemory(resource.MustParse(memorySize)), ) - vm.Status = virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, + vm.Status = v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, } return vm @@ -109,7 +109,7 @@ var _ = Describe("TestStatisticHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.Client - vmResource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + vmResource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState ) @@ -141,11 +141,11 @@ var _ = Describe("TestStatisticHandler", func() { } DescribeTable("Check Generated .status.resources", - func(vm *virtv2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, expect expectedValues) { + func(vm *v1alpha2.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, pod *corev1.Pod, expect expectedValues) { fakeClient, vmResource, vmState = setupEnvironment(vm, kvvmi, pod) reconcile() 
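The phase-transition bookkeeping above (NewPhaseTransitions plus the syncStats switch) reduces to an append-if-changed pattern: record a timestamp only when the phase actually moved, truncated to whole seconds. A minimal standalone sketch, with string phases standing in for v1alpha2.MachinePhase and time.Time for metav1.Time:

```go
package main

import (
	"fmt"
	"time"
)

// PhaseTransition is a stand-in for v1alpha2.VirtualMachinePhaseTransitionTimestamp.
type PhaseTransition struct {
	Phase     string
	Timestamp time.Time
}

// appendTransition records a timestamp only when the phase changed,
// truncated to whole seconds as in NewPhaseTransitions above.
func appendTransition(pts []PhaseTransition, oldPhase, newPhase string) []PhaseTransition {
	if oldPhase == newPhase {
		return pts
	}
	return append(pts, PhaseTransition{
		Phase:     newPhase,
		Timestamp: time.Now().Truncate(time.Second),
	})
}

func main() {
	pts := appendTransition(nil, "Pending", "Starting")
	pts = appendTransition(pts, "Starting", "Starting") // no change, no entry
	fmt.Println(len(pts)) // 1
}
```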
- newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go index c71534d2f4..83abce214a 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/suite_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestVirtualMachine(t *testing.T) { @@ -38,7 +38,7 @@ func TestVirtualMachine(t *testing.T) { RunSpecs(t, "VirtualMachine Handlers Suite") } -func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus], state.VirtualMachineState) { +func setupEnvironment(vm *v1alpha2.VirtualMachine, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus], state.VirtualMachineState) { GinkgoHelper() Expect(vm).ToNot(BeNil()) allObjects := []client.Object{vm} @@ -48,10 +48,10 @@ func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) (client. Expect(err).NotTo(HaveOccurred()) resource := reconciler.NewResource(client.ObjectKeyFromObject(vm), fakeClient, - func() *virtv2.VirtualMachine { - return &virtv2.VirtualMachine{} + func() *v1alpha2.VirtualMachine { + return &v1alpha2.VirtualMachine{} }, - func(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus { + func(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus { return obj.Status }) err = resource.Fetch(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go index de3eba69e1..48a294bdcb 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go @@ -41,7 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -89,7 +89,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat defer func() { switch changed.Status.Phase { - case virtv2.MachinePending, virtv2.MachineStarting, virtv2.MachineStopped: + case v1alpha2.MachinePending, v1alpha2.MachineStarting, v1alpha2.MachineStopped: conditions.RemoveCondition(vmcondition.TypeConfigurationApplied, &changed.Status.Conditions) conditions.RemoveCondition(vmcondition.TypeAwaitingRestartToApplyConfiguration, &changed.Status.Conditions) @@ -127,7 +127,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat // 1. Set RestartAwaitingChanges. 
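The defer near the top of Handle (shown above) strips the ConfigurationApplied and AwaitingRestartToApplyConfiguration conditions whenever the machine is Pending, Starting, or Stopped. The module's conditions.RemoveCondition appears to play the same role as apimachinery's generic condition helpers; a sketch of the equivalent cleanup using the upstream meta package directly (condition type strings are assumed here, not taken from vmcondition):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	conds := []metav1.Condition{
		{Type: "ConfigurationApplied", Status: metav1.ConditionTrue, Reason: "Applied"},
		{Type: "AwaitingRestartToApplyConfiguration", Status: metav1.ConditionTrue, Reason: "Pending"},
	}

	// Drop both conditions, e.g. when the VM falls back to Pending or Stopped.
	for _, t := range []string{"ConfigurationApplied", "AwaitingRestartToApplyConfiguration"} {
		meta.RemoveStatusCondition(&conds, t)
	}

	fmt.Println(len(conds)) // 0
}
```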
var ( - lastAppliedSpec *virtv2.VirtualMachineSpec + lastAppliedSpec *v1alpha2.VirtualMachineSpec changes vmchange.SpecChanges allChanges vmchange.SpecChanges classChanged bool @@ -191,13 +191,13 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat // 4. Set ConfigurationApplied condition. switch { case kvvmSyncErr != nil: - h.recorder.Event(current, corev1.EventTypeWarning, virtv2.ReasonErrVmNotSynced, kvvmSyncErr.Error()) + h.recorder.Event(current, corev1.EventTypeWarning, v1alpha2.ReasonErrVmNotSynced, kvvmSyncErr.Error()) cbConfApplied. Status(metav1.ConditionFalse). Reason(vmcondition.ReasonConfigurationNotApplied). Message(service.CapitalizeFirstLetter(kvvmSyncErr.Error()) + ".") case len(changed.Status.RestartAwaitingChanges) > 0: - h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrRestartAwaitingChanges, "The virtual machine configuration successfully synced") + h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrRestartAwaitingChanges, "The virtual machine configuration successfully synced") cbConfApplied. Status(metav1.ConditionFalse). Reason(vmcondition.ReasonConfigurationNotApplied). @@ -207,7 +207,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat Reason(vmcondition.ReasonRestartAwaitingChangesExist). Message("Waiting for the user to restart in order to apply the configuration changes.") case classChanged: - h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrRestartAwaitingChanges, "Restart required to propagate changes from the vmclass spec") + h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrRestartAwaitingChanges, "Restart required to propagate changes from the vmclass spec") cbConfApplied. Status(metav1.ConditionFalse). Reason(vmcondition.ReasonConfigurationNotApplied). @@ -217,7 +217,7 @@ func (h *SyncKvvmHandler) Handle(ctx context.Context, s state.VirtualMachineStat Reason(vmcondition.ReasonRestartAwaitingVMClassChangesExist). Message("VirtualMachineClass.spec has been modified. Waiting for the user to restart in order to apply the configuration changes.") case synced: - h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonErrVmSynced, "The virtual machine configuration successfully synced") + h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonErrVmSynced, "The virtual machine configuration successfully synced") cbConfApplied.Status(metav1.ConditionTrue).Reason(vmcondition.ReasonConfigurationApplied) default: log.Error("Unexpected case during kvvm sync, please report a bug") @@ -246,7 +246,7 @@ func (h *SyncKvvmHandler) Name() string { return nameSyncKvvmHandler } -func (h *SyncKvvmHandler) isWaiting(vm *virtv2.VirtualMachine) bool { +func (h *SyncKvvmHandler) isWaiting(vm *v1alpha2.VirtualMachine) bool { return !checkVirtualMachineConfiguration(vm) } @@ -279,6 +279,19 @@ func (h *SyncKvvmHandler) syncKVVM(ctx context.Context, s state.VirtualMachineSt } switch { + // This workaround is required due to a bug in the KVVM workflow. + // When a KVVM is created with conflicting placement rules and cannot be scheduled, + // it remains unschedulable even if these rules are changed or removed. 
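The case added immediately below pairs two new predicates defined later in this file: the VM is Pending while the KVVM reports an Unschedulable printable status (isVMUnschedulable), and the accumulated spec changes touch a placement field (isPlacementPolicyChanged). A simplified standalone restatement of the placement check, with SpecChange standing in for a vmchange.SpecChanges entry and reflect.DeepEqual in place of equality.Semantic.DeepEqual:

```go
package main

import (
	"fmt"
	"reflect"
)

// SpecChange is a stand-in for one entry of vmchange.SpecChanges.
type SpecChange struct {
	Path         string
	CurrentValue any
	DesiredValue any
}

// placementChanged mirrors isPlacementPolicyChanged: only affinity,
// nodeSelector, and tolerations count as placement policy.
func placementChanged(changes []SpecChange) bool {
	for _, c := range changes {
		switch c.Path {
		case "affinity", "nodeSelector", "tolerations":
			if !reflect.DeepEqual(c.CurrentValue, c.DesiredValue) {
				return true
			}
		}
	}
	return false
}

func main() {
	changed := placementChanged([]SpecChange{
		{Path: "nodeSelector", CurrentValue: map[string]string{"a": "1"}, DesiredValue: map[string]string{"a": "2"}},
	})
	fmt.Println(changed) // true
}
```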
+ case h.isVMUnschedulable(s.VirtualMachine().Current(), kvvm) && h.isPlacementPolicyChanged(allChanges): + err := h.updateKVVM(ctx, s) + if err != nil { + return false, fmt.Errorf("failed to update internal virtual machine: %w", err) + } + err = object.DeleteObject(ctx, h.client, pod) + if err != nil { + return false, fmt.Errorf("failed to delete the internal virtual machine instance's pod: %w", err) + } + return true, nil case h.isVMStopped(s.VirtualMachine().Current(), kvvm, pod): // KVVM must be updated when the VM is stopped because all its components, // like VirtualDisk and other resources, @@ -407,8 +420,13 @@ func MakeKVVMFromVMSpec(ctx context.Context, s state.VirtualMachineState) (*virt networkSpec := network.CreateNetworkSpec(current, vmmacs) + vmbdas, err := s.VirtualMachineBlockDeviceAttachments(ctx) + if err != nil { + return nil, fmt.Errorf("get vmbdas: %w", err) + } + // Create kubevirt VirtualMachine resource from d8 VirtualMachine spec. - err = kvbuilder.ApplyVirtualMachineSpec(kvvmBuilder, current, bdState.VDByName, bdState.VIByName, bdState.CVIByName, class, ip.Status.Address, networkSpec) + err = kvbuilder.ApplyVirtualMachineSpec(kvvmBuilder, current, bdState.VDByName, bdState.VIByName, bdState.CVIByName, vmbdas, class, ip.Status.Address, networkSpec) if err != nil { return nil, err } @@ -427,7 +445,7 @@ func MakeKVVMFromVMSpec(ctx context.Context, s state.VirtualMachineState) (*virt return newKVVM, nil } -func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) *virtv2.VirtualMachineSpec { +func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) *v1alpha2.VirtualMachineSpec { if kvvm == nil || vm == nil { return nil } @@ -435,17 +453,17 @@ func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *v lastSpec, err := kvbuilder.LoadLastAppliedSpec(kvvm) // TODO Add smarter handler for empty/invalid annotation. if lastSpec == nil && err == nil { - h.recorder.Event(vm, corev1.EventTypeWarning, virtv2.ReasonVMLastAppliedSpecIsInvalid, "Could not find last applied spec. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.") - lastSpec = &virtv2.VirtualMachineSpec{} + h.recorder.Event(vm, corev1.EventTypeWarning, v1alpha2.ReasonVMLastAppliedSpecIsInvalid, "Could not find last applied spec. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.") + lastSpec = &v1alpha2.VirtualMachineSpec{} } if err != nil { msg := fmt.Sprintf("Could not restore last applied spec: %v. Possible old VM or partial backup restore. Restart or recreate VM to adopt it.", err) - h.recorder.Event(vm, corev1.EventTypeWarning, virtv2.ReasonVMLastAppliedSpecIsInvalid, msg) + h.recorder.Event(vm, corev1.EventTypeWarning, v1alpha2.ReasonVMLastAppliedSpecIsInvalid, msg) // In Automatic mode changes are applied immediately, so last-applied-spec annotation will be restored. - if vmutil.ApprovalMode(vm) == virtv2.Automatic { - lastSpec = &virtv2.VirtualMachineSpec{} + if vmutil.ApprovalMode(vm) == v1alpha2.Automatic { + lastSpec = &v1alpha2.VirtualMachineSpec{} } - if vmutil.ApprovalMode(vm) == virtv2.Manual { + if vmutil.ApprovalMode(vm) == v1alpha2.Manual { // Manual mode requires meaningful content in status.pendingChanges. // There are different paths: // 1. Return err and do nothing, user should restore annotation or recreate VM. 
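loadLastAppliedSpec follows the same convention kubectl uses for its last-applied-configuration annotation: serialize the spec into an annotation on write, deserialize on read, and treat a missing value as recoverable rather than fatal. A minimal sketch of that round-trip; the annotation key and Spec type here are illustrative, not the module's actual kvbuilder API:

```go
package main

import (
	"encoding/json"
	"fmt"
)

const lastAppliedAnno = "example.io/last-applied-spec" // illustrative key

type Spec struct {
	Cores int `json:"cores"`
}

func setLastApplied(annos map[string]string, spec Spec) error {
	data, err := json.Marshal(spec)
	if err != nil {
		return err
	}
	annos[lastAppliedAnno] = string(data)
	return nil
}

// loadLastApplied returns (nil, nil) when the annotation is absent, matching
// the "lastSpec == nil && err == nil" branch handled above.
func loadLastApplied(annos map[string]string) (*Spec, error) {
	raw, ok := annos[lastAppliedAnno]
	if !ok {
		return nil, nil
	}
	var spec Spec
	if err := json.Unmarshal([]byte(raw), &spec); err != nil {
		return nil, err
	}
	return &spec, nil
}

func main() {
	annos := map[string]string{}
	_ = setLastApplied(annos, Spec{Cores: 2})
	spec, _ := loadLastApplied(annos)
	fmt.Println(spec.Cores) // 2
}
```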
@@ -456,14 +474,14 @@ func (h *SyncKvvmHandler) loadLastAppliedSpec(vm *virtv2.VirtualMachine, kvvm *v // // At this time, variant 2 is chosen. // TODO(future): Implement variant 3: restore some fields from KVVM. - lastSpec = &virtv2.VirtualMachineSpec{} + lastSpec = &v1alpha2.VirtualMachineSpec{} } } return lastSpec } -func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineClass, kvvm *virtv1.VirtualMachine) *virtv2.VirtualMachineClassSpec { +func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *v1alpha2.VirtualMachineClass, kvvm *virtv1.VirtualMachine) *v1alpha2.VirtualMachineClassSpec { if kvvm == nil || class == nil { return nil } @@ -471,13 +489,13 @@ func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineC lastSpec, err := kvbuilder.LoadLastAppliedClassSpec(kvvm) // TODO Add smarter handler for empty/invalid annotation. if lastSpec == nil && err == nil { - h.recorder.Event(class, corev1.EventTypeWarning, virtv2.ReasonVMClassLastAppliedSpecInvalid, "Could not find last applied spec. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.") - lastSpec = &virtv2.VirtualMachineClassSpec{} + h.recorder.Event(class, corev1.EventTypeWarning, v1alpha2.ReasonVMClassLastAppliedSpecInvalid, "Could not find last applied spec. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.") + lastSpec = &v1alpha2.VirtualMachineClassSpec{} } if err != nil { msg := fmt.Sprintf("Could not restore last applied spec: %v. Possible old VMClass or partial backup restore. Restart or recreate VM to adopt it.", err) - h.recorder.Event(class, corev1.EventTypeWarning, virtv2.ReasonVMClassLastAppliedSpecInvalid, msg) - lastSpec = &virtv2.VirtualMachineClassSpec{} + h.recorder.Event(class, corev1.EventTypeWarning, v1alpha2.ReasonVMClassLastAppliedSpecInvalid, msg) + lastSpec = &v1alpha2.VirtualMachineClassSpec{} } return lastSpec @@ -488,7 +506,7 @@ func (h *SyncKvvmHandler) loadClassLastAppliedSpec(class *virtv2.VirtualMachineC func (h *SyncKvvmHandler) detectSpecChanges( ctx context.Context, kvvm *virtv1.VirtualMachine, - currentSpec, lastSpec *virtv2.VirtualMachineSpec, + currentSpec, lastSpec *v1alpha2.VirtualMachineSpec, ) vmchange.SpecChanges { log := logger.FromContext(ctx) @@ -507,7 +525,7 @@ func (h *SyncKvvmHandler) detectSpecChanges( return specChanges } -func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentClassSpec, lastClassSpec *virtv2.VirtualMachineClassSpec) vmchange.SpecChanges { +func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentClassSpec, lastClassSpec *v1alpha2.VirtualMachineClassSpec) vmchange.SpecChanges { log := logger.FromContext(ctx) specChanges := vmchange.CompareClassSpecs(currentClassSpec, lastClassSpec) @@ -520,7 +538,7 @@ func (h *SyncKvvmHandler) detectClassSpecChanges(ctx context.Context, currentCla // IsVmStopped return true if the instance of the KVVM is not created or Pod is in the Complete state. func (h *SyncKvvmHandler) isVMStopped( - vm *virtv2.VirtualMachine, + vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, pod *corev1.Pod, ) bool { @@ -556,7 +574,7 @@ func (h *SyncKvvmHandler) detectKvvmSpecChanges(ctx context.Context, s state.Vir // // Wait if changes are disruptive, and approval mode is manual, and VM is still running. 
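The comment above compresses the gating rule for hasNoneDisruptiveChanges: a sync is deferred only when all three factors line up. As a tiny truth-table helper (a sketch; the real check also inspects the KVVM and KVVMI state):

```go
// shouldWaitForApproval reports whether applying spec changes must wait for a
// user-initiated restart: disruptive changes, Manual approval mode, VM running.
func shouldWaitForApproval(disruptive, manualApproval, running bool) bool {
	return disruptive && manualApproval && running
}
```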
func (h *SyncKvvmHandler) hasNoneDisruptiveChanges( - vm *virtv2.VirtualMachine, + vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance, changes vmchange.SpecChanges, @@ -609,7 +627,7 @@ func (h *SyncKvvmHandler) applyVMChangesToKVVM(ctx context.Context, s state.Virt if changes.IsDisruptive() { message = "Apply disruptive changes without restart" } - h.recorder.Event(current, corev1.EventTypeNormal, virtv2.ReasonVMChangesApplied, message) + h.recorder.Event(current, corev1.EventTypeNormal, v1alpha2.ReasonVMChangesApplied, message) log.Debug(message, "vm.name", current.GetName(), "changes", changes) if err := h.updateKVVM(ctx, s); err != nil { @@ -633,9 +651,9 @@ func (h *SyncKvvmHandler) applyVMChangesToKVVM(ctx context.Context, s state.Virt // updateKVVMLastAppliedSpec updates last-applied-spec annotation on KubeVirt VirtualMachine. func (h *SyncKvvmHandler) updateKVVMLastAppliedSpec( ctx context.Context, - vm *virtv2.VirtualMachine, + vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, - class *virtv2.VirtualMachineClass, + class *v1alpha2.VirtualMachineClass, ) error { if vm == nil || kvvm == nil { return nil @@ -659,3 +677,28 @@ func (h *SyncKvvmHandler) updateKVVMLastAppliedSpec( return nil } + +func (h *SyncKvvmHandler) isVMUnschedulable( + vm *v1alpha2.VirtualMachine, + kvvm *virtv1.VirtualMachine, +) bool { + if vm.Status.Phase == v1alpha2.MachinePending && kvvm.Status.PrintableStatus == virtv1.VirtualMachineStatusUnschedulable { + return true + } + + return false +} + +// isPlacementPolicyChanged returns true if any of the Affinity, NodePlacement, or Toleration rules have changed. +func (h *SyncKvvmHandler) isPlacementPolicyChanged(allChanges vmchange.SpecChanges) bool { + for _, c := range allChanges.GetAll() { + switch c.Path { + case "affinity", "nodeSelector", "tolerations": + if !equality.Semantic.DeepEqual(c.CurrentValue, c.DesiredValue) { + return true + } + } + } + + return false +} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go index 6da903c530..13a79262af 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm_test.go @@ -35,7 +35,7 @@ import ( vmservice "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -48,7 +48,7 @@ var _ = Describe("SyncKvvmHandler", func() { var ( ctx context.Context fakeClient client.WithWatch - resource *reconciler.Resource[*virtv2.VirtualMachine, virtv2.VirtualMachineStatus] + resource *reconciler.Resource[*v1alpha2.VirtualMachine, v1alpha2.VirtualMachineStatus] vmState state.VirtualMachineState recorder *eventrecord.EventRecorderLoggerMock ) @@ -72,22 +72,22 @@ var _ = Describe("SyncKvvmHandler", func() { recorder = nil }) - newVM := func(phase virtv2.MachinePhase) *virtv2.VirtualMachine { + newVM := func(phase v1alpha2.MachinePhase) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) vm.Status.Phase = phase vm.Spec.VirtualMachineClassName = "vmclass" vm.Spec.CPU.Cores = 2 - 
vm.Spec.RunPolicy = virtv2.ManualPolicy + vm.Spec.RunPolicy = v1alpha2.ManualPolicy vm.Spec.VirtualMachineIPAddress = "test-ip" - vm.Spec.OsType = virtv2.GenericOs - vm.Spec.Disruptions = &virtv2.Disruptions{ - RestartApprovalMode: virtv2.Manual, + vm.Spec.OsType = v1alpha2.GenericOs + vm.Spec.Disruptions = &v1alpha2.Disruptions{ + RestartApprovalMode: v1alpha2.Manual, } return vm } - newKVVM := func(vm *virtv2.VirtualMachine) *virtv1.VirtualMachine { + newKVVM := func(vm *v1alpha2.VirtualMachine) *virtv1.VirtualMachine { kvvm := &virtv1.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -99,27 +99,27 @@ var _ = Describe("SyncKvvmHandler", func() { } kvvm.Spec.RunStrategy = pointer.GetPointer(virtv1.RunStrategyAlways) - Expect(kvbuilder.SetLastAppliedSpec(kvvm, &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ - CPU: virtv2.CPUSpec{ + Expect(kvbuilder.SetLastAppliedSpec(kvvm, &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + CPU: v1alpha2.CPUSpec{ Cores: vm.Spec.CPU.Cores, }, VirtualMachineIPAddress: vm.Spec.VirtualMachineIPAddress, RunPolicy: vm.Spec.RunPolicy, OsType: vm.Spec.OsType, VirtualMachineClassName: vm.Spec.VirtualMachineClassName, - Disruptions: &virtv2.Disruptions{ + Disruptions: &v1alpha2.Disruptions{ RestartApprovalMode: vm.Spec.Disruptions.RestartApprovalMode, }, }, })).To(Succeed()) - Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - CPU: virtv2.CPU{ - Type: virtv2.CPUTypeHost, + Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + CPU: v1alpha2.CPU{ + Type: v1alpha2.CPUTypeHost, }, - NodeSelector: virtv2.NodeSelector{ + NodeSelector: v1alpha2.NodeSelector{ MatchLabels: map[string]string{ "node1": "node1", }, @@ -143,28 +143,28 @@ var _ = Describe("SyncKvvmHandler", func() { Expect(err).NotTo(HaveOccurred()) } - mutateKVVM := func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) { - Expect(kvbuilder.SetLastAppliedSpec(kvvm, &virtv2.VirtualMachine{ - Spec: virtv2.VirtualMachineSpec{ - CPU: virtv2.CPUSpec{ + mutateKVVM := func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) { + Expect(kvbuilder.SetLastAppliedSpec(kvvm, &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + CPU: v1alpha2.CPUSpec{ Cores: 1, }, VirtualMachineIPAddress: vm.Spec.VirtualMachineIPAddress, RunPolicy: vm.Spec.RunPolicy, OsType: "BIOS", VirtualMachineClassName: vm.Spec.VirtualMachineClassName, - Disruptions: &virtv2.Disruptions{ + Disruptions: &v1alpha2.Disruptions{ RestartApprovalMode: vm.Spec.Disruptions.RestartApprovalMode, }, }, })).To(Succeed()) - Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &virtv2.VirtualMachineClass{ - Spec: virtv2.VirtualMachineClassSpec{ - CPU: virtv2.CPU{ - Type: virtv2.CPUTypeHost, + Expect(kvbuilder.SetLastAppliedClassSpec(kvvm, &v1alpha2.VirtualMachineClass{ + Spec: v1alpha2.VirtualMachineClassSpec{ + CPU: v1alpha2.CPU{ + Type: v1alpha2.CPUTypeHost, }, - NodeSelector: virtv2.NodeSelector{ + NodeSelector: v1alpha2.NodeSelector{ MatchLabels: map[string]string{ "node2": "node2", }, @@ -174,30 +174,30 @@ var _ = Describe("SyncKvvmHandler", func() { } DescribeTable("AwaitingRestart Condition Tests", - func(phase virtv2.MachinePhase, needChange bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) { - ip := &virtv2.VirtualMachineIPAddress{ + func(phase v1alpha2.MachinePhase, needChange bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) { + ip := 
&v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ip", Namespace: namespace, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeStatic, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeStatic, StaticIP: "192.168.1.10", }, - Status: virtv2.VirtualMachineIPAddressStatus{ + Status: v1alpha2.VirtualMachineIPAddressStatus{ Address: "192.168.1.10", - Phase: virtv2.VirtualMachineIPAddressPhaseAttached, + Phase: v1alpha2.VirtualMachineIPAddressPhaseAttached, }, } - vmClass := &virtv2.VirtualMachineClass{ + vmClass := &v1alpha2.VirtualMachineClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vmclass", - }, Spec: virtv2.VirtualMachineClassSpec{ - CPU: virtv2.CPU{ - Type: virtv2.CPUTypeHost, + }, Spec: v1alpha2.VirtualMachineClassSpec{ + CPU: v1alpha2.CPU{ + Type: v1alpha2.CPUTypeHost, }, - NodeSelector: virtv2.NodeSelector{ + NodeSelector: v1alpha2.NodeSelector{ MatchLabels: map[string]string{ "node1": "node1", }, @@ -217,7 +217,7 @@ var _ = Describe("SyncKvvmHandler", func() { reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -227,50 +227,50 @@ var _ = Describe("SyncKvvmHandler", func() { Expect(awaitCond.Status).To(Equal(expectedStatus)) } }, - Entry("Running phase with changes", virtv2.MachineRunning, true, metav1.ConditionTrue, true), - Entry("Running phase without changes", virtv2.MachineRunning, false, metav1.ConditionUnknown, false), + Entry("Running phase with changes", v1alpha2.MachineRunning, true, metav1.ConditionTrue, true), + Entry("Running phase without changes", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false), - Entry("Migrating phase with changes, condition should exist", virtv2.MachineMigrating, true, metav1.ConditionTrue, true), - Entry("Migrating phase without changes, condition should not exist", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false), + Entry("Migrating phase with changes, condition should exist", v1alpha2.MachineMigrating, true, metav1.ConditionTrue, true), + Entry("Migrating phase without changes, condition should not exist", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false), - Entry("Stopping phase with changes, condition should exist", virtv2.MachineStopping, true, metav1.ConditionTrue, true), - Entry("Stopping phase without changes, condition should not exist", virtv2.MachineStopping, false, metav1.ConditionUnknown, false), + Entry("Stopping phase with changes, condition should exist", v1alpha2.MachineStopping, true, metav1.ConditionTrue, true), + Entry("Stopping phase without changes, condition should not exist", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false), - Entry("Stopped phase with changes, shouldn't have condition", virtv2.MachineStopped, true, metav1.ConditionUnknown, false), - Entry("Stopped phase without changes, shouldn't have condition", virtv2.MachineStopped, false, metav1.ConditionUnknown, false), + Entry("Stopped phase with changes, shouldn't have condition", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false), + Entry("Stopped phase without changes, shouldn't have condition", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false), - Entry("Starting phase with changes, shouldn't have condition", virtv2.MachineStarting, true, metav1.ConditionUnknown, false), - Entry("Starting phase without changes, shouldn't have condition", 
virtv2.MachineStarting, false, metav1.ConditionUnknown, false), + Entry("Starting phase with changes, shouldn't have condition", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false), + Entry("Starting phase without changes, shouldn't have condition", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false), - Entry("Pending phase with changes, shouldn't have condition", virtv2.MachinePending, true, metav1.ConditionUnknown, false), - Entry("Pending phase without changes, shouldn't have condition", virtv2.MachinePending, false, metav1.ConditionUnknown, false), + Entry("Pending phase with changes, shouldn't have condition", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false), + Entry("Pending phase without changes, shouldn't have condition", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false), ) DescribeTable("ConfigurationApplied Condition Tests", - func(phase virtv2.MachinePhase, notReady bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) { - ip := &virtv2.VirtualMachineIPAddress{ + func(phase v1alpha2.MachinePhase, notReady bool, expectedStatus metav1.ConditionStatus, expectedExistence bool) { + ip := &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ip", Namespace: namespace, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeStatic, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeStatic, StaticIP: "192.168.1.10", }, - Status: virtv2.VirtualMachineIPAddressStatus{ + Status: v1alpha2.VirtualMachineIPAddressStatus{ Address: "192.168.1.10", - Phase: virtv2.VirtualMachineIPAddressPhaseAttached, + Phase: v1alpha2.VirtualMachineIPAddressPhaseAttached, }, } - vmClass := &virtv2.VirtualMachineClass{ + vmClass := &v1alpha2.VirtualMachineClass{ ObjectMeta: metav1.ObjectMeta{ Name: "vmclass", - }, Spec: virtv2.VirtualMachineClassSpec{ - CPU: virtv2.CPU{ - Type: virtv2.CPUTypeHost, + }, Spec: v1alpha2.VirtualMachineClassSpec{ + CPU: v1alpha2.CPU{ + Type: v1alpha2.CPUTypeHost, }, - NodeSelector: virtv2.NodeSelector{ + NodeSelector: v1alpha2.NodeSelector{ MatchLabels: map[string]string{ "node1": "node1", }, @@ -291,7 +291,7 @@ var _ = Describe("SyncKvvmHandler", func() { fakeClient, resource, vmState = setupEnvironment(vm, kvvm, ip, vmClass) reconcile() - newVM := &virtv2.VirtualMachine{} + newVM := &v1alpha2.VirtualMachine{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vm), newVM) Expect(err).NotTo(HaveOccurred()) @@ -301,22 +301,22 @@ var _ = Describe("SyncKvvmHandler", func() { Expect(confAppliedCond.Status).To(Equal(expectedStatus)) } }, - Entry("Running phase with changes applied", virtv2.MachineRunning, false, metav1.ConditionUnknown, false), - Entry("Running phase with changes not applied", virtv2.MachineRunning, true, metav1.ConditionFalse, true), + Entry("Running phase with changes applied", v1alpha2.MachineRunning, false, metav1.ConditionUnknown, false), + Entry("Running phase with changes not applied", v1alpha2.MachineRunning, true, metav1.ConditionFalse, true), - Entry("Migrating phase with changes applied, condition should not exist", virtv2.MachineMigrating, false, metav1.ConditionUnknown, false), - Entry("Migrating phase with changes not applied, condition should exist", virtv2.MachineMigrating, true, metav1.ConditionFalse, true), + Entry("Migrating phase with changes applied, condition should not exist", v1alpha2.MachineMigrating, false, metav1.ConditionUnknown, false), + Entry("Migrating phase with changes not 
applied, condition should exist", v1alpha2.MachineMigrating, true, metav1.ConditionFalse, true), - Entry("Stopping phase with changes applied, condition should not exist", virtv2.MachineStopping, false, metav1.ConditionUnknown, false), - Entry("Stopping phase with changes not applied, condition should exist", virtv2.MachineStopping, true, metav1.ConditionFalse, true), + Entry("Stopping phase with changes applied, condition should not exist", v1alpha2.MachineStopping, false, metav1.ConditionUnknown, false), + Entry("Stopping phase with changes not applied, condition should exist", v1alpha2.MachineStopping, true, metav1.ConditionFalse, true), - Entry("Stopped phase with changes applied, condition should not exist", virtv2.MachineStopped, false, metav1.ConditionUnknown, false), - Entry("Stopped phase with changes not applied, condition should not exist", virtv2.MachineStopped, true, metav1.ConditionUnknown, false), + Entry("Stopped phase with changes applied, condition should not exist", v1alpha2.MachineStopped, false, metav1.ConditionUnknown, false), + Entry("Stopped phase with changes not applied, condition should not exist", v1alpha2.MachineStopped, true, metav1.ConditionUnknown, false), - Entry("Starting phase with changes applied, condition should not exist", virtv2.MachineStarting, false, metav1.ConditionUnknown, false), - Entry("Starting phase with changes not applied, condition should not exist", virtv2.MachineStarting, true, metav1.ConditionUnknown, false), + Entry("Starting phase with changes applied, condition should not exist", v1alpha2.MachineStarting, false, metav1.ConditionUnknown, false), + Entry("Starting phase with changes not applied, condition should not exist", v1alpha2.MachineStarting, true, metav1.ConditionUnknown, false), - Entry("Pending phase with changes applied, condition should not exist", virtv2.MachinePending, false, metav1.ConditionUnknown, false), - Entry("Pending phase with changes not applied, condition should not exist", virtv2.MachinePending, true, metav1.ConditionUnknown, false), + Entry("Pending phase with changes applied, condition should not exist", v1alpha2.MachinePending, false, metav1.ConditionUnknown, false), + Entry("Pending phase with changes not applied, condition should not exist", v1alpha2.MachinePending, true, metav1.ConditionUnknown, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go index 8d6cd72879..7c2cb40094 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_metadata.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/merger" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const nameSyncMetadataHandler = "SyncMetadataHandler" @@ -148,7 +148,7 @@ func (h *SyncMetadataHandler) patchLabelsAndAnnotations(ctx context.Context, obj // PropagateVMMetadata merges labels and annotations from the input VM into destination object. // Attach related labels and some dangerous annotations are not copied. // Return true if destination object was changed. 
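The changed-or-not return value of PropagateVMMetadata lets callers issue a patch only when something was actually written. At its core this is a map merge that reports mutation; a simplified sketch (it omits the skip-list for attach-related labels and dangerous annotations that the real function applies):

```go
// mergeInto copies src entries into dst and reports whether dst changed.
func mergeInto(dst, src map[string]string) bool {
	changed := false
	for k, v := range src {
		if cur, ok := dst[k]; !ok || cur != v {
			dst[k] = v
			changed = true
		}
	}
	return changed
}
```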
-func PropagateVMMetadata(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) { +func PropagateVMMetadata(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, destObj client.Object) (bool, error) { // No changes if dest is nil. if destObj == nil { return false, nil @@ -202,7 +202,7 @@ func GetLastPropagatedLabels(kvvm *virtv1.VirtualMachine) (map[string]string, er return lastPropagatedLabels, nil } -func SetLastPropagatedLabels(kvvm *virtv1.VirtualMachine, vm *virtv2.VirtualMachine) (bool, error) { +func SetLastPropagatedLabels(kvvm *virtv1.VirtualMachine, vm *v1alpha2.VirtualMachine) (bool, error) { data, err := json.Marshal(vm.GetLabels()) if err != nil { return false, err @@ -231,7 +231,7 @@ func GetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine) (map[string]strin return lastPropagatedAnno, nil } -func SetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine, vm *virtv2.VirtualMachine) (bool, error) { +func SetLastPropagatedAnnotations(kvvm *virtv1.VirtualMachine, vm *v1alpha2.VirtualMachine) (bool, error) { data, err := json.Marshal(RemoveNonPropagatableAnnotations(vm.GetAnnotations())) if err != nil { return false, err diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go index 4c2c35a73f..f3939f0ff0 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -93,7 +93,7 @@ func (h *SyncPowerStateHandler) syncPowerState( ctx context.Context, s state.VirtualMachineState, kvvm *virtv1.VirtualMachine, - runPolicy virtv2.RunPolicy, + runPolicy v1alpha2.RunPolicy, ) error { if kvvm == nil { return nil @@ -104,7 +104,7 @@ func (h *SyncPowerStateHandler) syncPowerState( return fmt.Errorf("find the virtual machine instance: %w", err) } - if runPolicy == virtv2.AlwaysOnUnlessStoppedManually { + if runPolicy == v1alpha2.AlwaysOnUnlessStoppedManually { if kvvmi != nil { err = h.ensureRunStrategy(ctx, kvvm, virtv1.RunStrategyManual) } else if kvvm.Spec.RunStrategy != nil && *kvvm.Spec.RunStrategy == virtv1.RunStrategyAlways { @@ -133,19 +133,19 @@ func (h *SyncPowerStateHandler) syncPowerState( var vmAction VMAction if maintenance.Status != metav1.ConditionTrue { switch runPolicy { - case virtv2.AlwaysOffPolicy: + case v1alpha2.AlwaysOffPolicy: vmAction = h.handleAlwaysOffPolicy(ctx, s, kvvmi) - case virtv2.AlwaysOnPolicy: + case v1alpha2.AlwaysOnPolicy: vmAction, err = h.handleAlwaysOnPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo) if err != nil { return err } - case virtv2.AlwaysOnUnlessStoppedManually: + case v1alpha2.AlwaysOnUnlessStoppedManually: vmAction, err = h.handleAlwaysOnUnlessStoppedManuallyPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo) if err != nil { return err } - case virtv2.ManualPolicy: + case v1alpha2.ManualPolicy: vmAction = h.handleManualPolicy(ctx, s, kvvm, kvvmi, isConfigurationApplied, shutdownInfo) } } else { @@ -158,13 +158,13 @@ 
func (h *SyncPowerStateHandler) syncPowerState( cbAwaitingRestart, exist := conditions.GetCondition(vmcondition.TypeAwaitingRestartToApplyConfiguration, vm.Status.Conditions) if exist && cbAwaitingRestart.Status == metav1.ConditionTrue && cbAwaitingRestart.ObservedGeneration == vm.GetGeneration() && - vm.Spec.Disruptions.RestartApprovalMode == virtv2.Automatic { + vm.Spec.Disruptions.RestartApprovalMode == v1alpha2.Automatic { log := logger.FromContext(ctx) - h.recorder.WithLogging(log).Event(vm, corev1.EventTypeNormal, virtv2.ReasonVMChangesApplied, "Apply disruptive changes with restart") + h.recorder.WithLogging(log).Event(vm, corev1.EventTypeNormal, v1alpha2.ReasonVMChangesApplied, "Apply disruptive changes with restart") h.recorder.WithLogging(log).Event( vm, corev1.EventTypeNormal, - virtv2.ReasonVMRestarted, + v1alpha2.ReasonVMRestarted, "Restart initiated by controller to apply changes", ) err = powerstate.RestartVM(ctx, h.client, kvvm, kvvmi, false) @@ -210,7 +210,7 @@ func (h *SyncPowerStateHandler) handleManualPolicy( shutdownInfo powerstate.ShutdownInfo, ) VMAction { if kvvmi == nil || kvvmi.DeletionTimestamp != nil { - if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.ManualPolicy) { + if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.ManualPolicy) { return Start } return Nothing @@ -274,7 +274,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnPolicy( } if kvvmi.DeletionTimestamp != nil { - if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.AlwaysOnPolicy) { + if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.AlwaysOnPolicy) { return Start, nil } return Nothing, nil @@ -311,7 +311,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnUnlessStoppedManuallyPolicy( shutdownInfo powerstate.ShutdownInfo, ) (VMAction, error) { if kvvmi == nil || kvvmi.DeletionTimestamp != nil { - if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, virtv2.AlwaysOnUnlessStoppedManually) { + if h.checkNeedStartVM(ctx, s, kvvm, isConfigurationApplied, v1alpha2.AlwaysOnUnlessStoppedManually) { return Start, nil } @@ -321,7 +321,7 @@ func (h *SyncPowerStateHandler) handleAlwaysOnUnlessStoppedManuallyPolicy( return Nothing, fmt.Errorf("load last applied spec: %w", err) } - if lastAppliedSpec != nil && lastAppliedSpec.RunPolicy == virtv2.AlwaysOffPolicy { + if lastAppliedSpec != nil && lastAppliedSpec.RunPolicy == v1alpha2.AlwaysOffPolicy { err = kvvmutil.AddStartAnnotation(ctx, h.client, kvvm) if err != nil { return Nothing, fmt.Errorf("add annotation to KVVM: %w", err) @@ -381,7 +381,7 @@ func (h *SyncPowerStateHandler) checkNeedStartVM( s state.VirtualMachineState, kvvm *virtv1.VirtualMachine, isConfigurationApplied bool, - runPolicy virtv2.RunPolicy, + runPolicy v1alpha2.RunPolicy, ) bool { if isConfigurationApplied && (kvvm.Annotations[annotations.AnnVMStartRequested] == "true" || kvvm.Annotations[annotations.AnnVMRestartRequested] == "true") { @@ -498,7 +498,7 @@ func (h *SyncPowerStateHandler) recordStartEventf(ctx context.Context, obj clien h.recorder.WithLogging(logger.FromContext(ctx)).Eventf( obj, corev1.EventTypeNormal, - virtv2.ReasonVMStarted, + v1alpha2.ReasonVMStarted, messageFmt, args..., ) @@ -508,7 +508,7 @@ func (h *SyncPowerStateHandler) recordStopEventf(ctx context.Context, obj client h.recorder.WithLogging(logger.FromContext(ctx)).Eventf( obj, corev1.EventTypeNormal, - virtv2.ReasonVMStopped, + v1alpha2.ReasonVMStopped, messageFmt, ) } @@ -517,7 +517,7 @@ func (h *SyncPowerStateHandler) 
recordRestartEventf(ctx context.Context, obj cli h.recorder.WithLogging(logger.FromContext(ctx)).Eventf( obj, corev1.EventTypeNormal, - virtv2.ReasonVMRestarted, + v1alpha2.ReasonVMRestarted, messageFmt, ) } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go index f3be8af643..cb58c52ea9 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/sync_power_state_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/powerstate" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("Test power actions with VMs", func() { @@ -42,7 +42,7 @@ var _ = Describe("Test power actions with VMs", func() { recorderMock *eventrecord.EventRecorderLoggerMock fakeClient client.Client vmState state.VirtualMachineState - vm *virtv2.VirtualMachine + vm *v1alpha2.VirtualMachine kvvm *virtv1.VirtualMachine kvvmi *virtv1.VirtualMachineInstance vmPod *corev1.Pod @@ -129,7 +129,7 @@ var _ = Describe("Test action getters for different run policy", func() { recorderMock *eventrecord.EventRecorderLoggerMock fakeClient client.Client vmState state.VirtualMachineState - vm *virtv2.VirtualMachine + vm *v1alpha2.VirtualMachine kvvm *virtv1.VirtualMachine kvvmi *virtv1.VirtualMachineInstance vmPod *corev1.Pod @@ -360,18 +360,18 @@ var _ = Describe("Test action getters for different run policy", func() { }) }) -func createObjectsForPowerstateTest(namespacedVirtualMachine types.NamespacedName) (*virtv2.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, *corev1.Pod) { +func createObjectsForPowerstateTest(namespacedVirtualMachine types.NamespacedName) (*v1alpha2.VirtualMachine, *virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, *corev1.Pod) { const ( podName = "test-pod" nodeName = "test-node" podUID types.UID = "test-pod-uid" ) - vm := &virtv2.VirtualMachine{ + vm := &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: namespacedVirtualMachine.Name, Namespace: namespacedVirtualMachine.Namespace, }, - Status: virtv2.VirtualMachineStatus{}, + Status: v1alpha2.VirtualMachineStatus{}, } kvvm := &virtv1.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/util.go b/images/virtualization-artifact/pkg/controller/vm/internal/util.go index 04dc326008..e3289d9d4c 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/util.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/util.go @@ -29,17 +29,17 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) -func isDeletion(vm *virtv2.VirtualMachine) bool { +func isDeletion(vm *v1alpha2.VirtualMachine) bool { return vm == nil || !vm.GetDeletionTimestamp().IsZero() } type updaterProtection func(p 
*service.ProtectionService) func(ctx context.Context, objs ...client.Object) error -func addAllUnknown(vm *virtv2.VirtualMachine, conds ...vmcondition.Type) (update bool) { +func addAllUnknown(vm *v1alpha2.VirtualMachine, conds ...vmcondition.Type) (update bool) { for _, cond := range conds { if conditions.HasCondition(cond, vm.Status.Conditions) { continue @@ -67,126 +67,126 @@ func conditionStatus(status string) metav1.ConditionStatus { } func isVMPending(kvvm *virtv1.VirtualMachine) bool { - return getPhase(nil, kvvm) == virtv2.MachinePending + return getPhase(nil, kvvm) == v1alpha2.MachinePending } func isVMStopped(kvvm *virtv1.VirtualMachine) bool { - return getPhase(nil, kvvm) == virtv2.MachineStopped + return getPhase(nil, kvvm) == v1alpha2.MachineStopped } func isKVVMICreated(kvvm *virtv1.VirtualMachine) bool { return kvvm != nil && kvvm.Status.Created } -func getPhase(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase { +func getPhase(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase { if kvvm == nil { - return virtv2.MachinePending + return v1alpha2.MachinePending } if handler, exists := mapPhases[kvvm.Status.PrintableStatus]; exists { return handler(vm, kvvm) } - return virtv2.MachinePending + return v1alpha2.MachinePending } -type PhaseGetter func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase +type PhaseGetter func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase var mapPhases = map[virtv1.VirtualMachinePrintableStatus]PhaseGetter{ // VirtualMachineStatusStopped indicates that the virtual machine is currently stopped and isn't expected to start. - virtv1.VirtualMachineStatusStopped: func(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase { + virtv1.VirtualMachineStatusStopped: func(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase { if vm != nil && kvvm != nil { if !checkVirtualMachineConfiguration(vm) && kvvm != nil && kvvm.Annotations[annotations.AnnVMStartRequested] == "true" { - return virtv2.MachinePending + return v1alpha2.MachinePending } } - if vm != nil && vm.Status.Phase == virtv2.MachinePending && - (vm.Spec.RunPolicy == virtv2.AlwaysOnPolicy || vm.Spec.RunPolicy == virtv2.AlwaysOnUnlessStoppedManually) { - return virtv2.MachinePending + if vm != nil && vm.Status.Phase == v1alpha2.MachinePending && + (vm.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy || vm.Spec.RunPolicy == v1alpha2.AlwaysOnUnlessStoppedManually) { + return v1alpha2.MachinePending } - return virtv2.MachineStopped + return v1alpha2.MachineStopped }, // VirtualMachineStatusProvisioning indicates that cluster resources associated with the virtual machine // (e.g., DataVolumes) are being provisioned and prepared. - virtv1.VirtualMachineStatusProvisioning: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachineStarting + virtv1.VirtualMachineStatusProvisioning: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachineStarting }, // VirtualMachineStatusStarting indicates that the virtual machine is being prepared for running. 
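// The mapPhases table in util.go dispatches on KubeVirt's printable status to
// derive the module's machine phase, falling back to Pending for anything it
// does not recognize. A minimal sketch of that lookup-with-fallback pattern,
// using simplified stand-in types rather than the real virtv1/v1alpha2 API
// types:
package main

import "fmt"

type machinePhase string

const (
	phasePending machinePhase = "Pending"
	phaseRunning machinePhase = "Running"
	phaseStopped machinePhase = "Stopped"
)

type phaseGetter func() machinePhase

var byPrintableStatus = map[string]phaseGetter{
	"Running": func() machinePhase { return phaseRunning },
	"Stopped": func() machinePhase { return phaseStopped },
}

// getPhase returns the mapped phase, or Pending when the printable status has
// no dedicated handler — mirroring the fallback in util.go's getPhase.
func getPhase(printable string) machinePhase {
	if handler, ok := byPrintableStatus[printable]; ok {
		return handler()
	}
	return phasePending
}

func main() {
	fmt.Println(getPhase("Running")) // Running
	fmt.Println(getPhase("Unknown")) // Pending (no dedicated handler in this sketch)
}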
- virtv1.VirtualMachineStatusStarting: func(_ *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine) virtv2.MachinePhase { + virtv1.VirtualMachineStatusStarting: func(_ *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine) v1alpha2.MachinePhase { synchronizedCondition, _ := conditions.GetKVVMCondition(conditions.VirtualMachineSynchronized, kvvm.Status.Conditions) if synchronizedCondition.Reason == failedCreatePodReason { - return virtv2.MachinePending + return v1alpha2.MachinePending } - return virtv2.MachineStarting + return v1alpha2.MachineStarting }, // VirtualMachineStatusRunning indicates that the virtual machine is running. - virtv1.VirtualMachineStatusRunning: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachineRunning + virtv1.VirtualMachineStatusRunning: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachineRunning }, // VirtualMachineStatusPaused indicates that the virtual machine is paused. - virtv1.VirtualMachineStatusPaused: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePause + virtv1.VirtualMachineStatusPaused: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePause }, // VirtualMachineStatusStopping indicates that the virtual machine is in the process of being stopped. - virtv1.VirtualMachineStatusStopping: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachineStopping + virtv1.VirtualMachineStatusStopping: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachineStopping }, // VirtualMachineStatusTerminating indicates that the virtual machine is in the process of deletion, // as well as its associated resources (VirtualMachineInstance, DataVolumes, …). - virtv1.VirtualMachineStatusTerminating: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachineTerminating + virtv1.VirtualMachineStatusTerminating: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachineTerminating }, // VirtualMachineStatusCrashLoopBackOff indicates that the virtual machine is currently in a crash loop waiting to be retried. - virtv1.VirtualMachineStatusCrashLoopBackOff: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusCrashLoopBackOff: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusMigrating indicates that the virtual machine is in the process of being migrated // to another host. - virtv1.VirtualMachineStatusMigrating: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachineMigrating + virtv1.VirtualMachineStatusMigrating: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachineMigrating }, // VirtualMachineStatusUnknown indicates that the state of the virtual machine could not be obtained, // typically due to an error in communicating with the host on which it's running. 
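// The Starting handler above inspects a KVVM condition
// (VirtualMachineSynchronized) and demotes the phase to Pending when the
// reason indicates pod creation failed. A small sketch of that
// scan-conditions-by-type pattern; the condition struct, findCondition, and
// the "FailedCreate" reason value are illustrative stand-ins, not the real
// conditions helpers or the actual failedCreatePodReason constant:
package main

import "fmt"

type condition struct {
	Type   string
	Status string
	Reason string
}

// findCondition returns the first condition of the given type and whether it
// was found.
func findCondition(condType string, conds []condition) (condition, bool) {
	for _, c := range conds {
		if c.Type == condType {
			return c, true
		}
	}
	return condition{}, false
}

func main() {
	conds := []condition{{Type: "Synchronized", Status: "False", Reason: "FailedCreate"}}
	if c, ok := findCondition("Synchronized", conds); ok && c.Reason == "FailedCreate" {
		fmt.Println("phase: Pending") // mirrors the demotion on a failed pod create
	}
}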
- virtv1.VirtualMachineStatusUnknown: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusUnknown: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusUnschedulable indicates that an error has occurred while scheduling the virtual machine, // e.g. due to unsatisfiable resource requests or unsatisfiable scheduling constraints. - virtv1.VirtualMachineStatusUnschedulable: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusUnschedulable: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusErrImagePull indicates that an error has occurred while pulling an image for // a containerDisk VM volume. - virtv1.VirtualMachineStatusErrImagePull: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusErrImagePull: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusImagePullBackOff indicates that an error has occurred while pulling an image for // a containerDisk VM volume, and that kubelet is backing off before retrying. - virtv1.VirtualMachineStatusImagePullBackOff: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusImagePullBackOff: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusPvcNotFound indicates that the virtual machine references a PVC volume which doesn't exist. - virtv1.VirtualMachineStatusPvcNotFound: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusPvcNotFound: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusDataVolumeError indicates that an error has been reported by one of the DataVolumes // referenced by the virtual machines. - virtv1.VirtualMachineStatusDataVolumeError: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusDataVolumeError: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, // VirtualMachineStatusWaitingForVolumeBinding indicates that some PersistentVolumeClaims backing // the virtual machine volume are still not bound. 
- virtv1.VirtualMachineStatusWaitingForVolumeBinding: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + virtv1.VirtualMachineStatusWaitingForVolumeBinding: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, - kvvmEmptyPhase: func(_ *virtv2.VirtualMachine, _ *virtv1.VirtualMachine) virtv2.MachinePhase { - return virtv2.MachinePending + kvvmEmptyPhase: func(_ *v1alpha2.VirtualMachine, _ *virtv1.VirtualMachine) v1alpha2.MachinePhase { + return v1alpha2.MachinePending }, } @@ -251,7 +251,7 @@ func podFinal(pod corev1.Pod) bool { return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed } -func checkVirtualMachineConfiguration(vm *virtv2.VirtualMachine) bool { +func checkVirtualMachineConfiguration(vm *v1alpha2.VirtualMachine) bool { for _, c := range vm.Status.Conditions { switch vmcondition.Type(c.Type) { case vmcondition.TypeBlockDevicesReady: diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go b/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go index f7e59cf725..895f37d825 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/validators/block_device_limiter_validator.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type BlockDeviceLimiterValidator struct { @@ -40,15 +40,15 @@ func NewBlockDeviceLimiterValidator(service *service.BlockDeviceService, log *lo } } -func (v *BlockDeviceLimiterValidator) ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachine) (admission.Warnings, error) { +func (v *BlockDeviceLimiterValidator) ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) { return v.validate(ctx, vm) } -func (v *BlockDeviceLimiterValidator) ValidateUpdate(ctx context.Context, _, newVM *virtv2.VirtualMachine) (admission.Warnings, error) { +func (v *BlockDeviceLimiterValidator) ValidateUpdate(ctx context.Context, _, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) { return v.validate(ctx, newVM) } -func (v *BlockDeviceLimiterValidator) validate(ctx context.Context, vm *virtv2.VirtualMachine) (admission.Warnings, error) { +func (v *BlockDeviceLimiterValidator) validate(ctx context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) { count, err := v.service.CountBlockDevicesAttachedToVM(ctx, vm) if err != nil { v.log.Error(err.Error()) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator.go b/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator.go index 834368708a..bf441ecda2 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/component-base/featuregate" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -38,17 +39,21 @@ func 
NewNetworksValidator(featureGate featuregate.FeatureGate) *NetworksValidato } func (v *NetworksValidator) ValidateCreate(_ context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) { - return v.Validate(vm) -} + networksSpec := vm.Spec.Networks + if len(networksSpec) == 0 { + return nil, nil + } -func (v *NetworksValidator) ValidateUpdate(_ context.Context, _, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) { - return v.Validate(newVM) -} + if !v.featureGate.Enabled(featuregates.SDN) { + return nil, fmt.Errorf("network configuration requires SDN to be enabled") + } -func (v *NetworksValidator) Validate(vm *v1alpha2.VirtualMachine) (admission.Warnings, error) { - networksSpec := vm.Spec.Networks + return v.validateNetworksSpec(networksSpec) +} - if len(networksSpec) == 0 { +func (v *NetworksValidator) ValidateUpdate(_ context.Context, oldVM, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) { + newNetworksSpec := newVM.Spec.Networks + if len(newNetworksSpec) == 0 { return nil, nil } @@ -56,6 +61,14 @@ func (v *NetworksValidator) Validate(vm *v1alpha2.VirtualMachine) (admission.War return nil, fmt.Errorf("network configuration requires SDN to be enabled") } + isChanged := !equality.Semantic.DeepEqual(newNetworksSpec, oldVM.Spec.Networks) + if isChanged { + return v.validateNetworksSpec(newNetworksSpec) + } + return nil, nil +} + +func (v *NetworksValidator) validateNetworksSpec(networksSpec []v1alpha2.NetworksSpec) (admission.Warnings, error) { if networksSpec[0].Type != v1alpha2.NetworksTypeMain { return nil, fmt.Errorf("first network in the list must be of type '%s'", v1alpha2.NetworksTypeMain) } @@ -63,6 +76,8 @@ func (v *NetworksValidator) Validate(vm *v1alpha2.VirtualMachine) (admission.War return nil, fmt.Errorf("network with type '%s' should not have a name", v1alpha2.NetworksTypeMain) } + namesSet := make(map[string]struct{}) + for i, network := range networksSpec { if network.Type == v1alpha2.NetworksTypeMain { if i > 0 { @@ -70,10 +85,14 @@ func (v *NetworksValidator) Validate(vm *v1alpha2.VirtualMachine) (admission.War } continue } - if network.Name == "" { return nil, fmt.Errorf("network at index %d with type '%s' must have a non-empty name", i, network.Type) } + + if _, exists := namesSet[network.Name]; exists { + return nil, fmt.Errorf("network name '%s' is duplicated", network.Name) + } + namesSet[network.Name] = struct{}{} } return nil, nil diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator_test.go b/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator_test.go index 0d66a9a803..9a35d4a80d 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator_test.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/validators/networks_validator_test.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func TestNetworksValidate(t *testing.T) { +func TestNetworksValidateCreate(t *testing.T) { tests := []struct { networks []v1alpha2.NetworksSpec sdnEnabled bool @@ -36,12 +36,13 @@ func TestNetworksValidate(t *testing.T) { {[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain, Name: "main"}}, true, false}, {[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, {Type: v1alpha2.NetworksTypeMain}}, true, false}, {[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain}, {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}}, true, true}, + 
{[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain}, {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}}, true, false}, {[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain}, {Type: v1alpha2.NetworksTypeNetwork}}, true, false}, {[]v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain}}, false, false}, } for i, test := range tests { - t.Run(fmt.Sprintf("TestCase%d", i), func(t *testing.T) { + t.Run(fmt.Sprintf("CreateTestCase%d", i), func(t *testing.T) { vm := &v1alpha2.VirtualMachine{Spec: v1alpha2.VirtualMachineSpec{Networks: test.networks}} // Create feature gate with SDN @@ -51,13 +52,120 @@ func TestNetworksValidate(t *testing.T) { } networkValidator := NewNetworksValidator(featureGate) - _, err := networkValidator.Validate(vm) + _, err := networkValidator.ValidateCreate(t.Context(), vm) if test.valid && err != nil { - t.Errorf("For spec %s expected valid, but validation failed", test.networks) + t.Errorf("Validation failed for spec %s: expected valid, but got an error: %v", test.networks, err) } + if !test.valid && err == nil { + t.Errorf("Validation succeeded for spec %s: expected error, but got none", test.networks) + } + }) + } +} +func TestNetworksValidateUpdate(t *testing.T) { + tests := []struct { + oldNetworksSpec []v1alpha2.NetworksSpec + newNetworksSpec []v1alpha2.NetworksSpec + sdnEnabled bool + valid bool + }{ + { + oldNetworksSpec: []v1alpha2.NetworksSpec{}, + newNetworksSpec: []v1alpha2.NetworksSpec{}, + sdnEnabled: true, + valid: true, + }, + { + oldNetworksSpec: []v1alpha2.NetworksSpec{}, + newNetworksSpec: []v1alpha2.NetworksSpec{{Type: v1alpha2.NetworksTypeMain}}, + sdnEnabled: true, + valid: true, + }, + { + oldNetworksSpec: []v1alpha2.NetworksSpec{}, + newNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + sdnEnabled: true, + valid: true, + }, + { + oldNetworksSpec: []v1alpha2.NetworksSpec{}, + newNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + sdnEnabled: true, + valid: false, + }, + { + oldNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + newNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + sdnEnabled: true, + valid: false, + }, + { + oldNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + newNetworksSpec: []v1alpha2.NetworksSpec{ + {Type: v1alpha2.NetworksTypeMain}, + {Type: v1alpha2.NetworksTypeNetwork, Name: "test"}, + }, + sdnEnabled: true, + valid: true, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("UpdateTestCase%d", i), func(t *testing.T) { + oldVM := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + Networks: test.oldNetworksSpec, + }, + } + newVM := &v1alpha2.VirtualMachine{ + Spec: v1alpha2.VirtualMachineSpec{ + Networks: test.newNetworksSpec, + }, + } + + // Create feature gate with SDN + featureGate, _, setFromMap, 
_ := featuregates.New() + if test.sdnEnabled { + _ = setFromMap(map[string]bool{ + string(featuregates.SDN): true, + }) + } + networkValidator := NewNetworksValidator(featureGate) + _, err := networkValidator.ValidateUpdate(t.Context(), oldVM, newVM) + + if test.valid && err != nil { + t.Errorf( + "Validation failed for old spec %v and new spec %v: expected valid, but got an error: %v", + test.oldNetworksSpec, test.newNetworksSpec, err, + ) + } if !test.valid && err == nil { - t.Errorf("For spec %s expected not valid, but validation succeeded", test.networks) + t.Errorf( + "Validation succeeded for old spec %v and new spec %v: expected error, but got none", + test.oldNetworksSpec, test.newNetworksSpec, + ) } }) } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go index f910b5dc90..c8b253de71 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/clustervirtualimage_watcher.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewClusterVirtualImageWatcher() *CLusterVirtualImageWatcher { @@ -39,10 +39,10 @@ func (w *CLusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.C if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.ClusterVirtualImage{}, - handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.ClusterVirtualImage](mgr.GetClient())), - predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool { + &v1alpha2.ClusterVirtualImage{}, + handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*v1alpha2.ClusterVirtualImage](mgr.GetClient())), + predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go index 52f4e80103..057d82ef19 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/kvvm_watcher.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewKVVMWatcher() *KVVMWatcher { @@ -47,7 +47,7 @@ func (w *KVVMWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro handler.TypedEnqueueRequestForOwner[*virtv1.VirtualMachine]( mgr.GetScheme(), mgr.GetRESTMapper(), - &virtv2.VirtualMachine{}, + &v1alpha2.VirtualMachine{}, handler.OnlyControllerOwner(), ), predicate.TypedFuncs[*virtv1.VirtualMachine]{ diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go index 
95da890a7f..8c56133867 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualdisk_watcher.go @@ -17,35 +17,49 @@ limitations under the License. package watcher import ( + "context" "fmt" + "log/slog" + "strings" "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) -func NewVirtualDiskWatcher() *VirtualDiskWatcher { - return &VirtualDiskWatcher{} +func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { + return &VirtualDiskWatcher{ + client: client, + logger: slog.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineKind)), + } } -type VirtualDiskWatcher struct{} +type VirtualDiskWatcher struct { + client client.Client + logger *slog.Logger +} func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualDisk{}, - handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.VirtualDisk](mgr.GetClient())), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + &v1alpha2.VirtualDisk{}, + handler.TypedEnqueueRequestsFromMapFunc(w.enqueue), + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase { return true } @@ -79,3 +93,27 @@ func (w *VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controlle } return nil } + +func (w *VirtualDiskWatcher) enqueue(ctx context.Context, vd *v1alpha2.VirtualDisk) []reconcile.Request { + var vms v1alpha2.VirtualMachineList + err := w.client.List(ctx, &vms, &client.ListOptions{ + Namespace: vd.Namespace, + FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMByVD, vd.Name), + }) + if err != nil { + w.logger.Error(fmt.Sprintf("failed to list virtual machines: %v", err)) + return nil + } + + var result []reconcile.Request + for _, vm := range vms.Items { + result = append(result, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: vm.GetName(), + Namespace: vm.GetNamespace(), + }, + }) + } + + return result +} diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go index de0b772ea1..fe0b67e94e 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/virtualimage_watcher.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" 
"github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewVirtualImageWatcher() *VirtualImageWatcher { @@ -44,10 +44,10 @@ func (w *VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controll if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualImage{}, - handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*virtv2.VirtualImage](mgr.GetClient())), - predicate.TypedFuncs[*virtv2.VirtualImage]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool { + &v1alpha2.VirtualImage{}, + handler.TypedEnqueueRequestsFromMapFunc(enqueueRequestsBlockDevice[*v1alpha2.VirtualImage](mgr.GetClient())), + predicate.TypedFuncs[*v1alpha2.VirtualImage]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -62,16 +62,16 @@ func enqueueRequestsBlockDevice[T client.Object](cl client.Client) func(ctx cont return func(ctx context.Context, obj T) []reconcile.Request { var opts []client.ListOption switch obj.GetObjectKind().GroupVersionKind().Kind { - case virtv2.VirtualImageKind: + case v1alpha2.VirtualImageKind: opts = append(opts, client.InNamespace(obj.GetNamespace()), client.MatchingFields{indexer.IndexFieldVMByVI: obj.GetName()}, ) - case virtv2.ClusterVirtualImageKind: + case v1alpha2.ClusterVirtualImageKind: opts = append(opts, client.MatchingFields{indexer.IndexFieldVMByCVI: obj.GetName()}, ) - case virtv2.VirtualDiskKind: + case v1alpha2.VirtualDiskKind: opts = append(opts, client.InNamespace(obj.GetNamespace()), client.MatchingFields{indexer.IndexFieldVMByVD: obj.GetName()}, @@ -79,7 +79,7 @@ func enqueueRequestsBlockDevice[T client.Object](cl client.Client) func(ctx cont default: return nil } - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList if err := cl.List(ctx, &vms, opts...); err != nil { return nil } diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go index fae48e704c..e0ef2f50ab 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmclass_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineClassWatcher struct{} @@ -46,10 +46,10 @@ func (w VirtualMachineClassWatcher) Watch(mgr manager.Manager, ctr controller.Co if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineClass{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmClass *virtv2.VirtualMachineClass) []reconcile.Request { + &v1alpha2.VirtualMachineClass{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmClass *v1alpha2.VirtualMachineClass) []reconcile.Request { c := mgr.GetClient() - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} err := c.List(ctx, vms, client.MatchingFields{ indexer.IndexFieldVMByClass: vmClass.GetName(), }) @@ -73,9 +73,9 @@ func (w VirtualMachineClassWatcher) Watch(mgr manager.Manager, ctr controller.Co } return 
requests }), - predicate.TypedFuncs[*virtv2.VirtualMachineClass]{ - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineClass]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineClass]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineClass]{ + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineClass]) bool { return false }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineClass]) bool { return !equality.Semantic.DeepEqual(e.ObjectOld.Spec.SizingPolicies, e.ObjectNew.Spec.SizingPolicies) || !equality.Semantic.DeepEqual(e.ObjectOld.Spec.Tolerations, e.ObjectNew.Spec.Tolerations) || !equality.Semantic.DeepEqual(e.ObjectOld.Spec.NodeSelector, e.ObjectNew.Spec.NodeSelector) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go index 470ac7034d..603f8f07c1 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmip_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewVMIPWatcher() *VMIPWatcher { @@ -42,8 +42,8 @@ func (w *VMIPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineIPAddress{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) []reconcile.Request { + &v1alpha2.VirtualMachineIPAddress{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) []reconcile.Request { name := vmip.Status.VirtualMachine if name == "" { return nil @@ -57,8 +57,8 @@ func (w *VMIPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineIPAddress]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineIPAddress]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineIPAddress]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineIPAddress]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase || e.ObjectOld.Status.VirtualMachine != e.ObjectNew.Status.VirtualMachine }, diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmmac_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmmac_watcher.go index ce05a21393..c84408e31d 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmmac_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmmac_watcher.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func NewVMMACWatcher() *VMMACWatcher { @@ -43,12 +43,12 @@ func (w *VMMACWatcher) Watch(mgr manager.Manager, ctr controller.Controller) err if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineMACAddress{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) []reconcile.Request { + 
&v1alpha2.VirtualMachineMACAddress{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) []reconcile.Request { name := vmmac.Status.VirtualMachine if name == "" { for _, ownerRef := range vmmac.OwnerReferences { - if ownerRef.Kind == virtv2.VirtualMachineKind && string(ownerRef.UID) == vmmac.Labels[annotations.LabelVirtualMachineUID] { + if ownerRef.Kind == v1alpha2.VirtualMachineKind && string(ownerRef.UID) == vmmac.Labels[annotations.LabelVirtualMachineUID] { name = ownerRef.Name break } @@ -67,14 +67,14 @@ func (w *VMMACWatcher) Watch(mgr manager.Manager, ctr controller.Controller) err }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineMACAddress]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineMACAddress]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineMACAddress]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineMACAddress]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineMACAddress]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase || e.ObjectOld.Status.VirtualMachine != e.ObjectNew.Status.VirtualMachine }, diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go index 9bd68d31d0..9d199807f9 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmop_watcher.go @@ -31,7 +31,7 @@ import ( commonvmop "github.com/deckhouse/virtualization-controller/pkg/common/vmop" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -45,8 +45,8 @@ func (w *VMOPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineOperation{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmop *virtv2.VirtualMachineOperation) []reconcile.Request { + &v1alpha2.VirtualMachineOperation{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ @@ -56,11 +56,11 @@ func (w *VMOPWatcher) Watch(mgr manager.Manager, ctr controller.Controller) erro }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineOperation]{ - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineOperation]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineOperation]{ + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineOperation]) bool { return commonvmop.IsMigration(e.Object) }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineOperation]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineOperation]) bool { oldCompleted, _ := conditions.GetCondition(vmopcondition.TypeCompleted, e.ObjectOld.Status.Conditions) newCompleted, _ := 
conditions.GetCondition(vmopcondition.TypeCompleted, e.ObjectNew.Status.Conditions) diff --git a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go index 9ea7ff3059..55f0701674 100644 --- a/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vm/internal/watcher/vmsnapshot_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineSnapshotWatcher struct{} @@ -42,8 +42,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineSnapshot{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) []reconcile.Request { + &v1alpha2.VirtualMachineSnapshot{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ @@ -53,8 +53,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, diff --git a/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go b/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go index b9b0ad792a..bf7da56e3f 100644 --- a/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vm/vm_reconciler.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/state" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { @@ -57,7 +57,7 @@ type Reconciler struct { } func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { - if err := ctr.Watch(source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachine]{})); err != nil { + if err := ctr.Watch(source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachine]{})); err != nil { return fmt.Errorf("error setting watch on VM: %w", err) } @@ -67,7 +67,7 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr watcher.NewPodWatcher(), watcher.NewVirtualImageWatcher(), watcher.NewClusterVirtualImageWatcher(), - watcher.NewVirtualDiskWatcher(), + watcher.NewVirtualDiskWatcher(mgr.GetClient()), watcher.NewVMIPWatcher(), watcher.NewVirtualMachineClassWatcher(), watcher.NewVirtualMachineSnapshotWatcher(), @@ -111,10 +111,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req 
reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachine { - return &virtv2.VirtualMachine{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachine { + return &v1alpha2.VirtualMachine{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vm/vm_webhook.go b/images/virtualization-artifact/pkg/controller/vm/vm_webhook.go index 49f4ecd78b..ccd2bd0301 100644 --- a/images/virtualization-artifact/pkg/controller/vm/vm_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vm/vm_webhook.go @@ -29,12 +29,12 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/defaulter" "github.com/deckhouse/virtualization-controller/pkg/controller/vm/internal/validators" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineValidator interface { - ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachine) (admission.Warnings, error) - ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualMachine) (admission.Warnings, error) + ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachine) (admission.Warnings, error) + ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualMachine) (admission.Warnings, error) } type Validator struct { @@ -61,7 +61,7 @@ func NewValidator(client client.Client, service *service.BlockDeviceService, fea } func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - vm, ok := obj.(*virtv2.VirtualMachine) + vm, ok := obj.(*v1alpha2.VirtualMachine) if !ok { return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", obj) } @@ -82,12 +82,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm } func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - oldVM, ok := oldObj.(*virtv2.VirtualMachine) + oldVM, ok := oldObj.(*v1alpha2.VirtualMachine) if !ok { return nil, fmt.Errorf("expected an old VirtualMachine but got a %T", oldObj) } - newVM, ok := newObj.(*virtv2.VirtualMachine) + newVM, ok := newObj.(*v1alpha2.VirtualMachine) if !ok { return nil, fmt.Errorf("expected a new VirtualMachine but got a %T", newObj) } @@ -117,7 +117,7 @@ func (v *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admissi } type VirtualMachineDefaulter interface { - Default(ctx context.Context, vm *virtv2.VirtualMachine) error + Default(ctx context.Context, vm *v1alpha2.VirtualMachine) error } type Defaulter struct { @@ -137,7 +137,7 @@ func NewDefaulter(client client.Client, vmClassService *service.VirtualMachineCl } func (d *Defaulter) Default(ctx context.Context, obj runtime.Object) error { - vm, ok := obj.(*virtv2.VirtualMachine) + vm, ok := obj.(*v1alpha2.VirtualMachine) if !ok { return fmt.Errorf("expected a VirtualMachine but got a %T", obj) } diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go index 18a750f4b6..7dc4e24049 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go +++ 
b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_limiter.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" ) @@ -38,7 +38,7 @@ func NewBlockDeviceLimiter(service *service.BlockDeviceService) *BlockDeviceLimi return &BlockDeviceLimiter{service: service} } -func (h *BlockDeviceLimiter) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { +func (h *BlockDeviceLimiter) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { blockDeviceAttachedCount, err := h.service.CountBlockDevicesAttachedToVMName(ctx, vmbda.Spec.VirtualMachineName, vmbda.Namespace) if err != nil { return reconcile.Result{}, err diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go index 9d41c74e41..056672e8b1 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" ) @@ -42,7 +42,7 @@ func NewBlockDeviceReadyHandler(attachment *service.AttachmentService) *BlockDev } } -func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { +func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmbdacondition.BlockDeviceReadyType) defer func() { conditions.SetCondition(cb.Generation(vmbda.Generation), &vmbda.Status.Conditions) }() @@ -56,7 +56,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu } switch vmbda.Spec.BlockDeviceRef.Kind { - case virtv2.VMBDAObjectRefKindVirtualDisk: + case v1alpha2.VMBDAObjectRefKindVirtualDisk: vdKey := types.NamespacedName{ Name: vmbda.Spec.BlockDeviceRef.Name, Namespace: vmbda.Namespace, @@ -83,7 +83,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu return reconcile.Result{}, nil } - if vd.Status.Phase != virtv2.DiskReady && vd.Status.Phase != virtv2.DiskWaitForFirstConsumer { + if vd.Status.Phase != v1alpha2.DiskReady && vd.Status.Phase != v1alpha2.DiskWaitForFirstConsumer { cb. Status(metav1.ConditionFalse). Reason(vmbdacondition.BlockDeviceNotReady). 
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
index 9d41c74e41..056672e8b1 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/block_device_ready.go
@@ -27,7 +27,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
@@ -42,7 +42,7 @@ func NewBlockDeviceReadyHandler(attachment *service.AttachmentService) *BlockDev
 	}
 }
 
-func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmbdacondition.BlockDeviceReadyType)
 	defer func() { conditions.SetCondition(cb.Generation(vmbda.Generation), &vmbda.Status.Conditions) }()
@@ -56,7 +56,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 	}
 
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		vdKey := types.NamespacedName{
 			Name:      vmbda.Spec.BlockDeviceRef.Name,
 			Namespace: vmbda.Namespace,
 		}
@@ -83,7 +83,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase != virtv2.DiskReady && vd.Status.Phase != virtv2.DiskWaitForFirstConsumer {
+		if vd.Status.Phase != v1alpha2.DiskReady && vd.Status.Phase != v1alpha2.DiskWaitForFirstConsumer {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -91,7 +91,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase == virtv2.DiskReady {
+		if vd.Status.Phase == v1alpha2.DiskReady {
 			diskReadyCondition, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions)
 			if diskReadyCondition.Status != metav1.ConditionTrue {
 				cb.
@@ -124,7 +124,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vd.Status.Phase == virtv2.DiskReady && pvc.Status.Phase != corev1.ClaimBound {
+		if vd.Status.Phase == v1alpha2.DiskReady && pvc.Status.Phase != corev1.ClaimBound {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -134,7 +134,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
 		return reconcile.Result{}, nil
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		viKey := types.NamespacedName{
 			Name:      vmbda.Spec.BlockDeviceRef.Name,
 			Namespace: vmbda.Namespace,
 		}
@@ -161,7 +161,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if vi.Status.Phase != virtv2.ImageReady {
+		if vi.Status.Phase != v1alpha2.ImageReady {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
@@ -169,7 +169,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
 		switch vi.Spec.Storage {
-		case virtv2.StorageKubernetes, virtv2.StoragePersistentVolumeClaim:
+		case v1alpha2.StorageKubernetes, v1alpha2.StoragePersistentVolumeClaim:
 			if vi.Status.Target.PersistentVolumeClaim == "" {
 				cb.
 					Status(metav1.ConditionFalse).
@@ -191,7 +191,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 				return reconcile.Result{}, nil
 			}
 
-			if vi.Status.Phase == virtv2.ImageReady && pvc.Status.Phase != corev1.ClaimBound {
+			if vi.Status.Phase == v1alpha2.ImageReady && pvc.Status.Phase != corev1.ClaimBound {
 				cb.
 					Status(metav1.ConditionFalse).
 					Reason(vmbdacondition.BlockDeviceNotReady).
@@ -201,7 +201,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 			cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
-		case virtv2.StorageContainerRegistry:
+		case v1alpha2.StorageContainerRegistry:
 			if vi.Status.Target.RegistryURL == "" {
 				cb.
 					Status(metav1.ConditionFalse).
@@ -213,7 +213,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.BlockDeviceReady)
 		return reconcile.Result{}, nil
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		cviKey := types.NamespacedName{
 			Name: vmbda.Spec.BlockDeviceRef.Name,
 		}
@@ -238,7 +238,7 @@ func (h BlockDeviceReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Virtu
 			return reconcile.Result{}, nil
 		}
 
-		if cvi.Status.Phase != virtv2.ImageReady {
+		if cvi.Status.Phase != v1alpha2.ImageReady {
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.BlockDeviceNotReady).
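The handler above builds exactly one condition per reconcile and flushes it in a single place via `defer`, so every early return still records a result. A toy builder mirroring that defer-flush idiom (the real conditions package in this repo has a richer API):

package main

import "fmt"

// condBuilder is a cut-down stand-in for conditions.NewConditionBuilder.
type condBuilder struct{ typ, status, reason string }

func newCondBuilder(typ string) *condBuilder             { return &condBuilder{typ: typ} }
func (b *condBuilder) Status(s string) *condBuilder      { b.status = s; return b }
func (b *condBuilder) Reason(r string) *condBuilder      { b.reason = r; return b }

func handle(conds *[]string) {
	cb := newCondBuilder("BlockDeviceReady")
	// Whichever branch returns, the final condition state is recorded once.
	defer func() { *conds = append(*conds, fmt.Sprintf("%s=%s (%s)", cb.typ, cb.status, cb.reason)) }()

	cb.Status("False").Reason("BlockDeviceNotReady")
}

func main() {
	var conds []string
	handle(&conds)
	fmt.Println(conds) // [BlockDeviceReady=False (BlockDeviceNotReady)]
}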
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
index 86d1df2591..de2f80940f 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/deletion.go
@@ -32,13 +32,13 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const deletionHandlerName = "DeletionHandler"
 
 type UnplugInterface interface {
-	IsAttached(vm *virtv2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) bool
+	IsAttached(vm *v1alpha2.VirtualMachine, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) bool
 	UnplugDisk(ctx context.Context, kvvm *virtv1.VirtualMachine, diskName string) error
 }
 type DeletionHandler struct {
@@ -53,14 +53,14 @@ func NewDeletionHandler(unplug UnplugInterface, client client.Client) *DeletionH
 	}
 }
 
-func (h *DeletionHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
-	controllerutil.AddFinalizer(vmbda, virtv2.FinalizerVMBDACleanup)
+func (h *DeletionHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+	controllerutil.AddFinalizer(vmbda, v1alpha2.FinalizerVMBDACleanup)
 
 	if vmbda.DeletionTimestamp == nil {
 		return reconcile.Result{}, nil
 	}
 
-	vm, err := object.FetchObject(ctx, types.NamespacedName{Namespace: vmbda.GetNamespace(), Name: vmbda.Spec.VirtualMachineName}, h.client, &virtv2.VirtualMachine{})
+	vm, err := object.FetchObject(ctx, types.NamespacedName{Namespace: vmbda.GetNamespace(), Name: vmbda.Spec.VirtualMachineName}, h.client, &v1alpha2.VirtualMachine{})
 	if err != nil {
 		return reconcile.Result{}, fmt.Errorf("fetch vm: %w", err)
 	}
@@ -81,22 +81,22 @@ func (h *DeletionHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	log := logger.FromContext(ctx).With(logger.SlogHandler(deletionHandlerName))
 	log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineBlockDeviceAttachment")
-	controllerutil.RemoveFinalizer(vmbda, virtv2.FinalizerVMBDACleanup)
+	controllerutil.RemoveFinalizer(vmbda, v1alpha2.FinalizerVMBDACleanup)
 
 	return reconcile.Result{}, nil
 }
 
-func (h *DeletionHandler) detach(ctx context.Context, kvvm *virtv1.VirtualMachine, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h *DeletionHandler) detach(ctx context.Context, kvvm *virtv1.VirtualMachine, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	if kvvm == nil {
 		return reconcile.Result{}, errors.New("intvirtvm not found to unplug")
 	}
 
 	var blockDeviceName string
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		blockDeviceName = kvbuilder.GenerateVDDiskName(vmbda.Spec.BlockDeviceRef.Name)
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		blockDeviceName = kvbuilder.GenerateVIDiskName(vmbda.Spec.BlockDeviceRef.Name)
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		blockDeviceName = kvbuilder.GenerateCVIDiskName(vmbda.Spec.BlockDeviceRef.Name)
 	}
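The DeletionHandler follows the standard finalizer dance: re-add the finalizer on every pass (AddFinalizer is idempotent), do nothing until a deletionTimestamp appears, and drop the finalizer only after cleanup succeeds. A compile-able sketch using the real controllerutil helpers; the finalizer name and cleanup callback are illustrative:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// cleanupFinalizer is an illustrative name, not this module's real finalizer.
const cleanupFinalizer = "example.io/cleanup"

func handleDeletion(ctx context.Context, obj client.Object, cleanup func(context.Context, client.Object) error) error {
	controllerutil.AddFinalizer(obj, cleanupFinalizer) // idempotent
	if obj.GetDeletionTimestamp() == nil {
		return nil // not being deleted
	}
	if err := cleanup(ctx, obj); err != nil {
		return err // finalizer stays, so deletion blocks until cleanup works
	}
	controllerutil.RemoveFinalizer(obj, cleanupFinalizer)
	return nil
}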
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
index ef41f06e8e..36d14e0b17 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/life_cycle.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
@@ -43,7 +43,7 @@ func NewLifeCycleHandler(attacher *service.AttachmentService) *LifeCycleHandler
 	}
 }
 
-func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle"))
 
 	// TODO protect vd.
@@ -57,7 +57,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	var ad *service.AttachmentDisk
 	switch vmbda.Spec.BlockDeviceRef.Kind {
-	case virtv2.VMBDAObjectRefKindVirtualDisk:
+	case v1alpha2.VMBDAObjectRefKindVirtualDisk:
 		vd, err := h.attacher.GetVirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -65,7 +65,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 		}
 		if vd != nil {
 			ad = service.NewAttachmentDiskFromVirtualDisk(vd)
 		}
-	case virtv2.VMBDAObjectRefKindVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindVirtualImage:
 		vi, err := h.attacher.GetVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name, vmbda.Namespace)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -73,7 +73,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 		}
 		if vi != nil {
 			ad = service.NewAttachmentDiskFromVirtualImage(vi)
 		}
-	case virtv2.VMBDAObjectRefKindClusterVirtualImage:
+	case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
 		cvi, err := h.attacher.GetClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
 		if err != nil {
 			return reconcile.Result{}, err
@@ -97,7 +97,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vmbda.DeletionTimestamp != nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseTerminating
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseTerminating
 		cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown)
 
 		return reconcile.Result{}, nil
@@ -113,7 +113,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 		log.Error("Hot plug has been started for Conflicted VMBDA, please report a bug")
 	}
 
-	vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed
+	vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed
 	cb.
 		Status(metav1.ConditionFalse).
 		Reason(vmbdacondition.Conflict).
@@ -127,12 +127,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vmbda.Status.Phase == "" {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 	}
 
 	blockDeviceReady, _ := conditions.GetCondition(vmbdacondition.BlockDeviceReadyType, vmbda.Status.Conditions)
 	if blockDeviceReady.Status != metav1.ConditionTrue {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -142,7 +142,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 
 	virtualMachineReady, _ := conditions.GetCondition(vmbdacondition.VirtualMachineReadyType, vmbda.Status.Conditions)
 	if virtualMachineReady.Status != metav1.ConditionTrue {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -151,7 +151,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if ad == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -160,7 +160,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if vm == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -169,7 +169,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if kvvm == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -183,7 +183,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	}
 
 	if kvvmi == nil {
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -197,7 +197,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	isHotPlugged, err := h.attacher.IsHotPlugged(ad, vm, kvvmi)
 	if err != nil {
 		if errors.Is(err, service.ErrVolumeStatusNotReady) {
-			vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+			vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 			cb.
 				Status(metav1.ConditionFalse).
 				Reason(vmbdacondition.AttachmentRequestSent).
@@ -211,7 +211,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	if isHotPlugged {
 		log.Info("Hot plug is completed and disk is attached")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseAttached
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseAttached
 		cb.Status(metav1.ConditionTrue).Reason(vmbdacondition.Attached)
 
 		vmbda.Status.VirtualMachineName = vm.Name
@@ -227,7 +227,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	if blockDeviceLimitCondition.Status != metav1.ConditionTrue {
 		log.Info("Virtual machine block device capacity reached")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
@@ -248,7 +248,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 			return reconcile.Result{}, err
 		}
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.AttachmentRequestSent).
@@ -257,7 +257,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	case errors.Is(err, service.ErrBlockDeviceIsSpecAttached):
 		log.Info("VirtualDisk is already attached to the virtual machine spec")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseFailed
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseFailed
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.Conflict).
@@ -266,7 +266,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachi
 	case errors.Is(err, service.ErrHotPlugRequestAlreadySent):
 		log.Info("Attachment request sent: attachment is in progress.")
 
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhaseInProgress
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhaseInProgress
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.AttachmentRequestSent).
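Taken together, the lifecycle handler walks the attachment through Pending → InProgress → Attached, falling back to Failed on conflicts and Terminating on deletion. A minimal sketch of that happy-path progression; the phase strings mirror the real constants in v1alpha2, the transition function is illustrative:

package main

import "fmt"

type phase string

const (
	pending    phase = "Pending"
	inProgress phase = "InProgress"
	attached   phase = "Attached"
	failed     phase = "Failed"
)

// next sketches the transitions driven by successive reconciles: gate
// checks keep the attachment Pending, a sent hot-plug request moves it
// to InProgress, and a plugged volume lands it in Attached.
func next(p phase, gatesOK, requestSent, hotPlugged bool) phase {
	switch {
	case !gatesOK:
		return pending
	case hotPlugged:
		return attached
	case requestSent:
		return inProgress
	default:
		return p
	}
}

func main() {
	p := pending
	p = next(p, true, true, false)
	fmt.Println(p) // InProgress
	p = next(p, true, true, true)
	fmt.Println(p) // Attached
}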
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
index f68f481d65..82d5147299 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/attachment_conflict_validator.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type AttachmentConflictValidator struct {
@@ -39,7 +39,7 @@ func NewAttachmentConflictValidator(service *service.AttachmentService, log *log
 	}
 }
 
-func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	isConflicted, conflictWithName, err := v.service.IsConflictedAttachment(ctx, vmbda)
 	if err != nil {
 		v.log.Error("Failed to validate a VirtualMachineBlockDeviceAttachment creation", "err", err)
@@ -57,6 +57,6 @@ func (v *AttachmentConflictValidator) ValidateCreate(ctx context.Context, vmbda
 	return nil, nil
 }
 
-func (v *AttachmentConflictValidator) ValidateUpdate(_ context.Context, _, _ *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *AttachmentConflictValidator) ValidateUpdate(_ context.Context, _, _ *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	return nil, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
index 7e7fca0f23..37e0e6bea3 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/spec_mutate_validator.go
@@ -22,7 +22,7 @@ import (
 
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type SpecMutateValidator struct{}
@@ -31,11 +31,11 @@ func NewSpecMutateValidator() *SpecMutateValidator {
 	return &SpecMutateValidator{}
 }
 
-func (v *SpecMutateValidator) ValidateCreate(_ context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *SpecMutateValidator) ValidateCreate(_ context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	return nil, nil
 }
 
-func (v *SpecMutateValidator) ValidateUpdate(_ context.Context, oldVMBDA, newVMBDA *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *SpecMutateValidator) ValidateUpdate(_ context.Context, oldVMBDA, newVMBDA *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	if oldVMBDA.Generation != newVMBDA.Generation {
 		return nil, fmt.Errorf("VirtualMachineBlockDeviceAttachment is an idempotent resource: specification changes are not available")
 	}
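SpecMutateValidator leans on the fact that Kubernetes bumps metadata.generation only when .spec changes, so comparing old and new generations is a cheap "spec was edited" test. A self-contained sketch of the same check; the wrapper type is illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// obj is a stand-in; any resource embedding metav1.ObjectMeta behaves the same.
type obj struct{ metav1.ObjectMeta }

// rejectSpecChange enforces spec immutability without diffing the spec itself.
func rejectSpecChange(oldObj, newObj obj) error {
	if oldObj.Generation != newObj.Generation {
		return fmt.Errorf("spec is immutable after creation")
	}
	return nil
}

func main() {
	a := obj{metav1.ObjectMeta{Generation: 1}}
	b := obj{metav1.ObjectMeta{Generation: 2}}
	fmt.Println(rejectSpecChange(a, a)) // <nil>
	fmt.Println(rejectSpecChange(a, b)) // error
}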
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
index 251fe2f282..713f7b4850 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/validators/vm_connect_limiter_validator.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VMConnectLimiterValidator struct {
@@ -40,7 +40,7 @@ func NewVMConnectLimiterValidator(service *service.BlockDeviceService, log *log.
 	}
 }
 
-func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	count, err := v.service.CountBlockDevicesAttachedToVMName(ctx, vmbda.Spec.VirtualMachineName, vmbda.Namespace)
 	if err != nil {
 		return nil, err
@@ -54,7 +54,7 @@ func (v *VMConnectLimiterValidator) ValidateCreate(ctx context.Context, vmbda *v
 	return nil, nil
 }
 
-func (v *VMConnectLimiterValidator) ValidateUpdate(ctx context.Context, _, newVMBDA *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
+func (v *VMConnectLimiterValidator) ValidateUpdate(ctx context.Context, _, newVMBDA *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error) {
 	count, err := v.service.CountBlockDevicesAttachedToVMName(ctx, newVMBDA.Spec.VirtualMachineName, newVMBDA.Namespace)
 	if err != nil {
 		v.log.Error(err.Error())
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
index 12323cfb1d..280c090882 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/virtual_machine_ready.go
@@ -26,7 +26,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition"
 )
@@ -40,7 +40,7 @@ func NewVirtualMachineReadyHandler(attachment *service.AttachmentService) *Virtu
 	}
 }
 
-func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
+func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error) {
 	cb := conditions.NewConditionBuilder(vmbdacondition.VirtualMachineReadyType)
 	defer func() { conditions.SetCondition(cb.Generation(vmbda.Generation), &vmbda.Status.Conditions) }()
@@ -72,10 +72,10 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmbda *virtv2.Vi
 	}
 
 	switch vm.Status.Phase {
-	case virtv2.MachineRunning:
+	case v1alpha2.MachineRunning:
 		// OK.
-	case virtv2.MachineStopping, virtv2.MachineStopped, virtv2.MachineStarting:
-		vmbda.Status.Phase = virtv2.BlockDeviceAttachmentPhasePending
+	case v1alpha2.MachineStopping, v1alpha2.MachineStopped, v1alpha2.MachineStarting:
+		vmbda.Status.Phase = v1alpha2.BlockDeviceAttachmentPhasePending
 		cb.
 			Status(metav1.ConditionFalse).
 			Reason(vmbdacondition.NotAttached).
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
index bbfa6b2b03..57338dd8f1 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/cvi_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition"
 )
@@ -48,11 +48,11 @@ func NewClusterVirtualImageWatcher(client client.Client) *ClusterVirtualImageWat
 
 func (w ClusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.ClusterVirtualImage{},
+		source.Kind(mgr.GetCache(), &v1alpha2.ClusterVirtualImage{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.ClusterVirtualImage]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.ClusterVirtualImage]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.ClusterVirtualImage]) bool {
+			predicate.TypedFuncs[*v1alpha2.ClusterVirtualImage]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.ClusterVirtualImage]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.ClusterVirtualImage]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -70,8 +70,8 @@ func (w ClusterVirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Co
 	return nil
 }
 
-func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *virtv2.ClusterVirtualImage) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas)
 	if err != nil {
 		slog.Default().Error(fmt.Sprintf("failed to list vmbdas: %s", err))
@@ -79,7 +79,7 @@ func (w ClusterVirtualImageWatcher) enqueueRequests(ctx context.Context, cvi *vi
 	}
 
 	for _, vmbda := range vmbdas.Items {
-		if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindClusterVirtualImage && vmbda.Spec.BlockDeviceRef.Name != cvi.GetName() {
+		if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindClusterVirtualImage && vmbda.Spec.BlockDeviceRef.Name != cvi.GetName() {
 			continue
 		}
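All of these watchers share one shape: ignore creates, requeue only when status.phase actually transitions. A runnable, dependency-free analog of that update predicate (the generic interface here is a stand-in for the typed controller-runtime events):

package main

import "fmt"

type withPhase interface{ phase() string }

type cvi struct{ p string }

func (c cvi) phase() string { return c.p }

// phaseChanged mirrors the UpdateFunc above: fire only on a phase
// transition, which keeps noisy status updates from causing reconcile storms.
func phaseChanged[T withPhase](oldObj, newObj T) bool {
	return oldObj.phase() != newObj.phase()
}

func main() {
	fmt.Println(phaseChanged(cvi{"Provisioning"}, cvi{"Ready"})) // true
	fmt.Println(phaseChanged(cvi{"Ready"}, cvi{"Ready"}))        // false
}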
"sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type KVVMIWatcher struct { @@ -93,7 +93,7 @@ func (eh KVVMIEventHandler) enqueueRequests(ctx context.Context, ns string, vsTo return } - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList + var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList err := eh.client.List(ctx, &vmbdas, &client.ListOptions{ Namespace: ns, }) diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go index c8de235a8b..3971036f93 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vd_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -48,11 +48,11 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDisk]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDisk]) bool { return false }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase { return true } @@ -70,8 +70,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller return nil } -func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList +func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList err := w.client.List(ctx, &vmbdas, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -81,7 +81,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt } for _, vmbda := range vmbdas.Items { - if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindVirtualDisk && vmbda.Spec.BlockDeviceRef.Name != vd.GetName() { + if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualDisk && vmbda.Spec.BlockDeviceRef.Name != vd.GetName() { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go index e3991f6f89..00d942f7ef 100644 --- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go @@ -32,7 
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
index e3991f6f89..00d942f7ef 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vi_watcher.go
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition"
 )
@@ -48,11 +48,11 @@ func NewVirtualImageWatcherr(client client.Client) *VirtualImageWatcher {
 
 func (w VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualImage{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualImage{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualImage]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualImage]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualImage]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualImage]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualImage]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualImage]) bool {
 					if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase {
 						return true
 					}
@@ -70,8 +70,8 @@ func (w VirtualImageWatcher) Watch(mgr manager.Manager, ctr controller.Controlle
 	return nil
 }
 
-func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *virtv2.VirtualImage) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *v1alpha2.VirtualImage) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: vi.GetNamespace(),
 	})
@@ -81,7 +81,7 @@ func (w VirtualImageWatcher) enqueueRequests(ctx context.Context, vi *virtv2.Vir
 	}
 
 	for _, vmbda := range vmbdas.Items {
-		if vmbda.Spec.BlockDeviceRef.Kind != virtv2.VMBDAObjectRefKindVirtualImage && vmbda.Spec.BlockDeviceRef.Name != vi.GetName() {
+		if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualImage && vmbda.Spec.BlockDeviceRef.Name != vi.GetName() {
			continue
 		}
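The enqueueRequests mappers all follow the same recipe: when a referenced object changes, list the attachments in its namespace and emit a work-queue key for each one that points at it. A dependency-free sketch of that mapping with illustrative types:

package main

import "fmt"

type attachment struct {
	namespace, name  string
	refKind, refName string
}

type request struct{ namespace, name string }

// enqueueFor keeps only attachments in the same namespace whose block
// device reference matches the changed object.
func enqueueFor(kind, objName, ns string, all []attachment) []request {
	var reqs []request
	for _, a := range all {
		if a.namespace != ns || a.refKind != kind || a.refName != objName {
			continue
		}
		reqs = append(reqs, request{a.namespace, a.name})
	}
	return reqs
}

func main() {
	list := []attachment{
		{"default", "vmbda-1", "VirtualImage", "ubuntu"},
		{"default", "vmbda-2", "VirtualDisk", "data"},
	}
	fmt.Println(enqueueFor("VirtualImage", "ubuntu", "default", list)) // [{default vmbda-1}]
}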
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
index 80ea898e6b..afe472de81 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vm_watcher.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
@@ -49,11 +49,11 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher {
 
 func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
 			handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { return false },
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return false },
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					oldRunningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, e.ObjectOld.Status.Conditions)
 					newRunningCondition, _ := conditions.GetCondition(vmcondition.TypeRunning, e.ObjectNew.Status.Conditions)
 
@@ -71,8 +71,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control
 	return nil
 }
 
-func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) {
-	var vmbdas virtv2.VirtualMachineBlockDeviceAttachmentList
+func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) {
+	var vmbdas v1alpha2.VirtualMachineBlockDeviceAttachmentList
 	err := w.client.List(ctx, &vmbdas, &client.ListOptions{
 		Namespace: vm.GetNamespace(),
 	})
@@ -97,15 +97,15 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V
 	return
 }
 
-func (w VirtualMachineWatcher) hasBlockDeviceAttachmentChanges(oldVM, newVM *virtv2.VirtualMachine) bool {
-	var oldVMBDA []virtv2.BlockDeviceStatusRef
+func (w VirtualMachineWatcher) hasBlockDeviceAttachmentChanges(oldVM, newVM *v1alpha2.VirtualMachine) bool {
+	var oldVMBDA []v1alpha2.BlockDeviceStatusRef
 	for _, bdRef := range oldVM.Status.BlockDeviceRefs {
 		if bdRef.VirtualMachineBlockDeviceAttachmentName != "" {
 			oldVMBDA = append(oldVMBDA, bdRef)
 		}
 	}
 
-	var newVMBDA []virtv2.BlockDeviceStatusRef
+	var newVMBDA []v1alpha2.BlockDeviceStatusRef
 	for _, bdRef := range newVM.Status.BlockDeviceRefs {
 		if bdRef.VirtualMachineBlockDeviceAttachmentName != "" {
 			newVMBDA = append(newVMBDA, bdRef)
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
index 85174d4c1e..169f7361c2 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/internal/watcher/vmbda_watcher.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineBlockDeviceAttachmentWatcher struct{}
@@ -35,10 +35,10 @@ func NewVirtualMachineBlockDeviceAttachmentWatcher() *VirtualMachineBlockDeviceA
 
 func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachineBlockDeviceAttachment{},
-			&handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineBlockDeviceAttachment]{},
-			predicate.TypedFuncs[*virtv2.VirtualMachineBlockDeviceAttachment]{
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineBlockDeviceAttachment]) bool {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineBlockDeviceAttachment{},
+			&handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineBlockDeviceAttachment]{},
+			predicate.TypedFuncs[*v1alpha2.VirtualMachineBlockDeviceAttachment]{
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineBlockDeviceAttachment]) bool {
 					return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
 				},
 			},
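hasBlockDeviceAttachmentChanges filters the VM's status refs down to the hot-plugged ones (non-empty attachment name) and compares the filtered views. A runnable analog using string refs in place of BlockDeviceStatusRef:

package main

import (
	"fmt"
	"slices"
)

// hotplugged keeps only refs that originate from a VMBDA.
func hotplugged(refs []string) []string {
	var out []string
	for _, r := range refs {
		if r != "" {
			out = append(out, r)
		}
	}
	return out
}

// changed reports whether the hot-plug view of the VM differs between
// the old and new objects, which is what should trigger a requeue.
func changed(oldRefs, newRefs []string) bool {
	return !slices.Equal(hotplugged(oldRefs), hotplugged(newRefs))
}

func main() {
	fmt.Println(changed([]string{"", "vmbda-a"}, []string{"vmbda-a", ""})) // false
	fmt.Println(changed([]string{"vmbda-a"}, nil))                        // true
}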
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
index 9ef2b135b9..3d799019b0 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_controller.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vmbdametrics "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmbda"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const ControllerName = "vmbda-controller"
@@ -72,7 +72,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualMachineBlockDeviceAttachment{}).
+		For(&v1alpha2.VirtualMachineBlockDeviceAttachment{}).
 		WithValidator(NewValidator(attacher, blockDeviceService, lg)).
 		Complete(); err != nil {
 		return nil, err
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
index a8e4ac9ea3..6609e40934 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_reconciler.go
@@ -28,11 +28,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmbda/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
-	Handle(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error)
+	Handle(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (reconcile.Result, error)
 }
 
 type Watcher interface {
@@ -94,10 +94,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachineBlockDeviceAttachment {
-	return &virtv2.VirtualMachineBlockDeviceAttachment{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineBlockDeviceAttachment {
+	return &v1alpha2.VirtualMachineBlockDeviceAttachment{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineBlockDeviceAttachment) virtv2.VirtualMachineBlockDeviceAttachmentStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineBlockDeviceAttachment) v1alpha2.VirtualMachineBlockDeviceAttachmentStatus {
 	return obj.Status
 }
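The Handler interface above is the hinge of this controller's design: the reconciler itself stays thin and fans each object out to an ordered chain of handlers (limiter, readiness, lifecycle, deletion), each of which may mutate status and request a requeue. A dependency-free sketch of that chain pattern with illustrative types:

package main

import "fmt"

type result struct{ requeue bool }

type handler interface {
	handle(obj *string) (result, error)
}

type handlerFunc func(obj *string) (result, error)

func (f handlerFunc) handle(obj *string) (result, error) { return f(obj) }

// reconcile runs every handler in order, stopping on the first error
// and aggregating requeue requests.
func reconcile(obj *string, chain []handler) (result, error) {
	var agg result
	for _, h := range chain {
		res, err := h.handle(obj)
		if err != nil {
			return result{}, err
		}
		agg.requeue = agg.requeue || res.requeue
	}
	return agg, nil
}

func main() {
	obj := "vmbda"
	res, err := reconcile(&obj, []handler{
		handlerFunc(func(o *string) (result, error) { *o += ":ready"; return result{}, nil }),
		handlerFunc(func(o *string) (result, error) { return result{requeue: true}, nil }),
	})
	fmt.Println(obj, res, err) // vmbda:ready {true} <nil>
}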
diff --git a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
index 6f8432aa30..b355b03710 100644
--- a/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmbda/vmbda_webhook.go
@@ -26,12 +26,12 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/service"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmbda/internal/validators"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineBlockDeviceAttachmentValidator interface {
-	ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
-	ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
+	ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
+	ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualMachineBlockDeviceAttachment) (admission.Warnings, error)
 }
 
 type Validator struct {
@@ -51,7 +51,7 @@ func NewValidator(attachmentService *service.AttachmentService, service *service
 }
 
 func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	vmbda, ok := obj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	vmbda, ok := obj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineBlockDeviceAttachment but got a %T", obj)
 	}
@@ -70,12 +70,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMBDA, ok := oldObj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	oldVMBDA, ok := oldObj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineBlockDeviceAttachment but got a %T", oldObj)
 	}
 
-	newVMBDA, ok := newObj.(*virtv2.VirtualMachineBlockDeviceAttachment)
+	newVMBDA, ok := newObj.(*v1alpha2.VirtualMachineBlockDeviceAttachment)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineBlockDeviceAttachment but got a %T", newObj)
 	}
diff --git a/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go b/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
index cca57372e3..7d3951b293 100644
--- a/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
+++ b/images/virtualization-artifact/pkg/controller/vmchange/compare_test.go
@@ -23,7 +23,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"sigs.k8s.io/yaml"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func TestActionRequiredOnCompare(t *testing.T) {
@@ -369,9 +369,9 @@ enableParavirtualization: true
 	}
 }
 
-func loadVMSpec(t *testing.T, inYAML string) *virtv2.VirtualMachineSpec {
+func loadVMSpec(t *testing.T, inYAML string) *v1alpha2.VirtualMachineSpec {
 	t.Helper()
-	var spec virtv2.VirtualMachineSpec
+	var spec v1alpha2.VirtualMachineSpec
 	err := yaml.Unmarshal([]byte(inYAML), &spec)
 	require.NoError(t, err, "Should load vm spec from '%s'", inYAML)
 	return &spec
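The loadVMSpec helper works because sigs.k8s.io/yaml routes YAML through JSON, so API types only need their json tags. A cut-down, runnable version of the same helper under an illustrative spec type (not the real VirtualMachineSpec):

package vmchange_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"sigs.k8s.io/yaml"
)

// miniSpec stands in for the real spec type; json tags drive decoding.
type miniSpec struct {
	RunPolicy string `json:"runPolicy"`
	CPUCores  int    `json:"cpuCores"`
}

func loadSpec(t *testing.T, in string) *miniSpec {
	t.Helper()
	var spec miniSpec
	err := yaml.Unmarshal([]byte(in), &spec)
	require.NoError(t, err, "Should load spec from '%s'", in)
	return &spec
}

func TestLoadSpec(t *testing.T) {
	spec := loadSpec(t, "runPolicy: AlwaysOn\ncpuCores: 2\n")
	require.Equal(t, "AlwaysOn", spec.RunPolicy)
	require.Equal(t, 2, spec.CPUCores)
}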
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
index 6795684062..d3413b7a57 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/deletion.go
@@ -31,7 +31,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
@@ -57,7 +57,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineClas
 	}
 	changed := s.VirtualMachineClass().Changed()
 	if s.VirtualMachineClass().Current().GetDeletionTimestamp().IsZero() {
-		controllerutil.AddFinalizer(changed, virtv2.FinalizerVMCleanup)
+		controllerutil.AddFinalizer(changed, v1alpha2.FinalizerVMCleanup)
 		return reconcile.Result{}, nil
 	}
@@ -89,7 +89,7 @@ func (h *DeletionHandler) Handle(ctx context.Context, s state.VirtualMachineClas
 	conditions.RemoveCondition(vmclasscondition.TypeInUse, &changed.Status.Conditions)
 	h.logger.Info("Deletion observed: remove cleanup finalizer from VirtualMachineClass")
-	controllerutil.RemoveFinalizer(changed, virtv2.FinalizerVMCleanup)
+	controllerutil.RemoveFinalizer(changed, v1alpha2.FinalizerVMCleanup)
 	return reconcile.Result{}, nil
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
index 5ffa860013..de4882f3b4 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/discovery.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
 	"github.com/deckhouse/virtualization-controller/pkg/eventrecord"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
@@ -85,17 +85,17 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		featuresNotEnabled []string
 	)
 	switch cpuType {
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		if fs := current.Status.CpuFeatures.Enabled; len(fs) > 0 {
 			featuresEnabled = fs
 			break
 		}
 		featuresEnabled = h.discoveryCommonFeatures(nodes)
-	case virtv2.CPUTypeFeatures:
+	case v1alpha2.CPUTypeFeatures:
 		featuresEnabled = current.Spec.CPU.Features
 	}
 
-	if cpuType == virtv2.CPUTypeDiscovery || cpuType == virtv2.CPUTypeFeatures {
+	if cpuType == v1alpha2.CPUTypeDiscovery || cpuType == v1alpha2.CPUTypeFeatures {
 		commonFeatures := h.discoveryCommonFeatures(availableNodes)
 		for _, cf := range commonFeatures {
 			if !slices.Contains(featuresEnabled, cf) {
@@ -106,7 +106,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 	cb := conditions.NewConditionBuilder(vmclasscondition.TypeDiscovered).Generation(current.GetGeneration())
 	switch cpuType {
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		if len(featuresEnabled) > 0 {
 			cb.Message("").Reason(vmclasscondition.ReasonDiscoverySucceeded).Status(metav1.ConditionTrue)
 			break
@@ -131,7 +131,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		h.recorder.Eventf(
 			changed,
 			corev1.EventTypeNormal,
-			virtv2.ReasonVMClassNodesWereUpdated,
+			v1alpha2.ReasonVMClassNodesWereUpdated,
 			"List of available nodes was updated, added nodes: %q, removed nodes: %q",
 			addedNodes,
 			removedNodes,
@@ -140,7 +140,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 		h.recorder.Eventf(
 			changed,
 			corev1.EventTypeWarning,
-			virtv2.ReasonVMClassAvailableNodesListEmpty,
+			v1alpha2.ReasonVMClassAvailableNodesListEmpty,
 			"List of available nodes was updated, now it's empty, removed nodes: %q",
 			removedNodes,
 		)
@@ -149,7 +149,7 @@ func (h *DiscoveryHandler) Handle(ctx context.Context, s state.VirtualMachineCla
 	changed.Status.AvailableNodes = availableNodeNames
 	changed.Status.MaxAllocatableResources = h.maxAllocatableResources(availableNodes)
-	changed.Status.CpuFeatures = virtv2.CpuFeatures{
+	changed.Status.CpuFeatures = v1alpha2.CpuFeatures{
 		Enabled:          featuresEnabled,
 		NotEnabledCommon: featuresNotEnabled,
 	}
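The discovery handler's core idea is an intersection: a CPU feature is "common" only if every candidate node advertises it. A runnable sketch of that computation; the feature lists stand in for node CPU-feature labels:

package main

import (
	"fmt"
	"slices"
)

// commonFeatures returns the features advertised by every node.
func commonFeatures(nodes [][]string) []string {
	if len(nodes) == 0 {
		return nil
	}
	var common []string
	for _, f := range nodes[0] {
		shared := true
		for _, feats := range nodes[1:] {
			if !slices.Contains(feats, f) {
				shared = false
				break
			}
		}
		if shared {
			common = append(common, f)
		}
	}
	return common
}

func main() {
	nodes := [][]string{
		{"sse4.2", "avx", "avx2"},
		{"sse4.2", "avx"},
		{"sse4.2", "avx", "amx"},
	}
	fmt.Println(commonFeatures(nodes)) // [sse4.2 avx]
}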
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
index bba47f1625..d8f6096a8f 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/lifecycle.go
@@ -25,7 +25,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
@@ -48,21 +48,21 @@ func (h *LifeCycleHandler) Handle(_ context.Context, s state.VirtualMachineClass
 	current := s.VirtualMachineClass().Current()
 	changed := s.VirtualMachineClass().Changed()
 	if isDeletion(current) {
-		changed.Status.Phase = virtv2.ClassPhaseTerminating
+		changed.Status.Phase = v1alpha2.ClassPhaseTerminating
 		return reconcile.Result{}, nil
 	}
 
 	cb := conditions.NewConditionBuilder(vmclasscondition.TypeReady).
 		Generation(current.GetGeneration())
 
-	var phase virtv2.VirtualMachineClassPhase
+	var phase v1alpha2.VirtualMachineClassPhase
 	switch current.Spec.CPU.Type {
-	case virtv2.CPUTypeHostPassthrough, virtv2.CPUTypeHost:
+	case v1alpha2.CPUTypeHostPassthrough, v1alpha2.CPUTypeHost:
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
-		phase = virtv2.ClassPhaseReady
-	case virtv2.CPUTypeDiscovery:
+		phase = v1alpha2.ClassPhaseReady
+	case v1alpha2.CPUTypeDiscovery:
 		var notReady bool
 		if len(changed.Status.AvailableNodes) == 0 {
 			cb.Message("No matching nodes found.")
@@ -75,23 +75,23 @@ func (h *LifeCycleHandler) Handle(_ context.Context, s state.VirtualMachineClass
 			notReady = true
 		}
 		if notReady {
-			phase = virtv2.ClassPhasePending
+			phase = v1alpha2.ClassPhasePending
 			cb.Status(metav1.ConditionFalse)
 			break
 		}
-		phase = virtv2.ClassPhaseReady
+		phase = v1alpha2.ClassPhaseReady
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
 	default:
 		if len(changed.Status.AvailableNodes) == 0 {
-			phase = virtv2.ClassPhasePending
+			phase = v1alpha2.ClassPhasePending
 			cb.Message("No matching nodes found.").
 				Reason(vmclasscondition.ReasonNoSuitableNodesFound).
 				Status(metav1.ConditionFalse)
 			break
 		}
-		phase = virtv2.ClassPhaseReady
+		phase = v1alpha2.ClassPhaseReady
 		cb.Message("").
 			Reason(vmclasscondition.ReasonSuitableNodesFound).
 			Status(metav1.ConditionTrue)
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
index 6056da1270..f2f728c2fb 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/state/state.go
@@ -30,12 +30,12 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/array"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/indexer"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachineClassState interface {
-	VirtualMachineClass() *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]
-	VirtualMachines(ctx context.Context) ([]virtv2.VirtualMachine, error)
+	VirtualMachineClass() *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]
+	VirtualMachines(ctx context.Context) ([]v1alpha2.VirtualMachine, error)
 	Nodes(ctx context.Context) ([]corev1.Node, error)
 	AvailableNodes(nodes []corev1.Node) ([]corev1.Node, error)
 }
@@ -43,23 +43,23 @@ type VirtualMachineClassState interface {
 type state struct {
 	controllerNamespace string
 	client              client.Client
-	vmClass             *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]
+	vmClass             *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]
 }
 
-func New(c client.Client, controllerNamespace string, vmClass *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus]) VirtualMachineClassState {
+func New(c client.Client, controllerNamespace string, vmClass *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus]) VirtualMachineClassState {
 	return &state{client: c, controllerNamespace: controllerNamespace, vmClass: vmClass}
 }
 
-func (s *state) VirtualMachineClass() *reconciler.Resource[*virtv2.VirtualMachineClass, virtv2.VirtualMachineClassStatus] {
+func (s *state) VirtualMachineClass() *reconciler.Resource[*v1alpha2.VirtualMachineClass, v1alpha2.VirtualMachineClassStatus] {
 	return s.vmClass
 }
 
-func (s *state) VirtualMachines(ctx context.Context) ([]virtv2.VirtualMachine, error) {
+func (s *state) VirtualMachines(ctx context.Context) ([]v1alpha2.VirtualMachine, error) {
 	if s.vmClass == nil || s.vmClass.IsEmpty() {
 		return nil, nil
 	}
 	name := s.vmClass.Current().GetName()
 
-	vms := &virtv2.VirtualMachineList{}
+	vms := &v1alpha2.VirtualMachineList{}
 	err := s.client.List(ctx, vms, client.MatchingFields{
 		indexer.IndexFieldVMByClass: name,
 	})
@@ -94,16 +94,16 @@ func (s *state) Nodes(ctx context.Context) ([]corev1.Node, error) {
 	}
 
 	switch curr.Spec.CPU.Type {
-	case virtv2.CPUTypeHost, virtv2.CPUTypeHostPassthrough:
+	case v1alpha2.CPUTypeHost, v1alpha2.CPUTypeHostPassthrough:
 		// Node is always has the "Host" CPU type, no additional filters required.
-	case virtv2.CPUTypeDiscovery:
+	case v1alpha2.CPUTypeDiscovery:
 		matchLabels = curr.Spec.CPU.Discovery.NodeSelector.MatchLabels
 		filters = append(filters, func(node *corev1.Node) bool {
 			return annotations.MatchExpressions(node.GetLabels(), curr.Spec.CPU.Discovery.NodeSelector.MatchExpressions)
 		})
-	case virtv2.CPUTypeModel:
+	case v1alpha2.CPUTypeModel:
 		matchLabels = map[string]string{virtv1.CPUModelLabel + curr.Spec.CPU.Model: "true"}
-	case virtv2.CPUTypeFeatures:
+	case v1alpha2.CPUTypeFeatures:
 		ml := make(map[string]string, len(curr.Spec.CPU.Features))
 		for _, feature := range curr.Spec.CPU.Features {
 			ml[virtv1.CPUFeatureLabel+feature] = "true"
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
index 7e9f8d91a7..f6a7a72ddf 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/util.go
@@ -20,15 +20,15 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmclasscondition"
 )
 
-func isDeletion(class *virtv2.VirtualMachineClass) bool {
+func isDeletion(class *v1alpha2.VirtualMachineClass) bool {
 	return class == nil || !class.GetDeletionTimestamp().IsZero()
 }
 
-func addAllUnknown(class *virtv2.VirtualMachineClass, conds ...vmclasscondition.Type) (update bool) {
+func addAllUnknown(class *v1alpha2.VirtualMachineClass, conds ...vmclasscondition.Type) (update bool) {
 	//nolint:staticcheck // it's deprecated.
 	mgr := conditions.NewManager(class.Status.Conditions)
 	for _, c := range conds {
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
index a8952bd291..8c352820a4 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/node_watcher.go
@@ -35,7 +35,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common/annotations"
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type NodesWatcher struct{}
@@ -50,7 +50,7 @@ func (w *NodesWatcher) Watch(mgr manager.Manager, ctr controller.Controller) err
 			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, node *corev1.Node) []reconcile.Request {
 				var result []reconcile.Request
 
-				classList := &virtv2.VirtualMachineClassList{}
+				classList := &v1alpha2.VirtualMachineClassList{}
 				err := mgr.GetClient().List(ctx, classList)
 				if err != nil {
 					log.Error("failed to list VMClasses", "error", err)
diff --git a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
index aaf00e055b..72e87462cd 100644
--- a/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
+++ b/images/virtualization-artifact/pkg/controller/vmclass/internal/watcher/vm_watcher.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type VirtualMachinesWatcher struct{}
@@ -44,12 +44,12 @@ func NewVirtualMachinesWatcher() *VirtualMachinesWatcher {
 func (w *VirtualMachinesWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error {
 	mgrClient := mgr.GetClient()
 	if err := ctr.Watch(
-		source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{},
-			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *virtv2.VirtualMachine) []reconcile.Request {
+		source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{},
+			handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request {
 				vmClassName := vm.Spec.VirtualMachineClassName
 				vmc, err := object.FetchObject(ctx, types.NamespacedName{
 					Name: vmClassName,
-				}, mgrClient, &virtv2.VirtualMachineClass{})
+				}, mgrClient, &v1alpha2.VirtualMachineClass{})
 
 				if vmc == nil {
 					return nil
@@ -68,11 +68,11 @@ func (w *VirtualMachinesWatcher) Watch(mgr manager.Manager, ctr controller.Contr
 					},
 				}
 			}),
-			predicate.TypedFuncs[*virtv2.VirtualMachine]{
-				CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool {
+			predicate.TypedFuncs[*v1alpha2.VirtualMachine]{
+				CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool {
 					return false
 				},
-				UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool {
+				UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool {
 					return false
 				},
 			},
b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go index 460645aaa2..f1bbfce39b 100644 --- a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_reconciler.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/state" "github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { @@ -61,8 +61,8 @@ type Reconciler struct { func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( source.Kind(mgr.GetCache(), - &virtv2.VirtualMachineClass{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineClass]{}, + &v1alpha2.VirtualMachineClass{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineClass]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VMClass: %w", err) @@ -111,10 +111,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineClass { - return &virtv2.VirtualMachineClass{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineClass { + return &v1alpha2.VirtualMachineClass{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineClass) virtv2.VirtualMachineClassStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineClass) v1alpha2.VirtualMachineClassStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go index 81e3d4a1ea..5f0fbe6a7e 100644 --- a/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vmclass/vmclass_webhook.go @@ -25,12 +25,12 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vmclass/internal/validators" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineClassValidator interface { - ValidateCreate(ctx context.Context, vm *virtv2.VirtualMachineClass) (admission.Warnings, error) - ValidateUpdate(ctx context.Context, oldVM, newVM *virtv2.VirtualMachineClass) (admission.Warnings, error) + ValidateCreate(ctx context.Context, vm *v1alpha2.VirtualMachineClass) (admission.Warnings, error) + ValidateUpdate(ctx context.Context, oldVM, newVM *v1alpha2.VirtualMachineClass) (admission.Warnings, error) } type Validator struct { @@ -50,7 +50,7 @@ func NewValidator(client client.Client, log *log.Logger, recorder eventrecord.Ev } func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - vmclass, ok := obj.(*virtv2.VirtualMachineClass) + vmclass, ok := obj.(*v1alpha2.VirtualMachineClass) if !ok { return nil, fmt.Errorf("expected a new VirtualMachineClass but got a %T", obj) } @@ -69,12 +69,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm } func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj
runtime.Object) (admission.Warnings, error) { - oldVMClass, ok := oldObj.(*virtv2.VirtualMachineClass) + oldVMClass, ok := oldObj.(*v1alpha2.VirtualMachineClass) if !ok { return nil, fmt.Errorf("expected an old VirtualMachineClass but got a %T", oldObj) } - newVMClass, ok := newObj.(*virtv2.VirtualMachineClass) + newVMClass, ok := newObj.(*v1alpha2.VirtualMachineClass) if !ok { return nil, fmt.Errorf("expected a new VirtualMachineClass but got a %T", newObj) } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go index 9b6d747f86..9bf4a651b0 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/attached_handler.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -46,7 +46,7 @@ func NewAttachedHandler(recorder eventrecord.EventRecorderLogger, client client. } } -func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { +func (h *AttachedHandler) Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmipcondition.AttachedType).Generation(vmip.GetGeneration()) vm, err := h.getAttachedVirtualMachine(ctx, vmip) @@ -66,7 +66,7 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachin Reason(vmipcondition.VirtualMachineNotFound). Message("VirtualMachineIPAddress is not attached to any virtual machine.") conditions.SetCondition(cb, &vmip.Status.Conditions) - h.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonNotAttached, "VirtualMachineIPAddress is not attached to any virtual machine.") + h.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonNotAttached, "VirtualMachineIPAddress is not attached to any virtual machine.") return reconcile.Result{}, nil } @@ -76,13 +76,13 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachin Reason(vmipcondition.Attached). 
Message("") conditions.SetCondition(cb, &vmip.Status.Conditions) - h.recorder.Eventf(vmip, corev1.EventTypeNormal, virtv2.ReasonAttached, "VirtualMachineIPAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name) + h.recorder.Eventf(vmip, corev1.EventTypeNormal, v1alpha2.ReasonAttached, "VirtualMachineIPAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name) return reconcile.Result{}, nil } -func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachine, error) { - var vms virtv2.VirtualMachineList +func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachine, error) { + var vms v1alpha2.VirtualMachineList err := h.client.List(ctx, &vms, &client.ListOptions{Namespace: vmip.Namespace}) if err != nil { return nil, fmt.Errorf("list vms: %w", err) @@ -90,7 +90,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v // Return the first one for which the status matches. // If no status matches, return the first one for which the spec matches. - var attachedVM *virtv2.VirtualMachine + var attachedVM *v1alpha2.VirtualMachine for _, vm := range vms.Items { if vm.Status.VirtualMachineIPAddress == vmip.Name { attachedVM = &vm @@ -109,7 +109,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v // If there's no match for the spec either, then try to find the vm by ownerRef. var vmName string for _, ownerRef := range vmip.OwnerReferences { - if ownerRef.Kind == virtv2.VirtualMachineKind && string(ownerRef.UID) == vmip.Labels[annotations.LabelVirtualMachineUID] { + if ownerRef.Kind == v1alpha2.VirtualMachineKind && string(ownerRef.UID) == vmip.Labels[annotations.LabelVirtualMachineUID] { vmName = ownerRef.Name break } @@ -120,7 +120,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmip *v } vmKey := types.NamespacedName{Name: vmName, Namespace: vmip.Namespace} - attachedVM, err = object.FetchObject(ctx, vmKey, h.client, &virtv2.VirtualMachine{}) + attachedVM, err = object.FetchObject(ctx, vmKey, h.client, &v1alpha2.VirtualMachine{}) if err != nil { return nil, fmt.Errorf("fetch vm %s: %w", vmKey, err) } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go index aafbcb1b1d..8b0016d347 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/step" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -50,7 +50,7 @@ func NewBoundHandler(ipService IPAddressService, client client.Client, recorder } } -func (h *BoundHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { +func (h *BoundHandler) Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmipcondition.BoundType).Generation(vmip.Generation) defer func() { conditions.SetCondition(cb, 
&vmip.Status.Conditions) }() @@ -75,7 +75,7 @@ func (h *BoundHandler) Handle(ctx context.Context, vmip *virtv2.VirtualMachineIP ctx = logger.ToContext(ctx, log) } - return steptaker.NewStepTakers[*virtv2.VirtualMachineIPAddress]( + return steptaker.NewStepTakers[*v1alpha2.VirtualMachineIPAddress]( step.NewBindStep(lease, cb), step.NewTakeLeaseStep(lease, h.client, cb, h.recorder), step.NewCreateLeaseStep(lease, h.ipService, h.client, cb, h.recorder), diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go index a38fa4080f..b643634642 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/bound_handler_test.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -44,8 +44,8 @@ var _ = Describe("BoundHandler", func() { var ( scheme *runtime.Scheme ctx context.Context - vmip *virtv2.VirtualMachineIPAddress - lease *virtv2.VirtualMachineIPAddressLease + vmip *v1alpha2.VirtualMachineIPAddress + lease *v1alpha2.VirtualMachineIPAddressLease svc *IPAddressServiceMock recorderMock *eventrecord.EventRecorderLoggerMock ) @@ -53,22 +53,22 @@ var _ = Describe("BoundHandler", func() { BeforeEach(func() { scheme = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() - vmip = &virtv2.VirtualMachineIPAddress{ + vmip = &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vmip", Namespace: "ns", }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, } - lease = &virtv2.VirtualMachineIPAddressLease{ + lease = &v1alpha2.VirtualMachineIPAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineIPAddressUID: string(vmip.UID), @@ -79,7 +79,7 @@ var _ = Describe("BoundHandler", func() { } svc = &IPAddressServiceMock{ - GetLeaseFunc: func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + GetLeaseFunc: func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { return nil, nil }, GetAllocatedIPsFunc: func(ctx context.Context) (ip.AllocatedIPs, error) { @@ -105,7 +105,7 @@ var _ = Describe("BoundHandler", func() { k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(). 
WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { - _, ok := obj.(*virtv2.VirtualMachineIPAddressLease) + _, ok := obj.(*v1alpha2.VirtualMachineIPAddressLease) Expect(ok).To(BeTrue()) leaseCreated = true return nil @@ -130,14 +130,14 @@ var _ = Describe("BoundHandler", func() { It("takes existing released lease", func() { var leaseUpdated bool - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { lease.Spec.VirtualMachineIPAddressRef = nil return lease, nil } k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(). WithInterceptorFuncs(interceptor.Funcs{ Update: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.UpdateOption) error { - updatedLease, ok := obj.(*virtv2.VirtualMachineIPAddressLease) + updatedLease, ok := obj.(*v1alpha2.VirtualMachineIPAddressLease) Expect(ok).To(BeTrue()) Expect(updatedLease.Spec.VirtualMachineIPAddressRef).NotTo(BeNil()) Expect(updatedLease.Spec.VirtualMachineIPAddressRef.Name).To(Equal(vmip.Name)) @@ -159,8 +159,8 @@ var _ = Describe("BoundHandler", func() { }) It("cannot take existing lease: it's bound to another vmip", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: "another-vmip", } @@ -176,8 +176,8 @@ var _ = Describe("BoundHandler", func() { }) It("cannot take existing lease: it belongs to different namespace", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace + "-different", } return lease, nil @@ -192,7 +192,7 @@ var _ = Describe("BoundHandler", func() { }) It("is lost", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { return nil, nil } h := NewBoundHandler(svc, nil, recorderMock) @@ -207,8 +207,8 @@ var _ = Describe("BoundHandler", func() { Context("Binding", func() { It("has non-bound lease with ref", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = 
&v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: vmip.Name, } @@ -225,8 +225,8 @@ var _ = Describe("BoundHandler", func() { }) It("has bound lease", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { - lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { + lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Namespace: vmip.Namespace, Name: vmip.Name, } @@ -250,7 +250,7 @@ var _ = Describe("BoundHandler", func() { }) }) -func ExpectCondition(vmip *virtv2.VirtualMachineIPAddress, status metav1.ConditionStatus, reason vmipcondition.BoundReason, msgExists bool) { +func ExpectCondition(vmip *v1alpha2.VirtualMachineIPAddress, status metav1.ConditionStatus, reason vmipcondition.BoundReason, msgExists bool) { ready, _ := conditions.GetCondition(vmipcondition.BoundType, vmip.Status.Conditions) Expect(ready.Status).To(Equal(status)) Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go b/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go index 982dc604f3..17d2945029 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/interface.go @@ -20,13 +20,13 @@ import ( "context" "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/step" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . 
IPAddressService type IPAddressService interface { - GetLease(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) + GetLease(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) step.Allocator } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go index 6f7b6f5ba3..c888879c6b 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/lifecycle_handler.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -39,22 +39,22 @@ func NewLifecycleHandler(recorder eventrecord.EventRecorderLogger) *LifecycleHan } } -func (h *LifecycleHandler) Handle(_ context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { boundCondition, _ := conditions.GetCondition(vmipcondition.BoundType, vmip.Status.Conditions) if boundCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(boundCondition, vmip) { - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhasePending + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhasePending return reconcile.Result{}, nil } attachedCondition, _ := conditions.GetCondition(vmipcondition.AttachedType, vmip.Status.Conditions) if attachedCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(boundCondition, vmip) { - if vmip.Status.Phase != virtv2.VirtualMachineIPAddressPhaseBound { - h.recorder.Eventf(vmip, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineIPAddress is bound.") + if vmip.Status.Phase != v1alpha2.VirtualMachineIPAddressPhaseBound { + h.recorder.Eventf(vmip, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineIPAddress is bound.") } - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhaseBound + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhaseBound return reconcile.Result{}, nil } - vmip.Status.Phase = virtv2.VirtualMachineIPAddressPhaseAttached + vmip.Status.Phase = v1alpha2.VirtualMachineIPAddressPhaseAttached return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/mock.go b/images/virtualization-artifact/pkg/controller/vmip/internal/mock.go index 3ec9fe0d9b..719c22c428 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/mock.go @@ -6,7 +6,7 @@ package internal import ( "context" "github.com/deckhouse/virtualization-controller/pkg/common/ip" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "sync" ) @@ -26,7 +26,7 @@ var _ IPAddressService = &IPAddressServiceMock{} // GetAllocatedIPsFunc: func(ctx context.Context) (ip.AllocatedIPs, error) { // panic("mock out the GetAllocatedIPs method") // }, -// GetLeaseFunc: func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, 
error) { +// GetLeaseFunc: func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { // panic("mock out the GetLease method") // }, // IsInsideOfRangeFunc: func(address string) error { @@ -46,7 +46,7 @@ type IPAddressServiceMock struct { GetAllocatedIPsFunc func(ctx context.Context) (ip.AllocatedIPs, error) // GetLeaseFunc mocks the GetLease method. - GetLeaseFunc func(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) + GetLeaseFunc func(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) // IsInsideOfRangeFunc mocks the IsInsideOfRange method. IsInsideOfRangeFunc func(address string) error @@ -68,7 +68,7 @@ type IPAddressServiceMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vmip is the vmip argument value. - Vmip *virtv2.VirtualMachineIPAddress + Vmip *v1alpha2.VirtualMachineIPAddress } // IsInsideOfRange holds details about calls to the IsInsideOfRange method. IsInsideOfRange []struct { @@ -147,13 +147,13 @@ func (mock *IPAddressServiceMock) GetAllocatedIPsCalls() []struct { } // GetLease calls GetLeaseFunc. -func (mock *IPAddressServiceMock) GetLease(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { +func (mock *IPAddressServiceMock) GetLease(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { if mock.GetLeaseFunc == nil { panic("IPAddressServiceMock.GetLeaseFunc: method is nil but IPAddressService.GetLease was just called") } callInfo := struct { Ctx context.Context - Vmip *virtv2.VirtualMachineIPAddress + Vmip *v1alpha2.VirtualMachineIPAddress }{ Ctx: ctx, Vmip: vmip, @@ -170,11 +170,11 @@ func (mock *IPAddressServiceMock) GetLease(ctx context.Context, vmip *virtv2.Vir // len(mockedIPAddressService.GetLeaseCalls()) func (mock *IPAddressServiceMock) GetLeaseCalls() []struct { Ctx context.Context - Vmip *virtv2.VirtualMachineIPAddress + Vmip *v1alpha2.VirtualMachineIPAddress } { var calls []struct { Ctx context.Context - Vmip *virtv2.VirtualMachineIPAddress + Vmip *v1alpha2.VirtualMachineIPAddress } mock.lockGetLease.RLock() calls = mock.calls.GetLease diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go index bdd2ae9314..dd2e099a92 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/protection_handler.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -34,8 +34,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(_ context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) { - controllerutil.AddFinalizer(vmip, virtv2.FinalizerIPAddressCleanup) +func (h *ProtectionHandler) Handle(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) { + controllerutil.AddFinalizer(vmip, v1alpha2.FinalizerIPAddressCleanup) // 1. 
The vmip has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if vmip.GetDeletionTimestamp() == nil { @@ -49,10 +49,10 @@ func (h *ProtectionHandler) Handle(_ context.Context, vmip *virtv2.VirtualMachin } // 3. All checks have passed, the resource can be deleted. - controllerutil.RemoveFinalizer(vmip, virtv2.FinalizerIPAddressCleanup) + controllerutil.RemoveFinalizer(vmip, v1alpha2.FinalizerIPAddressCleanup) // 4. Remove legacy finalizer as well. It no longer attaches to new resources, but must be removed from old ones. - controllerutil.RemoveFinalizer(vmip, virtv2.FinalizerIPAddressProtection) + controllerutil.RemoveFinalizer(vmip, v1alpha2.FinalizerIPAddressProtection) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go b/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go index 41c7e9d143..23b5080e85 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/service/ip_address_service.go @@ -35,7 +35,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type IPAddressService struct { @@ -112,7 +112,7 @@ func (s IPAddressService) AllocateNewIP(allocatedIPs ip.AllocatedIPs) (string, e } func (s IPAddressService) GetAllocatedIPs(ctx context.Context) (ip.AllocatedIPs, error) { - var leases virtv2.VirtualMachineIPAddressLeaseList + var leases v1alpha2.VirtualMachineIPAddressLeaseList err := s.client.List(ctx, &leases) if err != nil { @@ -127,7 +127,7 @@ func (s IPAddressService) GetAllocatedIPs(ctx context.Context) (ip.AllocatedIPs, return allocatedIPs, nil } -func (s IPAddressService) GetLease(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) GetLease(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { // The IP address cannot be changed for a vmip. Once it has been assigned, it will remain the same. ipAddress := getAssignedIPAddress(vmip) if ipAddress != "" { @@ -139,9 +139,9 @@ func (s IPAddressService) GetLease(ctx context.Context, vmip *virtv2.VirtualMach return s.getLeaseByLabel(ctx, vmip) } -func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress string) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress string) (*v1alpha2.VirtualMachineIPAddressLease, error) { // 1. Trying to find the Lease in the local cache. 
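Aside, not part of the patch: the ProtectionHandler hunks above all follow controller-runtime's standard finalizer lifecycle. A minimal sketch of that pattern, with an illustrative finalizer name standing in for the real v1alpha2.FinalizerIPAddressCleanup constant:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const finalizerCleanup = "example.io/cleanup" // illustrative name only

// reconcileProtection keeps the finalizer while the object is alive and
// releases it only once deletion checks have passed, so the object can
// never be deleted without first passing through the controller.
func reconcileProtection(obj client.Object, mayDelete bool) {
	controllerutil.AddFinalizer(obj, finalizerCleanup)
	if obj.GetDeletionTimestamp() == nil {
		return // still alive: keep the protection in place
	}
	if mayDelete {
		controllerutil.RemoveFinalizer(obj, finalizerCleanup)
	}
}

func main() {
	cm := &corev1.ConfigMap{}
	reconcileProtection(cm, false)
	fmt.Println(cm.Finalizers) // [example.io/cleanup]
}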
- lease, err := object.FetchObject(ctx, types.NamespacedName{Name: ip.IPToLeaseName(ipAddress)}, s.client, &virtv2.VirtualMachineIPAddressLease{}) + lease, err := object.FetchObject(ctx, types.NamespacedName{Name: ip.IPToLeaseName(ipAddress)}, s.client, &v1alpha2.VirtualMachineIPAddressLease{}) if err != nil { return nil, fmt.Errorf("fetch lease in local cache: %w", err) } @@ -164,10 +164,10 @@ func (s IPAddressService) getLeaseByIPAddress(ctx context.Context, ipAddress str } } -func (s IPAddressService) getLeaseByLabel(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddressLease, error) { +func (s IPAddressService) getLeaseByLabel(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddressLease, error) { // 1. Trying to find the Lease in the local cache. { - leases := &virtv2.VirtualMachineIPAddressLeaseList{} + leases := &v1alpha2.VirtualMachineIPAddressLeaseList{} err := s.client.List(ctx, leases, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{annotations.LabelVirtualMachineIPAddressUID: string(vmip.GetUID())}), }) @@ -235,7 +235,7 @@ func isFirstLastIP(ip netip.Addr, cidr netip.Prefix) (bool, error) { return last.Equal(ip.AsSlice()), nil } -func getAssignedIPAddress(vmip *virtv2.VirtualMachineIPAddress) string { +func getAssignedIPAddress(vmip *v1alpha2.VirtualMachineIPAddress) string { if vmip.Spec.StaticIP != "" { return vmip.Spec.StaticIP } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go b/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go index 899b0d9187..4b53a743c4 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/service/reference.go @@ -17,10 +17,10 @@ limitations under the License. 
package service import ( - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func HasReference(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.VirtualMachineIPAddressLease) bool { +func HasReference(vmip *v1alpha2.VirtualMachineIPAddress, lease *v1alpha2.VirtualMachineIPAddressLease) bool { if vmip == nil || lease == nil { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go index 9a39f09d08..6324e50316 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/bind_step.go @@ -26,18 +26,18 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/ip" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) type BindStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease cb *conditions.ConditionBuilder } func NewBindStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, cb *conditions.ConditionBuilder, ) *BindStep { return &BindStep{ @@ -46,7 +46,7 @@ func NewBindStep( } } -func (s BindStep) Take(_ context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s BindStep) Take(_ context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { // 1. The required Lease already exists; set its address in the vmip status. 
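Aside, not part of the patch: BindStep below recovers the reserved address from the Lease object name via ip.LeaseNameToIP, so the Lease name must encode the IP reversibly. The real encoding lives in pkg/common/ip; this sketch assumes a plausible "ip-a-b-c-d" scheme purely for illustration and may differ from the actual helpers:

package main

import (
	"fmt"
	"strings"
)

// Assumed encoding: "10.66.10.1" <-> "ip-10-66-10-1". The actual
// ip.IPToLeaseName / ip.LeaseNameToIP implementations may differ.
func ipToLeaseName(address string) string {
	return "ip-" + strings.ReplaceAll(address, ".", "-")
}

func leaseNameToIP(name string) string {
	return strings.ReplaceAll(strings.TrimPrefix(name, "ip-"), "-", ".")
}

func main() {
	name := ipToLeaseName("10.66.10.1")
	fmt.Println(name, "->", leaseNameToIP(name)) // ip-10-66-10-1 -> 10.66.10.1
}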
if s.lease != nil { vmip.Status.Address = ip.LeaseNameToIP(s.lease.Name) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go index 2f3d7fad41..7b75eec2a2 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/create_lease_step.go @@ -35,7 +35,7 @@ import ( intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) @@ -46,7 +46,7 @@ type Allocator interface { } type CreateLeaseStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease allocator Allocator client client.Client cb *conditions.ConditionBuilder @@ -54,7 +54,7 @@ type CreateLeaseStep struct { } func NewCreateLeaseStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, allocator Allocator, client client.Client, cb *conditions.ConditionBuilder, @@ -69,7 +69,7 @@ func NewCreateLeaseStep( } } -func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s CreateLeaseStep) Take(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { if s.lease != nil { err := fmt.Errorf("the VirtualMachineIPAddressLease %q already exists, no need to create a new one, please report this as a bug", vmip.Name) s.cb. @@ -85,7 +85,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressLeaseLost). Message(fmt.Sprintf("The VirtualMachineIPAddressLease %q doesn't exist.", ip.IPToLeaseName(vmip.Status.Address))) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonFailed, fmt.Sprintf("The VirtualMachineIPAddressLease %q is lost.", ip.IPToLeaseName(vmip.Status.Address))) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonFailed, fmt.Sprintf("The VirtualMachineIPAddressLease %q is lost.", ip.IPToLeaseName(vmip.Status.Address))) return &reconcile.Result{}, nil } @@ -101,7 +101,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP // 2. Allocate a new IP address or use the IP address provided in the spec. var ipAddress string - if vmip.Spec.Type == virtv2.VirtualMachineIPAddressTypeStatic { + if vmip.Spec.Type == v1alpha2.VirtualMachineIPAddressTypeStatic { ipAddress = vmip.Spec.StaticIP } else { ipAddress, err = s.allocator.AllocateNewIP(allocatedIPs) @@ -124,7 +124,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressIsOutOfTheValidRange). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonFailed, msg) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonFailed, msg) return &reconcile.Result{}, nil } @@ -143,7 +143,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). 
Reason(vmipcondition.VirtualMachineIPAddressLeaseAlreadyExists). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeWarning, virtv2.ReasonBound, msg) + s.recorder.Event(vmip, corev1.EventTypeWarning, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil } @@ -158,7 +158,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressLeaseNotReady). Message(msg) - s.recorder.Event(vmip, corev1.EventTypeNormal, virtv2.ReasonBound, msg) + s.recorder.Event(vmip, corev1.EventTypeNormal, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil case k8serrors.IsAlreadyExists(err): // The cache is outdated and not keeping up with the state in the cluster. @@ -179,16 +179,16 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIP } } -func buildVirtualMachineIPAddressLease(vmip *virtv2.VirtualMachineIPAddress, ipAddress string) *virtv2.VirtualMachineIPAddressLease { - return &virtv2.VirtualMachineIPAddressLease{ +func buildVirtualMachineIPAddressLease(vmip *v1alpha2.VirtualMachineIPAddress, ipAddress string) *v1alpha2.VirtualMachineIPAddressLease { + return &v1alpha2.VirtualMachineIPAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineIPAddressUID: string(vmip.GetUID()), }, Name: ip.IPToLeaseName(ipAddress), }, - Spec: virtv2.VirtualMachineIPAddressLeaseSpec{ - VirtualMachineIPAddressRef: &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + Spec: v1alpha2.VirtualMachineIPAddressLeaseSpec{ + VirtualMachineIPAddressRef: &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Name: vmip.Name, Namespace: vmip.Namespace, }, diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go b/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go index 209caff855..437d9d9782 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/step/take_lease_step.go @@ -30,19 +30,19 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" ) type TakeLeaseStep struct { - lease *virtv2.VirtualMachineIPAddressLease + lease *v1alpha2.VirtualMachineIPAddressLease client client.Client cb *conditions.ConditionBuilder recorder eventrecord.EventRecorderLogger } func NewTakeLeaseStep( - lease *virtv2.VirtualMachineIPAddressLease, + lease *v1alpha2.VirtualMachineIPAddressLease, client client.Client, cb *conditions.ConditionBuilder, recorder eventrecord.EventRecorderLogger, @@ -55,7 +55,7 @@ func NewTakeLeaseStep( } } -func (s TakeLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*reconcile.Result, error) { +func (s TakeLeaseStep) Take(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*reconcile.Result, error) { if s.lease == nil { return nil, nil } @@ -78,12 +78,12 @@ func (s TakeLeaseStep) Take(ctx context.Context, vmip *virtv2.VirtualMachineIPAd s.cb. Status(metav1.ConditionFalse). Reason(vmipcondition.VirtualMachineIPAddressLeaseNotReady). 
- Message(fmt.Sprintf("The VirtualMachineIPAddressLease %q alrady has a reference to another VirtualMachineIPAddress.", s.lease.Name)) + Message(fmt.Sprintf("The VirtualMachineIPAddressLease %q already has a reference to another VirtualMachineIPAddress.", s.lease.Name)) return &reconcile.Result{}, nil } // All checks have passed, the Lease is unoccupied, and it can be taken. - s.lease.Spec.VirtualMachineIPAddressRef = &virtv2.VirtualMachineIPAddressLeaseIpAddressRef{ + s.lease.Spec.VirtualMachineIPAddressRef = &v1alpha2.VirtualMachineIPAddressLeaseIpAddressRef{ Name: vmip.Name, Namespace: vmip.Namespace, } diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go index 014eafea53..56bb41faf9 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vm_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -44,16 +44,16 @@ type VirtualMachineWatcher struct { func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { return &VirtualMachineWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineKind)), } } func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { oldVM := e.ObjectOld newVM := e.ObjectNew return oldVM.Spec.VirtualMachineIPAddress != newVM.Spec.VirtualMachineIPAddress || @@ -67,7 +67,7 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { var requests []reconcile.Request vmipNames := make(map[string]struct{}) @@ -80,7 +80,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V vmipNames[vm.Status.VirtualMachineIPAddress] = struct{}{} } - vmips := &virtv2.VirtualMachineIPAddressList{} + vmips := &v1alpha2.VirtualMachineIPAddressList{} err := w.client.List(ctx, vmips, client.InNamespace(vm.Namespace), &client.MatchingFields{ indexer.IndexFieldVMIPByVM: vm.Name, }) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go index e4bf9f47ba..462d17c607 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmip_watcher.go @@ -24,7 +24,7 
@@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressWatcher struct{} @@ -35,8 +35,8 @@ func NewVirtualMachineIPAddressWatcher() *VirtualMachineIPAddressWatcher { func (w VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddress{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineIPAddress]{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddress{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineIPAddress]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualMachineIPAddress: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go index 25b0acbb03..4567b1ce02 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmip/internal/watcher/vmiplease_watcher.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressLeaseWatcher struct { @@ -41,13 +41,13 @@ type VirtualMachineIPAddressLeaseWatcher struct { func NewVirtualMachineIPAddressLeaseWatcher(client client.Client) *VirtualMachineIPAddressLeaseWatcher { return &VirtualMachineIPAddressLeaseWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineIPAddressLeaseKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineIPAddressLeaseKind)), } } func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddressLease{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddressLease{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -56,7 +56,7 @@ func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr cont return nil } -func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (requests []reconcile.Request) { +func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (requests []reconcile.Request) { var opts client.ListOptions vmipRef := lease.Spec.VirtualMachineIPAddressRef if vmipRef != nil && vmipRef.Namespace != "" { @@ -72,7 +72,7 @@ func (w VirtualMachineIPAddressLeaseWatcher) enqueueRequests(ctx context.Context opts.Namespace = vmipRef.Namespace } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err := w.client.List(ctx, &vmips, &opts) if err != nil { w.logger.Error(fmt.Sprintf("failed to list vmips: %s", err)) diff --git a/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go b/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go index 9c01415c0f..11d6f26bcf 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go +++ 
b/images/virtualization-artifact/pkg/controller/vmip/vmip_reconciler.go @@ -30,11 +30,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmip/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (reconcile.Result, error) + Handle(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (reconcile.Result, error) } type Watcher interface { @@ -100,10 +100,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineIPAddress { - return &virtv2.VirtualMachineIPAddress{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineIPAddress { + return &v1alpha2.VirtualMachineIPAddress{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineIPAddress) virtv2.VirtualMachineIPAddressStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineIPAddress) v1alpha2.VirtualMachineIPAddressStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmip/vmip_webhook.go b/images/virtualization-artifact/pkg/controller/vmip/vmip_webhook.go index 49f99b77ec..89a78edb66 100644 --- a/images/virtualization-artifact/pkg/controller/vmip/vmip_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vmip/vmip_webhook.go @@ -82,7 +82,7 @@ func (v *Validator) ValidateCreate(ctx context.Context, obj runtime.Object) (adm return warnings, nil } -func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { +func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { oldVmip, ok := oldObj.(*v1alpha2.VirtualMachineIPAddress) if !ok { return nil, fmt.Errorf("expected an old VirtualMachineIP but got a %T", oldObj) @@ -103,6 +103,20 @@ func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Obj return nil, fmt.Errorf("error validating VirtualMachineIP update: %w", err) } + var warnings admission.Warnings + + if newVmip.Spec.StaticIP != "" && oldVmip.Spec.StaticIP != newVmip.Spec.StaticIP { + err = v.validateAllocatedIPAddresses(ctx, newVmip.Spec.StaticIP) + switch { + case err == nil: + // OK. 
+ case errors.Is(err, service.ErrIPAddressOutOfRange): + warnings = append(warnings, fmt.Sprintf("The requested address %s is out of the valid range", newVmip.Spec.StaticIP)) + default: + return nil, err + } + } + boundCondition, _ := conditions.GetCondition(vmipcondition.BoundType, oldVmip.Status.Conditions) if boundCondition.Status == metav1.ConditionTrue { if oldVmip.Spec.Type == v1alpha2.VirtualMachineIPAddressTypeAuto && newVmip.Spec.Type == v1alpha2.VirtualMachineIPAddressTypeStatic { @@ -123,7 +137,7 @@ func (v *Validator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Obj } } - return nil, nil + return warnings, nil } func (v *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go index a9e4986945..300f37cca5 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/lifecycle_handler.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -47,11 +47,11 @@ func NewLifecycleHandler(client client.Client, recorder eventrecord.EventRecorde } } -func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmiplcondition.BoundType).Generation(lease.GetGeneration()) vmipKey := types.NamespacedName{Name: lease.Spec.VirtualMachineIPAddressRef.Name, Namespace: lease.Spec.VirtualMachineIPAddressRef.Namespace} - vmip, err := object.FetchObject(ctx, vmipKey, h.client, &virtv2.VirtualMachineIPAddress{}) + vmip, err := object.FetchObject(ctx, vmipKey, h.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { cb. Status(metav1.ConditionUnknown). @@ -64,10 +64,10 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach // Lease is Bound, if there is a vmip with matched Ref. if isBound(lease, vmip) { annotations.AddLabel(lease, annotations.LabelVirtualMachineIPAddressUID, string(vmip.UID)) - if lease.Status.Phase != virtv2.VirtualMachineIPAddressLeasePhaseBound { - h.recorder.Eventf(lease, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineIPAddressLease is bound to \"%s/%s\".", vmip.Namespace, vmip.Name) + if lease.Status.Phase != v1alpha2.VirtualMachineIPAddressLeasePhaseBound { + h.recorder.Eventf(lease, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineIPAddressLease is bound to \"%s/%s\".", vmip.Namespace, vmip.Name) } - lease.Status.Phase = virtv2.VirtualMachineIPAddressLeasePhaseBound + lease.Status.Phase = v1alpha2.VirtualMachineIPAddressLeasePhaseBound cb. Status(metav1.ConditionTrue). Reason(vmiplcondition.Bound). 
@@ -79,10 +79,10 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach lease.Spec.VirtualMachineIPAddressRef.Name = "" } - if lease.Status.Phase != virtv2.VirtualMachineIPAddressLeasePhaseReleased { - h.recorder.Eventf(lease, corev1.EventTypeWarning, virtv2.ReasonReleased, "VirtualMachineIPAddressLease is released.") + if lease.Status.Phase != v1alpha2.VirtualMachineIPAddressLeasePhaseReleased { + h.recorder.Eventf(lease, corev1.EventTypeWarning, v1alpha2.ReasonReleased, "VirtualMachineIPAddressLease is released.") } - lease.Status.Phase = virtv2.VirtualMachineIPAddressLeasePhaseReleased + lease.Status.Phase = v1alpha2.VirtualMachineIPAddressLeasePhaseReleased cb. Status(metav1.ConditionFalse). Reason(vmiplcondition.Released). @@ -93,7 +93,7 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach return reconcile.Result{}, nil } -func isBound(lease *virtv2.VirtualMachineIPAddressLease, vmip *virtv2.VirtualMachineIPAddress) bool { +func isBound(lease *v1alpha2.VirtualMachineIPAddressLease, vmip *v1alpha2.VirtualMachineIPAddress) bool { if lease == nil || vmip == nil { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go index 063339405f..227c020067 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/protection_handler.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -33,8 +33,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(_ context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { - controllerutil.AddFinalizer(lease, virtv2.FinalizerIPAddressLeaseCleanup) +func (h *ProtectionHandler) Handle(_ context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { + controllerutil.AddFinalizer(lease, v1alpha2.FinalizerIPAddressLeaseCleanup) // 1. The lease has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if lease.GetDeletionTimestamp() == nil { @@ -48,6 +48,6 @@ func (h *ProtectionHandler) Handle(_ context.Context, lease *virtv2.VirtualMachi } // 3. All checks have passed, the resource can be deleted. 
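Aside, not part of the patch: the lease LifecycleHandler above settles each Lease into one of two phases: Bound while a vmip still matches its VirtualMachineIPAddressRef, Released otherwise (clearing the stale ref name). A minimal decision sketch with simplified stand-in types:

package main

import "fmt"

type ipAddressRef struct{ Name, Namespace string }

// isBound mirrors the handler's check: a Lease is Bound only while a
// live vmip matches the Lease's VirtualMachineIPAddressRef.
func isBound(leaseRef *ipAddressRef, vmipName, vmipNamespace string, vmipExists bool) bool {
	return vmipExists && leaseRef != nil &&
		leaseRef.Name == vmipName && leaseRef.Namespace == vmipNamespace
}

func main() {
	ref := &ipAddressRef{Name: "my-vmip", Namespace: "ns"}
	fmt.Println(isBound(ref, "my-vmip", "ns", true))  // true: phase Bound
	fmt.Println(isBound(ref, "my-vmip", "ns", false)) // false: phase Released
}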
- controllerutil.RemoveFinalizer(lease, virtv2.FinalizerIPAddressLeaseCleanup) + controllerutil.RemoveFinalizer(lease, v1alpha2.FinalizerIPAddressLeaseCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go index 787ba94cce..2431f10463 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/retention_handler.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" ) @@ -45,7 +45,7 @@ func NewRetentionHandler(retentionDuration time.Duration, client client.Client) } } -func (h *RetentionHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) { +func (h *RetentionHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler(retentionHandlerName)) // Make sure that the Lease can be deleted only if it has already been verified that it is indeed Released. diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go index 522002e028..e1305f185f 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmip_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/ip" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressWatcher struct { @@ -43,17 +43,17 @@ type VirtualMachineIPAddressWatcher struct { func NewVirtualMachineIPAddressWatcher(client client.Client) *VirtualMachineIPAddressWatcher { return &VirtualMachineIPAddressWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineIPAddressKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineIPAddressKind)), client: client, } } func (w VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddress{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddress{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachineIPAddress]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineIPAddress]) bool { return false }, + predicate.TypedFuncs[*v1alpha2.VirtualMachineIPAddress]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineIPAddress]) bool { return false }, }, ), ); err != nil { @@ -62,8 +62,8 @@ func (w VirtualMachineIPAddressWatcher) Watch(mgr manager.Manager, ctr controlle return nil } -func (w VirtualMachineIPAddressWatcher) enqueueRequests(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (requests []reconcile.Request) { - 
var leases virtv2.VirtualMachineIPAddressLeaseList +func (w VirtualMachineIPAddressWatcher) enqueueRequests(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (requests []reconcile.Request) { + var leases v1alpha2.VirtualMachineIPAddressLeaseList err := w.client.List(ctx, &leases, &client.ListOptions{}) if err != nil { w.logger.Error(fmt.Sprintf("failed to list leases: %s", err)) diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go index 49e651035d..03d5aa3b13 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/internal/watcher/vmiplease_watcher.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressLeaseWatcher struct{} @@ -35,8 +35,8 @@ func NewVirtualMachineIPAddressLeaseWatcher() *VirtualMachineIPAddressLeaseWatch func (w VirtualMachineIPAddressLeaseWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineIPAddressLease{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineIPAddressLease]{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineIPAddressLease{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineIPAddressLease]{}, ), ); err != nil { return fmt.Errorf("error setting watch on VirtualMachineIPAddressLease: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go b/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go index a960883db8..fd3e32f0c4 100644 --- a/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmiplease/vmiplease_reconciler.go @@ -29,11 +29,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmiplease/internal/watcher" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, lease *virtv2.VirtualMachineIPAddressLease) (reconcile.Result, error) + Handle(ctx context.Context, lease *v1alpha2.VirtualMachineIPAddressLease) (reconcile.Result, error) } type Watcher interface { @@ -83,7 +83,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return h.Handle(ctx, lease.Changed()) }) rec.SetResourceUpdater(func(ctx context.Context) error { - var specToUpdate *virtv2.VirtualMachineIPAddressLeaseSpec + var specToUpdate *v1alpha2.VirtualMachineIPAddressLeaseSpec if !reflect.DeepEqual(lease.Current().Spec, lease.Changed().Spec) { specToUpdate = lease.Changed().Spec.DeepCopy() } @@ -109,10 +109,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineIPAddressLease { - return &virtv2.VirtualMachineIPAddressLease{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineIPAddressLease { + return &v1alpha2.VirtualMachineIPAddressLease{} } -func (r *Reconciler) 
statusGetter(obj *virtv2.VirtualMachineIPAddressLease) virtv2.VirtualMachineIPAddressLeaseStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineIPAddressLease) v1alpha2.VirtualMachineIPAddressLeaseStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/attached_handler.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/attached_handler.go index 2187ae60eb..dd0a57eca5 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/attached_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/attached_handler.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -43,7 +43,7 @@ func NewAttachedHandler(recorder eventrecord.EventRecorderLogger, client client. } } -func (h *AttachedHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) { +func (h *AttachedHandler) Handle(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmmaccondition.AttachedType).Generation(vmmac.GetGeneration()) vm, err := h.getAttachedVirtualMachine(ctx, vmmac) @@ -63,7 +63,7 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachi Reason(vmmaccondition.VirtualMachineNotFound). Message("VirtualMachineMACAddress is not attached to any virtual machine.") conditions.SetCondition(cb, &vmmac.Status.Conditions) - h.recorder.Event(vmmac, corev1.EventTypeWarning, virtv2.ReasonNotAttached, "VirtualMachineMACAddress is not attached to any virtual machine.") + h.recorder.Event(vmmac, corev1.EventTypeWarning, v1alpha2.ReasonNotAttached, "VirtualMachineMACAddress is not attached to any virtual machine.") return reconcile.Result{}, nil } @@ -73,13 +73,13 @@ func (h *AttachedHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachi Reason(vmmaccondition.Attached). Message("") conditions.SetCondition(cb, &vmmac.Status.Conditions) - h.recorder.Eventf(vmmac, corev1.EventTypeNormal, virtv2.ReasonAttached, "VirtualMachineMACAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name) + h.recorder.Eventf(vmmac, corev1.EventTypeNormal, v1alpha2.ReasonAttached, "VirtualMachineMACAddress is attached to \"%s/%s\".", vm.Namespace, vm.Name) return reconcile.Result{}, nil } -func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachine, error) { - var vms virtv2.VirtualMachineList +func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachine, error) { + var vms v1alpha2.VirtualMachineList err := h.client.List(ctx, &vms, &client.ListOptions{Namespace: vmmac.Namespace}) if err != nil { return nil, fmt.Errorf("list vms: %w", err) @@ -88,7 +88,7 @@ func (h *AttachedHandler) getAttachedVirtualMachine(ctx context.Context, vmmac * // Return the first one for which the status matches. // If no status matches, return the first one for which the spec matches. 
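// Why status takes precedence (a reading of the surrounding logic, not stated in the source):
// vm.Status.Networks reflects an attachment the controller has actually realized, while the
// spec only declares intent, so a spec-only match likely means the attachment is still in progress.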
var found bool - var attachedVM *virtv2.VirtualMachine + var attachedVM *v1alpha2.VirtualMachine for _, vm := range vms.Items { for _, ns := range vm.Status.Networks { if ns.VirtualMachineMACAddressName == vmmac.Name { diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler.go index 7634820b13..ae922c8e38 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmmac/internal/step" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -50,7 +50,7 @@ func NewBoundHandler(macService MACAddressService, client client.Client, recorde } } -func (h *BoundHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) { +func (h *BoundHandler) Handle(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmmaccondition.BoundType).Generation(vmmac.Generation) defer func() { conditions.SetCondition(cb, &vmmac.Status.Conditions) }() @@ -75,7 +75,7 @@ func (h *BoundHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachineM ctx = logger.ToContext(ctx, log) } - return steptaker.NewStepTakers[*virtv2.VirtualMachineMACAddress]( + return steptaker.NewStepTakers[*v1alpha2.VirtualMachineMACAddress]( step.NewBindStep(lease, cb), step.NewCreateLeaseStep(lease, h.macService, h.client, cb, h.recorder), ).Run(ctx, vmmac) diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler_test.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler_test.go index de26eeac5e..6829bffbfb 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler_test.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/bound_handler_test.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/mac" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaclcondition" ) @@ -44,8 +44,8 @@ var _ = Describe("BoundHandler", func() { var ( scheme *runtime.Scheme ctx context.Context - vmmac *virtv2.VirtualMachineMACAddress - lease *virtv2.VirtualMachineMACAddressLease + vmmac *v1alpha2.VirtualMachineMACAddress + lease *v1alpha2.VirtualMachineMACAddressLease svc *MACAddressServiceMock recorderMock *eventrecord.EventRecorderLoggerMock ) @@ -53,20 +53,20 @@ var _ = Describe("BoundHandler", func() { BeforeEach(func() { scheme = runtime.NewScheme() Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) - Expect(virtv2.AddToScheme(scheme)).To(Succeed()) + Expect(v1alpha2.AddToScheme(scheme)).To(Succeed()) Expect(virtv1.AddToScheme(scheme)).To(Succeed()) ctx = context.TODO() - vmmac = 
&virtv2.VirtualMachineMACAddress{ + vmmac = &v1alpha2.VirtualMachineMACAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vmmac", Namespace: "ns", }, - Spec: virtv2.VirtualMachineMACAddressSpec{}, + Spec: v1alpha2.VirtualMachineMACAddressSpec{}, } - lease = &virtv2.VirtualMachineMACAddressLease{ + lease = &v1alpha2.VirtualMachineMACAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineMACAddressUID: string(vmmac.UID), @@ -77,7 +77,7 @@ var _ = Describe("BoundHandler", func() { } svc = &MACAddressServiceMock{ - GetLeaseFunc: func(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { + GetLeaseFunc: func(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { return nil, nil }, GetAllocatedAddressesFunc: func(ctx context.Context) (mac.AllocatedMACs, error) { @@ -100,7 +100,7 @@ var _ = Describe("BoundHandler", func() { k8sClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(). WithInterceptorFuncs(interceptor.Funcs{ Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error { - _, ok := obj.(*virtv2.VirtualMachineMACAddressLease) + _, ok := obj.(*v1alpha2.VirtualMachineMACAddressLease) Expect(ok).To(BeTrue()) leaseCreated = true return nil @@ -124,7 +124,7 @@ var _ = Describe("BoundHandler", func() { }) It("is lost", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { return nil, nil } h := NewBoundHandler(svc, nil, recorderMock) @@ -139,8 +139,8 @@ var _ = Describe("BoundHandler", func() { Context("Binding", func() { It("has non-bound lease with ref", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { - lease.Spec.VirtualMachineMACAddressRef = &virtv2.VirtualMachineMACAddressLeaseMACAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { + lease.Spec.VirtualMachineMACAddressRef = &v1alpha2.VirtualMachineMACAddressLeaseMACAddressRef{ Namespace: vmmac.Namespace, Name: vmmac.Name, } @@ -157,8 +157,8 @@ var _ = Describe("BoundHandler", func() { }) It("has bound lease", func() { - svc.GetLeaseFunc = func(_ context.Context, _ *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { - lease.Spec.VirtualMachineMACAddressRef = &virtv2.VirtualMachineMACAddressLeaseMACAddressRef{ + svc.GetLeaseFunc = func(_ context.Context, _ *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { + lease.Spec.VirtualMachineMACAddressRef = &v1alpha2.VirtualMachineMACAddressLeaseMACAddressRef{ Namespace: vmmac.Namespace, Name: vmmac.Name, } @@ -182,7 +182,7 @@ var _ = Describe("BoundHandler", func() { }) }) -func ExpectCondition(vmmac *virtv2.VirtualMachineMACAddress, status metav1.ConditionStatus, reason vmmaccondition.BoundReason, msgExists bool) { +func ExpectCondition(vmmac *v1alpha2.VirtualMachineMACAddress, status metav1.ConditionStatus, reason vmmaccondition.BoundReason, msgExists bool) { ready, _ := conditions.GetCondition(vmmaccondition.BoundType, vmmac.Status.Conditions) Expect(ready.Status).To(Equal(status)) 
Expect(ready.Reason).To(Equal(reason.String())) diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/deletion_handler.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/deletion_handler.go index a02860241d..6a3606186c 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/deletion_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/deletion_handler.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -40,7 +40,7 @@ func NewDeletionHandler(client client.Client) *DeletionHandler { } } -func (h *DeletionHandler) Handle(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) { +func (h *DeletionHandler) Handle(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) { attachedCondition, _ := conditions.GetCondition(vmmaccondition.AttachedType, vmmac.Status.Conditions) if attachedCondition.Status == metav1.ConditionTrue || !conditions.IsLastUpdated(attachedCondition, vmmac) { return reconcile.Result{}, nil diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/interface.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/interface.go index f765c2d172..1dc2fd6b5d 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/interface.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/interface.go @@ -20,13 +20,13 @@ import ( "context" "github.com/deckhouse/virtualization-controller/pkg/controller/vmmac/internal/step" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate moq -rm -out mock.go . 
MACAddressService type MACAddressService interface { - GetLease(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) + GetLease(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) step.Allocator } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/lifecycle_handler.go index 58bc01c0ac..3f9627982b 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/lifecycle_handler.go @@ -25,7 +25,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -39,22 +39,22 @@ func NewLifecycleHandler(recorder eventrecord.EventRecorderLogger) *LifecycleHan } } -func (h *LifecycleHandler) Handle(_ context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(_ context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) { boundCondition, _ := conditions.GetCondition(vmmaccondition.BoundType, vmmac.Status.Conditions) if boundCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(boundCondition, vmmac) { - vmmac.Status.Phase = virtv2.VirtualMachineMACAddressPhasePending + vmmac.Status.Phase = v1alpha2.VirtualMachineMACAddressPhasePending return reconcile.Result{}, nil } attachedCondition, _ := conditions.GetCondition(vmmaccondition.AttachedType, vmmac.Status.Conditions) if attachedCondition.Status != metav1.ConditionTrue || !conditions.IsLastUpdated(boundCondition, vmmac) { - if vmmac.Status.Phase != virtv2.VirtualMachineMACAddressPhaseBound { - h.recorder.Eventf(vmmac, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineMACAddress is bound.") + if vmmac.Status.Phase != v1alpha2.VirtualMachineMACAddressPhaseBound { + h.recorder.Eventf(vmmac, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineMACAddress is bound.") } - vmmac.Status.Phase = virtv2.VirtualMachineMACAddressPhaseBound + vmmac.Status.Phase = v1alpha2.VirtualMachineMACAddressPhaseBound return reconcile.Result{}, nil } - vmmac.Status.Phase = virtv2.VirtualMachineMACAddressPhaseAttached + vmmac.Status.Phase = v1alpha2.VirtualMachineMACAddressPhaseAttached return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/mock.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/mock.go index 9f6c050470..c215c9e54a 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/mock.go @@ -6,7 +6,7 @@ package internal import ( "context" "github.com/deckhouse/virtualization-controller/pkg/common/mac" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "sync" ) @@ -26,7 +26,7 @@ var _ MACAddressService = &MACAddressServiceMock{} // GetAllocatedAddressesFunc: func(ctx context.Context) (mac.AllocatedMACs, error) { // panic("mock out the GetAllocatedAddresses method") // }, -// GetLeaseFunc: func(ctx context.Context, vmmac 
*virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { +// GetLeaseFunc: func(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { // panic("mock out the GetLease method") // }, // } @@ -43,7 +43,7 @@ type MACAddressServiceMock struct { GetAllocatedAddressesFunc func(ctx context.Context) (mac.AllocatedMACs, error) // GetLeaseFunc mocks the GetLease method. - GetLeaseFunc func(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) + GetLeaseFunc func(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) // calls tracks calls to the methods. calls struct { @@ -62,7 +62,7 @@ type MACAddressServiceMock struct { // Ctx is the ctx argument value. Ctx context.Context // Vmmac is the vmmac argument value. - Vmmac *virtv2.VirtualMachineMACAddress + Vmmac *v1alpha2.VirtualMachineMACAddress } } lockAllocateNewAddress sync.RWMutex @@ -135,13 +135,13 @@ func (mock *MACAddressServiceMock) GetAllocatedAddressesCalls() []struct { } // GetLease calls GetLeaseFunc. -func (mock *MACAddressServiceMock) GetLease(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { +func (mock *MACAddressServiceMock) GetLease(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { if mock.GetLeaseFunc == nil { panic("MACAddressServiceMock.GetLeaseFunc: method is nil but MACAddressService.GetLease was just called") } callInfo := struct { Ctx context.Context - Vmmac *virtv2.VirtualMachineMACAddress + Vmmac *v1alpha2.VirtualMachineMACAddress }{ Ctx: ctx, Vmmac: vmmac, @@ -158,11 +158,11 @@ func (mock *MACAddressServiceMock) GetLease(ctx context.Context, vmmac *virtv2.V // len(mockedMACAddressService.GetLeaseCalls()) func (mock *MACAddressServiceMock) GetLeaseCalls() []struct { Ctx context.Context - Vmmac *virtv2.VirtualMachineMACAddress + Vmmac *v1alpha2.VirtualMachineMACAddress } { var calls []struct { Ctx context.Context - Vmmac *virtv2.VirtualMachineMACAddress + Vmmac *v1alpha2.VirtualMachineMACAddress } mock.lockGetLease.RLock() calls = mock.calls.GetLease diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/protection_handler.go index 680abf0506..e8333e9d2c 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/protection_handler.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -34,8 +34,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(_ context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) { - controllerutil.AddFinalizer(vmmac, virtv2.FinalizerMACAddressCleanup) +func (h *ProtectionHandler) Handle(_ context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) { + controllerutil.AddFinalizer(vmmac, v1alpha2.FinalizerMACAddressCleanup) // 1. 
The vmmac has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if vmmac.GetDeletionTimestamp() == nil { @@ -45,7 +45,7 @@ func (h *ProtectionHandler) Handle(_ context.Context, vmmac *virtv2.VirtualMachi // 2. It is necessary to keep vmmac protected until we can unequivocally ensure that the resource is not in the Attached state. attachedCondition, _ := conditions.GetCondition(vmmaccondition.AttachedType, vmmac.Status.Conditions) if attachedCondition.Status == metav1.ConditionFalse && conditions.IsLastUpdated(attachedCondition, vmmac) { - controllerutil.RemoveFinalizer(vmmac, virtv2.FinalizerMACAddressCleanup) + controllerutil.RemoveFinalizer(vmmac, v1alpha2.FinalizerMACAddressCleanup) } return reconcile.Result{}, nil diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service.go index ea2e53beca..93477169a6 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service.go @@ -36,7 +36,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const MaxCount int = 16777216 @@ -157,7 +157,7 @@ func (s MACAddressService) AllocateNewAddress(allocatedMACs mac.AllocatedMACs) ( } func (s MACAddressService) GetAllocatedAddresses(ctx context.Context) (mac.AllocatedMACs, error) { - var leases virtv2.VirtualMachineMACAddressLeaseList + var leases v1alpha2.VirtualMachineMACAddressLeaseList err := s.client.List(ctx, &leases) if err != nil { @@ -172,7 +172,7 @@ func (s MACAddressService) GetAllocatedAddresses(ctx context.Context) (mac.Alloc return allocatedMACs, nil } -func (s MACAddressService) GetLease(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { +func (s MACAddressService) GetLease(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { // The MAC address cannot be changed for a vmmac. Once it has been assigned, it will remain the same. macAddress := getAssignedMACAddress(vmmac) if macAddress != "" { @@ -184,9 +184,9 @@ func (s MACAddressService) GetLease(ctx context.Context, vmmac *virtv2.VirtualMa return s.getLeaseByLabel(ctx, vmmac) } -func (s MACAddressService) getLeaseByMACAddress(ctx context.Context, macAddress string) (*virtv2.VirtualMachineMACAddressLease, error) { +func (s MACAddressService) getLeaseByMACAddress(ctx context.Context, macAddress string) (*v1alpha2.VirtualMachineMACAddressLease, error) { // 1. Trying to find the Lease in the local cache. 
- lease, err := object.FetchObject(ctx, types.NamespacedName{Name: mac.AddressToLeaseName(macAddress)}, s.client, &virtv2.VirtualMachineMACAddressLease{}) + lease, err := object.FetchObject(ctx, types.NamespacedName{Name: mac.AddressToLeaseName(macAddress)}, s.client, &v1alpha2.VirtualMachineMACAddressLease{}) if err != nil { return nil, fmt.Errorf("fetch lease in local cache: %w", err) } @@ -209,10 +209,10 @@ func (s MACAddressService) getLeaseByMACAddress(ctx context.Context, macAddress } } -func (s MACAddressService) getLeaseByLabel(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*virtv2.VirtualMachineMACAddressLease, error) { +func (s MACAddressService) getLeaseByLabel(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*v1alpha2.VirtualMachineMACAddressLease, error) { // 1. Trying to find the Lease in the local cache. { - leases := &virtv2.VirtualMachineMACAddressLeaseList{} + leases := &v1alpha2.VirtualMachineMACAddressLeaseList{} err := s.client.List(ctx, leases, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{annotations.LabelVirtualMachineMACAddressUID: string(vmmac.GetUID())}), }) @@ -252,7 +252,7 @@ func (s MACAddressService) getLeaseByLabel(ctx context.Context, vmmac *virtv2.Vi } } -func getAssignedMACAddress(vmmac *virtv2.VirtualMachineMACAddress) string { +func getAssignedMACAddress(vmmac *v1alpha2.VirtualMachineMACAddress) string { if vmmac.Spec.Address != "" { return vmmac.Spec.Address } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service_test.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service_test.go index d3a486ca19..c2963b948e 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service_test.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/mac_address_service_test.go @@ -21,7 +21,7 @@ import ( . "github.com/onsi/gomega" "github.com/deckhouse/virtualization-controller/pkg/common/mac" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("MACAddressService", func() { @@ -69,16 +69,16 @@ var _ = Describe("MACAddressService", func() { }) It("should return error for a duplicate MAC address", func() { - ref := virtv2.VirtualMachineMACAddressLeaseMACAddressRef{ + ref := v1alpha2.VirtualMachineMACAddressLeaseMACAddressRef{ Name: "test", Namespace: "test", } - spec := virtv2.VirtualMachineMACAddressLeaseSpec{ + spec := v1alpha2.VirtualMachineMACAddressLeaseSpec{ VirtualMachineMACAddressRef: &ref, } - lease := &virtv2.VirtualMachineMACAddressLease{ + lease := &v1alpha2.VirtualMachineMACAddressLease{ Spec: spec, } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/reference.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/reference.go index 06240a2b13..4a26a3bce9 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/service/reference.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/service/reference.go @@ -17,10 +17,10 @@ limitations under the License. 
package service import ( - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func HasReference(vmmac *virtv2.VirtualMachineMACAddress, lease *virtv2.VirtualMachineMACAddressLease) bool { +func HasReference(vmmac *v1alpha2.VirtualMachineMACAddress, lease *v1alpha2.VirtualMachineMACAddressLease) bool { if vmmac == nil || lease == nil { return false } diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/step/bind_step.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/step/bind_step.go index 03a4235136..e7b227cdd5 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/step/bind_step.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/step/bind_step.go @@ -26,18 +26,18 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/mac" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vmmac/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaclcondition" ) type BindStep struct { - lease *virtv2.VirtualMachineMACAddressLease + lease *v1alpha2.VirtualMachineMACAddressLease cb *conditions.ConditionBuilder } func NewBindStep( - lease *virtv2.VirtualMachineMACAddressLease, + lease *v1alpha2.VirtualMachineMACAddressLease, cb *conditions.ConditionBuilder, ) *BindStep { return &BindStep{ @@ -46,7 +46,7 @@ func NewBindStep( } } -func (s BindStep) Take(_ context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*reconcile.Result, error) { +func (s BindStep) Take(_ context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*reconcile.Result, error) { // 1. The required Lease already exists; set its address in the vmmac status. 
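// Note: Lease object names encode the MAC address itself; mac.AddressToLeaseName and
// mac.LeaseNameToAddress appear to be inverses (judging by their use in this package), so the
// address can be recovered from an existing Lease without any extra lookup.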
if s.lease != nil { vmmac.Status.Address = mac.LeaseNameToAddress(s.lease.Name) diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/step/create_lease_step.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/step/create_lease_step.go index c221846cc7..726080fd2e 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/step/create_lease_step.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/step/create_lease_step.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaccondition" ) @@ -43,7 +43,7 @@ type Allocator interface { } type CreateLeaseStep struct { - lease *virtv2.VirtualMachineMACAddressLease + lease *v1alpha2.VirtualMachineMACAddressLease allocator Allocator client client.Client cb *conditions.ConditionBuilder @@ -51,7 +51,7 @@ type CreateLeaseStep struct { } func NewCreateLeaseStep( - lease *virtv2.VirtualMachineMACAddressLease, + lease *v1alpha2.VirtualMachineMACAddressLease, allocator Allocator, client client.Client, cb *conditions.ConditionBuilder, @@ -66,14 +66,14 @@ func NewCreateLeaseStep( } } -func (s CreateLeaseStep) Take(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (*reconcile.Result, error) { +func (s CreateLeaseStep) Take(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (*reconcile.Result, error) { // 1. Check if the MAC address has already been allocated but lost. if vmmac.Status.Address != "" { s.cb. Status(metav1.ConditionFalse). Reason(vmmaccondition.VirtualMachineMACAddressLeaseLost). Message(fmt.Sprintf("VirtualMachineMACAddress lost its lease: VirtualMachineMACAddressLease %q should exist", mac.AddressToLeaseName(vmmac.Status.Address))) - s.recorder.Event(vmmac, corev1.EventTypeWarning, virtv2.ReasonFailed, fmt.Sprintf("The VirtualMachineMACAddressLease %q is lost.", mac.AddressToLeaseName(vmmac.Status.Address))) + s.recorder.Event(vmmac, corev1.EventTypeWarning, v1alpha2.ReasonFailed, fmt.Sprintf("The VirtualMachineMACAddressLease %q is lost.", mac.AddressToLeaseName(vmmac.Status.Address))) return &reconcile.Result{}, nil } @@ -110,7 +110,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmmac *virtv2.VirtualMachineM Status(metav1.ConditionFalse). Reason(vmmaccondition.VirtualMachineMACAddressLeaseAlreadyExists). Message(msg) - s.recorder.Event(vmmac, corev1.EventTypeWarning, virtv2.ReasonBound, msg) + s.recorder.Event(vmmac, corev1.EventTypeWarning, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil } @@ -125,7 +125,7 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmmac *virtv2.VirtualMachineM Status(metav1.ConditionFalse). Reason(vmmaccondition.VirtualMachineMACAddressLeaseNotReady). Message(msg) - s.recorder.Event(vmmac, corev1.EventTypeNormal, virtv2.ReasonBound, msg) + s.recorder.Event(vmmac, corev1.EventTypeNormal, v1alpha2.ReasonBound, msg) return &reconcile.Result{}, nil case k8serrors.IsAlreadyExists(err): // The cache is outdated and not keeping up with the state in the cluster.
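// In other words: Create returned AlreadyExists even though the earlier cache lookup found
// nothing, so the Lease exists in the cluster but the informer cache has not observed it yet.
// Requeueing once the cache catches up is the usual remedy (an assumption; the branch body is
// not shown in this hunk).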
@@ -146,16 +146,16 @@ func (s CreateLeaseStep) Take(ctx context.Context, vmmac *virtv2.VirtualMachineM } } -func buildVirtualMachineMACAddressLease(vmmac *virtv2.VirtualMachineMACAddress, macAddress string) *virtv2.VirtualMachineMACAddressLease { - return &virtv2.VirtualMachineMACAddressLease{ +func buildVirtualMachineMACAddressLease(vmmac *v1alpha2.VirtualMachineMACAddress, macAddress string) *v1alpha2.VirtualMachineMACAddressLease { + return &v1alpha2.VirtualMachineMACAddressLease{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ annotations.LabelVirtualMachineMACAddressUID: string(vmmac.GetUID()), }, Name: mac.AddressToLeaseName(macAddress), }, - Spec: virtv2.VirtualMachineMACAddressLeaseSpec{ - VirtualMachineMACAddressRef: &virtv2.VirtualMachineMACAddressLeaseMACAddressRef{ + Spec: v1alpha2.VirtualMachineMACAddressLeaseSpec{ + VirtualMachineMACAddressRef: &v1alpha2.VirtualMachineMACAddressLeaseMACAddressRef{ Name: vmmac.Name, Namespace: vmmac.Namespace, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vm_watcher.go index 5b084ca682..1ba81ff49f 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vm_watcher.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -45,7 +45,7 @@ type VirtualMachineWatcher struct { func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { return &VirtualMachineWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineKind)), } } @@ -53,8 +53,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachine{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *virtv2.VirtualMachine) []reconcile.Request { + &v1alpha2.VirtualMachine{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vm *v1alpha2.VirtualMachine) []reconcile.Request { vmmacNames := make(map[string]struct{}) if len(vm.Status.Networks) > 0 { @@ -71,7 +71,7 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control } } - vmmacs := &virtv2.VirtualMachineMACAddressList{} + vmmacs := &v1alpha2.VirtualMachineMACAddressList{} if err := w.client.List(ctx, vmmacs, client.InNamespace(vm.Namespace), &client.MatchingFields{ indexer.IndexFieldVMMACByVM: vm.Name, }); err != nil { @@ -95,14 +95,14 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return requests }), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return true }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachine]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachine]) bool { return true }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { + UpdateFunc: 
func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { return !reflect.DeepEqual(e.ObjectOld.Status.Networks, e.ObjectNew.Status.Networks) }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmac_watcher.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmac_watcher.go index 4db1371d08..73978ebc41 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmac_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmac_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineMACAddressWatcher struct{} @@ -42,8 +42,8 @@ func (w *VirtualMachineMACAddressWatcher) Watch(mgr manager.Manager, ctr control if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineMACAddress{}, - handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, vmmac *virtv2.VirtualMachineMACAddress) []reconcile.Request { + &v1alpha2.VirtualMachineMACAddress{}, + handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ @@ -53,14 +53,14 @@ func (w *VirtualMachineMACAddressWatcher) Watch(mgr manager.Manager, ctr control }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineMACAddress]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineMACAddress]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineMACAddress]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineMACAddress]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineMACAddress]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmaclease_watcher.go b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmaclease_watcher.go index 97566ffd7d..f9f5eace77 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmaclease_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/internal/watcher/vmmaclease_watcher.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineMACAddressLeaseWatcher struct { @@ -43,7 +43,7 @@ type VirtualMachineMACAddressLeaseWatcher struct { func NewVirtualMachineMACAddressLeaseWatcher(client client.Client) *VirtualMachineMACAddressLeaseWatcher { return &VirtualMachineMACAddressLeaseWatcher{ client: client, - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineMACAddressLeaseKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineMACAddressLeaseKind)), } } @@ -51,8 +51,8 @@ func (w VirtualMachineMACAddressLeaseWatcher) Watch(mgr manager.Manager, ctr con if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineMACAddressLease{}, - 
handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, lease *virtv2.VirtualMachineMACAddressLease) (requests []reconcile.Request) { + &v1alpha2.VirtualMachineMACAddressLease{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) (requests []reconcile.Request) { vmmacRef := lease.Spec.VirtualMachineMACAddressRef if vmmacRef != nil { if vmmacRef.Name != "" && vmmacRef.Namespace != "" { @@ -69,14 +69,14 @@ func (w VirtualMachineMACAddressLeaseWatcher) Watch(mgr manager.Manager, ctr con return }), - predicate.TypedFuncs[*virtv2.VirtualMachineMACAddressLease]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineMACAddressLease]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return true }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return true }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return true }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmac/vmmac_reconciler.go b/images/virtualization-artifact/pkg/controller/vmmac/vmmac_reconciler.go index 048ade4e7c..75d5c2dfdb 100644 --- a/images/virtualization-artifact/pkg/controller/vmmac/vmmac_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmmac/vmmac_reconciler.go @@ -29,11 +29,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmmac/internal/watcher" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) (reconcile.Result, error) + Handle(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) (reconcile.Result, error) } type Watcher interface { @@ -96,10 +96,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineMACAddress { - return &virtv2.VirtualMachineMACAddress{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineMACAddress { + return &v1alpha2.VirtualMachineMACAddress{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineMACAddress) virtv2.VirtualMachineMACAddressStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineMACAddress) v1alpha2.VirtualMachineMACAddressStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/deletion_handler.go b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/deletion_handler.go index 5f2125b6dc..7d2b3d2343 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/deletion_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/deletion_handler.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + 
"github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaclcondition" ) @@ -42,7 +42,7 @@ func NewDeletionHandler(client client.Client) *DeletionHandler { } } -func (h *DeletionHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineMACAddressLease) (reconcile.Result, error) { +func (h *DeletionHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) (reconcile.Result, error) { boundCondition, _ := conditions.GetCondition(vmmaclcondition.BoundType, lease.Status.Conditions) if boundCondition.Status == metav1.ConditionFalse && conditions.IsLastUpdated(boundCondition, lease) { err := h.client.Delete(ctx, lease) diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/lifecycle_handler.go b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/lifecycle_handler.go index 8ff29d87bb..9fd5d9a316 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/lifecycle_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/lifecycle_handler.go @@ -31,7 +31,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaclcondition" ) @@ -49,7 +49,7 @@ func NewLifecycleHandler(client client.Client, recorder eventrecord.EventRecorde } } -func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineMACAddressLease) (reconcile.Result, error) { +func (h *LifecycleHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) (reconcile.Result, error) { if lease == nil { return reconcile.Result{}, nil } @@ -57,7 +57,7 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach cb := conditions.NewConditionBuilder(vmmaclcondition.BoundType).Generation(lease.GetGeneration()) vmmacKey := types.NamespacedName{Name: lease.Spec.VirtualMachineMACAddressRef.Name, Namespace: lease.Spec.VirtualMachineMACAddressRef.Namespace} - vmmac, err := object.FetchObject(ctx, vmmacKey, h.client, &virtv2.VirtualMachineMACAddress{}) + vmmac, err := object.FetchObject(ctx, vmmacKey, h.client, &v1alpha2.VirtualMachineMACAddress{}) if err != nil { cb. Status(metav1.ConditionUnknown). @@ -79,10 +79,10 @@ func (h *LifecycleHandler) Handle(ctx context.Context, lease *virtv2.VirtualMach // Valid MAC address was found: it matches both the lease name and the VirtualMachineMACAddressRef. // Now create a "Bound" confirmation: set label with MAC address UID and set condition to True. 
annotations.AddLabel(lease, annotations.LabelVirtualMachineMACAddressUID, string(vmmac.UID)) - if lease.Status.Phase != virtv2.VirtualMachineMACAddressLeasePhaseBound { - h.recorder.Eventf(lease, corev1.EventTypeNormal, virtv2.ReasonBound, "VirtualMachineMACAddressLease is bound to \"%s/%s\".", vmmac.Namespace, vmmac.Name) + if lease.Status.Phase != v1alpha2.VirtualMachineMACAddressLeasePhaseBound { + h.recorder.Eventf(lease, corev1.EventTypeNormal, v1alpha2.ReasonBound, "VirtualMachineMACAddressLease is bound to \"%s/%s\".", vmmac.Namespace, vmmac.Name) } - lease.Status.Phase = virtv2.VirtualMachineMACAddressLeasePhaseBound + lease.Status.Phase = v1alpha2.VirtualMachineMACAddressLeasePhaseBound cb. Status(metav1.ConditionTrue). Reason(vmmaclcondition.Bound). @@ -96,7 +96,7 @@ func (h *LifecycleHandler) Name() string { return LifecycleHandlerName } -func isBound(lease *virtv2.VirtualMachineMACAddressLease, vmmac *virtv2.VirtualMachineMACAddress) error { +func isBound(lease *v1alpha2.VirtualMachineMACAddressLease, vmmac *v1alpha2.VirtualMachineMACAddress) error { if vmmac == nil { return fmt.Errorf("cannot bind with empty MAC address") } diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/protection_handler.go b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/protection_handler.go index d8cbd90e16..c2b06ffedc 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/protection_handler.go +++ b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/protection_handler.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmmaclcondition" ) @@ -36,8 +36,8 @@ func NewProtectionHandler() *ProtectionHandler { return &ProtectionHandler{} } -func (h *ProtectionHandler) Handle(ctx context.Context, lease *virtv2.VirtualMachineMACAddressLease) (reconcile.Result, error) { - controllerutil.AddFinalizer(lease, virtv2.FinalizerMACAddressLeaseCleanup) +func (h *ProtectionHandler) Handle(ctx context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) (reconcile.Result, error) { + controllerutil.AddFinalizer(lease, v1alpha2.FinalizerMACAddressLeaseCleanup) // 1. The lease has a finalizer throughout its lifetime to prevent it from being deleted without prior processing by the controller. if lease.GetDeletionTimestamp() == nil { @@ -47,7 +47,7 @@ func (h *ProtectionHandler) Handle(ctx context.Context, lease *virtv2.VirtualMac // 2. It is necessary to protect the resource until we can unequivocally ensure that the resource is not in the Bound state.
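// The IsLastUpdated check below guards against acting on a stale condition: only a Bound
// condition recorded for the lease's current state may release the finalizer (inferred from
// how GetCondition and IsLastUpdated are paired throughout these handlers).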
boundCondition, _ := conditions.GetCondition(vmmaclcondition.BoundType, lease.Status.Conditions) if boundCondition.Status != metav1.ConditionTrue && conditions.IsLastUpdated(boundCondition, lease) { - controllerutil.RemoveFinalizer(lease, virtv2.FinalizerMACAddressLeaseCleanup) + controllerutil.RemoveFinalizer(lease, v1alpha2.FinalizerMACAddressLeaseCleanup) } return reconcile.Result{}, nil diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmac_watcher.go b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmac_watcher.go index 606a8a81a4..283fbe4dff 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmac_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmac_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/mac" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineMACAddressWatcher struct { @@ -43,7 +43,7 @@ type VirtualMachineMACAddressWatcher struct { func NewVirtualMachineMACAddressWatcher(client client.Client) *VirtualMachineMACAddressWatcher { return &VirtualMachineMACAddressWatcher{ - logger: log.Default().With("watcher", strings.ToLower(virtv2.VirtualMachineMACAddressKind)), + logger: log.Default().With("watcher", strings.ToLower(v1alpha2.VirtualMachineMACAddressKind)), client: client, } } @@ -52,11 +52,11 @@ func (w *VirtualMachineMACAddressWatcher) Watch(mgr manager.Manager, ctr control if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineMACAddress{}, - handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmmac *virtv2.VirtualMachineMACAddress) []reconcile.Request { + &v1alpha2.VirtualMachineMACAddress{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, vmmac *v1alpha2.VirtualMachineMACAddress) []reconcile.Request { var requests []reconcile.Request - var leases virtv2.VirtualMachineMACAddressLeaseList + var leases v1alpha2.VirtualMachineMACAddressLeaseList if err := w.client.List(ctx, &leases, &client.ListOptions{}); err != nil { w.logger.Error(fmt.Sprintf("failed to list leases: %s", err)) return nil @@ -73,14 +73,14 @@ func (w *VirtualMachineMACAddressWatcher) Watch(mgr manager.Manager, ctr control return requests }), - predicate.TypedFuncs[*virtv2.VirtualMachineMACAddress]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineMACAddress]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineMACAddress]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return false }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineMACAddress]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineMACAddress]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineMACAddress]) bool { return true }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmaclease_watcher.go b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmaclease_watcher.go index 430c8c6b81..f521a7bf27 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmaclease_watcher.go +++ 
b/images/virtualization-artifact/pkg/controller/vmmaclease/internal/watcher/vmmaclease_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineMACAddressLeaseWatcher struct{} @@ -42,8 +42,8 @@ func (w *VirtualMachineMACAddressLeaseWatcher) Watch(mgr manager.Manager, ctr co if err := ctr.Watch( source.Kind( mgr.GetCache(), - &virtv2.VirtualMachineMACAddressLease{}, - handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, lease *virtv2.VirtualMachineMACAddressLease) []reconcile.Request { + &v1alpha2.VirtualMachineMACAddressLease{}, + handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ @@ -53,14 +53,14 @@ func (w *VirtualMachineMACAddressLeaseWatcher) Watch(mgr manager.Manager, ctr co }, } }), - predicate.TypedFuncs[*virtv2.VirtualMachineMACAddressLease]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineMACAddressLease]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return true }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineMACAddressLease]) bool { + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineMACAddressLease]) bool { return true }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmmaclease/vmmaclease_reconciler.go b/images/virtualization-artifact/pkg/controller/vmmaclease/vmmaclease_reconciler.go index 51d77afda7..8ca1207535 100644 --- a/images/virtualization-artifact/pkg/controller/vmmaclease/vmmaclease_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmmaclease/vmmaclease_reconciler.go @@ -29,11 +29,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmmaclease/internal/watcher" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, lease *virtv2.VirtualMachineMACAddressLease) (reconcile.Result, error) + Handle(ctx context.Context, lease *v1alpha2.VirtualMachineMACAddressLease) (reconcile.Result, error) Name() string } @@ -84,7 +84,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return h.Handle(ctx, lease.Changed()) }) rec.SetResourceUpdater(func(ctx context.Context) error { - var specToUpdate *virtv2.VirtualMachineMACAddressLeaseSpec + var specToUpdate *v1alpha2.VirtualMachineMACAddressLeaseSpec if !reflect.DeepEqual(lease.Current().Spec, lease.Changed().Spec) { specToUpdate = lease.Changed().Spec.DeepCopy() } @@ -110,10 +110,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return rec.Reconcile(ctx) } -func (r *Reconciler) factory() *virtv2.VirtualMachineMACAddressLease { - return &virtv2.VirtualMachineMACAddressLease{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineMACAddressLease { + return 
&v1alpha2.VirtualMachineMACAddressLease{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineMACAddressLease) virtv2.VirtualMachineMACAddressLeaseStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineMACAddressLease) v1alpha2.VirtualMachineMACAddressLeaseStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go index 45a98fc617..a14134b319 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion.go @@ -24,7 +24,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -39,7 +39,7 @@ func NewDeletionHandler(migration *service.MigrationService) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachineOperation) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (reconcile.Result, error) { if vmop == nil { return reconcile.Result{}, nil } @@ -48,7 +48,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine if vmop.DeletionTimestamp.IsZero() { log.Debug("Add cleanup finalizer") - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } @@ -70,7 +70,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine return reconcile.Result{}, nil } - controllerutil.RemoveFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.RemoveFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go index 83e9e33298..8ab21663eb 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/deletion_test.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("DeletionHandler", func() { @@ -41,7 +41,7 @@ var _ = Describe("DeletionHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] ) AfterEach(func() { @@ -58,24 +58,24 @@ var _ = Describe("DeletionHandler", func() { Expect(err).NotTo(HaveOccurred()) } - newVmop := func(phase virtv2.VMOPPhase, opts ...vmopbuilder.Option) 
*virtv2.VirtualMachineOperation { + newVmop := func(phase v1alpha2.VMOPPhase, opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { vmop := vmopbuilder.NewEmpty(name, namespace) vmop.Status.Phase = phase vmopbuilder.ApplyOptions(vmop, opts...) return vmop } - DescribeTable("Should be protected", func(phase virtv2.VMOPPhase, protect bool) { - vmop := newVmop(phase, vmopbuilder.WithType(virtv2.VMOPTypeEvict)) + DescribeTable("Should be protected", func(phase v1alpha2.VMOPPhase, protect bool) { + vmop := newVmop(phase, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict)) fakeClient, srv = setupEnvironment(vmop) reconcile() - newVMOP := &virtv2.VirtualMachineOperation{} + newVMOP := &v1alpha2.VirtualMachineOperation{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vmop), newVMOP) Expect(err).NotTo(HaveOccurred()) - updated := controllerutil.AddFinalizer(newVMOP, virtv2.FinalizerVMOPCleanup) + updated := controllerutil.AddFinalizer(newVMOP, v1alpha2.FinalizerVMOPCleanup) if protect { Expect(updated).To(BeFalse()) @@ -83,16 +83,16 @@ var _ = Describe("DeletionHandler", func() { Expect(updated).To(BeTrue()) } }, - Entry("VMOP Evict 1", virtv2.VMOPPhasePending, true), - Entry("VMOP Evict 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Evict 3", virtv2.VMOPPhaseCompleted, true), + Entry("VMOP Evict 1", v1alpha2.VMOPPhasePending, true), + Entry("VMOP Evict 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Evict 3", v1alpha2.VMOPPhaseCompleted, true), ) Context("Migration", func() { - DescribeTable("Should cleanup migration", func(vmop *virtv2.VirtualMachineOperation, mig *virtv1.VirtualMachineInstanceMigration, shouldExist bool) { + DescribeTable("Should cleanup migration", func(vmop *v1alpha2.VirtualMachineOperation, mig *virtv1.VirtualMachineInstanceMigration, shouldExist bool) { expectLength := 1 if !shouldExist { - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) vmop.DeletionTimestamp = ptr.To(metav1.Now()) expectLength = 0 } @@ -106,11 +106,11 @@ var _ = Describe("DeletionHandler", func() { Expect(len(migs.Items)).To(Equal(expectLength)) }, Entry("VMOP Evict 1", - newVmop(virtv2.VMOPPhaseInProgress, vmopbuilder.WithType(virtv2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), + newVmop(v1alpha2.VMOPPhaseInProgress, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), newSimpleMigration("vmop-"+name, namespace, "test-vm"), true, ), Entry("VMOP Evict 2", - newVmop(virtv2.VMOPPhaseCompleted, vmopbuilder.WithType(virtv2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), + newVmop(v1alpha2.VMOPPhaseCompleted, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine("test-vm")), newSimpleMigration("vmop-"+name, namespace, "test-vm"), false, ), ) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go index b6f9440b1f..e9c0b2ada9 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle.go @@ -37,7 +37,6 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/livemigration" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" - 
"github.com/deckhouse/virtualization/api/core/v1alpha2/vmbdacondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -120,7 +119,7 @@ func (h LifecycleHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMach // 3. Operation already in progress. Check if the operation is completed. // Synchronize conditions to the VMOP. - if isOperationInProgress(vmop) { + if commonvmop.IsOperationInProgress(vmop) { log.Debug("Operation in progress, check if VM is completed", "vm.phase", vm.Status.Phase, "vmop.phase", vmop.Status.Phase) return reconcile.Result{}, h.syncOperationComplete(ctx, vmop) } @@ -344,17 +343,15 @@ func (h LifecycleHandler) areAnyRWOHotplugDisks(ctx context.Context, vm *v1alpha return false, err } - var attached []*v1alpha2.VirtualMachineBlockDeviceAttachment + var vmbdas []*v1alpha2.VirtualMachineBlockDeviceAttachment for _, vmbda := range vmbdaList.Items { if vmbda.Spec.BlockDeviceRef.Kind != v1alpha2.VMBDAObjectRefKindVirtualDisk { continue } - if cond, _ := conditions.GetCondition(vmbdacondition.AttachedType, vmbda.Status.Conditions); cond.Status == metav1.ConditionTrue { - attached = append(attached, &vmbda) - } + vmbdas = append(vmbdas, &vmbda) } - for _, vmbda := range attached { + for _, vmbda := range vmbdas { vd := &v1alpha2.VirtualDisk{} err = h.client.Get(ctx, client.ObjectKey{Namespace: vmbda.Namespace, Name: vmbda.Spec.BlockDeviceRef.Name}, vd) if err != nil { @@ -491,12 +488,6 @@ func (h LifecycleHandler) recordEvent(ctx context.Context, vmop *v1alpha2.Virtua } } -func isOperationInProgress(vmop *v1alpha2.VirtualMachineOperation) bool { - sent, _ := conditions.GetCondition(vmopcondition.TypeSignalSent, vmop.Status.Conditions) - completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) - return sent.Status == metav1.ConditionTrue && completed.Status != metav1.ConditionTrue -} - var mapMigrationPhaseToReason = map[virtv1.VirtualMachineInstanceMigrationPhase]vmopcondition.ReasonCompleted{ virtv1.MigrationPhaseUnset: vmopcondition.ReasonMigrationPending, virtv1.MigrationPending: vmopcondition.ReasonMigrationPending, diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go index 652b47a14c..a543e52544 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/lifecycle_test.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/migration/internal/service" genericservice "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -45,7 +45,7 @@ var _ = Describe("LifecycleHandler", func() { var ( ctx context.Context fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] recorderMock *eventrecord.EventRecorderLoggerMock ) @@ -60,24 +60,24 @@ var _ = Describe("LifecycleHandler", 
func() { } }) - newVMOPEvictPending := func(opts ...vmopbuilder.Option) *virtv2.VirtualMachineOperation { + newVMOPEvictPending := func(opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { options := []vmopbuilder.Option{ vmopbuilder.WithName(name), vmopbuilder.WithNamespace(namespace), - vmopbuilder.WithType(virtv2.VMOPTypeEvict), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), vmopbuilder.WithVirtualMachine(name), } options = append(options, opts...) vmop := vmopbuilder.New(options...) - vmop.Status.Phase = virtv2.VMOPPhasePending + vmop.Status.Phase = v1alpha2.VMOPPhasePending return vmop } - newVM := func(vmPolicy virtv2.LiveMigrationPolicy) *virtv2.VirtualMachine { + newVM := func(vmPolicy v1alpha2.LiveMigrationPolicy) *v1alpha2.VirtualMachine { vm := vmbuilder.NewEmpty(name, namespace) vm.Spec.LiveMigrationPolicy = vmPolicy - vm.Spec.RunPolicy = virtv2.AlwaysOnPolicy - vm.Status.Phase = virtv2.MachineRunning + vm.Spec.RunPolicy = v1alpha2.AlwaysOnPolicy + vm.Status.Phase = v1alpha2.MachineRunning vm.Status.Conditions = []metav1.Condition{ { Type: vmcondition.TypeMigratable.String(), @@ -88,7 +88,7 @@ var _ = Describe("LifecycleHandler", func() { return vm } - DescribeTable("Evict operation for migration policy", func(vmop *virtv2.VirtualMachineOperation, vmPolicy virtv2.LiveMigrationPolicy, expectedPhase virtv2.VMOPPhase) { + DescribeTable("Evict operation for migration policy", func(vmop *v1alpha2.VirtualMachineOperation, vmPolicy v1alpha2.LiveMigrationPolicy, expectedPhase v1alpha2.VMOPPhase) { vm := newVM(vmPolicy) fakeClient, srv = setupEnvironment(vmop, vm) @@ -104,69 +104,69 @@ var _ = Describe("LifecycleHandler", func() { // AlwaysSafe cases. Entry("is ok for AlwaysSafe and force=nil", newVMOPEvictPending(), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for AlwaysSafe and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("should become Failed for AlwaysSafe and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.AlwaysSafeMigrationPolicy, - virtv2.VMOPPhaseFailed, + v1alpha2.AlwaysSafeMigrationPolicy, + v1alpha2.VMOPPhaseFailed, ), // PreferSafe cases. Entry("is ok for PreferSafe and force=nil", newVMOPEvictPending(), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferSafe and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferSafe and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.PreferSafeMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferSafeMigrationPolicy, + v1alpha2.VMOPPhasePending, ), // AlwaysForced cases. 
Entry("is ok for AlwaysForced and force=nil", newVMOPEvictPending(), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("should become Failed for AlwaysForced and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhaseFailed, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhaseFailed, ), Entry("is ok for AlwaysForced and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.AlwaysForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.AlwaysForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), // PreferForced cases. Entry("is ok for PreferForced and force=nil", newVMOPEvictPending(), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferForced and force=false", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(false))), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), Entry("is ok for PreferForced and force=true", newVMOPEvictPending(vmopbuilder.WithForce(ptr.To(true))), - virtv2.PreferForcedMigrationPolicy, - virtv2.VMOPPhasePending, + v1alpha2.PreferForcedMigrationPolicy, + v1alpha2.VMOPPhasePending, ), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go index a7a93fbef0..12cbc6a48c 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/migration/internal/handler/suite_test.go @@ -28,7 +28,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestVmopHandlers(t *testing.T) { @@ -36,7 +36,7 @@ func TestVmopHandlers(t *testing.T) { RunSpecs(t, "VMOP Migration handlers Suite") } -func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus]) { +func setupEnvironment(vmop *v1alpha2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus]) { GinkgoHelper() Expect(vmop).ToNot(BeNil()) @@ -50,10 +50,10 @@ func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Objec Expect(err).NotTo(HaveOccurred()) srv := reconciler.NewResource(client.ObjectKeyFromObject(vmop), fakeClient, - func() *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{} + func() *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{} }, - func(obj *virtv2.VirtualMachineOperation) virtv2.VirtualMachineOperationStatus { + func(obj *v1alpha2.VirtualMachineOperation) v1alpha2.VirtualMachineOperationStatus { return obj.Status }) err = srv.Fetch(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go 
index 83b0b0aca1..a791d5e219 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const deletionHandlerName = "DeletionHandler" @@ -39,12 +39,12 @@ func NewDeletionHandler(svcOpCreator SvcOpCreator) *DeletionHandler { } } -func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachineOperation) (reconcile.Result, error) { +func (h DeletionHandler) Handle(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (reconcile.Result, error) { log := logger.FromContext(ctx) - if vmop.DeletionTimestamp.IsZero() && vmop.Status.Phase == virtv2.VMOPPhaseInProgress { + if vmop.DeletionTimestamp.IsZero() && vmop.Status.Phase == v1alpha2.VMOPPhaseInProgress { log.Debug("Add cleanup finalizer while in the InProgress phase") - controllerutil.AddFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.AddFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } @@ -54,7 +54,7 @@ func (h DeletionHandler) Handle(ctx context.Context, vmop *virtv2.VirtualMachine } else { log.Info("Deletion observed: remove cleanup finalizer from VirtualMachineOperation", "phase", vmop.Status.Phase) } - controllerutil.RemoveFinalizer(vmop, virtv2.FinalizerVMOPCleanup) + controllerutil.RemoveFinalizer(vmop, v1alpha2.FinalizerVMOPCleanup) return reconcile.Result{}, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go index 55b3c14a7f..5172e0e6b2 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/deletion_test.go @@ -25,7 +25,7 @@ import ( vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) var _ = Describe("DeletionHandler", func() { @@ -37,7 +37,7 @@ var _ = Describe("DeletionHandler", func() { var ( ctx = testutil.ContextBackgroundWithNoOpLogger() fakeClient client.WithWatch - srv *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus] + srv *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus] ) AfterEach(func() { @@ -53,24 +53,24 @@ var _ = Describe("DeletionHandler", func() { Expect(err).NotTo(HaveOccurred()) } - newVmop := func(phase virtv2.VMOPPhase, opts ...vmopbuilder.Option) *virtv2.VirtualMachineOperation { + newVmop := func(phase v1alpha2.VMOPPhase, opts ...vmopbuilder.Option) *v1alpha2.VirtualMachineOperation { vmop := vmopbuilder.NewEmpty(name, namespace) vmop.Status.Phase = phase vmopbuilder.ApplyOptions(vmop, opts...) 
return vmop } - DescribeTable("Should be protected", func(phase virtv2.VMOPPhase, protect bool) { - vmop := newVmop(phase, vmopbuilder.WithType(virtv2.VMOPTypeEvict)) + DescribeTable("Should be protected", func(phase v1alpha2.VMOPPhase, protect bool) { + vmop := newVmop(phase, vmopbuilder.WithType(v1alpha2.VMOPTypeEvict)) fakeClient, srv = setupEnvironment(vmop) reconcile() - newVMOP := &virtv2.VirtualMachineOperation{} + newVMOP := &v1alpha2.VirtualMachineOperation{} err := fakeClient.Get(ctx, client.ObjectKeyFromObject(vmop), newVMOP) Expect(err).NotTo(HaveOccurred()) - updated := controllerutil.AddFinalizer(newVMOP, virtv2.FinalizerVMOPCleanup) + updated := controllerutil.AddFinalizer(newVMOP, v1alpha2.FinalizerVMOPCleanup) if protect { Expect(updated).To(BeFalse()) @@ -78,19 +78,19 @@ var _ = Describe("DeletionHandler", func() { Expect(updated).To(BeTrue()) } }, - Entry("VMOP Start 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Start 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Start 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Start 4", virtv2.VMOPPhaseFailed, false), - - Entry("VMOP Stop 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Stop 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Stop 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Stop 4", virtv2.VMOPPhaseFailed, false), - - Entry("VMOP Restart 1", virtv2.VMOPPhasePending, false), - Entry("VMOP Restart 2", virtv2.VMOPPhaseInProgress, true), - Entry("VMOP Restart 3", virtv2.VMOPPhaseCompleted, false), - Entry("VMOP Restart 4", virtv2.VMOPPhaseFailed, false), + Entry("VMOP Start 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Start 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Start 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Start 4", v1alpha2.VMOPPhaseFailed, false), + + Entry("VMOP Stop 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Stop 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Stop 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Stop 4", v1alpha2.VMOPPhaseFailed, false), + + Entry("VMOP Restart 1", v1alpha2.VMOPPhasePending, false), + Entry("VMOP Restart 2", v1alpha2.VMOPPhaseInProgress, true), + Entry("VMOP Restart 3", v1alpha2.VMOPPhaseCompleted, false), + Entry("VMOP Restart 4", v1alpha2.VMOPPhaseFailed, false), ) }) diff --git a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go index e58131f147..09f20d7d59 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/service.go @@ -20,13 +20,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/controller/vmop/powerstate/internal/service" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -type SvcOpCreator func(vmop *virtv2.VirtualMachineOperation) (service.Operation, error) +type SvcOpCreator func(vmop *v1alpha2.VirtualMachineOperation) (service.Operation, error) func NewSvcOpCreator(client client.Client) SvcOpCreator { - return func(vmop *virtv2.VirtualMachineOperation) (service.Operation, error) { + return func(vmop *v1alpha2.VirtualMachineOperation) (service.Operation, error) { return service.NewOperationService(client, vmop) } } diff --git 
a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go index b278a6c964..b954d57521 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go +++ b/images/virtualization-artifact/pkg/controller/vmop/powerstate/internal/handler/suite_test.go @@ -26,7 +26,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func TestVmopHandlers(t *testing.T) { @@ -34,7 +34,7 @@ func TestVmopHandlers(t *testing.T) { RunSpecs(t, "VMOP PowerState handlers Suite") } -func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*virtv2.VirtualMachineOperation, virtv2.VirtualMachineOperationStatus]) { +func setupEnvironment(vmop *v1alpha2.VirtualMachineOperation, objs ...client.Object) (client.WithWatch, *reconciler.Resource[*v1alpha2.VirtualMachineOperation, v1alpha2.VirtualMachineOperationStatus]) { GinkgoHelper() Expect(vmop).ToNot(BeNil()) @@ -46,10 +46,10 @@ func setupEnvironment(vmop *virtv2.VirtualMachineOperation, objs ...client.Objec Expect(err).NotTo(HaveOccurred()) srv := reconciler.NewResource(client.ObjectKeyFromObject(vmop), fakeClient, - func() *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{} + func() *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{} }, - func(obj *virtv2.VirtualMachineOperation) virtv2.VirtualMachineOperationStatus { + func(obj *v1alpha2.VirtualMachineOperation) v1alpha2.VirtualMachineOperationStatus { return obj.Status }) err = srv.Fetch(context.Background()) diff --git a/images/virtualization-artifact/pkg/controller/vmop/vmop_controller.go b/images/virtualization-artifact/pkg/controller/vmop/vmop_controller.go index 1e4f2ec2c4..e8746e9054 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/vmop_controller.go +++ b/images/virtualization-artifact/pkg/controller/vmop/vmop_controller.go @@ -83,7 +83,7 @@ func SetupController( if err := builder.WebhookManagedBy(mgr). For(&v1alpha2.VirtualMachineOperation{}). - WithValidator(NewValidator(log)). + WithValidator(NewValidator(mgr.GetClient(), log)). Complete(); err != nil { return err } diff --git a/images/virtualization-artifact/pkg/controller/vmop/vmop_webhook.go b/images/virtualization-artifact/pkg/controller/vmop/vmop_webhook.go index c170fffd82..be66f35f71 100644 --- a/images/virtualization-artifact/pkg/controller/vmop/vmop_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vmop/vmop_webhook.go @@ -18,19 +18,133 @@ package vmop import ( "context" + "errors" + "fmt" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/deckhouse/deckhouse/pkg/log" + "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/validator" "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewValidator(log *log.Logger) admission.CustomValidator { +func NewValidator(c client.Client, log *log.Logger) admission.CustomValidator { return validator.NewValidator[*v1alpha2.VirtualMachineOperation](log. 
With("controller", "vmop-controller"). With("webhook", "validation"), - ).WithCreateValidators(&deprecateMigrateValidator{}) + ).WithCreateValidators(&deprecateMigrateValidator{}, NewLocalVirtualDiskValidator(c)) +} + +type LocalVirtualDiskValidator struct { + client client.Client +} + +func NewLocalVirtualDiskValidator(client client.Client) *LocalVirtualDiskValidator { + return &LocalVirtualDiskValidator{client: client} +} + +func (v *LocalVirtualDiskValidator) ValidateCreate(ctx context.Context, vmop *v1alpha2.VirtualMachineOperation) (admission.Warnings, error) { + if vmop.Spec.Type != v1alpha2.VMOPTypeEvict && vmop.Spec.Type != v1alpha2.VMOPTypeMigrate { + return nil, nil + } + + vm, err := object.FetchObject(ctx, types.NamespacedName{ + Namespace: vmop.Namespace, + Name: vmop.Spec.VirtualMachine, + }, v.client, &v1alpha2.VirtualMachine{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch virtual machine %s: %w", vmop.Spec.VirtualMachine, err) + } + + if vm == nil { + return nil, nil + } + + var hasHotplugs bool + var hasRWO bool + + for _, bdRef := range vm.Status.BlockDeviceRefs { + if bdRef.Hotplugged { + hasHotplugs = true + } + + switch bdRef.Kind { + case v1alpha2.VirtualDiskKind: + var vd *v1alpha2.VirtualDisk + vd, err = object.FetchObject(ctx, types.NamespacedName{ + Namespace: vm.Namespace, + Name: bdRef.Name, + }, v.client, &v1alpha2.VirtualDisk{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch virtual disk %s: %w", bdRef.Name, err) + } + + if vd == nil || vd.Status.Target.PersistentVolumeClaim == "" { + return nil, nil + } + + var isRWO bool + isRWO, err = v.isRWOPersistentVolumeClaim(ctx, vd.Status.Target.PersistentVolumeClaim, vm.Namespace) + if err != nil { + return nil, err + } + + hasRWO = hasRWO || isRWO + case v1alpha2.VirtualImageKind: + var vi *v1alpha2.VirtualImage + vi, err = object.FetchObject(ctx, types.NamespacedName{ + Namespace: vm.Namespace, + Name: bdRef.Name, + }, v.client, &v1alpha2.VirtualImage{}) + if err != nil { + return nil, fmt.Errorf("failed to fetch virtual image %s: %w", bdRef.Name, err) + } + + if vi == nil || vi.Status.Target.PersistentVolumeClaim == "" { + return nil, nil + } + + var isRWO bool + isRWO, err = v.isRWOPersistentVolumeClaim(ctx, vi.Status.Target.PersistentVolumeClaim, vm.Namespace) + if err != nil { + return nil, err + } + + hasRWO = hasRWO || isRWO + } + } + + if hasRWO && hasHotplugs { + return nil, errors.New("for now, migration of the rwo virtual disk is not allowed if the virtual machine has hot-plugged block devices") + } + + return nil, nil +} + +func (v *LocalVirtualDiskValidator) isRWOPersistentVolumeClaim(ctx context.Context, pvcName, pvcNamespace string) (bool, error) { + pvc, err := object.FetchObject(ctx, types.NamespacedName{ + Namespace: pvcNamespace, + Name: pvcName, + }, v.client, &corev1.PersistentVolumeClaim{}) + if err != nil { + return false, fmt.Errorf("failed to fetch pvc %s: %w", pvcName, err) + } + + if pvc == nil { + return false, nil + } + + for _, mode := range pvc.Status.AccessModes { + if mode == corev1.ReadWriteOnce { + return true, nil + } + } + + return false, nil } type deprecateMigrateValidator struct{} diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go index 56dfdbf195..3e72cf4467 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go +++ 
b/images/virtualization-artifact/pkg/controller/vmrestore/internal/interfaces.go @@ -21,16 +21,16 @@ import ( corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm -out mock.go . Restorer type Restorer interface { - RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) + RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) RestoreProvisioner(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) - RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) - RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) - RestoreVirtualMachineMACAddresses(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineMACAddress, error) + RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) + RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) + RestoreVirtualMachineMACAddresses(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineMACAddress, error) RestoreMACAddressOrder(ctx context.Context, secret *corev1.Secret) ([]string, error) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go index d4de034a75..e8751ca91f 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/life_cycle.go @@ -37,7 +37,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal/restorer" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" ) @@ -58,11 +58,11 @@ func NewLifeCycleHandler(client client.Client, restorer Restorer, recorder event } } -func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) { switch vmRestore.Status.Phase { - case virtv2.VirtualMachineRestorePhaseReady, - virtv2.VirtualMachineRestorePhaseFailed, - virtv2.VirtualMachineRestorePhaseTerminating: + case v1alpha2.VirtualMachineRestorePhaseReady, + v1alpha2.VirtualMachineRestorePhaseFailed, + v1alpha2.VirtualMachineRestorePhaseTerminating: return reconcile.Result{}, nil } @@ -74,29 +74,29 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } if vmRestore.Status.Phase == "" { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending } if vmRestore.DeletionTimestamp != nil { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseTerminating + vmRestore.Status.Phase = 
v1alpha2.VirtualMachineRestorePhaseTerminating cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown) return reconcile.Result{}, nil } - if vmRestore.Status.Phase == virtv2.VirtualMachineRestorePhaseInProgress { - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Status.Phase == v1alpha2.VirtualMachineRestorePhaseInProgress { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { err := h.startVirtualMachine(ctx, vmRestore) if err != nil { h.recorder.Event( vmRestore, corev1.EventTypeWarning, - virtv2.ReasonVMStartFailed, + v1alpha2.ReasonVMStartFailed, err.Error(), ) } } - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseReady + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseReady cb.Status(metav1.ConditionTrue).Reason(vmrestorecondition.VirtualMachineRestoreReady) return reconcile.Result{}, nil @@ -104,7 +104,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM vmSnapshotReadyToUseCondition, _ := conditions.GetCondition(vmrestorecondition.VirtualMachineSnapshotReadyToUseType, vmRestore.Status.Conditions) if vmSnapshotReadyToUseCondition.Status != metav1.ConditionTrue { - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReadyToUse). @@ -113,7 +113,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } vmSnapshotKey := types.NamespacedName{Namespace: vmRestore.Namespace, Name: vmRestore.Spec.VirtualMachineSnapshotName} - vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &virtv2.VirtualMachineSnapshot{}) + vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &v1alpha2.VirtualMachineSnapshot{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, err @@ -134,7 +134,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM var ( overrideValidators []OverrideValidator - runPolicy virtv2.RunPolicy + runPolicy v1alpha2.RunPolicy overridedVMName string ) @@ -144,9 +144,9 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { runPolicy = vm.Spec.RunPolicy - vm.Spec.RunPolicy = virtv2.AlwaysOffPolicy + vm.Spec.RunPolicy = v1alpha2.AlwaysOffPolicy } vmip, err := h.restorer.RestoreVirtualMachineIPAddress(ctx, restorerSecret) @@ -181,7 +181,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM for i := range vm.Spec.Networks { ns := &vm.Spec.Networks[i] - if ns.Type == virtv2.NetworksTypeMain { + if ns.Type == v1alpha2.NetworksTypeMain { continue } @@ -201,7 +201,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM switch { case err == nil: case errors.Is(err, ErrVirtualDiskSnapshotNotFound): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhasePending + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReady). 
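// --- Illustrative sketch (not part of the diff): LifeCycleHandler.Handle above is,
// at its core, a small phase state machine for VirtualMachineRestore. A simplified,
// self-contained paraphrase of the transitions, with local types standing in for the
// real API and condition bookkeeping omitted:

package main

import "fmt"

type Phase string

const (
	Pending     Phase = "Pending"
	InProgress  Phase = "InProgress"
	Ready       Phase = "Ready"
	Failed      Phase = "Failed"
	Terminating Phase = "Terminating"
)

func nextPhase(current Phase, deleting, snapshotReady bool) Phase {
	switch {
	case current == Ready || current == Failed || current == Terminating:
		return current // terminal phases: nothing left to reconcile
	case deleting:
		return Terminating
	case current == InProgress:
		return Ready // restore finished; Forced mode may also restart the VM here
	case !snapshotReady:
		return Pending // wait until the snapshot is ready to use
	default:
		return InProgress
	}
}

func main() {
	fmt.Println(nextPhase("", false, false))    // Pending
	fmt.Println(nextPhase(Pending, false, true)) // InProgress
}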
@@ -238,7 +238,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM var toCreate []client.Object - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { for _, ov := range overrideValidators { ov.Override(vmRestore.Spec.NameReplacements) @@ -247,7 +247,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM case err == nil: toCreate = append(toCreate, ov.Object()) case errors.Is(err, restorer.ErrAlreadyInUse), errors.Is(err, restorer.ErrAlreadyExistsAndHasDiff): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseFailed + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreConflict). @@ -260,7 +260,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachine{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) @@ -272,14 +272,14 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } else { switch vmObj.Status.Phase { - case virtv2.MachinePending: + case v1alpha2.MachinePending: err := errors.New("a virtual machine cannot be restored from the pending phase with `Forced` mode; you can delete the virtual machine and restore it with `Safe` mode") setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, err - case virtv2.MachineStopped: + case v1alpha2.MachineStopped: default: - if runPolicy != virtv2.AlwaysOffPolicy { - err := h.updateVMRunPolicy(ctx, vmObj, virtv2.AlwaysOffPolicy) + if runPolicy != v1alpha2.AlwaysOffPolicy { + err := h.updateVMRunPolicy(ctx, vmObj, v1alpha2.AlwaysOffPolicy) if err != nil { if errors.Is(err, restorer.ErrUpdating) { setPhaseConditionToPending(cb, &vmRestore.Status.Phase, vmrestorecondition.VirtualMachineIsNotStopped, err.Error()) @@ -318,7 +318,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeSafe { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeSafe { for _, ov := range overrideValidators { ov.Override(vmRestore.Spec.NameReplacements) @@ -326,7 +326,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM switch { case err == nil: case errors.Is(err, restorer.ErrAlreadyExists), errors.Is(err, restorer.ErrAlreadyInUse), errors.Is(err, restorer.ErrAlreadyExistsAndHasDiff): - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseFailed + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreConflict). 
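// --- Illustrative sketch (not part of the diff): the OverrideValidators applied in
// the hunks above all funnel through overrideName (see the overrider.go hunk further
// below), which may rewrite each restored object's name according to the
// NameReplacement rules from the VirtualMachineRestore spec. A sketch of that
// matching, with a local struct assumed to mirror the API shape; treating an empty
// From.Kind as "match any kind" is also an assumption:

package main

import "fmt"

type NameReplacementFrom struct {
	Kind string
	Name string
}

type NameReplacement struct {
	From NameReplacementFrom
	To   string
}

func overrideName(kind, name string, rules []NameReplacement) string {
	if name == "" {
		return ""
	}
	for _, r := range rules {
		if r.From.Name != name {
			continue // rule targets a different object name
		}
		if r.From.Kind != "" && r.From.Kind != kind {
			continue // rule targets a different kind
		}
		return r.To
	}
	return name // no rule matched: keep the original name
}

func main() {
	rules := []NameReplacement{{From: NameReplacementFrom{Kind: "VirtualDisk", Name: "disk"}, To: "disk-copy"}}
	fmt.Println(overrideName("VirtualDisk", "disk", rules)) // disk-copy
}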
@@ -359,7 +359,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - if vmRestore.Spec.RestoreMode == virtv2.RestoreModeForced { + if vmRestore.Spec.RestoreMode == v1alpha2.RestoreModeForced { err = h.checkKVVMDiskStatus(ctx, vm.Name, vm.Namespace) if err != nil { if errors.Is(err, restorer.ErrRestoring) { @@ -369,7 +369,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM return reconcile.Result{}, err } - vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, types.NamespacedName{Name: overridedVMName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachine{}) if err != nil { setPhaseConditionToFailed(cb, &vmRestore.Status.Phase, err) return reconcile.Result{}, fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) @@ -386,7 +386,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM } } - vmRestore.Status.Phase = virtv2.VirtualMachineRestorePhaseInProgress + vmRestore.Status.Phase = v1alpha2.VirtualMachineRestorePhaseInProgress cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReady). @@ -396,7 +396,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualM type OverrideValidator interface { Object() client.Object - Override(rules []virtv2.NameReplacement) + Override(rules []v1alpha2.NameReplacement) Validate(ctx context.Context) error ValidateWithForce(ctx context.Context) error ProcessWithForce(ctx context.Context) error @@ -404,12 +404,12 @@ type OverrideValidator interface { var ErrVirtualDiskSnapshotNotFound = errors.New("not found") -func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) ([]*virtv2.VirtualDisk, error) { - vds := make([]*virtv2.VirtualDisk, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) +func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) ([]*v1alpha2.VirtualDisk, error) { + vds := make([]*v1alpha2.VirtualDisk, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { vdSnapshotKey := types.NamespacedName{Namespace: vmSnapshot.Namespace, Name: vdSnapshotName} - vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, h.client, &virtv2.VirtualDiskSnapshot{}) + vdSnapshot, err := object.FetchObject(ctx, vdSnapshotKey, h.client, &v1alpha2.VirtualDiskSnapshot{}) if err != nil { return nil, fmt.Errorf("failed to fetch the virtual disk snapshot %q: %w", vdSnapshotKey.Name, err) } @@ -418,26 +418,26 @@ func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv return nil, fmt.Errorf("the virtual disk snapshot %q %w", vdSnapshotName, ErrVirtualDiskSnapshotNotFound) } - vd := virtv2.VirtualDisk{ + vd := v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ - Kind: virtv2.VirtualDiskKind, - APIVersion: virtv2.Version, + Kind: v1alpha2.VirtualDiskKind, + APIVersion: v1alpha2.Version, }, ObjectMeta: metav1.ObjectMeta{ Name: vdSnapshot.Spec.VirtualDiskName, Namespace: vdSnapshot.Namespace, }, - Spec: virtv2.VirtualDiskSpec{ - DataSource: &virtv2.VirtualDiskDataSource{ - Type: virtv2.DataSourceTypeObjectRef, - ObjectRef: &virtv2.VirtualDiskObjectRef{ - Kind: virtv2.VirtualDiskObjectRefKindVirtualDiskSnapshot, + Spec: v1alpha2.VirtualDiskSpec{ + DataSource: 
&v1alpha2.VirtualDiskDataSource{ + Type: v1alpha2.DataSourceTypeObjectRef, + ObjectRef: &v1alpha2.VirtualDiskObjectRef{ + Kind: v1alpha2.VirtualDiskObjectRefKindVirtualDiskSnapshot, Name: vdSnapshot.Name, }, }, }, - Status: virtv2.VirtualDiskStatus{ - AttachedToVirtualMachines: []virtv2.AttachedVirtualMachine{ + Status: v1alpha2.VirtualDiskStatus{ + AttachedToVirtualMachines: []v1alpha2.AttachedVirtualMachine{ {Name: vmSnapshot.Spec.VirtualMachineName, Mounted: true}, }, }, @@ -449,14 +449,14 @@ func (h LifeCycleHandler) getVirtualDisks(ctx context.Context, vmSnapshot *virtv return vds, nil } -func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) { - vmbdas := &virtv2.VirtualMachineBlockDeviceAttachmentList{} +func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { + vmbdas := &v1alpha2.VirtualMachineBlockDeviceAttachmentList{} err := h.client.List(ctx, vmbdas, &client.ListOptions{Namespace: vmNamespace}) if err != nil { return nil, fmt.Errorf("failed to list the `VirtualMachineBlockDeviceAttachment`: %w", err) } - vmbdasByVM := make([]*virtv2.VirtualMachineBlockDeviceAttachment, 0, len(vmbdas.Items)) + vmbdasByVM := make([]*v1alpha2.VirtualMachineBlockDeviceAttachment, 0, len(vmbdas.Items)) for _, vmbda := range vmbdas.Items { if vmbda.Spec.VirtualMachineName != vmName { continue @@ -470,7 +470,7 @@ func (h LifeCycleHandler) getCurrentVirtualMachineBlockDeviceAttachments(ctx con return vmbdasByVM, nil } -func (h LifeCycleHandler) deleteCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmbdas []*virtv2.VirtualMachineBlockDeviceAttachment) error { +func (h LifeCycleHandler) deleteCurrentVirtualMachineBlockDeviceAttachments(ctx context.Context, vmbdas []*v1alpha2.VirtualMachineBlockDeviceAttachment) error { for _, vmbda := range vmbdas { err := object.DeleteObject(ctx, h.client, client.Object(vmbda)) if err != nil { @@ -492,23 +492,23 @@ func (h LifeCycleHandler) createBatch(ctx context.Context, objs ...client.Object return nil } -func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *virtv2.VirtualMachineRestorePhase, err error) { - *phase = virtv2.VirtualMachineRestorePhaseFailed +func setPhaseConditionToFailed(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualMachineRestorePhase, err error) { + *phase = v1alpha2.VirtualMachineRestorePhaseFailed cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineRestoreFailed). Message(service.CapitalizeFirstLetter(err.Error()) + ".") } -func setPhaseConditionToPending(cb *conditions.ConditionBuilder, phase *virtv2.VirtualMachineRestorePhase, reason vmrestorecondition.VirtualMachineRestoreReadyReason, msg string) { - *phase = virtv2.VirtualMachineRestorePhasePending +func setPhaseConditionToPending(cb *conditions.ConditionBuilder, phase *v1alpha2.VirtualMachineRestorePhase, reason vmrestorecondition.VirtualMachineRestoreReadyReason, msg string) { + *phase = v1alpha2.VirtualMachineRestorePhasePending cb. Status(metav1.ConditionFalse). Reason(reason). 
Message(service.CapitalizeFirstLetter(msg) + ".") } -func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { +func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { return vmopbuilder.New( vmopbuilder.WithGenerateName("vmrestore-"), vmopbuilder.WithNamespace(namespace), @@ -518,8 +518,8 @@ func newVMRestoreVMOP(vmName, namespace, vmRestoreUID string, vmopType virtv2.VM ) } -func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmRestoreUID string, vmopType virtv2.VMOPType) (*virtv2.VirtualMachineOperation, error) { - vmops := &virtv2.VirtualMachineOperationList{} +func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmRestoreUID string, vmopType v1alpha2.VMOPType) (*v1alpha2.VirtualMachineOperation, error) { + vmops := &v1alpha2.VirtualMachineOperationList{} err := h.client.List(ctx, vmops, &client.ListOptions{Namespace: vmNamespace}) if err != nil { return nil, err @@ -537,13 +537,13 @@ func (h LifeCycleHandler) getVMRestoreVMOP(ctx context.Context, vmNamespace, vmR } func (h LifeCycleHandler) stopVirtualMachine(ctx context.Context, vmName, vmNamespace, vmRestoreUID string) error { - vmopStop, err := h.getVMRestoreVMOP(ctx, vmNamespace, vmRestoreUID, virtv2.VMOPTypeStop) + vmopStop, err := h.getVMRestoreVMOP(ctx, vmNamespace, vmRestoreUID, v1alpha2.VMOPTypeStop) if err != nil { return fmt.Errorf("failed to list the `VirtualMachineOperations`: %w", err) } if vmopStop == nil { - vmopStop := newVMRestoreVMOP(vmName, vmNamespace, vmRestoreUID, virtv2.VMOPTypeStop) + vmopStop := newVMRestoreVMOP(vmName, vmNamespace, vmRestoreUID, v1alpha2.VMOPTypeStop) err := h.client.Create(ctx, vmopStop) if err != nil { return fmt.Errorf("failed to stop the `VirtualMachine`: %w", err) @@ -553,17 +553,17 @@ func (h LifeCycleHandler) stopVirtualMachine(ctx context.Context, vmName, vmName conditionCompleted, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmopStop.Status.Conditions) switch vmopStop.Status.Phase { - case virtv2.VMOPPhaseFailed: + case v1alpha2.VMOPPhaseFailed: return fmt.Errorf("failed to stop the `VirtualMachine`: %s", conditionCompleted.Message) - case virtv2.VMOPPhaseCompleted: + case v1alpha2.VMOPPhaseCompleted: return nil default: return fmt.Errorf("the status of the `VirtualMachineOperation` is %w: %s", restorer.ErrIncomplete, conditionCompleted.Message) } } -func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) error { - vms := &virtv2.VirtualMachineList{} +func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) error { + vms := &v1alpha2.VirtualMachineList{} err := h.client.List(ctx, vms, &client.ListOptions{Namespace: vmRestore.Namespace}) if err != nil { return fmt.Errorf("failed to list the `VirtualMachines`: %w", err) @@ -577,18 +577,18 @@ func (h LifeCycleHandler) startVirtualMachine(ctx context.Context, vmRestore *vi } vmKey := types.NamespacedName{Name: vmName, Namespace: vmRestore.Namespace} - vmObj, err := object.FetchObject(ctx, vmKey, h.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, vmKey, h.client, &v1alpha2.VirtualMachine{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) } if vmObj != nil { - if vmObj.Spec.RunPolicy != virtv2.AlwaysOnUnlessStoppedManually { + if vmObj.Spec.RunPolicy != 
v1alpha2.AlwaysOnUnlessStoppedManually { return nil } - if vmObj.Status.Phase == virtv2.MachineStopped { - vmopStart := newVMRestoreVMOP(vmName, vmRestore.Namespace, string(vmRestore.UID), virtv2.VMOPTypeStart) + if vmObj.Status.Phase == v1alpha2.MachineStopped { + vmopStart := newVMRestoreVMOP(vmName, vmRestore.Namespace, string(vmRestore.UID), v1alpha2.VMOPTypeStart) err := h.client.Create(ctx, vmopStart) if err != nil { return fmt.Errorf("failed to start the `VirtualMachine`: %w", err) @@ -620,7 +620,7 @@ func (h LifeCycleHandler) checkKVVMDiskStatus(ctx context.Context, vmName, vmNam func (h LifeCycleHandler) getOverrridedVMName(overrideValidators []OverrideValidator) (string, error) { for _, ov := range overrideValidators { - if ov.Object().GetObjectKind().GroupVersionKind().Kind == virtv2.VirtualMachineKind { + if ov.Object().GetObjectKind().GroupVersionKind().Kind == v1alpha2.VirtualMachineKind { return ov.Object().GetName(), nil } } @@ -628,7 +628,7 @@ func (h LifeCycleHandler) getOverrridedVMName(overrideValidators []OverrideValid return "", fmt.Errorf("failed to get the `VirtualMachine` name") } -func (h LifeCycleHandler) updateVMRunPolicy(ctx context.Context, vmObj *virtv2.VirtualMachine, runPolicy virtv2.RunPolicy) error { +func (h LifeCycleHandler) updateVMRunPolicy(ctx context.Context, vmObj *v1alpha2.VirtualMachine, runPolicy v1alpha2.RunPolicy) error { vmObj.Spec.RunPolicy = runPolicy err := h.client.Update(ctx, vmObj) diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/mock.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/mock.go index 67bdbbf2bd..4165a3dd57 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/mock.go @@ -5,7 +5,7 @@ package internal import ( "context" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" corev1 "k8s.io/api/core/v1" "sync" ) @@ -26,16 +26,16 @@ var _ Restorer = &RestorerMock{} // RestoreProvisionerFunc: func(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) { // panic("mock out the RestoreProvisioner method") // }, -// RestoreVirtualMachineFunc: func(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) { +// RestoreVirtualMachineFunc: func(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) { // panic("mock out the RestoreVirtualMachine method") // }, -// RestoreVirtualMachineBlockDeviceAttachmentsFunc: func(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) { +// RestoreVirtualMachineBlockDeviceAttachmentsFunc: func(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { // panic("mock out the RestoreVirtualMachineBlockDeviceAttachments method") // }, -// RestoreVirtualMachineIPAddressFunc: func(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) { +// RestoreVirtualMachineIPAddressFunc: func(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) { // panic("mock out the RestoreVirtualMachineIPAddress method") // }, -// RestoreVirtualMachineMACAddressesFunc: func(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineMACAddress, error) { +// RestoreVirtualMachineMACAddressesFunc: func(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineMACAddress, error) { // 
panic("mock out the RestoreVirtualMachineMACAddresses method") // }, // } @@ -52,16 +52,16 @@ type RestorerMock struct { RestoreProvisionerFunc func(ctx context.Context, secret *corev1.Secret) (*corev1.Secret, error) // RestoreVirtualMachineFunc mocks the RestoreVirtualMachine method. - RestoreVirtualMachineFunc func(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) + RestoreVirtualMachineFunc func(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) // RestoreVirtualMachineBlockDeviceAttachmentsFunc mocks the RestoreVirtualMachineBlockDeviceAttachments method. - RestoreVirtualMachineBlockDeviceAttachmentsFunc func(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) + RestoreVirtualMachineBlockDeviceAttachmentsFunc func(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) // RestoreVirtualMachineIPAddressFunc mocks the RestoreVirtualMachineIPAddress method. - RestoreVirtualMachineIPAddressFunc func(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) + RestoreVirtualMachineIPAddressFunc func(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) // RestoreVirtualMachineMACAddressesFunc mocks the RestoreVirtualMachineMACAddresses method. - RestoreVirtualMachineMACAddressesFunc func(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineMACAddress, error) + RestoreVirtualMachineMACAddressesFunc func(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineMACAddress, error) // calls tracks calls to the methods. calls struct { @@ -189,7 +189,7 @@ func (mock *RestorerMock) RestoreProvisionerCalls() []struct { } // RestoreVirtualMachine calls RestoreVirtualMachineFunc. -func (mock *RestorerMock) RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachine, error) { +func (mock *RestorerMock) RestoreVirtualMachine(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachine, error) { if mock.RestoreVirtualMachineFunc == nil { panic("RestorerMock.RestoreVirtualMachineFunc: method is nil but Restorer.RestoreVirtualMachine was just called") } @@ -225,7 +225,7 @@ func (mock *RestorerMock) RestoreVirtualMachineCalls() []struct { } // RestoreVirtualMachineBlockDeviceAttachments calls RestoreVirtualMachineBlockDeviceAttachmentsFunc. -func (mock *RestorerMock) RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineBlockDeviceAttachment, error) { +func (mock *RestorerMock) RestoreVirtualMachineBlockDeviceAttachments(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineBlockDeviceAttachment, error) { if mock.RestoreVirtualMachineBlockDeviceAttachmentsFunc == nil { panic("RestorerMock.RestoreVirtualMachineBlockDeviceAttachmentsFunc: method is nil but Restorer.RestoreVirtualMachineBlockDeviceAttachments was just called") } @@ -261,7 +261,7 @@ func (mock *RestorerMock) RestoreVirtualMachineBlockDeviceAttachmentsCalls() []s } // RestoreVirtualMachineIPAddress calls RestoreVirtualMachineIPAddressFunc. 
-func (mock *RestorerMock) RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*virtv2.VirtualMachineIPAddress, error) { +func (mock *RestorerMock) RestoreVirtualMachineIPAddress(ctx context.Context, secret *corev1.Secret) (*v1alpha2.VirtualMachineIPAddress, error) { if mock.RestoreVirtualMachineIPAddressFunc == nil { panic("RestorerMock.RestoreVirtualMachineIPAddressFunc: method is nil but Restorer.RestoreVirtualMachineIPAddress was just called") } @@ -297,7 +297,7 @@ func (mock *RestorerMock) RestoreVirtualMachineIPAddressCalls() []struct { } // RestoreVirtualMachineMACAddresses calls RestoreVirtualMachineMACAddressesFunc. -func (mock *RestorerMock) RestoreVirtualMachineMACAddresses(ctx context.Context, secret *corev1.Secret) ([]*virtv2.VirtualMachineMACAddress, error) { +func (mock *RestorerMock) RestoreVirtualMachineMACAddresses(ctx context.Context, secret *corev1.Secret) ([]*v1alpha2.VirtualMachineMACAddress, error) { if mock.RestoreVirtualMachineMACAddressesFunc == nil { panic("RestorerMock.RestoreVirtualMachineMACAddressesFunc: method is nil but Restorer.RestoreVirtualMachineMACAddresses was just called") } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go index f1de740a86..f8abc3dda1 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/overrider.go @@ -16,9 +16,9 @@ limitations under the License. package restorer -import virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" +import "github.com/deckhouse/virtualization/api/core/v1alpha2" -func overrideName(kind, name string, rules []virtv2.NameReplacement) string { +func overrideName(kind, name string, rules []v1alpha2.NameReplacement) string { if name == "" { return "" } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go index 27bbd2b098..11ee92fc4f 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/provisioner_restorer.go @@ -29,7 +29,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ProvisionerOverrideValidator struct { @@ -67,7 +67,7 @@ func NewProvisionerOverrideValidator(secretTmpl *corev1.Secret, client client.Cl } } -func (v *ProvisionerOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *ProvisionerOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.secret.Name = overrideName(v.secret.Kind, v.secret.Name, rules) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go index a6bf4b90b6..8f16c0ffb3 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vd_restorer.go @@ -26,16 +26,16 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskOverrideValidator struct { - vd *virtv2.VirtualDisk + vd *v1alpha2.VirtualDisk client client.Client vmRestoreUID string } -func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.Client, vmRestoreUID string) *VirtualDiskOverrideValidator { +func NewVirtualDiskOverrideValidator(vdTmpl *v1alpha2.VirtualDisk, client client.Client, vmRestoreUID string) *VirtualDiskOverrideValidator { if vdTmpl.Annotations != nil { vdTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -43,7 +43,7 @@ func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.C vdTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualDiskOverrideValidator{ - vd: &virtv2.VirtualDisk{ + vd: &v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ Kind: vdTmpl.Kind, APIVersion: vdTmpl.APIVersion, @@ -62,13 +62,13 @@ func NewVirtualDiskOverrideValidator(vdTmpl *virtv2.VirtualDisk, client client.C } } -func (v *VirtualDiskOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualDiskOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vd.Name = overrideName(v.vd.Kind, v.vd.Name, rules) } func (v *VirtualDiskOverrideValidator) Validate(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - existed, err := object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + existed, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } @@ -85,7 +85,7 @@ func (v *VirtualDiskOverrideValidator) Validate(ctx context.Context) error { func (v *VirtualDiskOverrideValidator) ValidateWithForce(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - existed, err := object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + existed, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } @@ -105,7 +105,7 @@ func (v *VirtualDiskOverrideValidator) ValidateWithForce(ctx context.Context) er func (v *VirtualDiskOverrideValidator) ProcessWithForce(ctx context.Context) error { vdKey := types.NamespacedName{Namespace: v.vd.Namespace, Name: v.vd.Name} - vdObj, err := object.FetchObject(ctx, vdKey, v.client, &virtv2.VirtualDisk{}) + vdObj, err := object.FetchObject(ctx, vdKey, v.client, &v1alpha2.VirtualDisk{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualDisk`: %w", err) } @@ -129,7 +129,7 @@ func (v *VirtualDiskOverrideValidator) ProcessWithForce(ctx context.Context) err } func (v *VirtualDiskOverrideValidator) Object() client.Object { - return &virtv2.VirtualDisk{ + return &v1alpha2.VirtualDisk{ TypeMeta: metav1.TypeMeta{ Kind: v.vd.Kind, APIVersion: v.vd.APIVersion, diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go index ec632c2d5b..ea7fb0c346 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vm_restorer.go @@ -28,18 +28,18 @@ import ( 
"github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ReasonPVCNotFound = "PVC not found" type VirtualMachineOverrideValidator struct { - vm *virtv2.VirtualMachine + vm *v1alpha2.VirtualMachine client client.Client vmRestoreUID string } -func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client client.Client, vmRestoreUID string) *VirtualMachineOverrideValidator { +func NewVirtualMachineOverrideValidator(vmTmpl *v1alpha2.VirtualMachine, client client.Client, vmRestoreUID string) *VirtualMachineOverrideValidator { if vmTmpl.Annotations != nil { vmTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -48,7 +48,7 @@ func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client cl } return &VirtualMachineOverrideValidator{ - vm: &virtv2.VirtualMachine{ + vm: &v1alpha2.VirtualMachine{ TypeMeta: metav1.TypeMeta{ Kind: vmTmpl.Kind, APIVersion: vmTmpl.APIVersion, @@ -66,15 +66,15 @@ func NewVirtualMachineOverrideValidator(vmTmpl *virtv2.VirtualMachine, client cl } } -func (v *VirtualMachineOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vm.Name = overrideName(v.vm.Kind, v.vm.Name, rules) - v.vm.Spec.VirtualMachineIPAddress = overrideName(virtv2.VirtualMachineIPAddressKind, v.vm.Spec.VirtualMachineIPAddress, rules) + v.vm.Spec.VirtualMachineIPAddress = overrideName(v1alpha2.VirtualMachineIPAddressKind, v.vm.Spec.VirtualMachineIPAddress, rules) if v.vm.Spec.Provisioning != nil { if v.vm.Spec.Provisioning.UserDataRef != nil { - if v.vm.Spec.Provisioning.UserDataRef.Kind == virtv2.UserDataRefKindSecret { + if v.vm.Spec.Provisioning.UserDataRef.Kind == v1alpha2.UserDataRefKindSecret { v.vm.Spec.Provisioning.UserDataRef.Name = overrideName( - string(virtv2.UserDataRefKindSecret), + string(v1alpha2.UserDataRefKindSecret), v.vm.Spec.Provisioning.UserDataRef.Name, rules, ) @@ -83,17 +83,17 @@ func (v *VirtualMachineOverrideValidator) Override(rules []virtv2.NameReplacemen } for i := range v.vm.Spec.BlockDeviceRefs { - if v.vm.Spec.BlockDeviceRefs[i].Kind != virtv2.DiskDevice { + if v.vm.Spec.BlockDeviceRefs[i].Kind != v1alpha2.DiskDevice { continue } - v.vm.Spec.BlockDeviceRefs[i].Name = overrideName(virtv2.VirtualDiskKind, v.vm.Spec.BlockDeviceRefs[i].Name, rules) + v.vm.Spec.BlockDeviceRefs[i].Name = overrideName(v1alpha2.VirtualDiskKind, v.vm.Spec.BlockDeviceRefs[i].Name, rules) } } func (v *VirtualMachineOverrideValidator) Validate(ctx context.Context) error { vmKey := types.NamespacedName{Namespace: v.vm.Namespace, Name: v.vm.Name} - existed, err := object.FetchObject(ctx, vmKey, v.client, &virtv2.VirtualMachine{}) + existed, err := object.FetchObject(ctx, vmKey, v.client, &v1alpha2.VirtualMachine{}) if err != nil { return err } @@ -114,7 +114,7 @@ func (v *VirtualMachineOverrideValidator) ValidateWithForce(ctx context.Context) func (v *VirtualMachineOverrideValidator) ProcessWithForce(ctx context.Context) error { vmKey := types.NamespacedName{Namespace: v.vm.Namespace, Name: v.vm.Name} - vmObj, err := object.FetchObject(ctx, vmKey, v.client, &virtv2.VirtualMachine{}) + vmObj, err := object.FetchObject(ctx, vmKey, v.client, &v1alpha2.VirtualMachine{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachine`: %w", err) } 
diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go index 383d8b128f..a85d8cffd0 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmbda_restorer.go @@ -26,16 +26,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineBlockDeviceAttachmentsOverrideValidator struct { - vmbda *virtv2.VirtualMachineBlockDeviceAttachment + vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment client client.Client vmRestoreUID string } -func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2.VirtualMachineBlockDeviceAttachment, client client.Client, vmRestoreUID string) *VirtualMachineBlockDeviceAttachmentsOverrideValidator { +func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *v1alpha2.VirtualMachineBlockDeviceAttachment, client client.Client, vmRestoreUID string) *VirtualMachineBlockDeviceAttachmentsOverrideValidator { if vmbdaTmpl.Annotations != nil { vmbdaTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -43,7 +43,7 @@ func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2. vmbdaTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualMachineBlockDeviceAttachmentsOverrideValidator{ - vmbda: &virtv2.VirtualMachineBlockDeviceAttachment{ + vmbda: &v1alpha2.VirtualMachineBlockDeviceAttachment{ TypeMeta: metav1.TypeMeta{ Kind: vmbdaTmpl.Kind, APIVersion: vmbdaTmpl.APIVersion, @@ -61,23 +61,23 @@ func NewVirtualMachineBlockDeviceAttachmentsOverrideValidator(vmbdaTmpl *virtv2. 
} } -func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vmbda.Name = overrideName(v.vmbda.Kind, v.vmbda.Name, rules) - v.vmbda.Spec.VirtualMachineName = overrideName(virtv2.VirtualMachineKind, v.vmbda.Spec.VirtualMachineName, rules) + v.vmbda.Spec.VirtualMachineName = overrideName(v1alpha2.VirtualMachineKind, v.vmbda.Spec.VirtualMachineName, rules) switch v.vmbda.Spec.BlockDeviceRef.Kind { - case virtv2.VMBDAObjectRefKindVirtualDisk: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.VirtualDiskKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) - case virtv2.VMBDAObjectRefKindClusterVirtualImage: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.ClusterVirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) - case virtv2.VMBDAObjectRefKindVirtualImage: - v.vmbda.Spec.BlockDeviceRef.Name = overrideName(virtv2.VirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindVirtualDisk: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.VirtualDiskKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindClusterVirtualImage: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.ClusterVirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) + case v1alpha2.VMBDAObjectRefKindVirtualImage: + v.vmbda.Spec.BlockDeviceRef.Name = overrideName(v1alpha2.VirtualImageKind, v.vmbda.Spec.BlockDeviceRef.Name, rules) } } func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) Validate(ctx context.Context) error { vmbdaKey := types.NamespacedName{Namespace: v.vmbda.Namespace, Name: v.vmbda.Name} - existed, err := object.FetchObject(ctx, vmbdaKey, v.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + existed, err := object.FetchObject(ctx, vmbdaKey, v.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return err } @@ -98,7 +98,7 @@ func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) ValidateWithForc func (v *VirtualMachineBlockDeviceAttachmentsOverrideValidator) ProcessWithForce(ctx context.Context) error { vmbdaKey := types.NamespacedName{Namespace: v.vmbda.Namespace, Name: v.vmbda.Name} - vmbdaObj, err := object.FetchObject(ctx, vmbdaKey, v.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + vmbdaObj, err := object.FetchObject(ctx, vmbdaKey, v.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return fmt.Errorf("failed to fetch the `VirtualMachineBlockDeviceAttachment`: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go index 5265a52238..958f44b068 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmip_restorer.go @@ -28,16 +28,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineIPAddressOverrideValidator struct { - vmip *virtv2.VirtualMachineIPAddress + vmip *v1alpha2.VirtualMachineIPAddress 
client client.Client vmRestoreUID string } -func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachineIPAddress, client client.Client, vmRestoreUID string) *VirtualMachineIPAddressOverrideValidator { +func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *v1alpha2.VirtualMachineIPAddress, client client.Client, vmRestoreUID string) *VirtualMachineIPAddressOverrideValidator { if vmipTmpl.Annotations != nil { vmipTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -45,7 +45,7 @@ func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachine vmipTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualMachineIPAddressOverrideValidator{ - vmip: &virtv2.VirtualMachineIPAddress{ + vmip: &v1alpha2.VirtualMachineIPAddress{ TypeMeta: metav1.TypeMeta{ Kind: vmipTmpl.Kind, APIVersion: vmipTmpl.APIVersion, @@ -64,13 +64,13 @@ func NewVirtualMachineIPAddressOverrideValidator(vmipTmpl *virtv2.VirtualMachine } } -func (v *VirtualMachineIPAddressOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineIPAddressOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vmip.Name = overrideName(v.vmip.Kind, v.vmip.Name, rules) } func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) error { vmipKey := types.NamespacedName{Namespace: v.vmip.Namespace, Name: v.vmip.Name} - existed, err := object.FetchObject(ctx, vmipKey, v.client, &virtv2.VirtualMachineIPAddress{}) + existed, err := object.FetchObject(ctx, vmipKey, v.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -80,7 +80,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) return nil } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err = v.client.List(ctx, &vmips, &client.ListOptions{ Namespace: v.vmip.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMIPByAddress, v.vmip.Spec.StaticIP), @@ -103,7 +103,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) return nil } - if existed.Status.Phase == virtv2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine ip address %q is %w and cannot be used for the restored virtual machine", vmipKey.Name, ErrAlreadyInUse) } @@ -112,7 +112,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) Validate(ctx context.Context) func (v *VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context.Context) error { vmipKey := types.NamespacedName{Namespace: v.vmip.Namespace, Name: v.vmip.Name} - existed, err := object.FetchObject(ctx, vmipKey, v.client, &virtv2.VirtualMachineIPAddress{}) + existed, err := object.FetchObject(ctx, vmipKey, v.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -124,7 +124,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context return nil } - var vmips virtv2.VirtualMachineIPAddressList + var vmips v1alpha2.VirtualMachineIPAddressList err = v.client.List(ctx, &vmips, &client.ListOptions{ Namespace: v.vmip.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMIPByAddress, v.vmip.Spec.StaticIP), @@ -143,11 +143,11 @@ func (v *VirtualMachineIPAddressOverrideValidator) ValidateWithForce(ctx context return nil } - if existed.Status.Phase == 
virtv2.VirtualMachineIPAddressPhaseAttached && existed.Status.VirtualMachine == vmName { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached && existed.Status.VirtualMachine == vmName { return ErrAlreadyExists } - if existed.Status.Phase == virtv2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine ip address %q is %w and cannot be used for the restored virtual machine", vmipKey.Name, ErrAlreadyInUse) } @@ -159,7 +159,7 @@ func (v *VirtualMachineIPAddressOverrideValidator) ProcessWithForce(ctx context. } func (v *VirtualMachineIPAddressOverrideValidator) Object() client.Object { - return &virtv2.VirtualMachineIPAddress{ + return &v1alpha2.VirtualMachineIPAddress{ TypeMeta: metav1.TypeMeta{ Kind: v.vmip.Kind, APIVersion: v.vmip.APIVersion, diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmmac_restorer.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmmac_restorer.go index 9e1c56fb28..78f38ca38e 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmmac_restorer.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/restorer/vmmac_restorer.go @@ -28,16 +28,16 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineMACAddressOverrideValidator struct { - vmmac *virtv2.VirtualMachineMACAddress + vmmac *v1alpha2.VirtualMachineMACAddress client client.Client vmRestoreUID string } -func NewVirtualMachineMACAddressOverrideValidator(vmmacTmpl *virtv2.VirtualMachineMACAddress, client client.Client, vmRestoreUID string) *VirtualMachineMACAddressOverrideValidator { +func NewVirtualMachineMACAddressOverrideValidator(vmmacTmpl *v1alpha2.VirtualMachineMACAddress, client client.Client, vmRestoreUID string) *VirtualMachineMACAddressOverrideValidator { if vmmacTmpl.Annotations != nil { vmmacTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } else { @@ -45,7 +45,7 @@ func NewVirtualMachineMACAddressOverrideValidator(vmmacTmpl *virtv2.VirtualMachi vmmacTmpl.Annotations[annotations.AnnVMRestore] = vmRestoreUID } return &VirtualMachineMACAddressOverrideValidator{ - vmmac: &virtv2.VirtualMachineMACAddress{ + vmmac: &v1alpha2.VirtualMachineMACAddress{ TypeMeta: metav1.TypeMeta{ Kind: vmmacTmpl.Kind, APIVersion: vmmacTmpl.APIVersion, @@ -64,13 +64,13 @@ func NewVirtualMachineMACAddressOverrideValidator(vmmacTmpl *virtv2.VirtualMachi } } -func (v *VirtualMachineMACAddressOverrideValidator) Override(rules []virtv2.NameReplacement) { +func (v *VirtualMachineMACAddressOverrideValidator) Override(rules []v1alpha2.NameReplacement) { v.vmmac.Name = overrideName(v.vmmac.Kind, v.vmmac.Name, rules) } func (v *VirtualMachineMACAddressOverrideValidator) Validate(ctx context.Context) error { vmmacKey := types.NamespacedName{Namespace: v.vmmac.Namespace, Name: v.vmmac.Name} - existed, err := object.FetchObject(ctx, vmmacKey, v.client, &virtv2.VirtualMachineMACAddress{}) + existed, err := object.FetchObject(ctx, vmmacKey, v.client, &v1alpha2.VirtualMachineMACAddress{}) if err != nil { return err 
} @@ -80,7 +80,7 @@ func (v *VirtualMachineMACAddressOverrideValidator) Validate(ctx context.Context return nil } - var vmmacs virtv2.VirtualMachineMACAddressList + var vmmacs v1alpha2.VirtualMachineMACAddressList err = v.client.List(ctx, &vmmacs, &client.ListOptions{ Namespace: v.vmmac.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMMACByAddress, v.vmmac.Spec.Address), @@ -99,7 +99,7 @@ func (v *VirtualMachineMACAddressOverrideValidator) Validate(ctx context.Context return nil } - if existed.Status.Phase == virtv2.VirtualMachineMACAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineMACAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine mac address %q is %w and cannot be used for the restored virtual machine", vmmacKey.Name, ErrAlreadyInUse) } @@ -108,7 +108,7 @@ func (v *VirtualMachineMACAddressOverrideValidator) Validate(ctx context.Context func (v *VirtualMachineMACAddressOverrideValidator) ValidateWithForce(ctx context.Context) error { vmmacKey := types.NamespacedName{Namespace: v.vmmac.Namespace, Name: v.vmmac.Name} - existed, err := object.FetchObject(ctx, vmmacKey, v.client, &virtv2.VirtualMachineMACAddress{}) + existed, err := object.FetchObject(ctx, vmmacKey, v.client, &v1alpha2.VirtualMachineMACAddress{}) if err != nil { return err } @@ -120,7 +120,7 @@ func (v *VirtualMachineMACAddressOverrideValidator) ValidateWithForce(ctx contex return nil } - var vmmacs virtv2.VirtualMachineMACAddressList + var vmmacs v1alpha2.VirtualMachineMACAddressList err = v.client.List(ctx, &vmmacs, &client.ListOptions{ Namespace: v.vmmac.Namespace, FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMMACByAddress, v.vmmac.Spec.Address), @@ -139,11 +139,11 @@ func (v *VirtualMachineMACAddressOverrideValidator) ValidateWithForce(ctx contex return nil } - if existed.Status.Phase == virtv2.VirtualMachineMACAddressPhaseAttached && existed.Status.VirtualMachine == vmName { + if existed.Status.Phase == v1alpha2.VirtualMachineMACAddressPhaseAttached && existed.Status.VirtualMachine == vmName { return ErrAlreadyExists } - if existed.Status.Phase == virtv2.VirtualMachineMACAddressPhaseAttached || existed.Status.VirtualMachine != "" { + if existed.Status.Phase == v1alpha2.VirtualMachineMACAddressPhaseAttached || existed.Status.VirtualMachine != "" { return fmt.Errorf("the virtual machine mac address %q is %w and cannot be used for the restored virtual machine", vmmacKey.Name, ErrAlreadyInUse) } @@ -155,7 +155,7 @@ func (v *VirtualMachineMACAddressOverrideValidator) ProcessWithForce(_ context.C } func (v *VirtualMachineMACAddressOverrideValidator) Object() client.Object { - return &virtv2.VirtualMachineMACAddress{ + return &v1alpha2.VirtualMachineMACAddress{ TypeMeta: metav1.TypeMeta{ Kind: v.vmmac.Kind, APIVersion: v.vmmac.APIVersion, diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go index 80aea3ac35..06b71f6b07 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/vm_snapshot_ready_to_use.go @@ -27,7 +27,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 
"github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition" ) @@ -42,7 +42,7 @@ func NewVirtualMachineSnapshotReadyToUseHandler(client client.Client) *VirtualMa } } -func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) { +func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmrestorecondition.VirtualMachineSnapshotReadyToUseType) defer func() { conditions.SetCondition(cb.Generation(vmRestore.Generation), &vmRestore.Status.Conditions) }() @@ -56,7 +56,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR } vmSnapshotKey := types.NamespacedName{Name: vmRestore.Spec.VirtualMachineSnapshotName, Namespace: vmRestore.Namespace} - vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &virtv2.VirtualMachineSnapshot{}) + vmSnapshot, err := object.FetchObject(ctx, vmSnapshotKey, h.client, &v1alpha2.VirtualMachineSnapshot{}) if err != nil { return reconcile.Result{}, err } @@ -78,7 +78,7 @@ func (h VirtualMachineSnapshotReadyToUseHandler) Handle(ctx context.Context, vmR } vmSnapshotReady, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) - if vmSnapshotReady.Status != metav1.ConditionTrue || vmSnapshot.Status.Phase != virtv2.VirtualMachineSnapshotPhaseReady { + if vmSnapshotReady.Status != metav1.ConditionTrue || vmSnapshot.Status.Phase != v1alpha2.VirtualMachineSnapshotPhaseReady { cb. Status(metav1.ConditionFalse). Reason(vmrestorecondition.VirtualMachineSnapshotNotReady). diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go index b85f60b1cf..96d505290c 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/kvvm_watcher.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // This watcher is required for monitoring the statuses of InternalVirtualMachine disks, which must update their PVC during the restoration process. 
@@ -57,7 +57,7 @@ func (w InternalVirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller } func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm *virtv1.VirtualMachine) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: kvvm.GetNamespace(), }) @@ -68,7 +68,7 @@ func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm for _, vmRestore := range vmRestores.Items { vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName - var vmSnapshot virtv2.VirtualMachineSnapshot + var vmSnapshot v1alpha2.VirtualMachineSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: kvvm.GetNamespace()}, &vmSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err)) @@ -88,7 +88,7 @@ func (w InternalVirtualMachineWatcher) enqueueRequests(ctx context.Context, kvvm return } -func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName string, nameReplacements []virtv2.NameReplacement) bool { +func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool { var ( isNameMatch bool isNameReplacementMatch bool @@ -97,7 +97,7 @@ func (w InternalVirtualMachineWatcher) isKvvmNameMatch(kvvmName, restoredName st isNameMatch = kvvmName == restoredName for _, nr := range nameReplacements { - if nr.From.Kind != virtv2.VirtualMachineKind { + if nr.From.Kind != v1alpha2.VirtualMachineKind { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vd_watcher.go index fa35ac0416..c9673b4e2c 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vd_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vd_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskWatcher struct { @@ -44,7 +44,7 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -53,8 +53,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller return nil } -func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList +func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: vd.GetNamespace(), }) @@ -65,14 +65,14 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt for _, vmRestore := range vmRestores.Items { vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName - var vmSnapshot virtv2.VirtualMachineSnapshot + var vmSnapshot 
v1alpha2.VirtualMachineSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vd.GetNamespace()}, &vmSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err)) return } for _, vdsnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { - var vdSnapshot virtv2.VirtualDiskSnapshot + var vdSnapshot v1alpha2.VirtualDiskSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vdsnapshotName, Namespace: vd.GetNamespace()}, &vdSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vdSnapshot: %s", err)) @@ -93,7 +93,7 @@ func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.Virt return } -func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameReplacements []virtv2.NameReplacement) bool { +func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool { var ( isNameMatch bool isNameReplacementMatch bool @@ -102,7 +102,7 @@ func (w VirtualDiskWatcher) isVdNameMatch(vdName, restoredName string, nameRepla isNameMatch = vdName == restoredName for _, nr := range nameReplacements { - if nr.From.Kind != virtv2.VirtualDiskKind { + if nr.From.Kind != v1alpha2.VirtualDiskKind { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go index b424ae3358..d67c7cd048 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vm_watcher.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineWatcher struct { @@ -44,7 +44,7 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -53,8 +53,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) { + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: vm.GetNamespace(), }) @@ -65,7 +65,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V for _, vmRestore := range vmRestores.Items { vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName - var vmSnapshot virtv2.VirtualMachineSnapshot + var vmSnapshot v1alpha2.VirtualMachineSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vm.GetNamespace()}, &vmSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err)) @@ -85,7 +85,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V return } -func (w VirtualMachineWatcher) 
isVMNameMatch(vmName, restoredName string, nameReplacements []virtv2.NameReplacement) bool { +func (w VirtualMachineWatcher) isVMNameMatch(vmName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool { var ( isNameMatch bool isNameReplacementMatch bool @@ -94,7 +94,7 @@ func (w VirtualMachineWatcher) isVMNameMatch(vmName, restoredName string, nameRe isNameMatch = vmName == restoredName for _, nr := range nameReplacements { - if nr.From.Kind != virtv2.VirtualMachineKind { + if nr.From.Kind != v1alpha2.VirtualMachineKind { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go index 31c3a975c2..09277c86a9 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmbda_watcher.go @@ -32,7 +32,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common/object" vmrestore "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineBlockDeviceAttachmentWatcher struct { @@ -49,7 +49,7 @@ func NewVirtualMachineBlockDeviceAttachmentWatcher(client client.Client, restore func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineBlockDeviceAttachment{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineBlockDeviceAttachment{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), ), ); err != nil { @@ -58,8 +58,8 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) Watch(mgr manager.Manager, c return nil } -func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.Context, vmbda *virtv2.VirtualMachineBlockDeviceAttachment) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList +func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context.Context, vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) (requests []reconcile.Request) { + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: vmbda.GetNamespace(), }) @@ -70,7 +70,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context. for _, vmRestore := range vmRestores.Items { vmSnapshotName := vmRestore.Spec.VirtualMachineSnapshotName - var vmSnapshot virtv2.VirtualMachineSnapshot + var vmSnapshot v1alpha2.VirtualMachineSnapshot err := w.client.Get(ctx, types.NamespacedName{Name: vmSnapshotName, Namespace: vmbda.GetNamespace()}, &vmSnapshot) if err != nil { log.Error(fmt.Sprintf("failed to get vmSnapshot: %s", err)) @@ -110,7 +110,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) enqueueRequests(ctx context. 
return } -func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName, restoredName string, nameReplacements []virtv2.NameReplacement) bool { +func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName, restoredName string, nameReplacements []v1alpha2.NameReplacement) bool { var ( isNameMatch bool isNameReplacementMatch bool @@ -119,7 +119,7 @@ func (w VirtualMachineBlockDeviceAttachmentWatcher) isVmbdaNameMatch(vmbdaName, isNameMatch = vmbdaName == restoredName for _, nr := range nameReplacements { - if nr.From.Kind != virtv2.VirtualMachineBlockDeviceAttachmentKind { + if nr.From.Kind != v1alpha2.VirtualMachineBlockDeviceAttachmentKind { continue } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go index 5b896a0e4d..ce112c96e3 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmrestore_watcher.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineRestoreWatcher struct { @@ -42,10 +42,10 @@ func NewVirtualMachineRestoreWatcher(client client.Client) *VirtualMachineRestor func (w VirtualMachineRestoreWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineRestore{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineRestore]{}, - predicate.TypedFuncs[*virtv2.VirtualMachineRestore]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineRestore]) bool { + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineRestore{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineRestore]{}, + predicate.TypedFuncs[*v1alpha2.VirtualMachineRestore]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineRestore]) bool { oldPhase := e.ObjectOld.Status.Phase newPhase := e.ObjectNew.Status.Phase diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go index 5af989c202..f0eb723525 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/internal/watcher/vmsnapshot_watcher.go @@ -33,7 +33,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineSnapshotWatcher struct { @@ -48,10 +48,10 @@ func NewVirtualMachineSnapshotWatcher(client client.Client) *VirtualMachineSnaps func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineSnapshot{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{ - UpdateFunc: func(e 
event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -62,8 +62,8 @@ func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller return nil } -func (w VirtualMachineSnapshotWatcher) enqueueRequests(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (requests []reconcile.Request) { - var vmRestores virtv2.VirtualMachineRestoreList +func (w VirtualMachineSnapshotWatcher) enqueueRequests(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (requests []reconcile.Request) { + var vmRestores v1alpha2.VirtualMachineRestoreList err := w.client.List(ctx, &vmRestores, &client.ListOptions{ Namespace: vmSnapshot.GetNamespace(), FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMRestoreByVMSnapshot, vmSnapshot.GetName()), diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go index 9b9d5f028d..45922cfaaa 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_controller.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ControllerName = "vmrestore-controller" @@ -63,7 +63,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.VirtualMachineRestore{}). + For(&v1alpha2.VirtualMachineRestore{}). WithValidator(NewValidator()). 
Complete(); err != nil { return err diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go index 6a676c73ac..77840ed10c 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_reconciler.go @@ -29,11 +29,11 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/reconciler" "github.com/deckhouse/virtualization-controller/pkg/controller/service/restorer" "github.com/deckhouse/virtualization-controller/pkg/controller/vmrestore/internal/watcher" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Handler interface { - Handle(ctx context.Context, vmRestore *virtv2.VirtualMachineRestore) (reconcile.Result, error) + Handle(ctx context.Context, vmRestore *v1alpha2.VirtualMachineRestore) (reconcile.Result, error) } type Watcher interface { @@ -96,10 +96,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr return nil } -func (r *Reconciler) factory() *virtv2.VirtualMachineRestore { - return &virtv2.VirtualMachineRestore{} +func (r *Reconciler) factory() *v1alpha2.VirtualMachineRestore { + return &v1alpha2.VirtualMachineRestore{} } -func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineRestore) virtv2.VirtualMachineRestoreStatus { +func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineRestore) v1alpha2.VirtualMachineRestoreStatus { return obj.Status } diff --git a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go index cab18c7665..2a0bc66156 100644 --- a/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vmrestore/vmrestore_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type Validator struct{} @@ -40,12 +40,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, _ runtime.Object) (admis } func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - oldVMRestore, ok := oldObj.(*virtv2.VirtualMachineRestore) + oldVMRestore, ok := oldObj.(*v1alpha2.VirtualMachineRestore) if !ok { return nil, fmt.Errorf("expected an old VirtualMachineRestore but got a %T", newObj) } - newVMRestore, ok := newObj.(*virtv2.VirtualMachineRestore) + newVMRestore, ok := newObj.(*v1alpha2.VirtualMachineRestore) if !ok { return nil, fmt.Errorf("expected a new VirtualMachineRestore but got a %T", newObj) } diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go index 2991ec22c2..6690332a57 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/interfaces.go @@ -21,25 +21,25 @@ import ( corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) //go:generate go tool moq -rm 
-out mock.go . Storer Snapshotter type Storer interface { - Store(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error) + Store(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error) } type Snapshotter interface { GetSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) - GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) - GetVirtualDisk(ctx context.Context, name, namespace string) (*virtv2.VirtualDisk, error) + GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) + GetVirtualDisk(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) GetPersistentVolumeClaim(ctx context.Context, name, namespace string) (*corev1.PersistentVolumeClaim, error) - GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*virtv2.VirtualDiskSnapshot, error) - CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) + GetVirtualDiskSnapshot(ctx context.Context, name, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) + CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) Freeze(ctx context.Context, name, namespace string) error Unfreeze(ctx context.Context, name, namespace string) error - IsFrozen(vm *virtv2.VirtualMachine) bool - CanFreeze(vm *virtv2.VirtualMachine) bool - CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) + IsFrozen(vm *v1alpha2.VirtualMachine) bool + CanFreeze(vm *v1alpha2.VirtualMachine) bool + CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) } diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go index 0b8cd2c445..42219dfdbd 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/service" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" "github.com/deckhouse/virtualization-controller/pkg/logger" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdscondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" @@ -57,7 +57,7 @@ func NewLifeCycleHandler(recorder eventrecord.EventRecorderLogger, snapshotter S } } -func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error) { +func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error) { log := logger.FromContext(ctx).With(logger.SlogHandler("lifecycle")) cb := conditions.NewConditionBuilder(vmscondition.VirtualMachineSnapshotReadyType) @@ -74,7 +74,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual } if vmSnapshot.DeletionTimestamp != nil { - vmSnapshot.Status.Phase = 
virtv2.VirtualMachineSnapshotPhaseTerminating + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseTerminating cb. Status(metav1.ConditionUnknown). Reason(conditions.ReasonUnknown). @@ -91,15 +91,15 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual switch vmSnapshot.Status.Phase { case "": - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending - case virtv2.VirtualMachineSnapshotPhaseFailed: + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending + case v1alpha2.VirtualMachineSnapshotPhaseFailed: readyCondition, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) cb. Status(readyCondition.Status). Reason(conditions.CommonReason(readyCondition.Reason)). Message(readyCondition.Message) return reconcile.Result{}, nil - case virtv2.VirtualMachineSnapshotPhaseReady: + case v1alpha2.VirtualMachineSnapshotPhaseReady: // Ensure vd snapshots aren't lost. var lostVDSnapshots []string for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { @@ -112,20 +112,20 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual switch { case vdSnapshot == nil: lostVDSnapshots = append(lostVDSnapshots, vdSnapshotName) - case vdSnapshot.Status.Phase != virtv2.VirtualDiskSnapshotPhaseReady: + case vdSnapshot.Status.Phase != v1alpha2.VirtualDiskSnapshotPhaseReady: log.Error("expected virtual disk snapshot to be ready, please report a bug", "vdSnapshotPhase", vdSnapshot.Status.Phase) } } if len(lostVDSnapshots) > 0 { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed cb.Status(metav1.ConditionFalse).Reason(vmscondition.VirtualDiskSnapshotLost) if len(lostVDSnapshots) == 1 { msg := fmt.Sprintf("The underlying virtual disk snapshot (%s) is lost.", lostVDSnapshots[0]) h.recorder.Event( vmSnapshot, corev1.EventTypeWarning, - virtv2.ReasonVMSnapshottingFailed, + v1alpha2.ReasonVMSnapshottingFailed, msg, ) cb.Message(msg) @@ -134,7 +134,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual h.recorder.Event( vmSnapshot, corev1.EventTypeWarning, - virtv2.ReasonVMSnapshottingFailed, + v1alpha2.ReasonVMSnapshottingFailed, msg, ) cb.Message(msg) @@ -142,7 +142,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual return reconcile.Result{}, nil } - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseReady + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseReady cb. Status(metav1.ConditionTrue). Reason(vmscondition.VirtualMachineSnapshotReady). @@ -152,12 +152,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual virtualMachineReadyCondition, _ := conditions.GetCondition(vmscondition.VirtualMachineReadyType, vmSnapshot.Status.Conditions) if vm == nil || virtualMachineReadyCondition.Status != metav1.ConditionTrue { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending msg := fmt.Sprintf("Waiting for the virtual machine %q to be ready for snapshotting.", vmSnapshot.Spec.VirtualMachineName) h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingPending, + v1alpha2.ReasonVMSnapshottingPending, msg, ) cb. 
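The handler hunks above all follow the same condition-publishing discipline: build one ConditionBuilder up front, mutate it along whichever branch runs, and let a deferred SetCondition write the final state. A condensed sketch of that pattern, using only names that appear in this patch; the Generation call is modeled on the vm_snapshot_ready_to_use.go hunk and assumed to apply here as well.

package internal

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
	"github.com/deckhouse/virtualization/api/core/v1alpha2"
	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition"
)

// handleSketch condenses the Handle methods in this patch: whatever branch the
// handler takes, the deferred SetCondition records the condition it ended up with.
func handleSketch(vmSnapshot *v1alpha2.VirtualMachineSnapshot) {
	cb := conditions.NewConditionBuilder(vmscondition.VirtualMachineSnapshotReadyType)
	defer func() {
		conditions.SetCondition(cb.Generation(vmSnapshot.Generation), &vmSnapshot.Status.Conditions)
	}()

	if vmSnapshot.DeletionTimestamp != nil {
		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseTerminating
		cb.Status(metav1.ConditionUnknown).Reason(conditions.ReasonUnknown)
		return
	}
	// ... the remaining branches drive Pending -> InProgress -> Ready/Failed ...
}
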
@@ -172,12 +172,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual switch { case err == nil: case errors.Is(err, ErrBlockDevicesNotReady), errors.Is(err, ErrVirtualDiskNotReady), errors.Is(err, ErrVirtualDiskResizing): - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed msg := service.CapitalizeFirstLetter(err.Error() + ".") h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingFailed, + v1alpha2.ReasonVMSnapshottingFailed, msg, ) cb. @@ -194,11 +194,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual isAwaitingConsistency := needToFreeze && !h.snapshotter.CanFreeze(vm) && vmSnapshot.Spec.RequiredConsistency if isAwaitingConsistency { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhasePending + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending msg := fmt.Sprintf( "The snapshotting of virtual machine %q might result in an inconsistent snapshot: "+ "waiting for the virtual machine to be %s", - vm.Name, virtv2.MachineStopped, + vm.Name, v1alpha2.MachineStopped, ) agentReadyCondition, _ := conditions.GetCondition(vmcondition.TypeAgentReady, vm.Status.Conditions) @@ -214,7 +214,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingPending, + v1alpha2.ReasonVMSnapshottingPending, msg, ) cb. @@ -224,12 +224,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual return reconcile.Result{}, nil } - if vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhasePending { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress + if vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhasePending { + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingStarted, + v1alpha2.ReasonVMSnapshottingStarted, "Virtual machine snapshotting process is started.", ) cb. @@ -251,7 +251,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual } if hasFrozen { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress cb. Status(metav1.ConditionFalse). Reason(vmscondition.FileSystemFreezing). 
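For the freeze handling in the surrounding hunks, the Snapshotter interface from interfaces.go (also touched by this patch) is the whole surface. A simplified sketch of the decision flow, reconstructed from the Pending/InProgress branches; the needToFreeze computation and the snapshot-taking step are elided, so treat this as an outline rather than the handler's actual control flow.

package internal

import (
	"context"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

// freezeFlowSketch condenses the lifecycle handler's consistency gate: stay
// Pending when a required-consistency snapshot cannot freeze the guest,
// otherwise freeze, take the disk snapshots, and unfreeze.
func freezeFlowSketch(ctx context.Context, s Snapshotter, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) error {
	needToFreeze := !s.IsFrozen(vm) // simplified: the real handler also inspects the block devices

	if needToFreeze && !s.CanFreeze(vm) && vmSnapshot.Spec.RequiredConsistency {
		// Snapshotting now might produce an inconsistent result.
		vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhasePending
		return nil
	}

	if needToFreeze {
		if err := s.Freeze(ctx, vm.Name, vm.Namespace); err != nil {
			return err
		}
	}

	// ... ensureVirtualDiskSnapshots, wait for the disk snapshots to be Ready ...

	return s.Unfreeze(ctx, vm.Name, vm.Namespace)
}
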
@@ -274,12 +274,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual switch { case err == nil: case errors.Is(err, ErrCannotTakeSnapshot): - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed msg := service.CapitalizeFirstLetter(err.Error()) h.recorder.Event( vmSnapshot, corev1.EventTypeWarning, - virtv2.ReasonVMSnapshottingFailed, + v1alpha2.ReasonVMSnapshottingFailed, msg, ) if !strings.HasSuffix(msg, ".") { @@ -306,11 +306,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual if readyCount != len(vdSnapshots) { log.Debug("Waiting for the virtual disk snapshots to be taken for the block devices of the virtual machine") - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingInProgress, + v1alpha2.ReasonVMSnapshottingInProgress, msg, ) cb. @@ -322,7 +322,7 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingInProgress, + v1alpha2.ReasonVMSnapshottingInProgress, msg, ) } @@ -349,11 +349,11 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual // 9. Move to Ready phase. log.Debug("The virtual disk snapshots are taken: the virtual machine snapshot is Ready now", "unfrozen", unfrozen) - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseReady + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseReady h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingCompleted, + v1alpha2.ReasonVMSnapshottingCompleted, "Virtual machine snapshotting process is completed.", ) cb. @@ -364,12 +364,12 @@ func (h LifeCycleHandler) Handle(ctx context.Context, vmSnapshot *virtv2.Virtual return reconcile.Result{}, nil } -func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuilder, vmSnapshot *virtv2.VirtualMachineSnapshot, err error) { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseFailed +func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuilder, vmSnapshot *v1alpha2.VirtualMachineSnapshot, err error) { + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseFailed h.recorder.Event( vmSnapshot, corev1.EventTypeWarning, - virtv2.ReasonVMSnapshottingFailed, + v1alpha2.ReasonVMSnapshottingFailed, err.Error()+".", ) cb. @@ -378,11 +378,11 @@ func (h LifeCycleHandler) setPhaseConditionToFailed(cb *conditions.ConditionBuil Message(service.CapitalizeFirstLetter(err.Error()) + ".") } -func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) { +func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) { vmSnapshot.Status.VirtualDiskSnapshotNames = nil for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind != virtv2.DiskDevice { + if bdr.Kind != v1alpha2.DiskDevice { continue } @@ -395,8 +395,8 @@ func (h LifeCycleHandler) fillStatusVirtualDiskSnapshotNames(vmSnapshot *virtv2. 
var ErrCannotTakeSnapshot = errors.New("cannot take snapshot") -func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) ([]*virtv2.VirtualDiskSnapshot, error) { - vdSnapshots := make([]*virtv2.VirtualDiskSnapshot, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) +func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) ([]*v1alpha2.VirtualDiskSnapshot, error) { + vdSnapshots := make([]*v1alpha2.VirtualDiskSnapshot, 0, len(vmSnapshot.Status.VirtualDiskSnapshotNames)) for _, vdSnapshotName := range vmSnapshot.Status.VirtualDiskSnapshotNames { vdSnapshot, err := h.snapshotter.GetVirtualDiskSnapshot(ctx, vdSnapshotName, vmSnapshot.Namespace) @@ -410,7 +410,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap return nil, fmt.Errorf("failed to get VirtualDisk's name from VirtualDiskSnapshot's name %q", vdSnapshotName) } - var vd *virtv2.VirtualDisk + var vd *v1alpha2.VirtualDisk vd, err = h.snapshotter.GetVirtualDisk(ctx, vdName, vmSnapshot.Namespace) if err != nil { return nil, err @@ -434,10 +434,10 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap return nil, fmt.Errorf("the persistent volume claim %q doesn't have the storage class name", pvc.Name) } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ TypeMeta: metav1.TypeMeta{ - Kind: virtv2.VirtualDiskSnapshotKind, - APIVersion: virtv2.Version, + Kind: v1alpha2.VirtualDiskSnapshotKind, + APIVersion: v1alpha2.Version, }, ObjectMeta: metav1.ObjectMeta{ Name: vdSnapshotName, @@ -446,7 +446,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap service.MakeOwnerReference(vmSnapshot), }, }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ VirtualDiskName: vdName, RequiredConsistency: vmSnapshot.Spec.RequiredConsistency, }, @@ -459,7 +459,7 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap } vdSnapshotReady, _ := conditions.GetCondition(vdscondition.VirtualDiskSnapshotReadyType, vdSnapshot.Status.Conditions) - if vdSnapshotReady.Reason == vdscondition.VirtualDiskSnapshotFailed.String() || vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseFailed { + if vdSnapshotReady.Reason == vdscondition.VirtualDiskSnapshotFailed.String() || vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseFailed { return nil, fmt.Errorf("the virtual disk snapshot %q is failed: %w. 
%s", vdSnapshot.Name, ErrCannotTakeSnapshot, vdSnapshotReady.Message) } @@ -469,10 +469,10 @@ func (h LifeCycleHandler) ensureVirtualDiskSnapshots(ctx context.Context, vmSnap return vdSnapshots, nil } -func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*virtv2.VirtualDiskSnapshot) int { +func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*v1alpha2.VirtualDiskSnapshot) int { var readyCount int for _, vdSnapshot := range vdSnapshots { - if vdSnapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady { + if vdSnapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady { readyCount++ } } @@ -480,7 +480,7 @@ func (h LifeCycleHandler) countReadyVirtualDiskSnapshots(vdSnapshots []*virtv2.V return readyCount } -func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*virtv2.VirtualDiskSnapshot) bool { +func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*v1alpha2.VirtualDiskSnapshot) bool { for _, vdSnapshot := range vdSnapshots { if vdSnapshot.Status.Consistent == nil || !*vdSnapshot.Status.Consistent { return false @@ -490,12 +490,12 @@ func (h LifeCycleHandler) areVirtualDiskSnapshotsConsistent(vdSnapshots []*virtv return true } -func (h LifeCycleHandler) needToFreeze(vm *virtv2.VirtualMachine, requiredConsistency bool) bool { +func (h LifeCycleHandler) needToFreeze(vm *v1alpha2.VirtualMachine, requiredConsistency bool) bool { if !requiredConsistency { return false } - if vm.Status.Phase == virtv2.MachineStopped { + if vm.Status.Phase == v1alpha2.MachineStopped { return false } @@ -506,8 +506,8 @@ func (h LifeCycleHandler) needToFreeze(vm *virtv2.VirtualMachine, requiredConsis return true } -func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (bool, error) { - if vm.Status.Phase != virtv2.MachineRunning { +func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (bool, error) { + if vm.Status.Phase != v1alpha2.MachineRunning { return false, errors.New("cannot freeze not Running virtual machine") } @@ -519,15 +519,15 @@ func (h LifeCycleHandler) freezeVirtualMachine(ctx context.Context, vm *virtv2.V h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingFrozen, + v1alpha2.ReasonVMSnapshottingFrozen, fmt.Sprintf("The file system of the virtual machine %q is frozen.", vm.Name), ) return true, nil } -func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) (bool, error) { - if vm == nil || vm.Status.Phase != virtv2.MachineRunning || !h.snapshotter.IsFrozen(vm) { +func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) (bool, error) { + if vm == nil || vm.Status.Phase != v1alpha2.MachineRunning || !h.snapshotter.IsFrozen(vm) { return false, nil } @@ -548,7 +548,7 @@ func (h LifeCycleHandler) unfreezeVirtualMachineIfCan(ctx context.Context, vmSna h.recorder.Event( vmSnapshot, corev1.EventTypeNormal, - virtv2.ReasonVMSnapshottingThawed, + v1alpha2.ReasonVMSnapshottingThawed, fmt.Sprintf("The file system of the virtual machine %q is thawed.", vm.Name), ) @@ -561,14 +561,14 @@ var ( ErrVirtualDiskResizing = errors.New("virtual disk is in the process of resizing") ) -func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm 
*virtv2.VirtualMachine) error { +func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm *v1alpha2.VirtualMachine) error { bdReady, _ := conditions.GetCondition(vmcondition.TypeBlockDevicesReady, vm.Status.Conditions) if bdReady.Status != metav1.ConditionTrue { return fmt.Errorf("%w: waiting for the block devices of the virtual machine %q to be ready", ErrBlockDevicesNotReady, vm.Name) } for _, bdr := range vm.Status.BlockDeviceRefs { - if bdr.Kind != virtv2.DiskDevice { + if bdr.Kind != v1alpha2.DiskDevice { continue } @@ -577,8 +577,8 @@ func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm * return err } - if vd.Status.Phase != virtv2.DiskReady { - return fmt.Errorf("%w: waiting for the virtual disk %q to be %s", ErrVirtualDiskNotReady, vd.Name, virtv2.DiskReady) + if vd.Status.Phase != v1alpha2.DiskReady { + return fmt.Errorf("%w: waiting for the virtual disk %q to be %s", ErrVirtualDiskNotReady, vd.Name, v1alpha2.DiskReady) } ready, _ := conditions.GetCondition(vdcondition.ReadyType, vd.Status.Conditions) @@ -595,7 +595,7 @@ func (h LifeCycleHandler) ensureBlockDeviceConsistency(ctx context.Context, vm * return nil } -func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) error { +func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) error { var secret *corev1.Secret var err error @@ -620,28 +620,28 @@ func (h LifeCycleHandler) ensureSecret(ctx context.Context, vm *virtv2.VirtualMa return nil } -func getVDName(vdSnapshotName string, vmSnapshot *virtv2.VirtualMachineSnapshot) (string, bool) { +func getVDName(vdSnapshotName string, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (string, bool) { return strings.CutSuffix(vdSnapshotName, "-"+string(vmSnapshot.UID)) } -func getVDSnapshotName(vdName string, vmSnapshot *virtv2.VirtualMachineSnapshot) string { +func getVDSnapshotName(vdName string, vmSnapshot *v1alpha2.VirtualMachineSnapshot) string { return fmt.Sprintf("%s-%s", vdName, vmSnapshot.UID) } -func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot, vm *virtv2.VirtualMachine) error { - vmSnapshot.Status.Resources = []virtv2.ResourceRef{} +func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot, vm *v1alpha2.VirtualMachine) error { + vmSnapshot.Status.Resources = []v1alpha2.ResourceRef{} - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ Kind: vm.Kind, ApiVersion: vm.APIVersion, Name: vm.Name, }) - if vmSnapshot.Spec.KeepIPAddress == virtv2.KeepIPAddressAlways { + if vmSnapshot.Spec.KeepIPAddress == v1alpha2.KeepIPAddressAlways { vmip, err := object.FetchObject(ctx, types.NamespacedName{ Namespace: vm.Namespace, Name: vm.Status.VirtualMachineIPAddress, - }, h.client, &virtv2.VirtualMachineIPAddress{}) + }, h.client, &v1alpha2.VirtualMachineIPAddress{}) if err != nil { return err } @@ -650,7 +650,7 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v return fmt.Errorf("the virtual machine ip address %q not found", vm.Status.VirtualMachineIPAddress) } - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ 
Kind: vmip.Kind, ApiVersion: vmip.APIVersion, Name: vmip.Name, @@ -659,21 +659,21 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v if len(vm.Spec.Networks) > 1 { for _, ns := range vm.Status.Networks { - if ns.Type == virtv2.NetworksTypeMain { + if ns.Type == v1alpha2.NetworksTypeMain { continue } vmmac, err := object.FetchObject(ctx, types.NamespacedName{ Namespace: vm.Namespace, Name: ns.VirtualMachineMACAddressName, - }, h.client, &virtv2.VirtualMachineMACAddress{}) + }, h.client, &v1alpha2.VirtualMachineMACAddress{}) if err != nil { return err } if vmmac == nil { return fmt.Errorf("the virtual machine mac address %q not found", ns.VirtualMachineMACAddressName) } - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ Kind: vmmac.Kind, ApiVersion: vmmac.APIVersion, Name: vmmac.Name, @@ -686,7 +686,7 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v return err } if provisioner != nil { - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ Kind: provisioner.Kind, ApiVersion: provisioner.APIVersion, Name: provisioner.Name, @@ -695,32 +695,32 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v for _, bdr := range vm.Status.BlockDeviceRefs { if bdr.VirtualMachineBlockDeviceAttachmentName != "" { - vmbda, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.VirtualMachineBlockDeviceAttachmentName, Namespace: vm.Namespace}, h.client, &virtv2.VirtualMachineBlockDeviceAttachment{}) + vmbda, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.VirtualMachineBlockDeviceAttachmentName, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualMachineBlockDeviceAttachment{}) if err != nil { return err } if vmbda == nil { continue } - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ Kind: vmbda.Kind, ApiVersion: vmbda.APIVersion, Name: vmbda.Name, }) } - if bdr.Kind != virtv2.DiskDevice { + if bdr.Kind != v1alpha2.DiskDevice { continue } - vd, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.Name, Namespace: vm.Namespace}, h.client, &virtv2.VirtualDisk{}) + vd, err := object.FetchObject(ctx, types.NamespacedName{Name: bdr.Name, Namespace: vm.Namespace}, h.client, &v1alpha2.VirtualDisk{}) if err != nil { return err } if vd == nil { continue } - vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, virtv2.ResourceRef{ + vmSnapshot.Status.Resources = append(vmSnapshot.Status.Resources, v1alpha2.ResourceRef{ Kind: vd.Kind, ApiVersion: vd.APIVersion, Name: vd.Name, @@ -730,26 +730,26 @@ func (h LifeCycleHandler) fillStatusResources(ctx context.Context, vmSnapshot *v return nil } -func (h LifeCycleHandler) getProvisionerFromVM(ctx context.Context, vm *virtv2.VirtualMachine) (*corev1.Secret, error) { +func (h LifeCycleHandler) getProvisionerFromVM(ctx context.Context, vm *v1alpha2.VirtualMachine) (*corev1.Secret, error) { if vm.Spec.Provisioning != nil { var provisioningSecretName string switch vm.Spec.Provisioning.Type { - case virtv2.ProvisioningTypeSysprepRef: + case v1alpha2.ProvisioningTypeSysprepRef: if vm.Spec.Provisioning.SysprepRef == nil { return nil, nil } - if 
vm.Spec.Provisioning.SysprepRef.Kind == virtv2.SysprepRefKindSecret { + if vm.Spec.Provisioning.SysprepRef.Kind == v1alpha2.SysprepRefKindSecret { provisioningSecretName = vm.Spec.Provisioning.SysprepRef.Name } - case virtv2.ProvisioningTypeUserDataRef: + case v1alpha2.ProvisioningTypeUserDataRef: if vm.Spec.Provisioning.UserDataRef == nil { return nil, nil } - if vm.Spec.Provisioning.UserDataRef.Kind == virtv2.UserDataRefKindSecret { + if vm.Spec.Provisioning.UserDataRef.Kind == v1alpha2.UserDataRefKindSecret { provisioningSecretName = vm.Spec.Provisioning.UserDataRef.Name } } diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle_test.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle_test.go index a88c03cf29..66561c384a 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle_test.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/life_cycle_test.go @@ -30,7 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/testutil" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/eventrecord" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition" @@ -40,18 +40,18 @@ var _ = Describe("LifeCycle handler", func() { var recorder eventrecord.EventRecorderLogger var snapshotter *SnapshotterMock var storer *StorerMock - var vd *virtv2.VirtualDisk - var vm *virtv2.VirtualMachine + var vd *v1alpha2.VirtualDisk + var vm *v1alpha2.VirtualMachine var secret *corev1.Secret - var vdSnapshot *virtv2.VirtualDiskSnapshot - var vmSnapshot *virtv2.VirtualMachineSnapshot + var vdSnapshot *v1alpha2.VirtualDiskSnapshot + var vmSnapshot *v1alpha2.VirtualMachineSnapshot var fakeClient client.WithWatch BeforeEach(func() { - vd = &virtv2.VirtualDisk{ + vd = &v1alpha2.VirtualDisk{ ObjectMeta: metav1.ObjectMeta{Name: "vd-bar"}, - Status: virtv2.VirtualDiskStatus{ - Phase: virtv2.DiskReady, + Status: v1alpha2.VirtualDiskStatus{ + Phase: v1alpha2.DiskReady, Conditions: []metav1.Condition{ { Type: vdcondition.Ready.String(), @@ -61,21 +61,21 @@ var _ = Describe("LifeCycle handler", func() { }, } - vm = &virtv2.VirtualMachine{ + vm = &v1alpha2.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{Name: "vm"}, - Spec: virtv2.VirtualMachineSpec{ - BlockDeviceRefs: []virtv2.BlockDeviceSpecRef{ + Spec: v1alpha2.VirtualMachineSpec{ + BlockDeviceRefs: []v1alpha2.BlockDeviceSpecRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, }, - Status: virtv2.VirtualMachineStatus{ - Phase: virtv2.MachineRunning, - BlockDeviceRefs: []virtv2.BlockDeviceStatusRef{ + Status: v1alpha2.VirtualMachineStatus{ + Phase: v1alpha2.MachineRunning, + BlockDeviceRefs: []v1alpha2.BlockDeviceStatusRef{ { - Kind: virtv2.DiskDevice, + Kind: v1alpha2.DiskDevice, Name: vd.Name, }, }, @@ -92,13 +92,13 @@ var _ = Describe("LifeCycle handler", func() { ObjectMeta: metav1.ObjectMeta{Name: vm.Name}, } - vmSnapshot = &virtv2.VirtualMachineSnapshot{ + vmSnapshot = &v1alpha2.VirtualMachineSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: "vm-snapshot"}, - Spec: virtv2.VirtualMachineSnapshotSpec{ + Spec: v1alpha2.VirtualMachineSnapshotSpec{ VirtualMachineName: vm.Name, 
RequiredConsistency: true, }, - Status: virtv2.VirtualMachineSnapshotStatus{ + Status: v1alpha2.VirtualMachineSnapshotStatus{ VirtualMachineSnapshotSecretName: "vm-snapshot", Conditions: []metav1.Condition{ { @@ -109,28 +109,28 @@ var _ = Describe("LifeCycle handler", func() { }, } - vdSnapshot = &virtv2.VirtualDiskSnapshot{ + vdSnapshot = &v1alpha2.VirtualDiskSnapshot{ ObjectMeta: metav1.ObjectMeta{Name: getVDSnapshotName(vd.Name, vmSnapshot)}, - Status: virtv2.VirtualDiskSnapshotStatus{ - Phase: virtv2.VirtualDiskSnapshotPhaseReady, + Status: v1alpha2.VirtualDiskSnapshotStatus{ + Phase: v1alpha2.VirtualDiskSnapshotPhaseReady, Consistent: ptr.To(true), }, } snapshotter = &SnapshotterMock{ - GetVirtualDiskFunc: func(_ context.Context, name, namespace string) (*virtv2.VirtualDisk, error) { + GetVirtualDiskFunc: func(_ context.Context, name, namespace string) (*v1alpha2.VirtualDisk, error) { return vd, nil }, - GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + GetVirtualMachineFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { return vm, nil }, - IsFrozenFunc: func(_ *virtv2.VirtualMachine) bool { + IsFrozenFunc: func(_ *v1alpha2.VirtualMachine) bool { return true }, - CanUnfreezeWithVirtualMachineSnapshotFunc: func(_ context.Context, _ string, _ *virtv2.VirtualMachine) (bool, error) { + CanUnfreezeWithVirtualMachineSnapshotFunc: func(_ context.Context, _ string, _ *v1alpha2.VirtualMachine) (bool, error) { return true, nil }, - CanFreezeFunc: func(_ *virtv2.VirtualMachine) bool { + CanFreezeFunc: func(_ *v1alpha2.VirtualMachine) bool { return false }, UnfreezeFunc: func(ctx context.Context, _, _ string) error { @@ -139,7 +139,7 @@ var _ = Describe("LifeCycle handler", func() { GetSecretFunc: func(_ context.Context, _, _ string) (*corev1.Secret, error) { return secret, nil }, - GetVirtualDiskSnapshotFunc: func(_ context.Context, _, _ string) (*virtv2.VirtualDiskSnapshot, error) { + GetVirtualDiskSnapshotFunc: func(_ context.Context, _, _ string) (*v1alpha2.VirtualDiskSnapshot, error) { return vdSnapshot, nil }, } @@ -155,7 +155,7 @@ var _ = Describe("LifeCycle handler", func() { Context("The block devices of the virtual machine are not in the consistent state", func() { It("The BlockDevicesReady condition of the virtual machine isn't True", func() { - snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { cb := conditions.NewConditionBuilder(vmcondition.TypeBlockDevicesReady). Generation(vm.Generation). 
Status(metav1.ConditionFalse) @@ -166,7 +166,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseFailed)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseFailed)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -174,15 +174,15 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is Pending", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { - vd.Status.Phase = virtv2.DiskPending + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { + vd.Status.Phase = v1alpha2.DiskPending return vd, nil } h := NewLifeCycleHandler(recorder, snapshotter, storer, fakeClient) _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseFailed)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseFailed)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -190,7 +190,7 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is not Ready", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { cb := conditions.NewConditionBuilder(vdcondition.Ready). Generation(vd.Generation). Status(metav1.ConditionFalse) @@ -201,7 +201,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseFailed)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseFailed)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -209,7 +209,7 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual disk is the process of Resizing", func() { - snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDisk, error) { + snapshotter.GetVirtualDiskFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDisk, error) { cb := conditions.NewConditionBuilder(vdcondition.ResizingType). Generation(vd.Generation). Status(metav1.ConditionTrue). 
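The specs here configure behavior per test case by reassigning the moq-generated `*Func` fields on `SnapshotterMock` (its generated shape appears further below in `mock.go`). A self-contained toy of that pattern; the `Greeter` names are invented for illustration:

```go
package main

import (
	"context"
	"fmt"
)

// Greeter is a toy stand-in for the Snapshotter interface.
type Greeter interface {
	Greet(ctx context.Context, name string) (string, error)
}

// GreeterMock mirrors the moq layout: one exported <Method>Func field per
// interface method, with a panic if the field was never configured.
type GreeterMock struct {
	GreetFunc func(ctx context.Context, name string) (string, error)
}

var _ Greeter = &GreeterMock{}

func (m *GreeterMock) Greet(ctx context.Context, name string) (string, error) {
	if m.GreetFunc == nil {
		panic("GreeterMock.GreetFunc: method is nil but Greeter.Greet was just called")
	}
	return m.GreetFunc(ctx, name)
}

func main() {
	mock := &GreeterMock{}
	// Each test swaps in exactly the behavior it needs, the same way the
	// specs above override snapshotter.GetVirtualDiskFunc.
	mock.GreetFunc = func(_ context.Context, name string) (string, error) {
		return "", fmt.Errorf("disk %q is not ready", name)
	}
	if _, err := mock.Greet(context.Background(), "vd-bar"); err != nil {
		fmt.Println(err) // disk "vd-bar" is not ready
	}
}
```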
@@ -221,7 +221,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseFailed)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseFailed)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.BlockDevicesNotReady.String())) @@ -231,7 +231,7 @@ var _ = Describe("LifeCycle handler", func() { Context("Ensure the virtual machine consistency", func() { It("The virtual machine has RestartAwaitingChanges", func() { - snapshotter.GetVirtualMachineFunc = func(ctx context.Context, _, _ string) (*virtv2.VirtualMachine, error) { + snapshotter.GetVirtualMachineFunc = func(ctx context.Context, _, _ string) (*v1alpha2.VirtualMachine, error) { vm.Status.RestartAwaitingChanges = []apiextensionsv1.JSON{{}, {}} return vm, nil } @@ -240,7 +240,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseInProgress)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.FileSystemFreezing.String())) @@ -248,10 +248,10 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual machine is potentially inconsistent", func() { - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } @@ -259,7 +259,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhasePending)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhasePending)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.PotentiallyInconsistent.String())) @@ -267,10 +267,10 @@ var _ = Describe("LifeCycle handler", func() { }) It("The virtual machine has frozen", func() { - snapshotter.IsFrozenFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.IsFrozenFunc = func(_ *v1alpha2.VirtualMachine) bool { return false } - snapshotter.CanFreezeFunc = func(_ *virtv2.VirtualMachine) bool { + snapshotter.CanFreezeFunc = func(_ *v1alpha2.VirtualMachine) bool { return true } snapshotter.FreezeFunc = func(_ context.Context, _, _ string) error { @@ -281,7 +281,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseInProgress)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseInProgress)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) 
Expect(ready.Status).To(Equal(metav1.ConditionFalse)) Expect(ready.Reason).To(Equal(vmscondition.FileSystemFreezing.String())) @@ -291,7 +291,7 @@ var _ = Describe("LifeCycle handler", func() { Context("The virtual machine snapshot is Ready", func() { BeforeEach(func() { - vmSnapshot.Status.Phase = virtv2.VirtualMachineSnapshotPhaseInProgress + vmSnapshot.Status.Phase = v1alpha2.VirtualMachineSnapshotPhaseInProgress }) It("The snapshot of virtual machine is Ready", func() { @@ -299,7 +299,7 @@ var _ = Describe("LifeCycle handler", func() { _, err := h.Handle(testContext(), vmSnapshot) Expect(err).To(BeNil()) - Expect(vmSnapshot.Status.Phase).To(Equal(virtv2.VirtualMachineSnapshotPhaseReady)) + Expect(vmSnapshot.Status.Phase).To(Equal(v1alpha2.VirtualMachineSnapshotPhaseReady)) ready, _ := conditions.GetCondition(vmscondition.VirtualMachineSnapshotReadyType, vmSnapshot.Status.Conditions) Expect(ready.Status).To(Equal(metav1.ConditionTrue)) Expect(ready.Reason).To(Equal(vmscondition.VirtualMachineReady.String())) @@ -317,8 +317,8 @@ var _ = Describe("LifeCycle handler", func() { }) It("The snapshot of stopped virtual machine is consistent", func() { - snapshotter.GetVirtualMachineFunc = func(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) { - vm.Status.Phase = virtv2.MachineStopped + snapshotter.GetVirtualMachineFunc = func(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) { + vm.Status.Phase = v1alpha2.MachineStopped return vm, nil } h := NewLifeCycleHandler(recorder, snapshotter, storer, fakeClient) @@ -330,7 +330,7 @@ var _ = Describe("LifeCycle handler", func() { It("The virtual machine snapshot is potentially inconsistent", func() { vmSnapshot.Spec.RequiredConsistency = false - snapshotter.GetVirtualDiskSnapshotFunc = func(_ context.Context, _, _ string) (*virtv2.VirtualDiskSnapshot, error) { + snapshotter.GetVirtualDiskSnapshotFunc = func(_ context.Context, _, _ string) (*v1alpha2.VirtualDiskSnapshot, error) { vdSnapshot.Status.Consistent = nil return vdSnapshot, nil } diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/mock.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/mock.go index 90c1cab63e..134a491adb 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/mock.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/mock.go @@ -5,7 +5,7 @@ package internal import ( "context" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" corev1 "k8s.io/api/core/v1" "sync" ) @@ -20,7 +20,7 @@ var _ Storer = &StorerMock{} // // // make and configure a mocked Storer // mockedStorer := &StorerMock{ -// StoreFunc: func(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error) { +// StoreFunc: func(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error) { // panic("mock out the Store method") // }, // } @@ -31,7 +31,7 @@ var _ Storer = &StorerMock{} // } type StorerMock struct { // StoreFunc mocks the Store method. - StoreFunc func(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error) + StoreFunc func(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error) // calls tracks calls to the methods. 
calls struct { @@ -40,23 +40,23 @@ type StorerMock struct { // Ctx is the ctx argument value. Ctx context.Context // VM is the vm argument value. - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine // VmSnapshot is the vmSnapshot argument value. - VmSnapshot *virtv2.VirtualMachineSnapshot + VmSnapshot *v1alpha2.VirtualMachineSnapshot } } lockStore sync.RWMutex } // Store calls StoreFunc. -func (mock *StorerMock) Store(ctx context.Context, vm *virtv2.VirtualMachine, vmSnapshot *virtv2.VirtualMachineSnapshot) (*corev1.Secret, error) { +func (mock *StorerMock) Store(ctx context.Context, vm *v1alpha2.VirtualMachine, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (*corev1.Secret, error) { if mock.StoreFunc == nil { panic("StorerMock.StoreFunc: method is nil but Storer.Store was just called") } callInfo := struct { Ctx context.Context - VM *virtv2.VirtualMachine - VmSnapshot *virtv2.VirtualMachineSnapshot + VM *v1alpha2.VirtualMachine + VmSnapshot *v1alpha2.VirtualMachineSnapshot }{ Ctx: ctx, VM: vm, @@ -74,13 +74,13 @@ func (mock *StorerMock) Store(ctx context.Context, vm *virtv2.VirtualMachine, vm // len(mockedStorer.StoreCalls()) func (mock *StorerMock) StoreCalls() []struct { Ctx context.Context - VM *virtv2.VirtualMachine - VmSnapshot *virtv2.VirtualMachineSnapshot + VM *v1alpha2.VirtualMachine + VmSnapshot *v1alpha2.VirtualMachineSnapshot } { var calls []struct { Ctx context.Context - VM *virtv2.VirtualMachine - VmSnapshot *virtv2.VirtualMachineSnapshot + VM *v1alpha2.VirtualMachine + VmSnapshot *v1alpha2.VirtualMachineSnapshot } mock.lockStore.RLock() calls = mock.calls.Store @@ -98,13 +98,13 @@ var _ Snapshotter = &SnapshotterMock{} // // // make and configure a mocked Snapshotter // mockedSnapshotter := &SnapshotterMock{ -// CanFreezeFunc: func(vm *virtv2.VirtualMachine) bool { +// CanFreezeFunc: func(vm *v1alpha2.VirtualMachine) bool { // panic("mock out the CanFreeze method") // }, -// CanUnfreezeWithVirtualMachineSnapshotFunc: func(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +// CanUnfreezeWithVirtualMachineSnapshotFunc: func(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { // panic("mock out the CanUnfreezeWithVirtualMachineSnapshot method") // }, -// CreateVirtualDiskSnapshotFunc: func(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) { +// CreateVirtualDiskSnapshotFunc: func(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) { // panic("mock out the CreateVirtualDiskSnapshot method") // }, // FreezeFunc: func(ctx context.Context, name string, namespace string) error { @@ -116,16 +116,16 @@ var _ Snapshotter = &SnapshotterMock{} // GetSecretFunc: func(ctx context.Context, name string, namespace string) (*corev1.Secret, error) { // panic("mock out the GetSecret method") // }, -// GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) { +// GetVirtualDiskFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) { // panic("mock out the GetVirtualDisk method") // }, -// GetVirtualDiskSnapshotFunc: func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDiskSnapshot, error) { +// GetVirtualDiskSnapshotFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { // panic("mock out the GetVirtualDiskSnapshot method") // }, -// GetVirtualMachineFunc: func(ctx 
context.Context, name string, namespace string) (*virtv2.VirtualMachine, error) { +// GetVirtualMachineFunc: func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error) { // panic("mock out the GetVirtualMachine method") // }, -// IsFrozenFunc: func(vm *virtv2.VirtualMachine) bool { +// IsFrozenFunc: func(vm *v1alpha2.VirtualMachine) bool { // panic("mock out the IsFrozen method") // }, // UnfreezeFunc: func(ctx context.Context, name string, namespace string) error { @@ -139,13 +139,13 @@ var _ Snapshotter = &SnapshotterMock{} // } type SnapshotterMock struct { // CanFreezeFunc mocks the CanFreeze method. - CanFreezeFunc func(vm *virtv2.VirtualMachine) bool + CanFreezeFunc func(vm *v1alpha2.VirtualMachine) bool // CanUnfreezeWithVirtualMachineSnapshotFunc mocks the CanUnfreezeWithVirtualMachineSnapshot method. - CanUnfreezeWithVirtualMachineSnapshotFunc func(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) + CanUnfreezeWithVirtualMachineSnapshotFunc func(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) // CreateVirtualDiskSnapshotFunc mocks the CreateVirtualDiskSnapshot method. - CreateVirtualDiskSnapshotFunc func(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) + CreateVirtualDiskSnapshotFunc func(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) // FreezeFunc mocks the Freeze method. FreezeFunc func(ctx context.Context, name string, namespace string) error @@ -157,16 +157,16 @@ type SnapshotterMock struct { GetSecretFunc func(ctx context.Context, name string, namespace string) (*corev1.Secret, error) // GetVirtualDiskFunc mocks the GetVirtualDisk method. - GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) + GetVirtualDiskFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) // GetVirtualDiskSnapshotFunc mocks the GetVirtualDiskSnapshot method. - GetVirtualDiskSnapshotFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualDiskSnapshot, error) + GetVirtualDiskSnapshotFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) // GetVirtualMachineFunc mocks the GetVirtualMachine method. - GetVirtualMachineFunc func(ctx context.Context, name string, namespace string) (*virtv2.VirtualMachine, error) + GetVirtualMachineFunc func(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error) // IsFrozenFunc mocks the IsFrozen method. - IsFrozenFunc func(vm *virtv2.VirtualMachine) bool + IsFrozenFunc func(vm *v1alpha2.VirtualMachine) bool // UnfreezeFunc mocks the Unfreeze method. UnfreezeFunc func(ctx context.Context, name string, namespace string) error @@ -176,7 +176,7 @@ type SnapshotterMock struct { // CanFreeze holds details about calls to the CanFreeze method. CanFreeze []struct { // VM is the vm argument value. - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } // CanUnfreezeWithVirtualMachineSnapshot holds details about calls to the CanUnfreezeWithVirtualMachineSnapshot method. CanUnfreezeWithVirtualMachineSnapshot []struct { @@ -185,14 +185,14 @@ type SnapshotterMock struct { // VmSnapshotName is the vmSnapshotName argument value. VmSnapshotName string // VM is the vm argument value. 
- VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } // CreateVirtualDiskSnapshot holds details about calls to the CreateVirtualDiskSnapshot method. CreateVirtualDiskSnapshot []struct { // Ctx is the ctx argument value. Ctx context.Context // VdSnapshot is the vdSnapshot argument value. - VdSnapshot *virtv2.VirtualDiskSnapshot + VdSnapshot *v1alpha2.VirtualDiskSnapshot } // Freeze holds details about calls to the Freeze method. Freeze []struct { @@ -251,7 +251,7 @@ type SnapshotterMock struct { // IsFrozen holds details about calls to the IsFrozen method. IsFrozen []struct { // VM is the vm argument value. - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } // Unfreeze holds details about calls to the Unfreeze method. Unfreeze []struct { @@ -277,12 +277,12 @@ type SnapshotterMock struct { } // CanFreeze calls CanFreezeFunc. -func (mock *SnapshotterMock) CanFreeze(vm *virtv2.VirtualMachine) bool { +func (mock *SnapshotterMock) CanFreeze(vm *v1alpha2.VirtualMachine) bool { if mock.CanFreezeFunc == nil { panic("SnapshotterMock.CanFreezeFunc: method is nil but Snapshotter.CanFreeze was just called") } callInfo := struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine }{ VM: vm, } @@ -297,10 +297,10 @@ func (mock *SnapshotterMock) CanFreeze(vm *virtv2.VirtualMachine) bool { // // len(mockedSnapshotter.CanFreezeCalls()) func (mock *SnapshotterMock) CanFreezeCalls() []struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } { var calls []struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } mock.lockCanFreeze.RLock() calls = mock.calls.CanFreeze @@ -309,14 +309,14 @@ func (mock *SnapshotterMock) CanFreezeCalls() []struct { } // CanUnfreezeWithVirtualMachineSnapshot calls CanUnfreezeWithVirtualMachineSnapshotFunc. -func (mock *SnapshotterMock) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *virtv2.VirtualMachine) (bool, error) { +func (mock *SnapshotterMock) CanUnfreezeWithVirtualMachineSnapshot(ctx context.Context, vmSnapshotName string, vm *v1alpha2.VirtualMachine) (bool, error) { if mock.CanUnfreezeWithVirtualMachineSnapshotFunc == nil { panic("SnapshotterMock.CanUnfreezeWithVirtualMachineSnapshotFunc: method is nil but Snapshotter.CanUnfreezeWithVirtualMachineSnapshot was just called") } callInfo := struct { Ctx context.Context VmSnapshotName string - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine }{ Ctx: ctx, VmSnapshotName: vmSnapshotName, @@ -335,12 +335,12 @@ func (mock *SnapshotterMock) CanUnfreezeWithVirtualMachineSnapshot(ctx context.C func (mock *SnapshotterMock) CanUnfreezeWithVirtualMachineSnapshotCalls() []struct { Ctx context.Context VmSnapshotName string - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } { var calls []struct { Ctx context.Context VmSnapshotName string - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } mock.lockCanUnfreezeWithVirtualMachineSnapshot.RLock() calls = mock.calls.CanUnfreezeWithVirtualMachineSnapshot @@ -349,13 +349,13 @@ func (mock *SnapshotterMock) CanUnfreezeWithVirtualMachineSnapshotCalls() []stru } // CreateVirtualDiskSnapshot calls CreateVirtualDiskSnapshotFunc. 
-func (mock *SnapshotterMock) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (*virtv2.VirtualDiskSnapshot, error) { +func (mock *SnapshotterMock) CreateVirtualDiskSnapshot(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (*v1alpha2.VirtualDiskSnapshot, error) { if mock.CreateVirtualDiskSnapshotFunc == nil { panic("SnapshotterMock.CreateVirtualDiskSnapshotFunc: method is nil but Snapshotter.CreateVirtualDiskSnapshot was just called") } callInfo := struct { Ctx context.Context - VdSnapshot *virtv2.VirtualDiskSnapshot + VdSnapshot *v1alpha2.VirtualDiskSnapshot }{ Ctx: ctx, VdSnapshot: vdSnapshot, @@ -372,11 +372,11 @@ func (mock *SnapshotterMock) CreateVirtualDiskSnapshot(ctx context.Context, vdSn // len(mockedSnapshotter.CreateVirtualDiskSnapshotCalls()) func (mock *SnapshotterMock) CreateVirtualDiskSnapshotCalls() []struct { Ctx context.Context - VdSnapshot *virtv2.VirtualDiskSnapshot + VdSnapshot *v1alpha2.VirtualDiskSnapshot } { var calls []struct { Ctx context.Context - VdSnapshot *virtv2.VirtualDiskSnapshot + VdSnapshot *v1alpha2.VirtualDiskSnapshot } mock.lockCreateVirtualDiskSnapshot.RLock() calls = mock.calls.CreateVirtualDiskSnapshot @@ -505,7 +505,7 @@ func (mock *SnapshotterMock) GetSecretCalls() []struct { } // GetVirtualDisk calls GetVirtualDiskFunc. -func (mock *SnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*virtv2.VirtualDisk, error) { +func (mock *SnapshotterMock) GetVirtualDisk(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDisk, error) { if mock.GetVirtualDiskFunc == nil { panic("SnapshotterMock.GetVirtualDiskFunc: method is nil but Snapshotter.GetVirtualDisk was just called") } @@ -545,7 +545,7 @@ func (mock *SnapshotterMock) GetVirtualDiskCalls() []struct { } // GetVirtualDiskSnapshot calls GetVirtualDiskSnapshotFunc. -func (mock *SnapshotterMock) GetVirtualDiskSnapshot(ctx context.Context, name string, namespace string) (*virtv2.VirtualDiskSnapshot, error) { +func (mock *SnapshotterMock) GetVirtualDiskSnapshot(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualDiskSnapshot, error) { if mock.GetVirtualDiskSnapshotFunc == nil { panic("SnapshotterMock.GetVirtualDiskSnapshotFunc: method is nil but Snapshotter.GetVirtualDiskSnapshot was just called") } @@ -585,7 +585,7 @@ func (mock *SnapshotterMock) GetVirtualDiskSnapshotCalls() []struct { } // GetVirtualMachine calls GetVirtualMachineFunc. -func (mock *SnapshotterMock) GetVirtualMachine(ctx context.Context, name string, namespace string) (*virtv2.VirtualMachine, error) { +func (mock *SnapshotterMock) GetVirtualMachine(ctx context.Context, name string, namespace string) (*v1alpha2.VirtualMachine, error) { if mock.GetVirtualMachineFunc == nil { panic("SnapshotterMock.GetVirtualMachineFunc: method is nil but Snapshotter.GetVirtualMachine was just called") } @@ -625,12 +625,12 @@ func (mock *SnapshotterMock) GetVirtualMachineCalls() []struct { } // IsFrozen calls IsFrozenFunc. 
-func (mock *SnapshotterMock) IsFrozen(vm *virtv2.VirtualMachine) bool { +func (mock *SnapshotterMock) IsFrozen(vm *v1alpha2.VirtualMachine) bool { if mock.IsFrozenFunc == nil { panic("SnapshotterMock.IsFrozenFunc: method is nil but Snapshotter.IsFrozen was just called") } callInfo := struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine }{ VM: vm, } @@ -645,10 +645,10 @@ func (mock *SnapshotterMock) IsFrozen(vm *virtv2.VirtualMachine) bool { // // len(mockedSnapshotter.IsFrozenCalls()) func (mock *SnapshotterMock) IsFrozenCalls() []struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } { var calls []struct { - VM *virtv2.VirtualMachine + VM *v1alpha2.VirtualMachine } mock.lockIsFrozen.RLock() calls = mock.calls.IsFrozen diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go index b2abe2ffa7..ce8e76e11d 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/virtual_machine_ready.go @@ -24,13 +24,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmscondition" ) type VirtualMachineReadySnapshotter interface { - GetVirtualMachine(ctx context.Context, name, namespace string) (*virtv2.VirtualMachine, error) + GetVirtualMachine(ctx context.Context, name, namespace string) (*v1alpha2.VirtualMachine, error) } type VirtualMachineReadyHandler struct { @@ -43,7 +43,7 @@ func NewVirtualMachineReadyHandler(snapshotter VirtualMachineReadySnapshotter) * } } -func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error) { +func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error) { cb := conditions.NewConditionBuilder(vmscondition.VirtualMachineReadyType) defer func() { conditions.SetCondition(cb.Generation(vmSnapshot.Generation), &vmSnapshot.Status.Conditions) }() @@ -56,7 +56,7 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virt return reconcile.Result{}, nil } - if vmSnapshot.Status.Phase == virtv2.VirtualMachineSnapshotPhaseReady { + if vmSnapshot.Status.Phase == v1alpha2.VirtualMachineSnapshotPhaseReady { cb.Status(metav1.ConditionTrue).Reason(vmscondition.VirtualMachineReady) return reconcile.Result{}, nil } @@ -83,7 +83,7 @@ func (h VirtualMachineReadyHandler) Handle(ctx context.Context, vmSnapshot *virt } switch vm.Status.Phase { - case virtv2.MachineRunning, virtv2.MachineStopped: + case v1alpha2.MachineRunning, v1alpha2.MachineStopped: snapshotting, _ := conditions.GetCondition(vmcondition.TypeSnapshotting, vm.Status.Conditions) if snapshotting.Status != metav1.ConditionTrue { cb.Status(metav1.ConditionFalse).Reason(vmscondition.VirtualMachineNotReadyForSnapshotting) diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go index f113c406e8..8ca221a4cb 100644 --- 
a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vd_watcher.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) @@ -49,12 +49,12 @@ func NewVirtualDiskWatcher(client client.Client) *VirtualDiskWatcher { func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDisk{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDisk{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDisk]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDisk]) bool { return false }, - DeleteFunc: func(e event.TypedDeleteEvent[*virtv2.VirtualDisk]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDisk]) bool { + predicate.TypedFuncs[*v1alpha2.VirtualDisk]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDisk]) bool { return false }, + DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha2.VirtualDisk]) bool { return false }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDisk]) bool { if e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase { return true } @@ -79,8 +79,8 @@ func (w VirtualDiskWatcher) Watch(mgr manager.Manager, ctr controller.Controller return nil } -func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *virtv2.VirtualDisk) (requests []reconcile.Request) { - var vmSnapshots virtv2.VirtualMachineSnapshotList +func (w VirtualDiskWatcher) enqueueRequests(ctx context.Context, vd *v1alpha2.VirtualDisk) (requests []reconcile.Request) { + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vd.GetNamespace(), }) diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go index 83012af437..fae0870beb 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vdsnapshot_watcher.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualDiskSnapshotWatcher struct { @@ -48,11 +48,11 @@ func NewVirtualDiskSnapshotWatcher(client client.Client) *VirtualDiskSnapshotWat func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualDiskSnapshot{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualDiskSnapshot{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualDiskSnapshot]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualDiskSnapshot]) bool { return false }, - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualDiskSnapshot]) bool { + 
predicate.TypedFuncs[*v1alpha2.VirtualDiskSnapshot]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return false }, + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualDiskSnapshot]) bool { return e.ObjectOld.Status.Phase != e.ObjectNew.Status.Phase }, }, @@ -63,8 +63,8 @@ func (w VirtualDiskSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Co return nil } -func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *virtv2.VirtualDiskSnapshot) (requests []reconcile.Request) { - var vmSnapshots virtv2.VirtualMachineSnapshotList +func (w VirtualDiskSnapshotWatcher) enqueueRequests(ctx context.Context, vdSnapshot *v1alpha2.VirtualDiskSnapshot) (requests []reconcile.Request) { + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vdSnapshot.GetNamespace(), FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMSnapshotByVDSnapshot, vdSnapshot.GetName()), diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go index 0a423aa943..03f03c1aec 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vm_watcher.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/indexer" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" ) @@ -50,10 +50,10 @@ func NewVirtualMachineWatcher(client client.Client) *VirtualMachineWatcher { func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachine{}, + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachine{}, handler.TypedEnqueueRequestsFromMapFunc(w.enqueueRequests), - predicate.TypedFuncs[*virtv2.VirtualMachine]{ - CreateFunc: func(e event.TypedCreateEvent[*virtv2.VirtualMachine]) bool { return false }, + predicate.TypedFuncs[*v1alpha2.VirtualMachine]{ + CreateFunc: func(e event.TypedCreateEvent[*v1alpha2.VirtualMachine]) bool { return false }, UpdateFunc: w.filterUpdateEvents, }, ), @@ -63,8 +63,8 @@ func (w VirtualMachineWatcher) Watch(mgr manager.Manager, ctr controller.Control return nil } -func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.VirtualMachine) (requests []reconcile.Request) { - var vmSnapshots virtv2.VirtualMachineSnapshotList +func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *v1alpha2.VirtualMachine) (requests []reconcile.Request) { + var vmSnapshots v1alpha2.VirtualMachineSnapshotList err := w.client.List(ctx, &vmSnapshots, &client.ListOptions{ Namespace: vm.GetNamespace(), FieldSelector: fields.OneTermEqualSelector(indexer.IndexFieldVMSnapshotByVM, vm.GetName()), @@ -88,7 +88,7 @@ func (w VirtualMachineWatcher) enqueueRequests(ctx context.Context, vm *virtv2.V return } -func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*virtv2.VirtualMachine]) bool { +func (w VirtualMachineWatcher) filterUpdateEvents(e event.TypedUpdateEvent[*v1alpha2.VirtualMachine]) bool { oldAgentReady, _ := 
conditions.GetCondition(vmcondition.TypeAgentReady, e.ObjectOld.Status.Conditions) newAgentReady, _ := conditions.GetCondition(vmcondition.TypeAgentReady, e.ObjectNew.Status.Conditions) diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go index 4785591bda..92f165cb74 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/internal/watcher/vmsnapshot_watcher.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualMachineSnapshotWatcher struct { @@ -42,10 +42,10 @@ func NewVirtualMachineSnapshotWatcher(client client.Client) *VirtualMachineSnaps func (w VirtualMachineSnapshotWatcher) Watch(mgr manager.Manager, ctr controller.Controller) error { if err := ctr.Watch( - source.Kind(mgr.GetCache(), &virtv2.VirtualMachineSnapshot{}, - &handler.TypedEnqueueRequestForObject[*virtv2.VirtualMachineSnapshot]{}, - predicate.TypedFuncs[*virtv2.VirtualMachineSnapshot]{ - UpdateFunc: func(e event.TypedUpdateEvent[*virtv2.VirtualMachineSnapshot]) bool { + source.Kind(mgr.GetCache(), &v1alpha2.VirtualMachineSnapshot{}, + &handler.TypedEnqueueRequestForObject[*v1alpha2.VirtualMachineSnapshot]{}, + predicate.TypedFuncs[*v1alpha2.VirtualMachineSnapshot]{ + UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha2.VirtualMachineSnapshot]) bool { return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() }, }, diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go index c0c2e40eea..d3156b3c6c 100644 --- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go +++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go @@ -34,7 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/logger" vmsnapshotcollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmsnapshot" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ControllerName = "vmsnapshot-controller" @@ -45,7 +45,7 @@ func NewController( log *log.Logger, virtClient kubeclient.Client, ) error { - protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVMSnapshotProtection) + protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVMSnapshotProtection) recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName) snapshotter := service.NewSnapshotService(virtClient, mgr.GetClient(), protection) @@ -71,7 +71,7 @@ func NewController( } if err = builder.WebhookManagedBy(mgr). - For(&virtv2.VirtualMachineSnapshot{}). + For(&v1alpha2.VirtualMachineSnapshot{}). WithValidator(NewValidator()). 
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
index c0c2e40eea..d3156b3c6c 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_controller.go
@@ -34,7 +34,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
 	vmsnapshotcollector "github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/vmsnapshot"
 	"github.com/deckhouse/virtualization/api/client/kubeclient"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const ControllerName = "vmsnapshot-controller"
@@ -45,7 +45,7 @@ func NewController(
 	log *log.Logger,
 	virtClient kubeclient.Client,
 ) error {
-	protection := service.NewProtectionService(mgr.GetClient(), virtv2.FinalizerVMSnapshotProtection)
+	protection := service.NewProtectionService(mgr.GetClient(), v1alpha2.FinalizerVMSnapshotProtection)
 	recorder := eventrecord.NewEventRecorderLogger(mgr, ControllerName)
 	snapshotter := service.NewSnapshotService(virtClient, mgr.GetClient(), protection)
@@ -71,7 +71,7 @@ func NewController(
 	}
 
 	if err = builder.WebhookManagedBy(mgr).
-		For(&virtv2.VirtualMachineSnapshot{}).
+		For(&v1alpha2.VirtualMachineSnapshot{}).
 		WithValidator(NewValidator()).
 		Complete(); err != nil {
 		return err
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
index e600eb9773..750a70a2b9 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_reconciler.go
@@ -28,11 +28,11 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/vmsnapshot/internal/watcher"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Handler interface {
-	Handle(ctx context.Context, vmSnapshot *virtv2.VirtualMachineSnapshot) (reconcile.Result, error)
+	Handle(ctx context.Context, vmSnapshot *v1alpha2.VirtualMachineSnapshot) (reconcile.Result, error)
 }
 
 type Watcher interface {
@@ -92,10 +92,10 @@ func (r *Reconciler) SetupController(_ context.Context, mgr manager.Manager, ctr
 	return nil
 }
 
-func (r *Reconciler) factory() *virtv2.VirtualMachineSnapshot {
-	return &virtv2.VirtualMachineSnapshot{}
+func (r *Reconciler) factory() *v1alpha2.VirtualMachineSnapshot {
+	return &v1alpha2.VirtualMachineSnapshot{}
 }
 
-func (r *Reconciler) statusGetter(obj *virtv2.VirtualMachineSnapshot) virtv2.VirtualMachineSnapshotStatus {
+func (r *Reconciler) statusGetter(obj *v1alpha2.VirtualMachineSnapshot) v1alpha2.VirtualMachineSnapshotStatus {
 	return obj.Status
 }
diff --git a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
index c126414f61..0490285673 100644
--- a/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
+++ b/images/virtualization-artifact/pkg/controller/vmsnapshot/vmsnapshot_webhook.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type Validator struct{}
@@ -40,12 +40,12 @@ func (v *Validator) ValidateCreate(ctx context.Context, _ runtime.Object) (admis
 }
 
 func (v *Validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldVMSnapshot, ok := oldObj.(*virtv2.VirtualMachineSnapshot)
+	oldVMSnapshot, ok := oldObj.(*v1alpha2.VirtualMachineSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected an old VirtualMachineSnapshot but got a %T", newObj)
 	}
 
-	newVMSnapshot, ok := newObj.(*virtv2.VirtualMachineSnapshot)
+	newVMSnapshot, ok := newObj.(*v1alpha2.VirtualMachineSnapshot)
 	if !ok {
 		return nil, fmt.Errorf("expected a new VirtualMachineSnapshot but got a %T", newObj)
 	}
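The webhook above is plugged into controller-runtime's admission machinery via builder.WebhookManagedBy(mgr).For(...).WithValidator(...). One detail worth flagging: the old-object branch of ValidateUpdate formats newObj into its error message, which reads like a pre-existing copy-paste slip (it is a context line, so this PR does not touch it). A minimal sketch of the admission.CustomValidator contract; the immutability rule at the end is an illustrative assumption, not the module's actual policy:

// Sketch only: the CustomValidator shape the webhook above relies on.
package vmsnapshot

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

type exampleValidator struct{}

var _ admission.CustomValidator = exampleValidator{}

func (exampleValidator) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func (exampleValidator) ValidateUpdate(_ context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
	oldSnap, ok := oldObj.(*v1alpha2.VirtualMachineSnapshot)
	if !ok {
		// Format the object that actually failed the assertion.
		return nil, fmt.Errorf("expected an old VirtualMachineSnapshot but got a %T", oldObj)
	}
	newSnap, ok := newObj.(*v1alpha2.VirtualMachineSnapshot)
	if !ok {
		return nil, fmt.Errorf("expected a new VirtualMachineSnapshot but got a %T", newObj)
	}
	// Illustrative rule only: forbid retargeting a snapshot to another VM.
	if oldSnap.Spec.VirtualMachineName != newSnap.Spec.VirtualMachineName {
		return nil, fmt.Errorf("spec.virtualMachineName is immutable")
	}
	return nil, nil
}

func (exampleValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
	return nil, nil
}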
"github.com/deckhouse/virtualization/api/core/v1alpha2/cvicondition" ) type ClusterVirtualImageRequestEnqueuer struct { enqueueFromObj client.Object - enqueueFromKind virtv2.ClusterVirtualImageObjectRefKind + enqueueFromKind v1alpha2.ClusterVirtualImageObjectRefKind client client.Client logger *log.Logger } -func NewClusterVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.ClusterVirtualImageObjectRefKind) *ClusterVirtualImageRequestEnqueuer { +func NewClusterVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.ClusterVirtualImageObjectRefKind) *ClusterVirtualImageRequestEnqueuer { return &ClusterVirtualImageRequestEnqueuer{ enqueueFromObj: enqueueFromObj, enqueueFromKind: enqueueFromKind, @@ -52,7 +52,7 @@ func (w ClusterVirtualImageRequestEnqueuer) GetEnqueueFrom() client.Object { } func (w ClusterVirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { - var cvis virtv2.ClusterVirtualImageList + var cvis v1alpha2.ClusterVirtualImageList err := w.client.List(ctx, &cvis) if err != nil { w.logger.Error(fmt.Sprintf("failed to list cvi: %s", err)) @@ -65,7 +65,7 @@ func (w ClusterVirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, continue } - if cvi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if cvi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { continue } diff --git a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go index 5cc4d2ef39..62c13d2322 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go +++ b/images/virtualization-artifact/pkg/controller/watchers/cvi_filter.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type ClusterVirtualImageFilter struct { @@ -36,13 +36,13 @@ func NewClusterVirtualImageFilter() *ClusterVirtualImageFilter { } func (f ClusterVirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool { - oldCVI, ok := e.ObjectOld.(*virtv2.ClusterVirtualImage) + oldCVI, ok := e.ObjectOld.(*v1alpha2.ClusterVirtualImage) if !ok { f.logger.Error(fmt.Sprintf("expected an old ClusterVirtualImage but got a %T", e.ObjectOld)) return false } - newCVI, ok := e.ObjectNew.(*virtv2.ClusterVirtualImage) + newCVI, ok := e.ObjectNew.(*v1alpha2.ClusterVirtualImage) if !ok { f.logger.Error(fmt.Sprintf("expected a new ClusterVirtualImage but got a %T", e.ObjectNew)) return false diff --git a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go index 9a9c571057..a3d4b35c78 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go +++ b/images/virtualization-artifact/pkg/controller/watchers/vd_enqueuer.go @@ -27,18 +27,18 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vdcondition" ) type VirtualDiskRequestEnqueuer struct { enqueueFromObj client.Object - enqueueFromKind virtv2.VirtualDiskObjectRefKind + enqueueFromKind 
v1alpha2.VirtualDiskObjectRefKind client client.Client logger *log.Logger } -func NewVirtualDiskRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.VirtualDiskObjectRefKind) *VirtualDiskRequestEnqueuer { +func NewVirtualDiskRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.VirtualDiskObjectRefKind) *VirtualDiskRequestEnqueuer { return &VirtualDiskRequestEnqueuer{ enqueueFromObj: enqueueFromObj, enqueueFromKind: enqueueFromKind, @@ -52,7 +52,7 @@ func (w VirtualDiskRequestEnqueuer) GetEnqueueFrom() client.Object { } func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context, obj client.Object) (requests []reconcile.Request) { - var vds virtv2.VirtualDiskList + var vds v1alpha2.VirtualDiskList err := w.client.List(ctx, &vds) if err != nil { w.logger.Error(fmt.Sprintf("failed to list vd: %s", err)) @@ -66,7 +66,7 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context, continue } - if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vd.Spec.DataSource == nil || vd.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { continue } @@ -90,14 +90,14 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVDs(ctx context.Context, } func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVIs(obj client.Object) (requests []reconcile.Request) { - if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindVirtualImage { - vi, ok := obj.(*virtv2.VirtualImage) + if w.enqueueFromKind == v1alpha2.VirtualDiskObjectRefKindVirtualImage { + vi, ok := obj.(*v1alpha2.VirtualImage) if !ok { w.logger.Error(fmt.Sprintf("expected a VirtualImage but got a %T", obj)) return } - if vi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind { + if vi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && vi.Spec.DataSource.ObjectRef != nil && vi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: vi.Spec.DataSource.ObjectRef.Name, @@ -110,14 +110,14 @@ func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromVIs(obj client.Object) (r } func (w VirtualDiskRequestEnqueuer) EnqueueRequestsFromCVIs(obj client.Object) (requests []reconcile.Request) { - if w.enqueueFromKind == virtv2.VirtualDiskObjectRefKindClusterVirtualImage { - cvi, ok := obj.(*virtv2.ClusterVirtualImage) + if w.enqueueFromKind == v1alpha2.VirtualDiskObjectRefKindClusterVirtualImage { + cvi, ok := obj.(*v1alpha2.ClusterVirtualImage) if !ok { w.logger.Error(fmt.Sprintf("expected a ClusterVirtualImage but got a %T", obj)) return } - if cvi.Spec.DataSource.Type == virtv2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Kind == virtv2.VirtualDiskKind { + if cvi.Spec.DataSource.Type == v1alpha2.DataSourceTypeObjectRef && cvi.Spec.DataSource.ObjectRef != nil && cvi.Spec.DataSource.ObjectRef.Kind == v1alpha2.VirtualDiskKind { requests = append(requests, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: cvi.Spec.DataSource.ObjectRef.Name, diff --git a/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go b/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go index 5d9ae58959..eb1c33449f 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go +++ 
b/images/virtualization-artifact/pkg/controller/watchers/vi_enqueuer.go @@ -27,18 +27,18 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" ) type VirtualImageRequestEnqueuer struct { enqueueFromObj client.Object - enqueueFromKind virtv2.VirtualImageObjectRefKind + enqueueFromKind v1alpha2.VirtualImageObjectRefKind client client.Client logger *log.Logger } -func NewVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind virtv2.VirtualImageObjectRefKind) *VirtualImageRequestEnqueuer { +func NewVirtualImageRequestEnqueuer(client client.Client, enqueueFromObj client.Object, enqueueFromKind v1alpha2.VirtualImageObjectRefKind) *VirtualImageRequestEnqueuer { return &VirtualImageRequestEnqueuer{ enqueueFromObj: enqueueFromObj, enqueueFromKind: enqueueFromKind, @@ -52,7 +52,7 @@ func (w VirtualImageRequestEnqueuer) GetEnqueueFrom() client.Object { } func (w VirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj client.Object) (requests []reconcile.Request) { - var vis virtv2.VirtualImageList + var vis v1alpha2.VirtualImageList err := w.client.List(ctx, &vis) if err != nil { w.logger.Error(fmt.Sprintf("failed to list vi: %s", err)) @@ -65,7 +65,7 @@ func (w VirtualImageRequestEnqueuer) EnqueueRequests(ctx context.Context, obj cl continue } - if vi.Spec.DataSource.Type != virtv2.DataSourceTypeObjectRef { + if vi.Spec.DataSource.Type != v1alpha2.DataSourceTypeObjectRef { continue } diff --git a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go index cccde70bac..3beb71aee8 100644 --- a/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go +++ b/images/virtualization-artifact/pkg/controller/watchers/vi_filter.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "github.com/deckhouse/deckhouse/pkg/log" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) type VirtualImageFilter struct { @@ -36,13 +36,13 @@ func NewVirtualImageFilter() *VirtualImageFilter { } func (f VirtualImageFilter) FilterUpdateEvents(e event.UpdateEvent) bool { - oldVI, ok := e.ObjectOld.(*virtv2.VirtualImage) + oldVI, ok := e.ObjectOld.(*v1alpha2.VirtualImage) if !ok { f.logger.Error(fmt.Sprintf("expected an old VirtualImage but got a %T", e.ObjectOld)) return false } - newVI, ok := e.ObjectNew.(*virtv2.VirtualImage) + newVI, ok := e.ObjectNew.(*v1alpha2.VirtualImage) if !ok { f.logger.Error(fmt.Sprintf("expected a new VirtualImage but got a %T", e.ObjectNew)) return false diff --git a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/mock.go b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/mock.go index f5d98f7b97..5882a1fca1 100644 --- a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/mock.go +++ b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/mock.go @@ -5,7 +5,7 @@ package handler import ( "context" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "sync" ) @@ -19,7 +19,7 @@ var _ OneShotMigration = &OneShotMigrationMock{} 
 //
 //		// make and configure a mocked OneShotMigration
 //		mockedOneShotMigration := &OneShotMigrationMock{
-//			OnceMigrateFunc: func(ctx context.Context, vm *virtv2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error) {
+//			OnceMigrateFunc: func(ctx context.Context, vm *v1alpha2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error) {
 //				panic("mock out the OnceMigrate method")
 //			},
 //		}
@@ -30,7 +30,7 @@ var _ OneShotMigration = &OneShotMigrationMock{}
 // }
 type OneShotMigrationMock struct {
 	// OnceMigrateFunc mocks the OnceMigrate method.
-	OnceMigrateFunc func(ctx context.Context, vm *virtv2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error)
+	OnceMigrateFunc func(ctx context.Context, vm *v1alpha2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error)
 
 	// calls tracks calls to the methods.
 	calls struct {
@@ -39,7 +39,7 @@ type OneShotMigrationMock struct {
 			// Ctx is the ctx argument value.
 			Ctx context.Context
 			// VM is the vm argument value.
-			VM *virtv2.VirtualMachine
+			VM *v1alpha2.VirtualMachine
 			// AnnotationKey is the annotationKey argument value.
 			AnnotationKey string
 			// AnnotationExpectedValue is the annotationExpectedValue argument value.
@@ -50,13 +50,13 @@ type OneShotMigrationMock struct {
 }
 
 // OnceMigrate calls OnceMigrateFunc.
-func (mock *OneShotMigrationMock) OnceMigrate(ctx context.Context, vm *virtv2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error) {
+func (mock *OneShotMigrationMock) OnceMigrate(ctx context.Context, vm *v1alpha2.VirtualMachine, annotationKey string, annotationExpectedValue string) (bool, error) {
 	if mock.OnceMigrateFunc == nil {
 		panic("OneShotMigrationMock.OnceMigrateFunc: method is nil but OneShotMigration.OnceMigrate was just called")
 	}
 	callInfo := struct {
 		Ctx                     context.Context
-		VM                      *virtv2.VirtualMachine
+		VM                      *v1alpha2.VirtualMachine
 		AnnotationKey           string
 		AnnotationExpectedValue string
 	}{
@@ -77,13 +77,13 @@ func (mock *OneShotMigrationMock) OnceMigrate(ctx context.Context, vm *virtv2.Vi
 //	len(mockedOneShotMigration.OnceMigrateCalls())
 func (mock *OneShotMigrationMock) OnceMigrateCalls() []struct {
 	Ctx                     context.Context
-	VM                      *virtv2.VirtualMachine
+	VM                      *v1alpha2.VirtualMachine
 	AnnotationKey           string
 	AnnotationExpectedValue string
 } {
 	var calls []struct {
 		Ctx                     context.Context
-		VM                      *virtv2.VirtualMachine
+		VM                      *v1alpha2.VirtualMachine
 		AnnotationKey           string
 		AnnotationExpectedValue string
 	}
diff --git a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
index 22093dab64..2f954f483b 100644
--- a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
+++ b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/nodeplacement.go
@@ -32,7 +32,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/object"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/logger"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 const (
@@ -51,7 +51,7 @@ type NodePlacementHandler struct {
 	oneShotMigration OneShotMigration
 }
 
-func (h *NodePlacementHandler) Handle(ctx context.Context, vm *virtv2.VirtualMachine) (reconcile.Result, error) {
+func (h *NodePlacementHandler) Handle(ctx context.Context, vm *v1alpha2.VirtualMachine) (reconcile.Result, error) {
 	if vm == nil || !vm.GetDeletionTimestamp().IsZero() {
 		return reconcile.Result{}, nil
 	}
diff --git a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
index f2517db160..ec8a27ea29 100644
--- a/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
+++ b/images/virtualization-artifact/pkg/controller/workload-updater/internal/handler/suite_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/common/testutil"
 	"github.com/deckhouse/virtualization-controller/pkg/controller/reconciler"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func TestWorkloadUpdateHandlers(t *testing.T) {
@@ -37,7 +37,7 @@ func TestWorkloadUpdateHandlers(t *testing.T) {
 	RunSpecs(t, "WorkloadUpdate Handlers Suite")
 }
 
-func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) client.Client {
+func setupEnvironment(vm *v1alpha2.VirtualMachine, objs ...client.Object) client.Client {
 	GinkgoHelper()
 	Expect(vm).ToNot(BeNil())
 	allObjects := []client.Object{vm}
@@ -51,10 +51,10 @@ func setupEnvironment(vm *virtv2.VirtualMachine, objs ...client.Object) client.C
 		Namespace: vm.GetNamespace(),
 	}
 	resource := reconciler.NewResource(key, fakeClient,
-		func() *virtv2.VirtualMachine {
-			return &virtv2.VirtualMachine{}
+		func() *v1alpha2.VirtualMachine {
+			return &v1alpha2.VirtualMachine{}
 		},
-		func(obj *virtv2.VirtualMachine) virtv2.VirtualMachineStatus {
+		func(obj *v1alpha2.VirtualMachine) v1alpha2.VirtualMachineStatus {
 			return obj.Status
 		})
 	err = resource.Fetch(context.Background())
diff --git a/images/virtualization-artifact/pkg/migration/disk_cache.go b/images/virtualization-artifact/pkg/migration/disk_cache.go
index 5edca578b0..4a49c11ce6 100644
--- a/images/virtualization-artifact/pkg/migration/disk_cache.go
+++ b/images/virtualization-artifact/pkg/migration/disk_cache.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type diskCache struct {
@@ -32,7 +32,7 @@ type diskCache struct {
 }
 
 func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) {
-	cviList := &virtv2.ClusterVirtualImageList{}
+	cviList := &v1alpha2.ClusterVirtualImageList{}
 	if err := c.List(ctx, cviList, &client.ListOptions{}); err != nil {
 		return diskCache{}, err
 	}
@@ -41,7 +41,7 @@ func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) {
 		cviNameUIDMap[cviList.Items[i].Name] = cviList.Items[i].UID
 	}
 
-	viList := &virtv2.VirtualImageList{}
+	viList := &v1alpha2.VirtualImageList{}
 	if err := c.List(ctx, viList, &client.ListOptions{}); err != nil {
 		return diskCache{}, err
 	}
@@ -53,7 +53,7 @@ func newDiskCache(ctx context.Context, c client.Client) (diskCache, error) {
 		}] = viList.Items[i].UID
 	}
 
-	vdList := &virtv2.VirtualDiskList{}
+	vdList := &v1alpha2.VirtualDiskList{}
 	if err := c.List(ctx, vdList, &client.ListOptions{}); err != nil {
 		return diskCache{}, err
 	}
diff --git a/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go b/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go
index 2c844fba2f..19b65d46a4 100644
--- a/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go
+++ b/images/virtualization-artifact/pkg/migration/qemu_max_length_36_test.go
@@ -28,7 +28,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/deckhouse/virtualization-controller/pkg/common/testutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func TestMigrationSuite(t *testing.T) {
@@ -106,10 +106,10 @@ const (
 )
 
 var (
-	vdQemu36 = &virtv2.VirtualDisk{
+	vdQemu36 = &v1alpha2.VirtualDisk{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: virtv2.SchemeGroupVersion.String(),
-			Kind:       virtv2.VirtualDiskKind,
+			APIVersion: v1alpha2.SchemeGroupVersion.String(),
+			Kind:       v1alpha2.VirtualDiskKind,
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      vdQemu36Name,
@@ -117,10 +117,10 @@ var (
 			UID:       vdQemu36UID,
 		},
 	}
-	viQemu36 = &virtv2.VirtualImage{
+	viQemu36 = &v1alpha2.VirtualImage{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: virtv2.SchemeGroupVersion.String(),
-			Kind:       virtv2.VirtualImageKind,
+			APIVersion: v1alpha2.SchemeGroupVersion.String(),
+			Kind:       v1alpha2.VirtualImageKind,
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      viQemu36Name,
@@ -128,10 +128,10 @@ var (
 			UID:       viQemu36UID,
 		},
 	}
-	cviQemu36 = &virtv2.ClusterVirtualImage{
+	cviQemu36 = &v1alpha2.ClusterVirtualImage{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: virtv2.SchemeGroupVersion.String(),
-			Kind:       virtv2.ClusterVirtualImageKind,
+			APIVersion: v1alpha2.SchemeGroupVersion.String(),
+			Kind:       v1alpha2.ClusterVirtualImageKind,
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: cviQemu36Name,
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go
index a52afc0033..cf4455022b 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/data_metric.go
@@ -17,17 +17,17 @@ limitations under the License.
 package cvi
 
 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
 	Name  string
 	UID   string
-	Phase virtv2.ImagePhase
+	Phase v1alpha2.ImagePhase
 }
 
 // DO NOT mutate ClusterVirtualImage!
-func newDataMetric(cvi *virtv2.ClusterVirtualImage) *dataMetric {
+func newDataMetric(cvi *v1alpha2.ClusterVirtualImage) *dataMetric {
 	if cvi == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
index bfaa8555eb..f967050efa 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/scraper.go
@@ -23,7 +23,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,18 +42,18 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricClusterVirtualImageStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.ImagePending
+		phase = v1alpha2.ImagePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.ImagePending, string(virtv2.ImagePending)},
-		{phase == virtv2.ImageWaitForUserUpload, string(virtv2.ImageWaitForUserUpload)},
-		{phase == virtv2.ImageProvisioning, string(virtv2.ImageProvisioning)},
-		{phase == virtv2.ImageReady, string(virtv2.ImageReady)},
-		{phase == virtv2.ImageFailed, string(virtv2.ImageFailed)},
-		{phase == virtv2.ImageTerminating, string(virtv2.ImageTerminating)},
+		{phase == v1alpha2.ImagePending, string(v1alpha2.ImagePending)},
+		{phase == v1alpha2.ImageWaitForUserUpload, string(v1alpha2.ImageWaitForUserUpload)},
+		{phase == v1alpha2.ImageProvisioning, string(v1alpha2.ImageProvisioning)},
+		{phase == v1alpha2.ImageReady, string(v1alpha2.ImageReady)},
+		{phase == v1alpha2.ImageFailed, string(v1alpha2.ImageFailed)},
+		{phase == v1alpha2.ImageTerminating, string(v1alpha2.ImageTerminating)},
 	}
 
 	for _, p := range phases {
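Every scraper touched by this PR expands a phase enum the same way: one gauge sample per known phase, 1 for the current phase and 0 for the rest, so dashboards always see a stable set of series. A runnable sketch of that expansion using the prometheus client; the metric name, label values, and Desc wiring are illustrative and simpler than the module's MetricInfo helpers:

// Sketch only: phase-expansion into per-phase gauge samples.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"clustervirtualimage_status_phase",
		"ClusterVirtualImage phase.",
		[]string{"name", "uid", "phase"},
		nil,
	)

	current := "Ready" // empty phases are reported as Pending by the scrapers above
	ch := make(chan prometheus.Metric, 6)
	for _, phase := range []string{"Pending", "WaitForUserUpload", "Provisioning", "Ready", "Failed", "Terminating"} {
		value := 0.0
		if phase == current {
			value = 1.0
		}
		m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, value, "ubuntu-img", "0000-uid", phase)
		if err != nil {
			panic(err)
		}
		ch <- m // the real scrapers send into the collector's channel
	}
	close(ch)
	fmt.Println("emitted", len(ch), "phase samples") // emitted 6 phase samples
}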
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
index 02915444c4..61d0057d24 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/cvi/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects ClusterVirtualImage and create new DTO.
 // DO NOT mutate ClusterVirtualImage!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	cvis := virtv2.ClusterVirtualImageList{}
+	cvis := v1alpha2.ClusterVirtualImageList{}
 	if err := l.reader.List(ctx, &cvis, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
index 8d2cdd93e0..2b76f8c544 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/data_metric.go
@@ -20,20 +20,20 @@ import (
 	"strings"
 
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
 	Name        string
 	Namespace   string
 	UID         string
-	Phase       virtv2.DiskPhase
+	Phase       v1alpha2.DiskPhase
 	Labels      map[string]string
 	Annotations map[string]string
 }
 
 // DO NOT mutate VirtualDisk!
-func newDataMetric(vd *virtv2.VirtualDisk) *dataMetric {
+func newDataMetric(vd *v1alpha2.VirtualDisk) *dataMetric {
 	if vd == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
index e452c146bf..4d32a01912 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/scraper.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -45,21 +45,21 @@ func (s *scraper) updateMetricDiskStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.DiskPending
+		phase = v1alpha2.DiskPending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.DiskPending, string(virtv2.DiskPending)},
-		{phase == virtv2.DiskWaitForUserUpload, string(virtv2.DiskWaitForUserUpload)},
-		{phase == virtv2.DiskWaitForFirstConsumer, string(virtv2.DiskWaitForFirstConsumer)},
-		{phase == virtv2.DiskProvisioning, string(virtv2.DiskProvisioning)},
-		{phase == virtv2.DiskFailed, string(virtv2.DiskFailed)},
-		{phase == virtv2.DiskLost, string(virtv2.DiskLost)},
-		{phase == virtv2.DiskReady, string(virtv2.DiskReady)},
-		{phase == virtv2.DiskResizing, string(virtv2.DiskResizing)},
-		{phase == virtv2.DiskTerminating, string(virtv2.DiskTerminating)},
+		{phase == v1alpha2.DiskPending, string(v1alpha2.DiskPending)},
+		{phase == v1alpha2.DiskWaitForUserUpload, string(v1alpha2.DiskWaitForUserUpload)},
+		{phase == v1alpha2.DiskWaitForFirstConsumer, string(v1alpha2.DiskWaitForFirstConsumer)},
+		{phase == v1alpha2.DiskProvisioning, string(v1alpha2.DiskProvisioning)},
+		{phase == v1alpha2.DiskFailed, string(v1alpha2.DiskFailed)},
+		{phase == v1alpha2.DiskLost, string(v1alpha2.DiskLost)},
+		{phase == v1alpha2.DiskReady, string(v1alpha2.DiskReady)},
+		{phase == v1alpha2.DiskResizing, string(v1alpha2.DiskResizing)},
+		{phase == v1alpha2.DiskTerminating, string(v1alpha2.DiskTerminating)},
 	}
 
 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
index 49bf9342ba..362438b6c2 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vd/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VirtualDisk and create new DTO.
 // DO NOT mutate VirtualDisk!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vds := virtv2.VirtualDiskList{}
+	vds := v1alpha2.VirtualDiskList{}
 	if err := l.reader.List(ctx, &vds, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
index dd625270ab..95928c3be7 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/data_metric.go
@@ -17,26 +17,28 @@ limitations under the License.
 package vdsnapshot
 
 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
-	Name      string
-	Namespace string
-	UID       string
-	Phase     virtv2.VirtualDiskSnapshotPhase
+	Name        string
+	Namespace   string
+	UID         string
+	Phase       v1alpha2.VirtualDiskSnapshotPhase
+	VirtualDisk string
 }
 
 // DO NOT mutate VirtualDiskSnapshot!
-func newDataMetric(vds *virtv2.VirtualDiskSnapshot) *dataMetric {
+func newDataMetric(vds *v1alpha2.VirtualDiskSnapshot) *dataMetric {
 	if vds == nil {
 		return nil
 	}
 
 	return &dataMetric{
-		Name:      vds.Name,
-		Namespace: vds.Namespace,
-		UID:       string(vds.UID),
-		Phase:     vds.Status.Phase,
+		Name:        vds.Name,
+		Namespace:   vds.Namespace,
+		UID:         string(vds.UID),
+		Phase:       vds.Status.Phase,
+		VirtualDisk: vds.Spec.VirtualDiskName,
 	}
 }
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/metrics.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/metrics.go
index 81d8ae8ed2..9a2f881260 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/metrics.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/metrics.go
@@ -24,6 +24,7 @@ import (
 const (
 	MetricVDSnapshotStatusPhase = "virtualdisksnapshot_status_phase"
+	MetricVDSnapshotInfo        = "virtualdisksnapshot_info"
 )
 
 var baseLabels = []string{"name", "namespace", "uid"}
@@ -52,4 +53,12 @@ var vdsnapshotMetrics = map[string]metrics.MetricInfo{
 		WithBaseLabels("phase"),
 		nil,
 	),
+
+	MetricVDSnapshotInfo: metrics.NewMetricInfo(
+		MetricVDSnapshotInfo,
+		"Name of the VirtualDisk the VirtualDiskSnapshot was taken from.",
+		prometheus.GaugeValue,
+		WithBaseLabels("virtualdisk"),
+		nil,
+	),
 }
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
index cd8642e71e..b4a8081df4 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/scraper.go
@@ -23,7 +23,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -37,22 +37,23 @@ type scraper struct {
 func (s *scraper) Report(m *dataMetric) {
 	s.updateMetricVDSnapshotStatusPhase(m)
+	s.updateMetricVDSnapshotInfo(m)
 }
 
 func (s *scraper) updateMetricVDSnapshotStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.VirtualDiskSnapshotPhasePending
+		phase = v1alpha2.VirtualDiskSnapshotPhasePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.VirtualDiskSnapshotPhasePending, string(virtv2.VirtualDiskSnapshotPhasePending)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseInProgress, string(virtv2.VirtualDiskSnapshotPhaseInProgress)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseReady, string(virtv2.VirtualDiskSnapshotPhaseReady)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseFailed, string(virtv2.VirtualDiskSnapshotPhaseFailed)},
-		{phase == virtv2.VirtualDiskSnapshotPhaseTerminating, string(virtv2.VirtualDiskSnapshotPhaseTerminating)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhasePending, string(v1alpha2.VirtualDiskSnapshotPhasePending)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseInProgress, string(v1alpha2.VirtualDiskSnapshotPhaseInProgress)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseReady, string(v1alpha2.VirtualDiskSnapshotPhaseReady)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseFailed, string(v1alpha2.VirtualDiskSnapshotPhaseFailed)},
+		{phase == v1alpha2.VirtualDiskSnapshotPhaseTerminating, string(v1alpha2.VirtualDiskSnapshotPhaseTerminating)},
 	}
 
 	for _, p := range phases {
@@ -61,6 +62,10 @@ func (s *scraper) updateMetricVDSnapshotStatusPhase(m *dataMetric) {
 	}
 }
 
+func (s *scraper) updateMetricVDSnapshotInfo(m *dataMetric) {
+	s.defaultUpdate(MetricVDSnapshotInfo, 1, m, m.VirtualDisk)
+}
+
 func (s *scraper) defaultUpdate(descName string, value float64, m *dataMetric, labels ...string) {
 	info := vdsnapshotMetrics[descName]
 	metric, err := prometheus.NewConstMetric(
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
index a23bb3dc68..481caf09e0 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vdsnapshot/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VirtualDiskSnapshot and create new DTO.
 // DO NOT mutate VirtualDiskSnapshot!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vdss := virtv2.VirtualDiskSnapshotList{}
+	vdss := v1alpha2.VirtualDiskSnapshotList{}
 	if err := l.reader.List(ctx, &vdss, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
index d0b12d2f92..4a9478a728 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/data_metric.go
@@ -17,18 +17,18 @@ limitations under the License.
 package vi
 
 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
 	Name      string
 	Namespace string
 	UID       string
-	Phase     virtv2.ImagePhase
+	Phase     v1alpha2.ImagePhase
 }
 
 // DO NOT mutate VirtualImage!
-func newDataMetric(vi *virtv2.VirtualImage) *dataMetric {
+func newDataMetric(vi *v1alpha2.VirtualImage) *dataMetric {
 	if vi == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
index eade3bb97a..bb58aa6656 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/scraper.go
@@ -23,7 +23,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,19 +42,19 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVirtualImageStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.ImagePending
+		phase = v1alpha2.ImagePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.ImagePending, string(virtv2.ImagePending)},
-		{phase == virtv2.ImageWaitForUserUpload, string(virtv2.ImageWaitForUserUpload)},
-		{phase == virtv2.ImageProvisioning, string(virtv2.ImageProvisioning)},
-		{phase == virtv2.ImageReady, string(virtv2.ImageReady)},
-		{phase == virtv2.ImageFailed, string(virtv2.ImageFailed)},
-		{phase == virtv2.ImageTerminating, string(virtv2.ImageTerminating)},
-		{phase == virtv2.ImageLost, string(virtv2.ImageLost)},
+		{phase == v1alpha2.ImagePending, string(v1alpha2.ImagePending)},
+		{phase == v1alpha2.ImageWaitForUserUpload, string(v1alpha2.ImageWaitForUserUpload)},
+		{phase == v1alpha2.ImageProvisioning, string(v1alpha2.ImageProvisioning)},
+		{phase == v1alpha2.ImageReady, string(v1alpha2.ImageReady)},
+		{phase == v1alpha2.ImageFailed, string(v1alpha2.ImageFailed)},
+		{phase == v1alpha2.ImageTerminating, string(v1alpha2.ImageTerminating)},
+		{phase == v1alpha2.ImageLost, string(v1alpha2.ImageLost)},
 	}
 
 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
index e3339bb777..732b4fc494 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vi/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VirtualImage and create new DTO.
 // DO NOT mutate VirtualImage!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vis := virtv2.VirtualImageList{}
+	vis := v1alpha2.VirtualImageList{}
 	if err := l.reader.List(ctx, &vis, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
index f7b385228c..04e8e24880 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/data_metric.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/virtualization-controller/pkg/controller/conditions"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition"
 )
 
@@ -33,7 +33,7 @@ type dataMetric struct {
 	Namespace                           string
 	Node                                string
 	UID                                 string
-	Phase                               virtv2.MachinePhase
+	Phase                               v1alpha2.MachinePhase
 	CPUConfigurationCores               float64
 	CPUConfigurationCoreFraction        float64
 	CPUCores                            float64
@@ -44,15 +44,15 @@ type dataMetric struct {
 	AwaitingRestartToApplyConfiguration bool
 	ConfigurationApplied                bool
 	AgentReady                          bool
-	RunPolicy                           virtv2.RunPolicy
-	Pods                                []virtv2.VirtualMachinePod
+	RunPolicy                           v1alpha2.RunPolicy
+	Pods                                []v1alpha2.VirtualMachinePod
 	Labels                              map[string]string
 	Annotations                         map[string]string
 	firmwareUpToDate                    bool
 }
 
 // DO NOT mutate VirtualMachine!
-func newDataMetric(vm *virtv2.VirtualMachine) *dataMetric {
+func newDataMetric(vm *v1alpha2.VirtualMachine) *dataMetric {
 	if vm == nil {
 		return nil
 	}
@@ -79,7 +79,7 @@ func newDataMetric(vm *virtv2.VirtualMachine) *dataMetric {
 	firmwareUpToDateCondition, _ := conditions.GetCondition(vmcondition.TypeFirmwareUpToDate, vm.Status.Conditions)
 	firmwareUpToDate = firmwareUpToDateCondition.Status != metav1.ConditionFalse
 
-	pods := make([]virtv2.VirtualMachinePod, len(vm.Status.VirtualMachinePods))
+	pods := make([]v1alpha2.VirtualMachinePod, len(vm.Status.VirtualMachinePods))
 	for i, pod := range vm.Status.VirtualMachinePods {
 		pods[i] = *pod.DeepCopy()
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
index c94d5445c0..573d90c9ef 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/scraper.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -58,21 +58,21 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVirtualMachineStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.MachinePending
+		phase = v1alpha2.MachinePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.MachinePending, string(virtv2.MachinePending)},
-		{phase == virtv2.MachineRunning, string(virtv2.MachineRunning)},
-		{phase == virtv2.MachineDegraded, string(virtv2.MachineDegraded)},
-		{phase == virtv2.MachineTerminating, string(virtv2.MachineTerminating)},
-		{phase == virtv2.MachineStopped, string(virtv2.MachineStopped)},
-		{phase == virtv2.MachineStopping, string(virtv2.MachineStopping)},
-		{phase == virtv2.MachineStarting, string(virtv2.MachineStarting)},
-		{phase == virtv2.MachineMigrating, string(virtv2.MachineMigrating)},
-		{phase == virtv2.MachinePause, string(virtv2.MachinePause)},
+		{phase == v1alpha2.MachinePending, string(v1alpha2.MachinePending)},
+		{phase == v1alpha2.MachineRunning, string(v1alpha2.MachineRunning)},
+		{phase == v1alpha2.MachineDegraded, string(v1alpha2.MachineDegraded)},
+		{phase == v1alpha2.MachineTerminating, string(v1alpha2.MachineTerminating)},
+		{phase == v1alpha2.MachineStopped, string(v1alpha2.MachineStopped)},
+		{phase == v1alpha2.MachineStopping, string(v1alpha2.MachineStopping)},
+		{phase == v1alpha2.MachineStarting, string(v1alpha2.MachineStarting)},
+		{phase == v1alpha2.MachineMigrating, string(v1alpha2.MachineMigrating)},
+		{phase == v1alpha2.MachinePause, string(v1alpha2.MachinePause)},
 	}
 
 	for _, p := range phases {
 		s.defaultUpdate(MetricVirtualMachineStatusPhase,
@@ -135,10 +135,10 @@ func (s *scraper) updateMetricVirtualMachineConfigurationRunPolicy(m *dataMetric
 		value bool
 		name  string
 	}{
-		{policy == virtv2.AlwaysOnPolicy, string(virtv2.AlwaysOnPolicy)},
-		{policy == virtv2.AlwaysOffPolicy, string(virtv2.AlwaysOffPolicy)},
-		{policy == virtv2.ManualPolicy, string(virtv2.ManualPolicy)},
-		{policy == virtv2.AlwaysOnUnlessStoppedManually, string(virtv2.AlwaysOnUnlessStoppedManually)},
+		{policy == v1alpha2.AlwaysOnPolicy, string(v1alpha2.AlwaysOnPolicy)},
+		{policy == v1alpha2.AlwaysOffPolicy, string(v1alpha2.AlwaysOffPolicy)},
+		{policy == v1alpha2.ManualPolicy, string(v1alpha2.ManualPolicy)},
+		{policy == v1alpha2.AlwaysOnUnlessStoppedManually, string(v1alpha2.AlwaysOnUnlessStoppedManually)},
 	}
 	for _, p := range policies {
 		s.defaultUpdate(MetricVirtualMachineConfigurationRunPolicy,
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
index 7af2f9730b..b27e3feac6 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/virtualmachine/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VirtualMachine and create new DTO.
 // DO NOT mutate VirtualMachine!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vms := virtv2.VirtualMachineList{}
+	vms := v1alpha2.VirtualMachineList{}
 	if err := l.reader.List(ctx, &vms, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go
index b07bb3b759..4518cbca1c 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/data_metric.go
@@ -20,20 +20,20 @@ import (
 	"strings"
 
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
 	Name        string
	Namespace   string
 	UID         string
-	Phase       virtv2.BlockDeviceAttachmentPhase
+	Phase       v1alpha2.BlockDeviceAttachmentPhase
 	Labels      map[string]string
 	Annotations map[string]string
 }
 
 // DO NOT mutate VirtualMachineBlockDeviceAttachment!
-func newDataMetric(vmbda *virtv2.VirtualMachineBlockDeviceAttachment) *dataMetric {
+func newDataMetric(vmbda *v1alpha2.VirtualMachineBlockDeviceAttachment) *dataMetric {
 	if vmbda == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go
index 45139d73af..f17ae04fd6 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/scraper.go
@@ -24,7 +24,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
 	"github.com/deckhouse/virtualization-controller/pkg/monitoring/metrics/promutil"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -45,17 +45,17 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVMBDAStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.BlockDeviceAttachmentPhasePending
+		phase = v1alpha2.BlockDeviceAttachmentPhasePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.BlockDeviceAttachmentPhasePending, string(virtv2.BlockDeviceAttachmentPhasePending)},
-		{phase == virtv2.BlockDeviceAttachmentPhaseInProgress, string(virtv2.BlockDeviceAttachmentPhaseInProgress)},
-		{phase == virtv2.BlockDeviceAttachmentPhaseAttached, string(virtv2.BlockDeviceAttachmentPhaseAttached)},
-		{phase == virtv2.BlockDeviceAttachmentPhaseFailed, string(virtv2.BlockDeviceAttachmentPhaseFailed)},
-		{phase == virtv2.BlockDeviceAttachmentPhaseTerminating, string(virtv2.BlockDeviceAttachmentPhaseTerminating)},
+		{phase == v1alpha2.BlockDeviceAttachmentPhasePending, string(v1alpha2.BlockDeviceAttachmentPhasePending)},
+		{phase == v1alpha2.BlockDeviceAttachmentPhaseInProgress, string(v1alpha2.BlockDeviceAttachmentPhaseInProgress)},
+		{phase == v1alpha2.BlockDeviceAttachmentPhaseAttached, string(v1alpha2.BlockDeviceAttachmentPhaseAttached)},
+		{phase == v1alpha2.BlockDeviceAttachmentPhaseFailed, string(v1alpha2.BlockDeviceAttachmentPhaseFailed)},
+		{phase == v1alpha2.BlockDeviceAttachmentPhaseTerminating, string(v1alpha2.BlockDeviceAttachmentPhaseTerminating)},
 	}
 
 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go
index d1b861f866..f4762cce73 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmbda/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VMBDA and create new DTO.
 // DO NOT mutate VMBDA!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vmbdas := virtv2.VirtualMachineBlockDeviceAttachmentList{}
+	vmbdas := v1alpha2.VirtualMachineBlockDeviceAttachmentList{}
 	if err := l.reader.List(ctx, &vmbdas, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go
index 08bbfa67e4..99246e0524 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/data_metric.go
@@ -16,17 +16,17 @@ limitations under the License.
 
 package vmop
 
-import virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+import "github.com/deckhouse/virtualization/api/core/v1alpha2"
 
 type dataMetric struct {
 	Name      string
 	Namespace string
 	UID       string
-	Phase     virtv2.VMOPPhase
+	Phase     v1alpha2.VMOPPhase
 }
 
 // DO NOT mutate VirtualMachineOperation!
-func newDataMetric(vmop *virtv2.VirtualMachineOperation) *dataMetric {
+func newDataMetric(vmop *v1alpha2.VirtualMachineOperation) *dataMetric {
 	if vmop == nil {
 		return nil
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go
index c748e1ade7..e720e1da45 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/scraper.go
@@ -23,7 +23,7 @@ import (
 	"github.com/deckhouse/deckhouse/pkg/log"
 	"github.com/deckhouse/virtualization-controller/pkg/common"
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper {
@@ -42,17 +42,17 @@ func (s *scraper) Report(m *dataMetric) {
 func (s *scraper) updateMetricVMOPStatusPhase(m *dataMetric) {
 	phase := m.Phase
 	if phase == "" {
-		phase = virtv2.VMOPPhasePending
+		phase = v1alpha2.VMOPPhasePending
 	}
 
 	phases := []struct {
 		value bool
 		name  string
 	}{
-		{phase == virtv2.VMOPPhasePending, string(virtv2.VMOPPhasePending)},
-		{phase == virtv2.VMOPPhaseInProgress, string(virtv2.VMOPPhaseInProgress)},
-		{phase == virtv2.VMOPPhaseCompleted, string(virtv2.VMOPPhaseCompleted)},
-		{phase == virtv2.VMOPPhaseFailed, string(virtv2.VMOPPhaseFailed)},
-		{phase == virtv2.VMOPPhaseTerminating, string(virtv2.VMOPPhaseTerminating)},
+		{phase == v1alpha2.VMOPPhasePending, string(v1alpha2.VMOPPhasePending)},
+		{phase == v1alpha2.VMOPPhaseInProgress, string(v1alpha2.VMOPPhaseInProgress)},
+		{phase == v1alpha2.VMOPPhaseCompleted, string(v1alpha2.VMOPPhaseCompleted)},
+		{phase == v1alpha2.VMOPPhaseFailed, string(v1alpha2.VMOPPhaseFailed)},
+		{phase == v1alpha2.VMOPPhaseTerminating, string(v1alpha2.VMOPPhaseTerminating)},
 	}
 
 	for _, p := range phases {
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go
index cb3859edaa..4cd8a758bd 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmop/unsafe.go
@@ -21,7 +21,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 func newUnsafeIterator(reader client.Reader) *iterator {
@@ -37,7 +37,7 @@ type iterator struct {
 // Iter implements iteration on objects VirtualMachineOperation and create new DTO.
 // DO NOT mutate VirtualMachineOperation!
 func (l *iterator) Iter(ctx context.Context, h handler) error {
-	vmops := virtv2.VirtualMachineOperationList{}
+	vmops := v1alpha2.VirtualMachineOperationList{}
 	if err := l.reader.List(ctx, &vmops, client.UnsafeDisableDeepCopy); err != nil {
 		return err
 	}
diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go
index 2abcb69370..a21f34caba 100644
--- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go
+++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/data_metric.go
@@ -17,26 +17,28 @@ limitations under the License.
 package vmsnapshot
 
 import (
-	virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 )
 
 type dataMetric struct {
-	Name      string
-	Namespace string
-	UID       string
-	Phase     virtv2.VirtualMachineSnapshotPhase
+	Name           string
+	Namespace      string
+	UID            string
+	Phase          v1alpha2.VirtualMachineSnapshotPhase
+	VirtualMachine string
 }
 
 // DO NOT mutate VirtualMachineSnapshot!
-func newDataMetric(vms *virtv2.VirtualMachineSnapshot) *dataMetric { +func newDataMetric(vms *v1alpha2.VirtualMachineSnapshot) *dataMetric { if vms == nil { return nil } return &dataMetric{ - Name: vms.Name, - Namespace: vms.Namespace, - UID: string(vms.UID), - Phase: vms.Status.Phase, + Name: vms.Name, + Namespace: vms.Namespace, + UID: string(vms.UID), + Phase: vms.Status.Phase, + VirtualMachine: vms.Spec.VirtualMachineName, } } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/metrics.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/metrics.go index 1df4c19253..b6e763a907 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/metrics.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/metrics.go @@ -24,6 +24,7 @@ import ( const ( MetricVMSnapshotStatusPhase = "virtualmachinesnapshot_status_phase" + MetricVMSnapshotInfo = "virtualmachinesnapshot_info" ) var baseLabels = []string{"name", "namespace", "uid"} @@ -52,4 +53,12 @@ var vmsnapshotMetrics = map[string]metrics.MetricInfo{ WithBaseLabels("phase"), nil, ), + + MetricVMSnapshotInfo: metrics.NewMetricInfo( + MetricVMSnapshotInfo, + "The virtualmachinesnapshot virtualmachine name.", + prometheus.GaugeValue, + WithBaseLabels("virtualmachine"), + nil, + ), } diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go index fe67eb42f1..d1702c367d 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/scraper.go @@ -23,7 +23,7 @@ import ( "github.com/deckhouse/deckhouse/pkg/log" "github.com/deckhouse/virtualization-controller/pkg/common" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newScraper(ch chan<- prometheus.Metric, log *log.Logger) *scraper { @@ -37,22 +37,23 @@ type scraper struct { func (s *scraper) Report(m *dataMetric) { s.updateMetricVMSnapshotStatusPhase(m) + s.updateMetricVMSnapshotInfo(m) } func (s *scraper) updateMetricVMSnapshotStatusPhase(m *dataMetric) { phase := m.Phase if phase == "" { - phase = virtv2.VirtualMachineSnapshotPhasePending + phase = v1alpha2.VirtualMachineSnapshotPhasePending } phases := []struct { value bool name string }{ - {phase == virtv2.VirtualMachineSnapshotPhasePending, string(virtv2.VirtualMachineSnapshotPhasePending)}, - {phase == virtv2.VirtualMachineSnapshotPhaseInProgress, string(virtv2.VirtualMachineSnapshotPhaseInProgress)}, - {phase == virtv2.VirtualMachineSnapshotPhaseReady, string(virtv2.VirtualMachineSnapshotPhaseReady)}, - {phase == virtv2.VirtualMachineSnapshotPhaseFailed, string(virtv2.VirtualMachineSnapshotPhaseFailed)}, - {phase == virtv2.VirtualMachineSnapshotPhaseTerminating, string(virtv2.VirtualMachineSnapshotPhaseTerminating)}, + {phase == v1alpha2.VirtualMachineSnapshotPhasePending, string(v1alpha2.VirtualMachineSnapshotPhasePending)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseInProgress, string(v1alpha2.VirtualMachineSnapshotPhaseInProgress)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseReady, string(v1alpha2.VirtualMachineSnapshotPhaseReady)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseFailed, string(v1alpha2.VirtualMachineSnapshotPhaseFailed)}, + {phase == v1alpha2.VirtualMachineSnapshotPhaseTerminating, string(v1alpha2.VirtualMachineSnapshotPhaseTerminating)}, } for _, p := 
range phases { @@ -61,6 +62,10 @@ func (s *scraper) updateMetricVMSnapshotStatusPhase(m *dataMetric) { } } +func (s *scraper) updateMetricVMSnapshotInfo(m *dataMetric) { + s.defaultUpdate(MetricVMSnapshotInfo, 1, m, m.VirtualMachine) +} + func (s *scraper) defaultUpdate(descName string, value float64, m *dataMetric, labels ...string) { info := vmsnapshotMetrics[descName] metric, err := prometheus.NewConstMetric( diff --git a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go index df0c325c31..2282d59053 100644 --- a/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go +++ b/images/virtualization-artifact/pkg/monitoring/metrics/vmsnapshot/unsafe.go @@ -21,7 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) func newUnsafeIterator(reader client.Reader) *iterator { @@ -37,7 +37,7 @@ type iterator struct { // Iter iterates over VirtualMachineSnapshot objects and creates a new DTO for each one. // DO NOT mutate VirtualMachineSnapshot! func (l *iterator) Iter(ctx context.Context, h handler) error { - vmss := virtv2.VirtualMachineSnapshotList{} + vmss := v1alpha2.VirtualMachineSnapshotList{} if err := l.reader.List(ctx, &vmss, client.UnsafeDisableDeepCopy); err != nil { return err } diff --git a/images/vm-route-forge/internal/netlinkmanager/manager.go b/images/vm-route-forge/internal/netlinkmanager/manager.go index 461dba76d0..b2a378232f 100644 --- a/images/vm-route-forge/internal/netlinkmanager/manager.go +++ b/images/vm-route-forge/internal/netlinkmanager/manager.go @@ -21,6 +21,8 @@ import ( "fmt" "net" "os" + "vm-route-forge/internal/netlinkwrap" + "vm-route-forge/internal/netutil" ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" "github.com/cilium/cilium/pkg/node/addressing" @@ -31,9 +33,7 @@ import ( vmipcache "vm-route-forge/internal/cache" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" - "vm-route-forge/internal/netlinkwrap" - "vm-route-forge/internal/netutil" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) const ( @@ -150,7 +150,7 @@ func (m *Manager) isManagedIP(ip string) (bool, error) { } // UpdateRoute updates the route for a single VirtualMachine. -func (m *Manager) UpdateRoute(vm *virtv2.VirtualMachine, ciliumNode *ciliumv2.CiliumNode) error { +func (m *Manager) UpdateRoute(vm *v1alpha2.VirtualMachine, ciliumNode *ciliumv2.CiliumNode) error { // TODO Add cleanup if the node was lost? // TODO What about migration? Is nodeName just changed to the new node, or do we need workarounds when 2 Pods are running? if vm == nil { diff --git a/oss.yaml b/oss.yaml new file mode 100644 index 0000000000..3f8764e6e8 --- /dev/null +++ b/oss.yaml @@ -0,0 +1,8 @@ +- name: KubeVirt + link: https://github.com/kubevirt/kubevirt + description: KubeVirt is a virtual machine management add-on for Kubernetes. + license: Apache License 2.0 +- name: CDI + link: https://github.com/kubevirt/containerized-data-importer + description: Containerized-Data-Importer (CDI) is a persistent storage management add-on for Kubernetes. + license: Apache License 2.0 diff --git a/src/cli/.golangci.yaml b/src/cli/.golangci.yaml index 0867b18310..1be21e2a37 100644 --- a/src/cli/.golangci.yaml +++ b/src/cli/.golangci.yaml @@ -39,6 +39,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed.
# Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: subv1alpha2 + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -77,3 +105,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. + - importas # checks import aliases against the configured convention diff --git a/src/cli/internal/cmd/portforward/portforward.go b/src/cli/internal/cmd/portforward/portforward.go index 01c971b129..0bf4778187 100644 --- a/src/cli/internal/cmd/portforward/portforward.go +++ b/src/cli/internal/cmd/portforward/portforward.go @@ -30,7 +30,7 @@ import ( "k8s.io/klog/v2" virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" "github.com/deckhouse/virtualization/src/cli/internal/templates" ) @@ -121,23 +121,23 @@ func (o *PortForward) Run(cmd *cobra.Command, args []string) error { func (o *PortForward) prepareCommand(defaultNamespace string, args []string) (namespace, name string, ports []forwardedPort, err error) { namespace, name, err = templates.ParseTarget(args[0]) if err != nil { - return + return namespace, name, ports, err } ports, err = parsePorts(args[1:]) if err != nil { - return + return namespace, name, ports, err } if namespace == "" { namespace = defaultNamespace } - return + return namespace, name, ports, err } func (o *PortForward) startStdoutStream(namespace, name string, port forwardedPort) error { - streamer, err := o.resource.PortForward(name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + streamer, err := o.resource.PortForward(name, subv1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { return err } diff --git a/src/cli/internal/cmd/portforward/portforwarder.go b/src/cli/internal/cmd/portforward/portforwarder.go index 35cae757c0..0e8e7f5e99 100644 --- a/src/cli/internal/cmd/portforward/portforwarder.go +++ b/src/cli/internal/cmd/portforward/portforwarder.go @@ -27,7 +27,7 @@ import ( "k8s.io/klog/v2" virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) type portForwarder struct { @@ -37,7 +37,7 @@ type portForwarder struct { } type portforwardableResource interface { - PortForward(name string, options v1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, 
error) + PortForward(name string, options subv1alpha2.VirtualMachinePortForward) (virtualizationv1alpha2.StreamInterface, error) } func (p *portForwarder) startForwarding(address *net.IPAddr, port forwardedPort) error { diff --git a/src/cli/internal/cmd/portforward/tcp.go b/src/cli/internal/cmd/portforward/tcp.go index 736c9f89f8..2113413666 100644 --- a/src/cli/internal/cmd/portforward/tcp.go +++ b/src/cli/internal/cmd/portforward/tcp.go @@ -25,7 +25,7 @@ import ( "k8s.io/klog/v2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) func (p *portForwarder) startForwardingTCP(address *net.IPAddr, port forwardedPort) error { @@ -53,7 +53,7 @@ func (p *portForwarder) waitForConnection(listener net.Listener, port forwardedP return } klog.Infof("opening new tcp tunnel to %d", port.remote) - stream, err := p.resource.PortForward(p.name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + stream, err := p.resource.PortForward(p.name, subv1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { klog.Errorf("can't access vm/%s.%s: %v", p.name, p.namespace, err) return diff --git a/src/cli/internal/cmd/portforward/udp.go b/src/cli/internal/cmd/portforward/udp.go index 9cdc8010f5..d20d5ad226 100644 --- a/src/cli/internal/cmd/portforward/udp.go +++ b/src/cli/internal/cmd/portforward/udp.go @@ -25,7 +25,7 @@ import ( "k8s.io/klog/v2" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) const bufSize = 1500 @@ -47,7 +47,7 @@ func (p *portForwarder) startForwardingUDP(address *net.IPAddr, port forwardedPo listener: listener, remoteDialer: func() (net.Conn, error) { klog.Infof("opening new udp tunnel to %d", port.remote) - stream, err := p.resource.PortForward(p.name, v1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) + stream, err := p.resource.PortForward(p.name, subv1alpha2.VirtualMachinePortForward{Port: port.remote, Protocol: port.protocol}) if err != nil { klog.Errorf("can't access vm/%s.%s: %v", p.name, p.namespace, err) return nil, err diff --git a/src/cli/internal/cmd/ssh/native.go b/src/cli/internal/cmd/ssh/native.go index 829ae19a08..3d7dbae11f 100644 --- a/src/cli/internal/cmd/ssh/native.go +++ b/src/cli/internal/cmd/ssh/native.go @@ -32,7 +32,7 @@ import ( virtualizationv1alpha2 "github.com/deckhouse/virtualization/api/client/generated/clientset/versioned/typed/core/v1alpha2" "github.com/deckhouse/virtualization/api/client/kubeclient" - "github.com/deckhouse/virtualization/api/subresources/v1alpha2" + subv1alpha2 "github.com/deckhouse/virtualization/api/subresources/v1alpha2" ) func (o *SSH) nativeSSH(namespace, name string, virtClient kubeclient.Client) error { @@ -208,7 +208,7 @@ func (o *NativeSSHConnection) StartSession(client *ssh.Client, command string) e } func (o *NativeSSHConnection) prepareSSHTunnel(namespace, name string) (virtualizationv1alpha2.StreamInterface, error) { - opts := v1alpha2.VirtualMachinePortForward{ + opts := subv1alpha2.VirtualMachinePortForward{ Port: o.options.SSHPort, Protocol: "tcp", } diff --git a/src/cli/internal/templates/templates.go b/src/cli/internal/templates/templates.go index 0fda67a165..b48e5cb5c5 100644 --- a/src/cli/internal/templates/templates.go +++ b/src/cli/internal/templates/templates.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) // UsageTemplate returns the usage template for all subcommands @@ -97,7 +97,7 @@ func PrintWarningForPausedVM(ctx context.Context, virtCli kubeclient.Client, vmN if err != nil { return } - if vm.Status.Phase == virtv2.MachinePause { + if vm.Status.Phase == v1alpha2.MachinePause { _, _ = fmt.Fprintf(os.Stderr, "\rWarning: %s is paused. Console will be active after unpause.\n", vmName) } } diff --git a/tests/e2e/.golangci.yaml b/tests/e2e/.golangci.yaml index 6a3506df90..a7add3b595 100644 --- a/tests/e2e/.golangci.yaml +++ b/tests/e2e/.golangci.yaml @@ -44,6 +44,34 @@ linters-settings: # Enable to require nolint directives to mention the specific linter being suppressed. # Default: false require-specific: true + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + # Do not allow non-required aliases. + # Default: false + no-extra-aliases: false + # List of aliases + # Default: [] + alias: + - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2 + alias: "" + - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2 + alias: subv1alpha2 + - pkg: kubevirt.io/api/core/v1 + alias: virtv1 + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/api/authentication/v1 + alias: authnv1 + - pkg: k8s.io/api/storage/v1 + alias: storagev1 + - pkg: k8s.io/api/networking/v1 + alias: netv1 + - pkg: k8s.io/api/policy/v1 + alias: policyv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 linters: disable-all: true @@ -82,3 +110,4 @@ linters: - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - whitespace # detects leading and trailing whitespace - wastedassign # Finds wasted assignment statements. 
+ - importas # checks import aliases against the configured convention diff --git a/tests/e2e/Taskfile.yaml b/tests/e2e/Taskfile.yaml index d96cd9ca68..4a08f93ffe 100644 --- a/tests/e2e/Taskfile.yaml +++ b/tests/e2e/Taskfile.yaml @@ -56,7 +56,7 @@ tasks: {{if .TIMEOUT -}} --timeout={{ .TIMEOUT }} \ {{else -}} - --timeout=2h \ + --timeout=3h \ {{end -}} {{if .FOCUS -}} --focus "{{ .FOCUS }}" @@ -78,7 +78,7 @@ tasks: {{if .TIMEOUT -}} --timeout={{ .TIMEOUT }} \ {{else -}} - --timeout=2h \ + --timeout=3h \ {{end -}} {{if .FOCUS -}} --focus "{{ .FOCUS }}" diff --git a/tests/e2e/affinity_toleration_test.go b/tests/e2e/affinity_toleration_test.go index 59455c04b1..c7a671c653 100644 --- a/tests/e2e/affinity_toleration_test.go +++ b/tests/e2e/affinity_toleration_test.go @@ -28,7 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -71,7 +71,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -85,21 +85,21 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD }) It("checks the resources phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`VirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualMachineClasses` should be in %s phases", virtv2.ClassPhaseReady), func() { + By(fmt.Sprintf("`VirtualMachineClasses` should be in %s phases", v1alpha2.ClassPhaseReady), func() { WaitPhaseByLabel(kc.ResourceVMClass, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", virtv2.DiskReady), func() { + By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", v1alpha2.DiskReady), func() { WaitPhaseByLabel(kc.ResourceVD, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, @@ -119,10 +119,10 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD Context("When the virtual machines agents are ready", func() { It("checks the `status.nodeName` field of the `VirtualMachines`", func() { var ( - vmObjA = &virtv2.VirtualMachine{} - vmObjB = &virtv2.VirtualMachine{} - vmObjC = &virtv2.VirtualMachine{} - vmObjD = &virtv2.VirtualMachine{} + vmObjA = &v1alpha2.VirtualMachine{} + vmObjB = &v1alpha2.VirtualMachine{} + vmObjC = &v1alpha2.VirtualMachine{} + vmObjD = &v1alpha2.VirtualMachine{} err error ) By("Obtain the `VirtualMachine` objects", func() { @@ -167,15 +167,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObjC := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC := &v1alpha2.VirtualMachine{} + err := 
GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObjC.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if updatedVMObjC.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(LongWaitDuration).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -195,8 +195,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObjC := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmC) @@ -207,8 +207,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD By("Change anti-affinity to affinity when the `VirtualMachines` are running: `vm-a` and `vm-c` should be running on the same node", func() { wg := &sync.WaitGroup{} - updatedVMObjC := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) @@ -240,8 +240,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObjC = &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ + updatedVMObjC = &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineResource, vmObjC.Name, updatedVMObjC, kc.GetOptions{ Namespace: ns, }) if err != nil { @@ -286,7 +286,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD targetNode string err error ) - vmObj := &virtv2.VirtualMachine{} + vmObj := &v1alpha2.VirtualMachine{} By("Sets the `spec.nodeSelector` with the `status.nodeSelector` value", func() { vmObj, err = GetVirtualMachineObjByLabel(ns, vmNodeSelector) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -299,8 +299,8 @@ }) By("The `VirtualMachine` should not be migrated", func() { time.Sleep(20 * time.Second) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -315,8 +315,8 @@ By("Sets the `spec.nodeSelector` with `another node` value", func() { wg := &sync.WaitGroup{} - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource,
vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -332,15 +332,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObj.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if updatedVMObj.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(Timeout).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -356,8 +356,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeSelector) @@ -375,7 +375,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD targetNode string err error ) - vmObj := &virtv2.VirtualMachine{} + vmObj := &v1alpha2.VirtualMachine{} By("Sets the `spec.affinity.nodeAffinity` with the `status.nodeSelector` value", func() { vmObj, err = GetVirtualMachineObjByLabel(ns, vmNodeAffinity) Expect(err).NotTo(HaveOccurred()) @@ -391,8 +391,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD }) By("The `VirtualMachine` should not be migrated", func() { time.Sleep(20 * time.Second) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -407,8 +407,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD By("Sets the `spec.affinity.nodeAffinity` with `another node` value", func() { wg := &sync.WaitGroup{} - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -427,15 +427,15 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD defer GinkgoRecover() defer wg.Done() Eventually(func() error { - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, 
updatedVMObj, kc.GetOptions{ Namespace: ns, }) if err != nil { return err } - if updatedVMObj.Status.Phase != virtv2.MachineMigrating { - return fmt.Errorf("the `VirtualMachine` should be %s", virtv2.MachineMigrating) + if updatedVMObj.Status.Phase != v1alpha2.MachineMigrating { + return fmt.Errorf("the `VirtualMachine` should be %s", v1alpha2.MachineMigrating) } return nil }).WithTimeout(Timeout).WithPolling(migratingStatusPollingInterval).Should(Succeed()) @@ -451,8 +451,8 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD Namespace: ns, Timeout: MaxWaitTimeout, }) - updatedVMObj := &virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ + updatedVMObj := &v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmObj.Name, updatedVMObj, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred(), "failed to obtain the %q `VirtualMachine` object", vmNodeAffinity) @@ -470,7 +470,7 @@ var _ = Describe("VirtualMachineAffinityAndToleration", framework.CommonE2ETestD }) }) -func ExpectVirtualMachineIsMigratable(vmObj *virtv2.VirtualMachine) { +func ExpectVirtualMachineIsMigratable(vmObj *v1alpha2.VirtualMachine) { GinkgoHelper() for _, c := range vmObj.Status.Conditions { if c.Type == string(vmcondition.TypeMigratable) { @@ -503,8 +503,8 @@ func DefineTargetNode(sourceNode string, targetLabel map[string]string) (string, return "", fmt.Errorf("failed to define a target node") } -func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*virtv2.VirtualMachine, error) { - vmObjects := virtv2.VirtualMachineList{} +func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*v1alpha2.VirtualMachine, error) { + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{ Labels: label, Namespace: namespace, @@ -519,7 +519,7 @@ func GetVirtualMachineObjByLabel(namespace string, label map[string]string) (*vi } func GenerateNodeAffinityPatch(key string, operator corev1.NodeSelectorOperator, values []string) ([]byte, error) { - vmAffinity := &virtv2.VMAffinity{ + vmAffinity := &v1alpha2.VMAffinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ @@ -545,8 +545,8 @@ func GenerateNodeAffinityPatch(key string, operator corev1.NodeSelectorOperator, } func GenerateVirtualMachineAndPodAntiAffinityPatch(key, topologyKey string, operator metav1.LabelSelectorOperator, values []string) ([]byte, error) { - vmAndPodAntiAffinity := &virtv2.VirtualMachineAndPodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []virtv2.VirtualMachineAndPodAffinityTerm{ + vmAndPodAntiAffinity := &v1alpha2.VirtualMachineAndPodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1alpha2.VirtualMachineAndPodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ @@ -570,8 +570,8 @@ func GenerateVirtualMachineAndPodAntiAffinityPatch(key, topologyKey string, oper } func GenerateVirtualMachineAndPodAffinityPatch(key, topologyKey string, operator metav1.LabelSelectorOperator, values []string) ([]byte, error) { - vmAndPodAffinity := &virtv2.VirtualMachineAndPodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []virtv2.VirtualMachineAndPodAffinityTerm{ + vmAndPodAffinity := &v1alpha2.VirtualMachineAndPodAffinity{ + 
RequiredDuringSchedulingIgnoredDuringExecution: []v1alpha2.VirtualMachineAndPodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ diff --git a/tests/e2e/complex_test.go b/tests/e2e/complex_test.go index dc87084bf5..138f620911 100644 --- a/tests/e2e/complex_test.go +++ b/tests/e2e/complex_test.go @@ -24,12 +24,14 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) +const VirtualMachineCount = 12 + var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), func() { var ( testCaseLabel = map[string]string{"testcase": "complex-test"} @@ -38,13 +40,12 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun notAlwaysOnLabel = map[string]string{"notAlwaysOn": "complex-test"} ns string phaseByVolumeBindingMode = GetPhaseByVolumeBindingModeForTemplateSc() - - f = framework.NewFramework("") + f = framework.NewFramework("") ) AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -80,7 +81,10 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun }) It("should fill empty virtualMachineClassName with the default class name", func() { - defaultVMLabels := testCaseLabel + defaultVMLabels := make(map[string]string, len(testCaseLabel)+1) + for k, v := range testCaseLabel { + defaultVMLabels[k] = v + } defaultVMLabels["vm"] = "default" res := kubectl.List(kc.ResourceVM, kc.GetOptions{ Labels: testCaseLabel, @@ -214,20 +218,21 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun Context("Verify that the virtual machines are stopping by VMOPs", func() { It("stops VMs by VMOPs", func() { - var vmList virtv2.VirtualMachineList + var vmList v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) Expect(err).ShouldNot(HaveOccurred()) + Expect(len(vmList.Items)).To(Equal(VirtualMachineCount)) for _, vmObj := range vmList.Items { - if vmObj.Spec.RunPolicy == virtv2.AlwaysOnPolicy { + if vmObj.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy { alwaysOnVMs = append(alwaysOnVMs, vmObj.Name) - alwaysOnVMStopVMOPs = append(alwaysOnVMStopVMOPs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(virtv2.VMOPTypeStop)))) + alwaysOnVMStopVMOPs = append(alwaysOnVMStopVMOPs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(v1alpha2.VMOPTypeStop)))) } else { notAlwaysOnVMs = append(notAlwaysOnVMs, vmObj.Name) - notAlwaysOnVMStopVMs = append(notAlwaysOnVMStopVMs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(virtv2.VMOPTypeStop)))) + notAlwaysOnVMStopVMs = append(notAlwaysOnVMStopVMs, fmt.Sprintf("%s-%s", vmObj.Name, strings.ToLower(string(v1alpha2.VMOPTypeStop)))) } } @@ -238,23 +243,23 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun }) It("checks VMOPs and VMs phases", func() { - By(fmt.Sprintf("AlwaysOn VM VMOPs should be in %s phases", virtv2.VMOPPhaseFailed)) - WaitResourcesByPhase(alwaysOnVMStopVMOPs, kc.ResourceVMOP, string(virtv2.VMOPPhaseFailed), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn 
VM VMOPs should be in %s phases", v1alpha2.VMOPPhaseFailed)) + WaitResourcesByPhase(alwaysOnVMStopVMOPs, kc.ResourceVMOP, string(v1alpha2.VMOPPhaseFailed), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("Not AlwaysOn VM VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitResourcesByPhase(notAlwaysOnVMStopVMs, kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VM VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitResourcesByPhase(notAlwaysOnVMStopVMs, kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", virtv2.MachineRunning)) - WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(virtv2.MachineRunning), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", v1alpha2.MachineRunning)) + WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineRunning), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", virtv2.MachineStopped)) - WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", v1alpha2.MachineStopped)) + WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) @@ -273,16 +278,17 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun Context("Verify that the virtual machines are starting", func() { It("starts VMs by VMOP", func() { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Namespace: ns, Labels: testCaseLabel, }) Expect(err).NotTo(HaveOccurred()) + Expect(len(vms.Items)).To(Equal(VirtualMachineCount)) var notAlwaysOnVMs []string for _, vm := range vms.Items { - if vm.Spec.RunPolicy != virtv2.AlwaysOnPolicy { + if vm.Spec.RunPolicy != v1alpha2.AlwaysOnPolicy { notAlwaysOnVMs = append(notAlwaysOnVMs, vm.Name) } } @@ -291,8 +297,8 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun }) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -308,17 +314,18 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun Context("Verify that the virtual machines are stopping by ssh", func() { It("stops VMs by ssh", func() { - var vmList virtv2.VirtualMachineList + var vmList v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) Expect(err).ShouldNot(HaveOccurred()) + Expect(len(vmList.Items)).To(Equal(VirtualMachineCount)) alwaysOnVMs = []string{} notAlwaysOnVMs = []string{} for _, vmObj := range vmList.Items { - if vmObj.Spec.RunPolicy == virtv2.AlwaysOnPolicy { + if vmObj.Spec.RunPolicy == v1alpha2.AlwaysOnPolicy { alwaysOnVMs = append(alwaysOnVMs, vmObj.Name) } else { notAlwaysOnVMs = append(notAlwaysOnVMs, vmObj.Name) @@ -332,25 +339,25 @@ var _ = Describe("ComplexTest", 
Serial, framework.CommonE2ETestDecorators(), fun }) It("checks VMs phases", func() { - By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", virtv2.MachineStopped)) - WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + By(fmt.Sprintf("Not AlwaysOn VMs should be in %s phases", v1alpha2.MachineStopped)) + WaitResourcesByPhase(notAlwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) - By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", virtv2.MachineRunning)) - WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(virtv2.MachineRunning), kc.WaitOptions{ + By(fmt.Sprintf("AlwaysOn VMs should be in %s phases", v1alpha2.MachineRunning)) + WaitResourcesByPhase(alwaysOnVMs, kc.ResourceVM, string(v1alpha2.MachineRunning), kc.WaitOptions{ Namespace: ns, Timeout: MaxWaitTimeout, }) }) It("starts not AlwaysOn VMs", func() { - CreateAndApplyVMOPsWithSuffix(testCaseLabel, "-after-ssh-stopping", virtv2.VMOPTypeStart, ns, notAlwaysOnVMs...) + CreateAndApplyVMOPsWithSuffix(testCaseLabel, "-after-ssh-stopping", v1alpha2.VMOPTypeStart, ns, notAlwaysOnVMs...) }) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -379,8 +386,8 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun }) It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -403,7 +410,7 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun go func() { defer GinkgoRecover() defer wg.Done() - WaitPhaseByLabel(kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + WaitPhaseByLabel(kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -442,7 +449,7 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun go func() { defer GinkgoRecover() defer wg.Done() - WaitPhaseByLabel(kc.ResourceVM, string(virtv2.MachineStopped), kc.WaitOptions{ + WaitPhaseByLabel(kc.ResourceVM, string(v1alpha2.MachineStopped), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -503,8 +510,8 @@ var _ = Describe("ComplexTest", Serial, framework.CommonE2ETestDecorators(), fun Context("When VM migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -572,7 +579,7 @@ func AssignIPToVMIP(f
*framework.Framework, vmipNamespace, vmipName string) erro return fmt.Errorf("%s\n%w", assignErr, err) } - vmip := virtv2.VirtualMachineIPAddress{} + vmip := v1alpha2.VirtualMachineIPAddress{} err = GetObject(kc.ResourceVMIP, vmipName, &vmip, kc.GetOptions{ Namespace: vmipNamespace, }) diff --git a/tests/e2e/default_config.yaml b/tests/e2e/default_config.yaml index f1315c2d30..2493a8d3e5 100644 --- a/tests/e2e/default_config.yaml +++ b/tests/e2e/default_config.yaml @@ -48,13 +48,17 @@ logFilter: - "the server rejected our request due to an error in our request" # Err. - "failed to sync powerstate" # Msg. - "does not have a pvc reference" # "err": "kvvm head-345e7b6a-testcases-image-hotplug/head-345e7b6a-vm-image-hotplug spec volume vi-head-345e7b6a-vi-alpine-http does not have a pvc reference" - - "lastTransitionTime: Required value" # Err. - "virtualmachineipaddressleases.virtualization.deckhouse.io " - - "Forbidden: no new finalizers can be added if the object is being deleted, found new finalizers" - "Failed to watch" # error if virtualization-controller restarts during tests. "msg": "Failed to watch", "err": "Get \"http://127.0.0.1:23915/apis/virtualization.deckhouse.io/v1alpha2/virtualmachinerestores?allowWatchBookmarks=true\u0026resourceVersion=709816257\u0026timeoutSeconds=310\u0026watch=true\": context canceled" - "leader election lost" - "a virtual machine cannot be restored from the pending phase with `Forced` mode" # "err": "a virtual machine cannot be restored from the pending phase with `Forced` mode; you can delete the virtual machine and restore it with `Safe` mode" - - 'virtualMachineSnapshotSecret "" not found' # "msg": "virtualMachineSnapshotSecret \"\" not found" + - 'virtualMachineSnapshotSecret \"\" not found' # "msg": "virtualMachineSnapshotSecret \"\" not found" + - 'failed to sync migrating volumes: admission webhook \"virtualmachine-validator.kubevirt.io\" denied the request: spec.template.spec.domain.devices.disks' + - "in-flight migration detected" # "err": "admission webhook \"migration-create-validator.kubevirt.io\" denied the request: in-flight migration detected. 
Active migration job" + - "the target PersistentVolumeClaim name matched the source PersistentVolumeClaim name, please report a bug" + - "for now, migration of the rwo virtual disk is not allowed if the virtual machine has hot-plugged block devices" # {"level":"error","msg":"Error occurred during reconciliation","controller":"workload-updater-controller","err":"admission webhook \"vmop.virtualization-controller.validate.d8-virtualization\" denied the request: for now, migration of the rwo virtual disk is not allowed if the virtual machine has hot-plugged block devices","name":"example-1","namespace":"default","reconcileID":"d242d357-112b-4e75-a48f-a74ddb2c69af","time":"2025-10-02T21:19:56Z"} + - "Too many requests: limit reached on type Namespace for key" # {"level":"error","msg":"Server rejected event (will not retry!)","err":"Too many requests: limit reached on type Namespace for key head-05d62af18-end-to-end-complex-test","event":{"count":13,"eventTime":null,"firstTimestamp":"2025-10-02T22:10:10Z","involvedObject":{"apiVersion":"virtualization.deckhouse.io/v1alpha2","kind":"VirtualMachine","name":"head-05d62af18-vm-always-on","namespace":"head-05d62af18-end-to-end-complex-test","resourceVersion":"251246482","uid":"1ba665b0-bfd1-44d2-b5e1-0dc491e7b90a"},"lastTimestamp":"2025-10-02T22:10:22Z","message":"The virtual machine configuration successfully synced","metadata":{"creationTimestamp":null,"name":"head-05d62af18-vm-always-on.186acc188a870618","namespace":"head-05d62af18-end-to-end-complex-test","resourceVersion":"251246479"},"reason":"VirtualMachineSynced","reportingComponent":"vm-controller/VirtualMachineSynced","reportingInstance":"","source":{"component":"vm-controller/VirtualMachineSynced"},"type":"Normal"},"time":"2025-10-02T22:10:22Z"} + - "failed to detach" regexpLogFilter: - "failed to detach: .* not found" # "err" "failed to detach: virtualmachine.kubevirt.io \"head-497d17b-vm-automatic-with-hotplug\" not found", - "error patching .* not found" # "err" "error patching *** virtualimages.virtualization.deckhouse.io \"head-497d17b-vi-pvc-oref-vi-oref-vd\" not found", @@ -65,6 +69,8 @@ regexpLogFilter: - "Failed to update lock: .* leases.*leader-election-helper.*" # "msg": "ock: Operation cannot be fulfilled on leases.coordination.k8s.io \"d8-virt-operator-leader-election-helper\": the object has been modified; please apply your changes to the latest version and try again", - "failed to create VirtualMachineIPAddress .* the specified IP address .* has already been allocated and has not been released" # "err": "failed to create VirtualMachineIPAddress \"head-5d2c558-vm-restore-safe-tfv4w\": admission webhook \"vmip.virtualization-controller.validate.d8-virtualization\" denied the request: the VirtualMachineIPAddress cannot be created: the specified IP address 10.66.10.4 has already been allocated and has not been released" - "error retrieving resource lock .*leader-election-helper" # "msg": "error retrieving resource lock d8-virtualization/d8-virt-operator-leader-election-helper: context deadline exceeded", + - "persistentvolumeclaims .* is forbidden: unable to create new content in namespace .* because it is being terminated" # "err": "persistentvolumeclaims \"vd-f6fd1238-9cfe-4c22-bb48-cd64778740e3-\" is forbidden: unable to create new content in namespace virtualization-e2e-volume-migration-storage-class-changed-p8cxr because it is being terminated" + - "virtualmachineoperations.virtualization.deckhouse.io .* is forbidden: unable to create new content in namespace .* because it 
is being terminated" # "err": "virtualmachineoperations.virtualization.deckhouse.io \"volume-migration-\" is forbidden: unable to create new content in namespace virtualization-e2e-volume-migration-storage-class-changed-qsggq because it is being terminated" cleanupResources: - clustervirtualimages.virtualization.deckhouse.io diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index c6fd27b44f..453350d210 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -165,10 +165,7 @@ var _ = SynchronizedBeforeSuite(func() { } if !config.IsReusable() { - err := Cleanup() - if err != nil { - Expect(err).NotTo(HaveOccurred()) - } + Expect(Cleanup()).To(Succeed()) } else { log.Println("Run test in REUSABLE mode") } @@ -296,7 +293,7 @@ func (c *controllerRestartChecker) Check() error { for _, pod := range pods.Items { foundContainer := false for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.Name == VirtualizationController { + if containerStatus.Name == VirtualizationController && containerStatus.State.Running != nil { foundContainer = true if containerStatus.State.Running.StartedAt.After(c.startedAt.Time) { errs = errors.Join(errs, fmt.Errorf("the container %q was restarted: %s", VirtualizationController, pod.Name)) diff --git a/tests/e2e/framework/client.go b/tests/e2e/framework/client.go index fbbc974aa4..5014721afc 100644 --- a/tests/e2e/framework/client.go +++ b/tests/e2e/framework/client.go @@ -21,20 +21,18 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/exec" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization/api/client/kubeclient" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" dv1alpha1 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha1" dv1alpha2 "github.com/deckhouse/virtualization/tests/e2e/api/deckhouse/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/d8" gt "github.com/deckhouse/virtualization/tests/e2e/git" "github.com/deckhouse/virtualization/tests/e2e/kubectl" - - // register auth plugins - _ "k8s.io/client-go/plugin/pkg/client/auth/exec" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" ) var clients = Clients{} @@ -115,7 +113,7 @@ func init() { // The main reason is that we cannot use kubevirt types in tests because in DVP we use rewritten kubevirt types // use dynamic client for get kubevirt types for _, f := range []func(*apiruntime.Scheme) error{ - virtv2.AddToScheme, + v1alpha2.AddToScheme, clientgoscheme.AddToScheme, dv1alpha1.AddToScheme, dv1alpha2.AddToScheme, diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go index df5cf13300..d44fd0f4b9 100644 --- a/tests/e2e/framework/framework.go +++ b/tests/e2e/framework/framework.go @@ -81,7 +81,7 @@ func (f *Framework) Before() { if !f.skipNsCreation { ns, err := f.CreateNamespace(f.namespacePrefix, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By(fmt.Sprintf("Created namespace %s", ns.Name)) + ginkgo.By(fmt.Sprintf("Create namespace %s", ns.Name)) f.namespace = ns f.DeferNamespaceDelete(ns.Name) } @@ -150,8 +150,22 @@ func (f *Framework) DeferNamespaceDelete(name string) { f.namespacesToDelete[name] = struct{}{} } -func (f *Framework) 
DeferDelete(obj client.Object) { +func (f *Framework) DeferDelete(objs ...client.Object) { f.mu.Lock() defer f.mu.Unlock() - f.objectsToDelete[string(obj.GetUID())] = obj + + for _, obj := range objs { + f.objectsToDelete[string(obj.GetUID())] = obj + } +} + +func (f *Framework) BatchCreate(ctx context.Context, objs ...client.Object) error { + for _, obj := range objs { + err := f.client.Create(ctx, obj) + if err != nil { + return err + } + } + + return nil } diff --git a/tests/e2e/framework/timeout.go b/tests/e2e/framework/timeout.go index 8f1928637f..9fb04c65fc 100644 --- a/tests/e2e/framework/timeout.go +++ b/tests/e2e/framework/timeout.go @@ -16,27 +16,11 @@ limitations under the License. package framework -import ( - "os" - "time" +import "time" - "github.com/deckhouse/virtualization/tests/e2e/config" +const ( + ShortTimeout = 30 * time.Second + MiddleTimeout = 60 * time.Second + LongTimeout = 300 * time.Second + MaxTimeout = 600 * time.Second ) - -var ( - ShortTimeout = getTimeout(config.E2EShortTimeoutEnv, 30*time.Second) - MiddleTimeout = getTimeout(config.E2EMiddleTimeoutEnv, 60*time.Second) - LongTimeout = getTimeout(config.E2ELongTimeoutEnv, 300*time.Second) - MaxTimeout = getTimeout(config.E2EMaxTimeoutEnv, 600*time.Second) -) - -func getTimeout(env string, defaultTimeout time.Duration) time.Duration { - if e, ok := os.LookupEnv(env); ok { - t, err := time.ParseDuration(e) - if err != nil { - return defaultTimeout - } - return t - } - return defaultTimeout -} diff --git a/tests/e2e/image_hotplug_test.go b/tests/e2e/image_hotplug_test.go index a4d3f7410e..348b2b00c2 100644 --- a/tests/e2e/image_hotplug_test.go +++ b/tests/e2e/image_hotplug_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/gomega" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -44,7 +44,7 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { ) var ( - vmObj virtv2.VirtualMachine + vmObj v1alpha2.VirtualMachine disksBefore Disks disksAfter Disks testCaseLabel = map[string]string{"testcase": "image-hotplug"} @@ -69,7 +69,7 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -77,19 +77,19 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.ClusterVirtualImageResource: &Counter{ + v1alpha2.ClusterVirtualImageResource: &Counter{ Expected: cviCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -110,21 +110,21 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { }) 
It("checks the resources phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`VirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`ClusterVirtualImages` should be in the %q phase", virtv2.ImageReady), func() { + By(fmt.Sprintf("`ClusterVirtualImages` should be in the %q phase", v1alpha2.ImageReady), func() { WaitPhaseByLabel(kc.ResourceCVI, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualDisk` should be in the %q phase", virtv2.DiskReady), func() { + By(fmt.Sprintf("`VirtualDisk` should be in the %q phase", v1alpha2.DiskReady), func() { WaitPhaseByLabel(kc.ResourceVD, PhaseReady, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, @@ -146,8 +146,8 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { It("retrieves the test objects", func() { By("`VirtualMachine`", func() { - vmObjs := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vmObjs, kc.GetOptions{ + vmObjs := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vmObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -156,8 +156,8 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { vmObj = vmObjs.Items[0] }) By("`VirtualImages`", func() { - viObjs := &virtv2.VirtualImageList{} - err := GetObjects(virtv2.VirtualImageResource, viObjs, kc.GetOptions{ + viObjs := &v1alpha2.VirtualImageList{} + err := GetObjects(v1alpha2.VirtualImageResource, viObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -171,8 +171,8 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { } }) By("`ClusterVirtualImages`", func() { - cviObjs := &virtv2.ClusterVirtualImageList{} - err := GetObjects(virtv2.ClusterVirtualImageResource, cviObjs, kc.GetOptions{ + cviObjs := &v1alpha2.ClusterVirtualImageList{} + err := GetObjects(v1alpha2.ClusterVirtualImageResource, cviObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -196,13 +196,13 @@ var _ = Describe("ImageHotplug", framework.CommonE2ETestDecorators(), func() { It("attaches the images into the `VirtualMachine`", func() { for _, bd := range imageBlockDevices { By(bd.Name, func() { - AttachBlockDevice(ns, vmObj.Name, bd.Name, virtv2.VMBDAObjectRefKind(bd.Kind), testCaseLabel, conf.TestData.ImageHotplug) + AttachBlockDevice(ns, vmObj.Name, bd.Name, v1alpha2.VMBDAObjectRefKind(bd.Kind), testCaseLabel, conf.TestData.ImageHotplug) }) } }) It("checks the `VirtualMachine` and the `VirtualMachineBlockDeviceAttachments` phases", func() { - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachments` should be in the %q phase", virtv2.BlockDeviceAttachmentPhaseAttached), func() { + By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachments` should be in the %q phase", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { WaitPhaseByLabel(kc.ResourceVMBDA, PhaseAttached, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, diff --git a/tests/e2e/images_creation_test.go b/tests/e2e/images_creation_test.go index ad548096e2..949b7a91b6 100644 --- a/tests/e2e/images_creation_test.go +++ b/tests/e2e/images_creation_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/helper" @@ -33,6 +33,7 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu var ( testCaseLabel = map[string]string{"testcase": "images-creation"} ns string + criticalError error ) BeforeAll(func() { @@ -49,7 +50,7 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu Expect(conf.StorageClass.ImmediateStorageClass).NotTo(BeNil(), "immediate storage class cannot be nil; please set up the immediate storage class in the cluster") - virtualDisk := virtv2.VirtualDisk{} + virtualDisk := v1alpha2.VirtualDisk{} vdFilePath := fmt.Sprintf("%s/vd/vd-alpine-http.yaml", conf.TestData.ImagesCreation) err = helper.UnmarshalResource(vdFilePath, &virtualDisk) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdFilePath, err) @@ -58,7 +59,7 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu err = helper.WriteYamlObject(vdFilePath, &virtualDisk) Expect(err).NotTo(HaveOccurred(), "cannot update virtual disk with custom storage class: %s\nstderr: %s", vdFilePath, err) - virtualDiskSnapshot := virtv2.VirtualDiskSnapshot{} + virtualDiskSnapshot := v1alpha2.VirtualDiskSnapshot{} vdSnapshotFilePath := fmt.Sprintf("%s/vdsnapshot/vdsnapshot.yaml", conf.TestData.ImagesCreation) err = helper.UnmarshalResource(vdSnapshotFilePath, &virtualDiskSnapshot) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdSnapshotFilePath, err) @@ -69,7 +70,13 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) + } + }) + + BeforeEach(func() { + if criticalError != nil { + Skip(fmt.Sprintf("Skip because blinking error: %s", criticalError.Error())) } }) @@ -85,8 +92,8 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu Context("When base virtual resources are ready", func() { It("checks VD phase", func() { - By(fmt.Sprintf("VD should be in %s phase", virtv2.DiskReady)) - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("VD should be in %s phase", v1alpha2.DiskReady)) + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -94,8 +101,8 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu }) It("checks VDSnapshot phase", func() { - By(fmt.Sprintf("VDSnapshot should be in %s phase", virtv2.VirtualDiskSnapshotPhaseReady)) - WaitPhaseByLabel(kc.ResourceVDSnapshot, string(virtv2.VirtualDiskSnapshotPhaseReady), kc.WaitOptions{ + By(fmt.Sprintf("VDSnapshot should be in %s phase", v1alpha2.VirtualDiskSnapshotPhaseReady)) + WaitPhaseByLabel(kc.ResourceVDSnapshot, string(v1alpha2.VirtualDiskSnapshotPhaseReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -105,17 +112,22 @@ var _ = Describe("VirtualImageCreation", framework.CommonE2ETestDecorators(), fu Context("When virtual images are applied", func() { It("checks VIs phases", func() { - 
By(fmt.Sprintf("VIs should be in %s phases", virtv2.ImageReady)) - WaitPhaseByLabel(kc.ResourceVI, string(virtv2.ImageReady), kc.WaitOptions{ - Labels: testCaseLabel, - Namespace: ns, - Timeout: MaxWaitTimeout, + By(fmt.Sprintf("VIs should be in %s phases", v1alpha2.ImageReady)) + err := InterceptGomegaFailure(func() { + WaitPhaseByLabel(kc.ResourceVI, string(v1alpha2.ImageReady), kc.WaitOptions{ + Labels: testCaseLabel, + Namespace: ns, + Timeout: MaxWaitTimeout, + }) }) + if err != nil { + criticalError = err + } }) It("checks CVIs phases", func() { - By(fmt.Sprintf("CVIs should be in %s phases", virtv2.ImageReady)) - WaitPhaseByLabel(kc.ResourceCVI, string(virtv2.ImageReady), kc.WaitOptions{ + By(fmt.Sprintf("CVIs should be in %s phases", v1alpha2.ImageReady)) + WaitPhaseByLabel(kc.ResourceCVI, string(v1alpha2.ImageReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, diff --git a/tests/e2e/importer_network_policy_test.go b/tests/e2e/importer_network_policy_test.go index 8c7cda85db..1a3b8996a1 100644 --- a/tests/e2e/importer_network_policy_test.go +++ b/tests/e2e/importer_network_policy_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -53,7 +53,7 @@ var _ = Describe("ImporterNetworkPolicy", framework.CommonE2ETestDecorators(), f AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -98,8 +98,8 @@ var _ = Describe("ImporterNetworkPolicy", framework.CommonE2ETestDecorators(), f Timeout: MaxWaitTimeout, }) }, - Entry("When virtual images are applied", "VI", kc.ResourceVI, string(virtv2.ImageReady)), - Entry("When virtual disks are applied", "VD", kc.ResourceVD, string(virtv2.DiskReady)), - Entry("When virtual machines are applied", "VM", kc.ResourceVM, string(virtv2.MachineRunning)), + Entry("When virtual images are applied", "VI", kc.ResourceVI, string(v1alpha2.ImageReady)), + Entry("When virtual disks are applied", "VD", kc.ResourceVD, string(v1alpha2.DiskReady)), + Entry("When virtual machines are applied", "VM", kc.ResourceVM, string(v1alpha2.MachineRunning)), ) }) diff --git a/tests/e2e/ipam_test.go b/tests/e2e/ipam_test.go index 0cc393cbf1..d750f0e36b 100644 --- a/tests/e2e/ipam_test.go +++ b/tests/e2e/ipam_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmipcondition" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmiplcondition" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -42,7 +42,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { ns string ctx context.Context cancel context.CancelFunc - vmip *virtv2.VirtualMachineIPAddress + vmip *v1alpha2.VirtualMachineIPAddress virtClient = framework.GetClients().VirtClient() ) @@ -65,13 +65,13 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { BeforeEach(func() { ctx, cancel = context.WithTimeout(context.Background(), 50*time.Second) - 
vmip = &virtv2.VirtualMachineIPAddress{ + vmip = &v1alpha2.VirtualMachineIPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vmip", Namespace: ns, }, - Spec: virtv2.VirtualMachineIPAddressSpec{ - Type: virtv2.VirtualMachineIPAddressTypeAuto, + Spec: v1alpha2.VirtualMachineIPAddressSpec{ + Type: v1alpha2.VirtualMachineIPAddressTypeAuto, }, } }) @@ -94,7 +94,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { Expect(err).NotTo(HaveOccurred()) By("Wait for the label to be restored by the controller") - lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddressLease) (bool, error) { + lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddressLease) (bool, error) { return e.Labels["virtualization.deckhouse.io/virtual-machine-ip-address-uid"] == string(vmipAuto.UID), nil }) vmipAuto, err = virtClient.VirtualMachineIPAddresses(vmipAuto.Namespace).Get(ctx, vmipAuto.Name, metav1.GetOptions{}) @@ -114,7 +114,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { By("Delete the intermediate vmip automatically and check that the lease is released") DeleteResource(ctx, intermediate) - lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddressLease) (bool, error) { + lease = WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddressLease) (bool, error) { boundCondition, err := GetCondition(vmiplcondition.BoundType.String(), e) Expect(err).NotTo(HaveOccurred()) return boundCondition.Reason == vmiplcondition.Released.String() && boundCondition.ObservedGeneration == e.Generation, nil @@ -124,7 +124,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { By("Reuse the released lease with a static vmip") vmipStatic := vmip.DeepCopy() vmipStatic.Name += "-static" - vmipStatic.Spec.Type = virtv2.VirtualMachineIPAddressTypeStatic + vmipStatic.Spec.Type = v1alpha2.VirtualMachineIPAddressTypeStatic vmipStatic.Spec.StaticIP = intermediate.Status.Address vmipStatic, lease = CreateVirtualMachineIPAddress(ctx, vmipStatic) ExpectToBeBound(vmipStatic, lease) @@ -135,7 +135,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { go func() { defer close(wait) defer GinkgoRecover() - WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(eType watch.EventType, _ *virtv2.VirtualMachineIPAddressLease) (bool, error) { + WaitForVirtualMachineIPAddressLease(ctx, lease.Name, func(eType watch.EventType, _ *v1alpha2.VirtualMachineIPAddressLease) (bool, error) { return eType == watch.Deleted, nil }) }() @@ -147,7 +147,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { vmipStatic = vmip.DeepCopy() vmipStatic.Name += "-one-more-static" - vmipStatic.Spec.Type = virtv2.VirtualMachineIPAddressTypeStatic + vmipStatic.Spec.Type = v1alpha2.VirtualMachineIPAddressTypeStatic vmipStatic.Spec.StaticIP = intermediate.Status.Address vmipStatic, lease = CreateVirtualMachineIPAddress(ctx, vmipStatic) ExpectToBeBound(vmipStatic, lease) @@ -159,7 +159,7 @@ var _ = Describe("IPAM", framework.CommonE2ETestDecorators(), func() { }) }) -func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h EventHandler[*virtv2.VirtualMachineIPAddress]) *virtv2.VirtualMachineIPAddress { +func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h 
EventHandler[*v1alpha2.VirtualMachineIPAddress]) *v1alpha2.VirtualMachineIPAddress { GinkgoHelper() vmip, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualMachineIPAddresses(ns), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), @@ -168,7 +168,7 @@ func WaitForVirtualMachineIPAddress(ctx context.Context, ns, name string, h Even return vmip } -func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h EventHandler[*virtv2.VirtualMachineIPAddressLease]) *virtv2.VirtualMachineIPAddressLease { +func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h EventHandler[*v1alpha2.VirtualMachineIPAddressLease]) *v1alpha2.VirtualMachineIPAddressLease { GinkgoHelper() lease, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualMachineIPAddressLeases(), h, metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), @@ -177,12 +177,12 @@ func WaitForVirtualMachineIPAddressLease(ctx context.Context, name string, h Eve return lease } -func CreateVirtualMachineIPAddress(ctx context.Context, vmip *virtv2.VirtualMachineIPAddress) (*virtv2.VirtualMachineIPAddress, *virtv2.VirtualMachineIPAddressLease) { +func CreateVirtualMachineIPAddress(ctx context.Context, vmip *v1alpha2.VirtualMachineIPAddress) (*v1alpha2.VirtualMachineIPAddress, *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() CreateResource(ctx, vmip) - vmip = WaitForVirtualMachineIPAddress(ctx, vmip.Namespace, vmip.Name, func(_ watch.EventType, e *virtv2.VirtualMachineIPAddress) (bool, error) { - return e.Status.Phase == virtv2.VirtualMachineIPAddressPhaseBound, nil + vmip = WaitForVirtualMachineIPAddress(ctx, vmip.Namespace, vmip.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachineIPAddress) (bool, error) { + return e.Status.Phase == v1alpha2.VirtualMachineIPAddressPhaseBound, nil }) lease, err := framework.GetClients().VirtClient().VirtualMachineIPAddressLeases().Get(ctx, ipAddressToLeaseName(vmip.Status.Address), metav1.GetOptions{}) @@ -191,7 +191,7 @@ func CreateVirtualMachineIPAddress(ctx context.Context, vmip *virtv2.VirtualMach return vmip, lease } -func ExpectToBeReleased(lease *virtv2.VirtualMachineIPAddressLease) { +func ExpectToBeReleased(lease *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() boundCondition, err := GetCondition(vmiplcondition.BoundType.String(), lease) @@ -199,10 +199,10 @@ func ExpectToBeReleased(lease *virtv2.VirtualMachineIPAddressLease) { Expect(boundCondition.Status).To(Equal(metav1.ConditionFalse)) Expect(boundCondition.Reason).To(Equal(vmiplcondition.Released.String())) Expect(boundCondition.ObservedGeneration).To(Equal(lease.Generation)) - Expect(lease.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressLeasePhaseReleased)) + Expect(lease.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressLeasePhaseReleased)) } -func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.VirtualMachineIPAddressLease) { +func ExpectToBeBound(vmip *v1alpha2.VirtualMachineIPAddress, lease *v1alpha2.VirtualMachineIPAddressLease) { GinkgoHelper() // 1. Check vmip to be Bound. 
@@ -212,7 +212,7 @@ func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.Virtual Expect(boundCondition.Reason).To(Equal(vmipcondition.Bound.String())) Expect(boundCondition.ObservedGeneration).To(Equal(vmip.Generation)) - Expect(vmip.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressPhaseBound)) + Expect(vmip.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressPhaseBound)) Expect(vmip.Status.Address).NotTo(BeEmpty()) Expect(ipAddressToLeaseName(vmip.Status.Address)).To(Equal(lease.Name)) @@ -223,7 +223,7 @@ func ExpectToBeBound(vmip *virtv2.VirtualMachineIPAddress, lease *virtv2.Virtual Expect(boundCondition.Reason).To(Equal(vmiplcondition.Bound.String())) Expect(boundCondition.ObservedGeneration).To(Equal(lease.Generation)) - Expect(lease.Status.Phase).To(Equal(virtv2.VirtualMachineIPAddressLeasePhaseBound)) + Expect(lease.Status.Phase).To(Equal(v1alpha2.VirtualMachineIPAddressLeasePhaseBound)) Expect(lease.Labels["virtualization.deckhouse.io/virtual-machine-ip-address-uid"]).To(Equal(string(vmip.UID))) Expect(lease.Spec.VirtualMachineIPAddressRef).NotTo(BeNil()) Expect(lease.Spec.VirtualMachineIPAddressRef.Name).To(Equal(vmip.Name)) diff --git a/tests/e2e/kubectl/kubectl.go b/tests/e2e/kubectl/kubectl.go index f23d6dba2d..cd9cca0d60 100644 --- a/tests/e2e/kubectl/kubectl.go +++ b/tests/e2e/kubectl/kubectl.go @@ -93,11 +93,12 @@ type DeleteOptions struct { } type GetOptions struct { - ExcludedLabels []string - IgnoreNotFound bool - Labels map[string]string - Namespace string - Output string + ExcludedLabels []string + IgnoreNotFound bool + Labels map[string]string + Namespace string + Output string + ShowManagedFields bool } type LogOptions struct { @@ -368,6 +369,13 @@ func (k KubectlCMD) addFollow(cmd string, follow bool) string { return cmd } +func (k KubectlCMD) addShowManagedFields(cmd string, showManagedFields bool) string { + if showManagedFields { + return fmt.Sprintf("%s --show-managed-fields=true", cmd) + } + return cmd +} + func (k KubectlCMD) applyOptions(cmd string, opts ApplyOptions) string { var resourceEmptyValue Resource = "" cmd = k.addFilenameOptions(cmd, resourceEmptyValue, opts.FilenameOption, opts.Recursive, opts.Filename...) 
@@ -387,6 +395,7 @@ func (k KubectlCMD) getOptions(cmd string, opts GetOptions) string { cmd = k.addOutput(cmd, opts.Output) cmd = k.addIgnoreNotFound(cmd, opts.IgnoreNotFound) cmd = k.addLabels(cmd, opts.Labels, opts.ExcludedLabels) + cmd = k.addShowManagedFields(cmd, opts.ShowManagedFields) return cmd } diff --git a/tests/e2e/network/cilium_agents.go b/tests/e2e/network/cilium_agents.go index a7fee0dd9d..a03b14fbd3 100644 --- a/tests/e2e/network/cilium_agents.go +++ b/tests/e2e/network/cilium_agents.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" ) @@ -81,12 +81,12 @@ func CheckCiliumAgents(ctx context.Context, kubectl kc.Kubectl, vmName, vmNamesp } func getVMInfo(kubectl kc.Kubectl, vmName, vmNamespace string) (string, string, error) { - result := kubectl.GetResource(virtv2.VirtualMachineResource, vmName, kc.GetOptions{Namespace: vmNamespace, Output: "json"}) + result := kubectl.GetResource(v1alpha2.VirtualMachineResource, vmName, kc.GetOptions{Namespace: vmNamespace, Output: "json"}) if result.Error() != nil { return "", "", fmt.Errorf("failed to get VM: %w", result.Error()) } - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine if err := json.Unmarshal([]byte(result.StdOut()), &vm); err != nil { return "", "", fmt.Errorf("failed to parse VM JSON: %w", err) } diff --git a/tests/e2e/object/cvi.go b/tests/e2e/object/cvi.go index 56f9e52f1c..82d56d81fe 100644 --- a/tests/e2e/object/cvi.go +++ b/tests/e2e/object/cvi.go @@ -18,10 +18,10 @@ package object import ( "github.com/deckhouse/virtualization-controller/pkg/builder/cvi" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewHTTPCVIUbuntu(name string) *virtv2.ClusterVirtualImage { +func NewHTTPCVIUbuntu(name string) *v1alpha2.ClusterVirtualImage { return cvi.New( cvi.WithName(name), cvi.WithDataSourceHTTP( @@ -32,7 +32,7 @@ func NewHTTPCVIUbuntu(name string) *virtv2.ClusterVirtualImage { ) } -func NewGenerateHTTPCVIUbuntu(prefix string) *virtv2.ClusterVirtualImage { +func NewGenerateHTTPCVIUbuntu(prefix string) *v1alpha2.ClusterVirtualImage { return cvi.New( cvi.WithGenerateName(prefix), cvi.WithDataSourceHTTP( diff --git a/tests/e2e/object/vd.go b/tests/e2e/object/vd.go index 3475101631..3f00a4067d 100644 --- a/tests/e2e/object/vd.go +++ b/tests/e2e/object/vd.go @@ -20,10 +20,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/builder/vd" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewGeneratedVDFromCVI(prefix, namespace string, cvi *virtv2.ClusterVirtualImage) *virtv2.VirtualDisk { +func NewGeneratedVDFromCVI(prefix, namespace string, cvi *v1alpha2.ClusterVirtualImage) *v1alpha2.VirtualDisk { return vd.New( vd.WithGenerateName(prefix), vd.WithNamespace(namespace), @@ -31,7 +31,7 @@ func NewGeneratedVDFromCVI(prefix, namespace string, cvi *virtv2.ClusterVirtualI ) } -func NewVDFromCVI(name, namespace string, cvi *virtv2.ClusterVirtualImage) *virtv2.VirtualDisk { +func NewVDFromCVI(name, namespace string, cvi *v1alpha2.ClusterVirtualImage) *v1alpha2.VirtualDisk { return vd.New( vd.WithName(name), vd.WithNamespace(namespace), @@ -39,7 +39,7 @@ func NewVDFromCVI(name, namespace string, cvi *virtv2.ClusterVirtualImage) 
*virt ) } -func NewGeneratedVDFromVI(prefix, namespace string, vi *virtv2.VirtualImage) *virtv2.VirtualDisk { +func NewGeneratedVDFromVI(prefix, namespace string, vi *v1alpha2.VirtualImage) *v1alpha2.VirtualDisk { return vd.New( vd.WithGenerateName(prefix), vd.WithNamespace(namespace), @@ -47,7 +47,7 @@ func NewGeneratedVDFromVI(prefix, namespace string, vi *virtv2.VirtualImage) *vi ) } -func NewVDFromVI(name, namespace string, vi *virtv2.VirtualImage) *virtv2.VirtualDisk { +func NewVDFromVI(name, namespace string, vi *v1alpha2.VirtualImage) *v1alpha2.VirtualDisk { return vd.New( vd.WithName(name), vd.WithNamespace(namespace), @@ -55,7 +55,7 @@ func NewVDFromVI(name, namespace string, vi *virtv2.VirtualImage) *virtv2.Virtua ) } -func NewBlankVD(name, namespace string, storageClass *string, size *resource.Quantity) *virtv2.VirtualDisk { +func NewBlankVD(name, namespace string, storageClass *string, size *resource.Quantity) *v1alpha2.VirtualDisk { return vd.New( vd.WithName(name), vd.WithNamespace(namespace), @@ -63,14 +63,12 @@ func NewBlankVD(name, namespace string, storageClass *string, size *resource.Qua ) } -func NewGeneratedHTTPVDUbuntu(prefix, namespace string) *virtv2.VirtualDisk { +func NewGeneratedHTTPVDUbuntu(prefix, namespace string) *v1alpha2.VirtualDisk { return vd.New( vd.WithGenerateName(prefix), vd.WithNamespace(namespace), - vd.WithDataSourceHTTP( - UbuntuHTTP, - nil, - nil, - ), + vd.WithDataSourceHTTP(&v1alpha2.DataSourceHTTP{ + URL: UbuntuHTTP, + }), ) } diff --git a/tests/e2e/object/vi.go b/tests/e2e/object/vi.go index 1e4e63caa5..4b716c9d35 100644 --- a/tests/e2e/object/vi.go +++ b/tests/e2e/object/vi.go @@ -18,10 +18,10 @@ package object import ( "github.com/deckhouse/virtualization-controller/pkg/builder/vi" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewHTTPVIUbuntu(name string) *virtv2.VirtualImage { +func NewHTTPVIUbuntu(name string) *v1alpha2.VirtualImage { return vi.New( vi.WithName(name), vi.WithDataSourceHTTP( @@ -32,7 +32,7 @@ func NewHTTPVIUbuntu(name string) *virtv2.VirtualImage { ) } -func NewGeneratedHTTPVIUbuntu(prefix string) *virtv2.VirtualImage { +func NewGeneratedHTTPVIUbuntu(prefix string) *v1alpha2.VirtualImage { return vi.New( vi.WithGenerateName(prefix), vi.WithDataSourceHTTP( @@ -40,6 +40,6 @@ func NewGeneratedHTTPVIUbuntu(prefix string) *virtv2.VirtualImage { nil, nil, ), - vi.WithStorage(virtv2.StorageContainerRegistry), + vi.WithStorage(v1alpha2.StorageContainerRegistry), ) } diff --git a/tests/e2e/object/vm.go b/tests/e2e/object/vm.go index 2c678ee8ab..442d268c06 100644 --- a/tests/e2e/object/vm.go +++ b/tests/e2e/object/vm.go @@ -21,16 +21,16 @@ import ( "k8s.io/utils/ptr" "github.com/deckhouse/virtualization-controller/pkg/builder/vm" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -func NewMinimalVM(prefix, namespace string, opts ...vm.Option) *virtv2.VirtualMachine { +func NewMinimalVM(prefix, namespace string, opts ...vm.Option) *v1alpha2.VirtualMachine { baseOpts := []vm.Option{ vm.WithGenerateName(prefix), vm.WithNamespace(namespace), vm.WithCPU(1, ptr.To("100%")), vm.WithMemory(*resource.NewQuantity(Mi256, resource.BinarySI)), - vm.WithLiveMigrationPolicy(virtv2.AlwaysSafeMigrationPolicy), + vm.WithLiveMigrationPolicy(v1alpha2.AlwaysSafeMigrationPolicy), vm.WithVirtualMachineClass(DefaultVMClass), vm.WithProvisioningUserData(DefaultCloudInit), } diff --git 
a/tests/e2e/object/vmbda.go b/tests/e2e/object/vmbda.go new file mode 100644 index 0000000000..b148caa7a6 --- /dev/null +++ b/tests/e2e/object/vmbda.go @@ -0,0 +1,33 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/deckhouse/virtualization-controller/pkg/builder/vmbda" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func NewVMBDAFromDisk(name, vmName string, vd *v1alpha2.VirtualDisk, opts ...vmbda.Option) *v1alpha2.VirtualMachineBlockDeviceAttachment { + bda := vmbda.New( + vmbda.WithName(name), + vmbda.WithNamespace(vd.Namespace), + vmbda.WithVirtualMachineName(vmName), + vmbda.WithBlockDeviceRef(v1alpha2.VMBDAObjectRefKindVirtualDisk, vd.Name), + ) + vmbda.ApplyOptions(bda, opts...) + return bda +} diff --git a/tests/e2e/sizing_policy_test.go b/tests/e2e/sizing_policy_test.go index bc6cfb1bf1..b88fe82423 100644 --- a/tests/e2e/sizing_policy_test.go +++ b/tests/e2e/sizing_policy_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -72,7 +72,7 @@ var _ = Describe("SizingPolicy", framework.CommonE2ETestDecorators(), func() { AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -185,7 +185,7 @@ var _ = Describe("SizingPolicy", framework.CommonE2ETestDecorators(), func() { }) It("creates new `VirtualMachineClass`", func() { - vmClass := virtv2.VirtualMachineClass{} + vmClass := v1alpha2.VirtualMachineClass{} err := GetObject(kc.ResourceVMClass, vmClassDiscovery, &vmClass, kc.GetOptions{}) Expect(err).NotTo(HaveOccurred()) vmClass.Name = vmClassDiscoveryCopy @@ -222,13 +222,13 @@ var _ = Describe("SizingPolicy", framework.CommonE2ETestDecorators(), func() { Expect(res.Error()).NotTo(HaveOccurred(), res.StdErr()) vms := strings.Split(res.StdOut(), " ") - vmClass := virtv2.VirtualMachineClass{} + vmClass := v1alpha2.VirtualMachineClass{} err := GetObject(kc.ResourceVMClass, vmClassDiscovery, &vmClass, kc.GetOptions{}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms { By(fmt.Sprintf("Check virtual machine: %s", vm)) - vmObj := virtv2.VirtualMachine{} + vmObj := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vm, &vmObj, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred()) ValidateVirtualMachineByClass(&vmClass, &vmObj) @@ -246,8 +246,8 @@ var _ = Describe("SizingPolicy", framework.CommonE2ETestDecorators(), func() { }) }) -func ValidateVirtualMachineByClass(virtualMachineClass *virtv2.VirtualMachineClass, virtualMachine *virtv2.VirtualMachine) { - var sizingPolicy virtv2.SizingPolicy +func 
ValidateVirtualMachineByClass(virtualMachineClass *v1alpha2.VirtualMachineClass, virtualMachine *v1alpha2.VirtualMachine) { + var sizingPolicy v1alpha2.SizingPolicy for _, p := range virtualMachineClass.Spec.SizingPolicies { if virtualMachine.Spec.CPU.Cores >= p.Cores.Min && virtualMachine.Spec.CPU.Cores <= p.Cores.Max { sizingPolicy = *p.DeepCopy() @@ -262,13 +262,13 @@ func ValidateVirtualMachineByClass(virtualMachineClass *virtv2.VirtualMachineCla coreFraction, err := strconv.Atoi(strings.ReplaceAll(virtualMachine.Spec.CPU.CoreFraction, "%", "")) Expect(err).NotTo(HaveOccurred(), "cannot convert CoreFraction value to integer: %s", err) - checkCoreFraction := slices.Contains(sizingPolicy.CoreFractions, virtv2.CoreFractionValue(coreFraction)) + checkCoreFraction := slices.Contains(sizingPolicy.CoreFractions, v1alpha2.CoreFractionValue(coreFraction)) Expect(checkCoreFraction).To(BeTrue(), fmt.Errorf("sizing policy core fraction list does not contain value from spec: %s\n%v", virtualMachine.Spec.CPU.CoreFraction, sizingPolicy.CoreFractions)) } func CompareVirtualMachineClassReadyStatus(vmNamespace, vmName string, expectedStatus metav1.ConditionStatus) { GinkgoHelper() - vm := virtv2.VirtualMachine{} + vm := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vm, kc.GetOptions{Namespace: vmNamespace}) Expect(err).NotTo(HaveOccurred(), "%v", err) status, err := GetConditionStatus(&vm, vmcondition.TypeClassReady.String()) diff --git a/tests/e2e/storage/volume_migration_local_disks.go b/tests/e2e/storage/volume_migration_local_disks.go index 5ba1558690..4ba49c787a 100644 --- a/tests/e2e/storage/volume_migration_local_disks.go +++ b/tests/e2e/storage/volume_migration_local_disks.go @@ -27,20 +27,23 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" vmopbuilder "github.com/deckhouse/virtualization-controller/pkg/builder/vmop" "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2/vmopcondition" "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/object" "github.com/deckhouse/virtualization/tests/e2e/util" ) -var _ = SIGDescribe("Volume migration with local disks", framework.CommonE2ETestDecorators(), func() { +var _ = SIGDescribe("LocalVirtualDiskMigration", framework.CommonE2ETestDecorators(), func() { var ( f = framework.NewFramework("volume-migration-local-disks") storageClass *storagev1.StorageClass @@ -48,6 +51,9 @@ var _ = SIGDescribe("Volume migration with local disks", framework.CommonE2ETest ) BeforeEach(func() { + // TODO: Remove Skip after fixing the issue. + Skip("This test case is not working every time. 
Should be fixed.") + storageClass = framework.GetConfig().StorageClass.TemplateStorageClass if storageClass == nil { Skip("TemplateStorageClass is not set.") } @@ -411,4 +417,52 @@ var _ = SIGDescribe("Volume migration with local disks", framework.CommonE2ETest untilVirtualDisksMigrationsFailed(f) }) }) + + It("should fail with RWO VMBDA", func() { + ns := f.Namespace().Name + + vm, vds := localMigrationRootAndAdditionalBuild() + + By("Creating VM") + vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vm) + + By("Creating VDs") + for _, vd := range vds { + _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vd) + } + + By("Creating RWO VD for VMBDA") + const vdVmbdaName = "vd-vmbda-rwo" + vdVmbda := object.NewBlankVD(vdVmbdaName, ns, &storageClass.Name, ptr.To(resource.MustParse("100Mi"))) + _, err = f.VirtClient().VirtualDisks(ns).Create(context.Background(), vdVmbda, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vdVmbda) + + By("Creating VMBDA") + const vmbdaName = "vd-vmbda-rwo" + vmbda := object.NewVMBDAFromDisk(vmbdaName, vm.Name, vdVmbda) + _, err = f.VirtClient().VirtualMachineBlockDeviceAttachments(ns).Create(context.Background(), vmbda, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + f.DeferDelete(vmbda) + + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + + const vmopName = "local-disks-migration-with-rwo-vmbda" + util.MigrateVirtualMachine(vm, vmopbuilder.WithName(vmopName)) + + By("Waiting for the migration to fail") + Eventually(func(g Gomega) { + vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseFailed)) + completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) + g.Expect(completed.Status).To(Equal(metav1.ConditionFalse)) + g.Expect(completed.Reason).To(Equal(vmopcondition.ReasonHotplugDisksNotShared.String())) + }).WithTimeout(framework.MiddleTimeout).WithPolling(time.Second).Should(Succeed()) + }) }) diff --git a/tests/e2e/storage/volume_migration_storage_class_changed.go b/tests/e2e/storage/volume_migration_storage_class_changed.go index 0c61864942..b35213b4f5 100644 --- a/tests/e2e/storage/volume_migration_storage_class_changed.go +++ b/tests/e2e/storage/volume_migration_storage_class_changed.go @@ -19,7 +19,6 @@ package storage import ( "context" "fmt" - "os" "slices" "time" @@ -33,13 +32,12 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/patch" "github.com/deckhouse/virtualization/api/core/v1alpha2" - "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" "github.com/deckhouse/virtualization/tests/e2e/object" "github.com/deckhouse/virtualization/tests/e2e/util" ) -var _ = SIGDescribe("Volume migration when storage class changed", framework.CommonE2ETestDecorators(), func() { +var _ = SIGDescribe("StorageClassMigration", framework.CommonE2ETestDecorators(), func() { var ( f = framework.NewFramework("volume-migration-storage-class-changed") storageClass *storagev1.StorageClass @@ -48,27 +46,27 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com ) BeforeEach(func() { + // TODO: Remove Skip after 
fixing the issue. + Skip("This test case is not working every time. Should be fixed.") + storageClass = framework.GetConfig().StorageClass.TemplateStorageClass if storageClass == nil { Skip("TemplateStorageClass is not set.") } - if env, ok := os.LookupEnv(config.E2EVolumeMigrationNextStorageClassEnv); ok { - nextStorageClass = env - } else { - scList, err := f.KubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + scList, err := f.KubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) - for _, sc := range scList.Items { - if sc.Name == storageClass.Name { - continue - } - if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { - nextStorageClass = sc.Name - break - } + for _, sc := range scList.Items { + if sc.Name == storageClass.Name { + continue + } + if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + nextStorageClass = sc.Name + break } } + if nextStorageClass == "" { Skip("No available storage class for test") } @@ -78,7 +76,7 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com DeferCleanup(f.After) newVI := object.NewGeneratedHTTPVIUbuntu("volume-migration-storage-class-changed-") - newVI, err := f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) + newVI, err = f.VirtClient().VirtualImages(f.Namespace().Name).Create(context.Background(), newVI, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) f.DeferDelete(newVI) vi = newVI @@ -138,15 +136,10 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", nextStorageClass)).Bytes() + err = PatchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) 
Expect(err).NotTo(HaveOccurred()) - for _, vdForMigration := range vdsForMigration { - _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - } - - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.MaxTimeout) untilVirtualDisksMigrationsSucceeded(f) @@ -163,7 +156,7 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com }, Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), - // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO: fixme + Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), ) @@ -191,14 +184,9 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", nextStorageClass)).Bytes() + err = PatchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) - for _, vdForMigration := range vdsForMigration { - _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - } - Eventually(func() error { vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) if err != nil { @@ -212,16 +200,8 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com } // revert migration - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", storageClass.Name)).Bytes() - if err != nil { - return err - } - for _, vdForMigration := range vdsForMigration { - _, err = f.VirtClient().VirtualDisks(vm.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - if err != nil { - return err - } - } + err = PatchStorageClassName(context.Background(), f, storageClass.Name, vdsForMigration...) 
+ Expect(err).NotTo(HaveOccurred()) return nil }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) @@ -230,7 +210,7 @@ }, Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), - // Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO:fixme + Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), ) @@ -239,16 +219,15 @@ vm, vds := storageClassMigrationRootAndAdditionalBuild() - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vm) - + objs := []crclient.Object{vm} for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vd) + objs = append(objs, vd) } + f.DeferDelete(objs...) + err := f.BatchCreate(context.Background(), objs...) + Expect(err).NotTo(HaveOccurred()) + util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) @@ -257,43 +236,41 @@ toStorageClasses := []string{nextStorageClass, storageClass.Name} for _, sc := range toStorageClasses { - By("Patch VD with new storage class") - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", sc)).Bytes() - Expect(err).NotTo(HaveOccurred()) + By(fmt.Sprintf("Patch VD %s with new storage class %s", vdForMigration.Name, sc)) - _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + err = PatchStorageClassName(context.Background(), f, sc, vdForMigration) Expect(err).NotTo(HaveOccurred()) - var lastVMOP *v1alpha2.VirtualMachineOperation - vmops, err := f.VirtClient().VirtualMachineOperations(ns).List(context.Background(), metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) - for _, vmop := range vmops.Items { - if vmop.Spec.VirtualMachine == vm.Name { - if lastVMOP == nil { - lastVMOP = &vmop - continue - } - if vmop.CreationTimestamp.After(lastVMOP.CreationTimestamp.Time) { - lastVMOP = &vmop - continue + Eventually(func() error { + var lastVMOP *v1alpha2.VirtualMachineOperation + vmops, err := f.VirtClient().VirtualMachineOperations(ns).List(context.Background(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + + for _, vmop := range vmops.Items { + if vmop.Spec.VirtualMachine == vm.Name { + if lastVMOP == nil { + lastVMOP = &vmop + continue + } + if vmop.CreationTimestamp.After(lastVMOP.CreationTimestamp.Time) { + lastVMOP = &vmop + continue + } } } - } - Eventually(func() error { - vmop, err := 
f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), lastVMOP.Name, metav1.GetOptions{}) - if err != nil { - return err + if lastVMOP == nil { + return fmt.Errorf("lastVMOP is not found") } - if vmop.Status.Phase == v1alpha2.VMOPPhaseCompleted { + if lastVMOP.Status.Phase == v1alpha2.VMOPPhaseCompleted { return nil } return fmt.Errorf("migration is not completed") - }).WithTimeout(framework.LongTimeout).WithPolling(time.Second).Should(Succeed()) + }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) + util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.MaxTimeout) untilVirtualDisksMigrationsSucceeded(f) @@ -307,48 +284,20 @@ var _ = SIGDescribe("Volume migration when storage class changed", framework.Com Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) } }) +}) - It("migrate to ImmediateStorageClass", func() { - ns := f.Namespace().Name - - vm, vds := storageClassMigrationRootAndAdditionalBuild() - - vm, err := f.VirtClient().VirtualMachines(ns).Create(context.Background(), vm, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vm) +func PatchStorageClassName(ctx context.Context, f *framework.Framework, scName string, vds ...*v1alpha2.VirtualDisk) error { + patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", scName)).Bytes() + if err != nil { + return fmt.Errorf("new json patch: %w", err) + } - for _, vd := range vds { - _, err := f.VirtClient().VirtualDisks(ns).Create(context.Background(), vd, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - f.DeferDelete(vd) + for _, vd := range vds { + _, err = f.VirtClient().VirtualDisks(vd.GetNamespace()).Patch(ctx, vd.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("patch vd %s %s: %w", vd.Name, string(patchBytes), err) } + } - util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - - vdForMigration, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdRootName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - immediateStorageClass := framework.GetConfig().StorageClass.ImmediateStorageClass.Name - Expect(immediateStorageClass).NotTo(BeNil()) - - By("Patch VD with new storage class") - patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", immediateStorageClass)).Bytes() - Expect(err).NotTo(HaveOccurred()) - - _, err = f.VirtClient().VirtualDisks(vdForMigration.GetNamespace()).Patch(context.Background(), vdForMigration.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{}) - Expect(err).NotTo(HaveOccurred()) - - util.UntilVMMigrationSucceeded(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) - - untilVirtualDisksMigrationsSucceeded(f) - - migratedVD, err := f.VirtClient().VirtualDisks(ns).Get(context.Background(), vdForMigration.GetName(), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - pvc, err := f.KubeClient().CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), migratedVD.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pvc.Spec.StorageClassName).NotTo(BeNil()) - Expect(*pvc.Spec.StorageClassName).To(Equal(immediateStorageClass)) - Expect(pvc.Status.Phase).To(Equal(corev1.ClaimBound)) - }) -}) + return nil +} diff --git 
a/tests/e2e/testdata/image-hotplug/vi/vi-pvc-alpine-http.yaml b/tests/e2e/testdata/image-hotplug/vi/vi-pvc-alpine-http.yaml index 2638e15c2e..229c978df9 100644 --- a/tests/e2e/testdata/image-hotplug/vi/vi-pvc-alpine-http.yaml +++ b/tests/e2e/testdata/image-hotplug/vi/vi-pvc-alpine-http.yaml @@ -5,6 +5,8 @@ metadata: name: vi-pvc-alpine-http spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: HTTP http: diff --git a/tests/e2e/testdata/images-creation/vi/vi_containerimage.yaml b/tests/e2e/testdata/images-creation/vi/vi_containerimage.yaml index d38f2b9c5a..3925003ea2 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_containerimage.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_containerimage.yaml @@ -21,6 +21,8 @@ metadata: virt.deckhouse.io/storage.pod.retainAfterCompletion: "true" spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: ContainerImage containerImage: diff --git a/tests/e2e/testdata/images-creation/vi/vi_http.yaml b/tests/e2e/testdata/images-creation/vi/vi_http.yaml index dbfe7900f4..1827a318ef 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_http.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_http.yaml @@ -17,6 +17,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "HTTP" http: diff --git a/tests/e2e/testdata/images-creation/vi/vi_objectref_cvi.yaml b/tests/e2e/testdata/images-creation/vi/vi_objectref_cvi.yaml index b8afcc6e96..c86c21f167 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_objectref_cvi.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_objectref_cvi.yaml @@ -18,6 +18,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: diff --git a/tests/e2e/testdata/images-creation/vi/vi_objectref_vd.yaml b/tests/e2e/testdata/images-creation/vi/vi_objectref_vd.yaml index 08aaebb211..757d955386 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_objectref_vd.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_objectref_vd.yaml @@ -18,6 +18,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: diff --git a/tests/e2e/testdata/images-creation/vi/vi_objectref_vdsnapshot.yaml b/tests/e2e/testdata/images-creation/vi/vi_objectref_vdsnapshot.yaml index 465b45564a..f34ab55c78 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_objectref_vdsnapshot.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_objectref_vdsnapshot.yaml @@ -18,6 +18,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: diff --git a/tests/e2e/testdata/images-creation/vi/vi_pvc_objectref_vi.yaml b/tests/e2e/testdata/images-creation/vi/vi_pvc_objectref_vi.yaml index a95c30f95a..817b65c2dd 100644 --- a/tests/e2e/testdata/images-creation/vi/vi_pvc_objectref_vi.yaml +++ b/tests/e2e/testdata/images-creation/vi/vi_pvc_objectref_vi.yaml @@ -6,6 +6,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ 
.STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -19,6 +21,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -32,6 +36,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -45,6 +51,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -58,6 +66,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -73,6 +83,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -86,6 +98,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -99,6 +113,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: @@ -112,6 +128,8 @@ metadata: namespace: test-d8-virtualization spec: storage: PersistentVolumeClaim + persistentVolumeClaim: + storageClassName: "{{ .STORAGE_CLASS_NAME }}" dataSource: type: "ObjectRef" objectRef: diff --git a/tests/e2e/testdata/vm-migration/vm/kustomization.yaml b/tests/e2e/testdata/vm-migration/vm/kustomization.yaml index 7c5d10065f..7becc7eb6b 100644 --- a/tests/e2e/testdata/vm-migration/vm/kustomization.yaml +++ b/tests/e2e/testdata/vm-migration/vm/kustomization.yaml @@ -3,4 +3,4 @@ kind: Kustomization resources: - overlays/migration-bios - overlays/migration-uefi - - overlays/with-cvi +# - overlays/with-cvi # TODO: Remove Skip after fixing the issue. 
diff --git a/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists-always-on/kustomization.yaml b/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists-always-on/kustomization.yaml index 2df45b15b9..490f66c7eb 100644 --- a/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists-always-on/kustomization.yaml +++ b/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists-always-on/kustomization.yaml @@ -11,10 +11,3 @@ patches: target: kind: VirtualMachine name: vm - - patch: |- - - op: replace - path: /spec/disruptions/restartApprovalMode - value: Automatic - target: - kind: VirtualMachine - name: vm diff --git a/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists/kustomization.yaml b/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists/kustomization.yaml index 2807486658..65e9e972af 100644 --- a/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists/kustomization.yaml +++ b/tests/e2e/testdata/vm-restore-force/vm/overlays/vm-exists/kustomization.yaml @@ -3,11 +3,3 @@ kind: Kustomization nameSuffix: -restore-force resources: - ../../base -patches: - - patch: |- - - op: replace - path: /spec/disruptions/restartApprovalMode - value: Automatic - target: - kind: VirtualMachine - name: vm diff --git a/tests/e2e/testdata/vm-restore-safe/vm/base/kustomization.yaml b/tests/e2e/testdata/vm-restore-safe/vm/base/kustomization.yaml index 3202d4d010..7eb3b5f7a5 100644 --- a/tests/e2e/testdata/vm-restore-safe/vm/base/kustomization.yaml +++ b/tests/e2e/testdata/vm-restore-safe/vm/base/kustomization.yaml @@ -5,7 +5,8 @@ resources: - ./vd-root.yaml - ./vd-blank.yaml - ./vmbda-vd.yaml - - ./vmbda-vi.yaml +# When vmbda is deleted, it may stay in Terminating; a fix is planned. +# - ./vmbda-vi.yaml configurations: - transformer.yaml generatorOptions: diff --git a/tests/e2e/testdata/vm-restore-safe/vm/overlays/vm/kustomization.yaml b/tests/e2e/testdata/vm-restore-safe/vm/overlays/vm/kustomization.yaml index 0b5ef3d497..779c7e5d78 100644 --- a/tests/e2e/testdata/vm-restore-safe/vm/overlays/vm/kustomization.yaml +++ b/tests/e2e/testdata/vm-restore-safe/vm/overlays/vm/kustomization.yaml @@ -3,11 +3,3 @@ kind: Kustomization nameSuffix: -restore-safe resources: - ../../base -patches: - - patch: |- - - op: replace - path: /spec/disruptions/restartApprovalMode - value: Automatic - target: - kind: VirtualMachine - name: vm diff --git a/tests/e2e/util/vm.go b/tests/e2e/util/vm.go index 45d696fd84..716bc68770 100644 --- a/tests/e2e/util/vm.go +++ b/tests/e2e/util/vm.go @@ -37,6 +37,7 @@ import ( func UntilVMAgentReady(key client.ObjectKey, timeout time.Duration) { GinkgoHelper() + By("Wait until VM agent is ready") Eventually(func() error { vm, err := framework.GetClients().VirtClient().VirtualMachines(key.Namespace).Get(context.Background(), key.Name, metav1.GetOptions{}) if err != nil { diff --git a/tests/e2e/util_test.go b/tests/e2e/util_test.go index 0ddc83fef8..e52768c980 100644 --- a/tests/e2e/util_test.go +++ b/tests/e2e/util_test.go @@ -42,7 +42,7 @@ import ( k8snet "k8s.io/utils/net" "sigs.k8s.io/controller-runtime/pkg/client" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/executor" @@ -183,7 +183,7 @@ func CheckField(resource kc.Resource, ns, name, output, compareValue string) { Expect(res.StdOut()).To(Equal(compareValue)) } -func 
GetVMFromManifest(manifest string) (*virtv2.VirtualMachine, error) { +func GetVMFromManifest(manifest string) (*v1alpha2.VirtualMachine, error) { unstructs, err := helper.ParseYaml(manifest) if err != nil { return nil, err @@ -195,7 +195,7 @@ func GetVMFromManifest(manifest string) (*virtv2.VirtualMachine, error) { break } } - var vm virtv2.VirtualMachine + var vm v1alpha2.VirtualMachine if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstruct.Object, &vm); err != nil { return nil, err } @@ -341,13 +341,13 @@ func WaitResources(resources []string, resource kc.Resource, opts kc.WaitOptions res := kubectl.WaitResource(resource, name, waitOpts) if res.Error() != nil { mu.Lock() - waitErr = append(waitErr, fmt.Sprintf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr())) + waitErr = append(waitErr, fmt.Sprintf("cmd: %s\nstderr: %s\nwaited for: %s", res.GetCmd(), res.StdErr(), opts.For)) mu.Unlock() } }() } wg.Wait() - Expect(waitErr).To(BeEmpty()) + Expect(waitErr).To(BeEmpty(), "should observe resources in '%s' state before %s timeout", opts.For, opts.Timeout.String()) } func GetStorageClassFromEnv(envName string) (*storagev1.StorageClass, error) { @@ -574,11 +574,11 @@ func GetPhaseByVolumeBindingModeForTemplateSc() string { func GetPhaseByVolumeBindingMode(sc *storagev1.StorageClass) string { switch *sc.VolumeBindingMode { case storagev1.VolumeBindingImmediate: - return string(virtv2.DiskReady) + return string(v1alpha2.DiskReady) case storagev1.VolumeBindingWaitForFirstConsumer: - return string(virtv2.DiskWaitForFirstConsumer) + return string(v1alpha2.DiskWaitForFirstConsumer) default: - return string(virtv2.DiskReady) + return string(v1alpha2.DiskReady) } } @@ -638,26 +638,26 @@ func DeleteTestCaseResources(ns string, resources ResourcesToDelete) { func RebootVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeRestart, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeRestart, vmNamespace, vmNames...) } func StopVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeStop, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeStop, vmNamespace, vmNames...) } func StartVirtualMachinesByVMOP(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeStart, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeStart, vmNamespace, vmNames...) } -func CreateAndApplyVMOPs(label map[string]string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { +func CreateAndApplyVMOPs(label map[string]string, vmopType v1alpha2.VMOPType, vmNamespace string, vmNames ...string) { GinkgoHelper() CreateAndApplyVMOPsWithSuffix(label, "", vmopType, vmNamespace, vmNames...) 
} -func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopType virtv2.VMOPType, vmNamespace string, vmNames ...string) { +func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopType v1alpha2.VMOPType, vmNamespace string, vmNames ...string) { GinkgoHelper() for _, vmName := range vmNames { @@ -667,25 +667,25 @@ func CreateAndApplyVMOPsWithSuffix(label map[string]string, suffix string, vmopT } } -func GenerateVMOP(vmName, vmNamespace string, labels map[string]string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { - return &virtv2.VirtualMachineOperation{ +func GenerateVMOP(vmName, vmNamespace string, labels map[string]string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { + return &v1alpha2.VirtualMachineOperation{ TypeMeta: metav1.TypeMeta{ - APIVersion: virtv2.SchemeGroupVersion.String(), - Kind: virtv2.VirtualMachineOperationKind, + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualMachineOperationKind, }, ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%s", vmName, strings.ToLower(string(vmopType))), Namespace: vmNamespace, Labels: labels, }, - Spec: virtv2.VirtualMachineOperationSpec{ + Spec: v1alpha2.VirtualMachineOperationSpec{ Type: vmopType, VirtualMachine: vmName, }, } } -func GenerateVMOPWithSuffix(vmName, vmNamespace, suffix string, labels map[string]string, vmopType virtv2.VMOPType) *virtv2.VirtualMachineOperation { +func GenerateVMOPWithSuffix(vmName, vmNamespace, suffix string, labels map[string]string, vmopType v1alpha2.VMOPType) *v1alpha2.VirtualMachineOperation { res := GenerateVMOP(vmName, vmNamespace, labels, vmopType) res.ObjectMeta.Name = fmt.Sprintf("%s%s", res.ObjectMeta.Name, suffix) return res @@ -731,11 +731,11 @@ func IsContainsLabelWithValue(obj client.Object, label, value string) bool { return ok && val == value } -// SaveTestResources dump some resources that may help in further diagnostic. +// SaveTestCaseDump dumps resources, logs, and descriptions that may help in further diagnostics. // // NOTE: This method is called in AfterEach for failed specs only. Avoid to use Expect, // as it fails without reporting. Better use GinkgoWriter to report errors at this point. 
-func SaveTestResources(labels map[string]string, additional string) { +func SaveTestCaseDump(labels map[string]string, additional, namespace string) { replacer := strings.NewReplacer( " ", "_", ":", "_", @@ -753,10 +753,24 @@ func SaveTestResources(labels map[string]string, additional string) { if tmpDir == "" { tmpDir = "/tmp" } - resFileName := fmt.Sprintf("%s/e2e_failed__%s__%s.yaml", tmpDir, labels["testcase"], additional) - errorFileName := fmt.Sprintf("%s/e2e_failed__%s__%s_error.txt", tmpDir, labels["testcase"], additional) - cmdr := kubectl.Get("virtualization,intvirt,po -A", kc.GetOptions{Output: "yaml", Labels: labels}) + SaveTestCaseResources(labels, additional, namespace, tmpDir) + SavePodLogsAndDescriptions(labels, additional, namespace, tmpDir) +} + +func SaveTestCaseResources(labels map[string]string, additional, namespace, dumpPath string) { + resFileName := fmt.Sprintf("%s/e2e_failed__%s__%s.yaml", dumpPath, labels["testcase"], additional) + errorFileName := fmt.Sprintf("%s/e2e_failed__%s__%s_error.txt", dumpPath, labels["testcase"], additional) + + cmdr := kubectl.Get( + "virtualization,cvi,vmc,intvirt,pod,volumesnapshot", + kc.GetOptions{ + Labels: labels, + Namespace: namespace, + Output: "yaml", + ShowManagedFields: true, + }, + ) if cmdr.Error() != nil { errReport := fmt.Sprintf("cmd: %s\nerror: %s\nstderr: %s\n", cmdr.GetCmd(), cmdr.Error(), cmdr.StdErr()) GinkgoWriter.Printf("Get resources error:\n%s\n", errReport) @@ -775,12 +789,48 @@ func SaveTestResources(labels map[string]string, additional string) { } } +func SavePodLogsAndDescriptions(labels map[string]string, additional, namespace, dumpPath string) { + pods := &corev1.PodList{} + err := GetObjects(kc.ResourcePod, pods, kc.GetOptions{Namespace: namespace, Labels: labels}) + if err != nil { + GinkgoWriter.Printf("Failed to get PodList:\n%s\n", err) + } + + if len(pods.Items) == 0 { + GinkgoWriter.Println("The list of pods is empty; nothing to dump.") + } + + for _, pod := range pods.Items { + logCmd := kubectl.RawCommand(fmt.Sprintf("logs %s --namespace %s", pod.Name, pod.Namespace), framework.ShortTimeout) + if logCmd.Error() != nil { + GinkgoWriter.Printf("Failed to get logs:\nPodName: %s\nError: %s\n", pod.Name, logCmd.StdErr()) + } + + fileName := fmt.Sprintf("%s/e2e_failed__%s__%s__%s__logs.json", dumpPath, labels["testcase"], additional, pod.Name) + err := os.WriteFile(fileName, logCmd.StdOutBytes(), 0o644) + if err != nil { + GinkgoWriter.Printf("Failed to save logs:\nPodName: %s\nError: %s\n", pod.Name, err) + } + + describeCmd := kubectl.RawCommand(fmt.Sprintf("describe pod %s --namespace %s", pod.Name, pod.Namespace), framework.ShortTimeout) + if describeCmd.Error() != nil { + GinkgoWriter.Printf("Failed to describe pod:\nPodName: %s\nError: %s\n", pod.Name, describeCmd.StdErr()) + } + + fileName = fmt.Sprintf("%s/e2e_failed__%s__%s__%s__describe", dumpPath, labels["testcase"], additional, pod.Name) + err = os.WriteFile(fileName, describeCmd.StdOutBytes(), 0o644) + if err != nil { + GinkgoWriter.Printf("Failed to save pod description:\nPodName: %s\nError: %s\n", pod.Name, err) + } + } +} + type Watcher interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) } type Resource interface { - *virtv2.VirtualMachineIPAddress | *virtv2.VirtualMachineIPAddressLease + *v1alpha2.VirtualMachineIPAddress | *v1alpha2.VirtualMachineIPAddressLease | *v1alpha2.VirtualMachine | *v1alpha2.VirtualDisk } type EventHandler[R Resource] func(eventType watch.EventType, r R) (bool, error) 
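The widened Resource union at the end of this hunk is what lets the generic WaitFor/EventHandler machinery (used by ipam_test.go above) watch VirtualMachine and VirtualDisk objects too. A minimal sketch of a disk variant, assuming WaitFor keeps the generic signature implied by its existing call sites; WaitForVirtualDisk itself is hypothetical and not part of this change:

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"

	"github.com/deckhouse/virtualization/api/core/v1alpha2"
	"github.com/deckhouse/virtualization/tests/e2e/framework"
)

// WaitForVirtualDisk mirrors WaitForVirtualMachineIPAddress: it watches a
// single named VirtualDisk until the handler reports that it is done.
func WaitForVirtualDisk(ctx context.Context, ns, name string, h EventHandler[*v1alpha2.VirtualDisk]) *v1alpha2.VirtualDisk {
	GinkgoHelper()
	vd, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualDisks(ns), h, metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(),
	})
	Expect(err).NotTo(HaveOccurred())
	return vd
}

// Usage sketch: block until the disk reports the Ready phase.
//
//	vd := WaitForVirtualDisk(ctx, ns, "vd-root", func(_ watch.EventType, e *v1alpha2.VirtualDisk) (bool, error) {
//		return e.Status.Phase == v1alpha2.DiskReady, nil
//	})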
diff --git a/tests/e2e/vd_snapshots_test.go b/tests/e2e/vd_snapshots_test.go index 3c6c65ae92..82fe242aae 100644 --- a/tests/e2e/vd_snapshots_test.go +++ b/tests/e2e/vd_snapshots_test.go @@ -26,9 +26,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -49,9 +49,14 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu hasNoConsumerLabel = map[string]string{"hasNoConsumer": "vd-snapshots"} vmAutomaticWithHotplug = map[string]string{"vm": "automatic-with-hotplug"} ns string + criticalErr error ) BeforeAll(func() { + if criticalErr != nil { + Skip(fmt.Sprintf("Skipping due to a flaky error: %s", criticalErr.Error())) + } + if config.IsReusable() { Skip("Test not available in REUSABLE mode: not supported yet.") } @@ -65,7 +70,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu Expect(conf.StorageClass.ImmediateStorageClass).NotTo(BeNil(), "immediate storage class cannot be nil; please set up the immediate storage class in the cluster") - virtualDiskWithoutConsumer := virtv2.VirtualDisk{} + virtualDiskWithoutConsumer := v1alpha2.VirtualDisk{} vdWithoutConsumerFilePath := fmt.Sprintf("%s/vd/vd-ubuntu-http.yaml", conf.TestData.VdSnapshots) err = helper.UnmarshalResource(vdWithoutConsumerFilePath, &virtualDiskWithoutConsumer) Expect(err).NotTo(HaveOccurred(), "cannot get object from file: %s\nstderr: %s", vdWithoutConsumerFilePath, err) @@ -77,7 +82,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -178,7 +183,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu Context(fmt.Sprintf("When virtual machines in %s phase", PhaseRunning), func() { It("creates snapshots with `requiredConsistency` of attached VDs", func() { - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "cannot get virtual machines\nstderr: %s", err) @@ -197,7 +202,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu blockDevices := vm.Status.BlockDeviceRefs for _, blockDevice := range blockDevices { - if blockDevice.Kind == virtv2.VirtualDiskKind { + if blockDevice.Kind == v1alpha2.VirtualDiskKind { By(fmt.Sprintf("Create snapshot for %q", blockDevice.Name)) labels := make(map[string]string) maps.Copy(labels, attachedVirtualDiskLabel) @@ -222,7 +227,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu }) It("creates `vdSnapshots` concurrently", func() { - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{ Namespace: ns, Labels: vmAutomaticWithHotplug, @@ -244,7 +249,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu blockDevices := vm.Status.BlockDeviceRefs for 
_, blockDevice := range blockDevices { - if blockDevice.Kind == virtv2.VirtualDiskKind { + if blockDevice.Kind == v1alpha2.VirtualDiskKind { By(fmt.Sprintf("Create five snapshots for %q of %q", blockDevice.Name, vm.Name)) errs := make([]error, 0, 5) wg := sync.WaitGroup{} @@ -288,20 +293,25 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu maps.Copy(labels, attachedVirtualDiskLabel) maps.Copy(labels, testCaseLabel) - Eventually(func() error { - vdSnapshots := GetVirtualDiskSnapshots(ns, labels) - for _, snapshot := range vdSnapshots.Items { - if snapshot.Status.Phase == virtv2.VirtualDiskSnapshotPhaseReady || snapshot.DeletionTimestamp != nil { - continue + err := InterceptGomegaFailure(func() { + Eventually(func() error { + vdSnapshots := GetVirtualDiskSnapshots(ns, labels) + for _, snapshot := range vdSnapshots.Items { + if snapshot.Status.Phase == v1alpha2.VirtualDiskSnapshotPhaseReady || snapshot.DeletionTimestamp != nil { + continue + } + return errors.New("still waiting for all snapshots to become ready or be deleted") - } - return errors.New("still wait for all snapshots either in ready or in deletion state") - } - return nil - }).WithTimeout( - LongWaitDuration, - ).WithPolling( - Interval, - ).Should(Succeed(), "all snapshots should be in ready state after creation") + return nil + }).WithTimeout( + LongWaitDuration, + ).WithPolling( + Interval, + ).Should(Succeed(), "all snapshots should be in ready state after creation") + }) + if err != nil { + criticalErr = err + } }) // TODO: It is a known issue that disk snapshots are not always created consistently. To prevent this error from causing noise during testing, we disabled this check. It will need to be re-enabled once the consistency issue is fixed. @@ -330,7 +340,7 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu It("checks `FileSystemFrozen` status of VMs", func() { By("Status should not be `Frozen`") - vmObjects := virtv2.VirtualMachineList{} + vmObjects := v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, &vmObjects, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "cannot get virtual machines\nstderr: %s", err) @@ -374,17 +384,17 @@ var _ = Describe("VirtualDiskSnapshots", framework.CommonE2ETestDecorators(), fu func CreateVirtualDiskSnapshot(vdName, snapshotName, namespace string, requiredConsistency bool, labels map[string]string) error { GinkgoHelper() - vdSnapshot := virtv2.VirtualDiskSnapshot{ - TypeMeta: v1.TypeMeta{ + vdSnapshot := v1alpha2.VirtualDiskSnapshot{ + TypeMeta: metav1.TypeMeta{ APIVersion: APIVersion, - Kind: virtv2.VirtualDiskSnapshotKind, + Kind: v1alpha2.VirtualDiskSnapshotKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Labels: labels, Name: snapshotName, Namespace: namespace, }, - Spec: virtv2.VirtualDiskSnapshotSpec{ + Spec: v1alpha2.VirtualDiskSnapshotSpec{ RequiredConsistency: requiredConsistency, VirtualDiskName: vdName, }, @@ -406,9 +416,9 @@ func CreateVirtualDiskSnapshot(vdName, snapshotName, namespace string, requiredC return nil } -func GetVirtualDiskSnapshots(namespace string, labels map[string]string) virtv2.VirtualDiskSnapshotList { +func GetVirtualDiskSnapshots(namespace string, labels map[string]string) v1alpha2.VirtualDiskSnapshotList { GinkgoHelper() - vdSnapshots := virtv2.VirtualDiskSnapshotList{} + vdSnapshots := v1alpha2.VirtualDiskSnapshotList{} err := GetObjects(kc.ResourceVDSnapshot, &vdSnapshots, kc.GetOptions{ ExcludedLabels: []string{"hasNoConsumer"}, 
Namespace: namespace, @@ -419,7 +429,7 @@ func GetVirtualDiskSnapshots(namespace string, labels map[string]string) virtv2. } func CheckFileSystemFrozen(vmName, vmNamespace string) (bool, error) { - vmObj := virtv2.VirtualMachine{} + vmObj := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vmObj, kc.GetOptions{Namespace: vmNamespace}) if err != nil { return false, fmt.Errorf("cannot get `VirtualMachine`: %q\nstderr: %w", vmName, err) @@ -427,7 +437,7 @@ func CheckFileSystemFrozen(vmName, vmNamespace string) (bool, error) { for _, condition := range vmObj.Status.Conditions { if condition.Type == vmcondition.TypeFilesystemFrozen.String() { - return condition.Status == v1.ConditionTrue, nil + return condition.Status == metav1.ConditionTrue, nil } } diff --git a/tests/e2e/vm_configuration_test.go b/tests/e2e/vm_configuration_test.go index 17a05471d5..dd4be68eff 100644 --- a/tests/e2e/vm_configuration_test.go +++ b/tests/e2e/vm_configuration_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" d8 "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -57,7 +57,7 @@ var _ = Describe(fmt.Sprintf("VirtualMachineConfiguration %d", GinkgoParallelPro AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -131,7 +131,7 @@ var _ = Describe(fmt.Sprintf("VirtualMachineConfiguration %d", GinkgoParallelPro vmNames := strings.Split(res.StdOut(), " ") Expect(vmNames).NotTo(BeEmpty()) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmNames[0], &vmResource, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -210,7 +210,7 @@ var _ = Describe(fmt.Sprintf("VirtualMachineConfiguration %d", GinkgoParallelPro vmNames := strings.Split(res.StdOut(), " ") Expect(vmNames).NotTo(BeEmpty()) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmNames[0], &vmResource, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "%v", err) @@ -301,7 +301,7 @@ func ChangeCPUCoresNumber(cpuNumber int, vmNamespace string, vmNames ...string) func CheckCPUCoresNumber(approvalMode, stage string, requiredValue int, vmNamespace string, vmNames ...string) { for _, vmName := range vmNames { By(fmt.Sprintf("Checking the number of processor cores %s changing", stage)) - vmResource := virtv2.VirtualMachine{} + vmResource := v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, vmName, &vmResource, kc.GetOptions{Namespace: vmNamespace}) Expect(err).NotTo(HaveOccurred(), "%v", err) Expect(vmResource.Spec.CPU.Cores).To(Equal(requiredValue)) diff --git a/tests/e2e/vm_connectivity_test.go b/tests/e2e/vm_connectivity_test.go index 351b250cfb..78d53bb3e6 100644 --- a/tests/e2e/vm_connectivity_test.go +++ b/tests/e2e/vm_connectivity_test.go @@ -26,7 +26,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/executor" @@ -46,7 +46,7 @@ var _ = Describe("VirtualMachineConnectivity", framework.CommonE2ETestDecorators testCaseLabel = map[string]string{"testcase": "vm-connectivity"} aObjName = fmt.Sprintf("%s-vm-connectivity-a", namePrefix) bObjName = fmt.Sprintf("%s-vm-connectivity-b", namePrefix) - vmA, vmB virtv2.VirtualMachine + vmA, vmB v1alpha2.VirtualMachine svcA, svcB corev1.Service ns string @@ -65,7 +65,7 @@ var _ = Describe("VirtualMachineConnectivity", framework.CommonE2ETestDecorators AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -140,12 +140,12 @@ var _ = Describe("VirtualMachineConnectivity", framework.CommonE2ETestDecorators Context("When virtual machine agents are ready", func() { It("gets VMs and SVCs objects", func() { - vmA = virtv2.VirtualMachine{} + vmA = v1alpha2.VirtualMachine{} err := GetObject(kc.ResourceVM, aObjName, &vmA, kc.GetOptions{ Namespace: ns, }) Expect(err).NotTo(HaveOccurred()) - vmB = virtv2.VirtualMachine{} + vmB = v1alpha2.VirtualMachine{} err = GetObject(kc.ResourceVM, bObjName, &vmB, kc.GetOptions{ Namespace: ns, }) diff --git a/tests/e2e/vm_disk_attachment_test.go b/tests/e2e/vm_disk_attachment_test.go index 26a90a2c5c..f11a12e51f 100644 --- a/tests/e2e/vm_disk_attachment_test.go +++ b/tests/e2e/vm_disk_attachment_test.go @@ -22,9 +22,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -34,7 +34,7 @@ import ( const unacceptableCount = -1000 -var APIVersion = virtv2.SchemeGroupVersion.String() +var APIVersion = v1alpha2.SchemeGroupVersion.String() var _ = Describe("VirtualDiskAttachment", framework.CommonE2ETestDecorators(), func() { var ( @@ -69,7 +69,7 @@ var _ = Describe("VirtualDiskAttachment", framework.CommonE2ETestDecorators(), f AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -131,7 +131,7 @@ var _ = Describe("VirtualDiskAttachment", framework.CommonE2ETestDecorators(), f }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred(), "virtualMachine: %s", vmName) }) It("attaches virtual disk", func() { - AttachBlockDevice(ns, vmName, vdAttach, virtv2.VMBDAObjectRefKindVirtualDisk, testCaseLabel, conf.TestData.VMDiskAttachment) + AttachBlockDevice(ns, vmName, vdAttach, v1alpha2.VMBDAObjectRefKindVirtualDisk, testCaseLabel, conf.TestData.VMDiskAttachment) }) It("checks VM and VMBDA phases", func() { By(fmt.Sprintf("VMBDA should be in %s phases", PhaseAttached)) @@ -227,7 +227,7 @@ type BlockDevice struct { Type string `json:"type"` } -func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceType virtv2.VMBDAObjectRefKind, labels map[string]string, testDataPath string) { +func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceType v1alpha2.VMBDAObjectRefKind, labels map[string]string, testDataPath string) { vmbdaFilePath := fmt.Sprintf("%s/vmbda/%s.yaml", testDataPath, blockDeviceName) err := CreateVMBDAManifest(vmbdaFilePath, vmName, blockDeviceName, blockDeviceType, labels) Expect(err).NotTo(HaveOccurred(), "%v", err) @@ -240,19 +240,19 @@ func AttachBlockDevice(vmNamespace, vmName, blockDeviceName string, blockDeviceT Expect(res.Error()).NotTo(HaveOccurred(), res.StdErr()) } -func CreateVMBDAManifest(filePath, vmName, blockDeviceName string, blockDeviceType virtv2.VMBDAObjectRefKind, labels map[string]string) error { - vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{ - TypeMeta: v1.TypeMeta{ +func CreateVMBDAManifest(filePath, vmName, blockDeviceName string, blockDeviceType v1alpha2.VMBDAObjectRefKind, labels map[string]string) error { + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{ + TypeMeta: metav1.TypeMeta{ APIVersion: APIVersion, - Kind: virtv2.VirtualMachineBlockDeviceAttachmentKind, + Kind: v1alpha2.VirtualMachineBlockDeviceAttachmentKind, }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: blockDeviceName, Labels: labels, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: vmName, - BlockDeviceRef: virtv2.VMBDAObjectRef{ + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ Kind: blockDeviceType, Name: blockDeviceName, }, diff --git a/tests/e2e/vm_disk_resizing_test.go b/tests/e2e/vm_disk_resizing_test.go index 2dadcea054..8955f41d8a 100644 --- a/tests/e2e/vm_disk_resizing_test.go +++ b/tests/e2e/vm_disk_resizing_test.go @@ -28,7 +28,7 @@ import 
( "k8s.io/apimachinery/pkg/api/resource" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" cfg "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -40,11 +40,16 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun vmCount = 1 diskCount = 3 ) - var vmObj *virtv2.VirtualMachine + var vmObj *v1alpha2.VirtualMachine var ns string testCaseLabel := map[string]string{"testcase": "disk-resizing"} BeforeAll(func() { + // TODO: The test is being disabled because the new functionality for volume migration has introduced + // an issue that results in the disappearance of the serial inside kvvmi. + // This leads to errors during disk resizing. Remove Skip after fixing the issue. + Skip("This test case is not working everytime. Should be fixed.") + if cfg.IsReusable() { Skip("Test not available in REUSABLE mode: not supported yet.") } @@ -59,7 +64,7 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -75,8 +80,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun Context("When the virtual images are applied", func() { It("checks `VirtualImages` phase", func() { - By(fmt.Sprintf("`VirtualImages` should be in the %q phases", virtv2.ImageReady), func() { - WaitPhaseByLabel(kc.ResourceVI, string(virtv2.ImageReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualImages` should be in the %q phases", v1alpha2.ImageReady), func() { + WaitPhaseByLabel(kc.ResourceVI, string(v1alpha2.ImageReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -87,8 +92,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun Context("When the virtual disks are applied", func() { It("checks `VirtualDisks` phase", func() { - By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", virtv2.DiskReady), func() { - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", v1alpha2.DiskReady), func() { + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -110,8 +115,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun It("retrieves the test objects", func() { By("`VirtualMachine`", func() { - vmObjs := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vmObjs, kc.GetOptions{ + vmObjs := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vmObjs, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, }) @@ -125,8 +130,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun Context("When the virtual machine block device attachment is applied", func() { It("checks `VirtualMachineBlockDeviceAttachment` phase", func() { - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", virtv2.BlockDeviceAttachmentPhaseAttached), func() { - WaitPhaseByLabel(kc.ResourceVMBDA, string(virtv2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ + 
By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { + WaitPhaseByLabel(kc.ResourceVMBDA, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -165,8 +170,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun go func() { defer GinkgoRecover() defer wg.Done() - By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", virtv2.DiskResizing), func() { - WaitPhaseByLabel(virtv2.VirtualDiskResource, string(virtv2.DiskResizing), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phase", v1alpha2.DiskResizing), func() { + WaitPhaseByLabel(v1alpha2.VirtualDiskResource, string(v1alpha2.DiskResizing), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -182,8 +187,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun }) It("checks `VirtualDisks`, `VirtualMachine` and `VirtualMachineBlockDeviceAttachment` phases", func() { - By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", virtv2.DiskReady), func() { - WaitPhaseByLabel(kc.ResourceVD, string(virtv2.DiskReady), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualDisks` should be in the %q phases", v1alpha2.DiskReady), func() { + WaitPhaseByLabel(kc.ResourceVD, string(v1alpha2.DiskReady), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -196,8 +201,8 @@ var _ = Describe("VirtualDiskResizing", framework.CommonE2ETestDecorators(), fun Timeout: MaxWaitTimeout, }) }) - By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", virtv2.BlockDeviceAttachmentPhaseAttached), func() { - WaitPhaseByLabel(kc.ResourceVMBDA, string(virtv2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ + By(fmt.Sprintf("`VirtualMachineBlockDeviceAttachment` should be in the %q phases", v1alpha2.BlockDeviceAttachmentPhaseAttached), func() { + WaitPhaseByLabel(kc.ResourceVMBDA, string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -269,8 +274,8 @@ func WaitBlockDeviceRefsAttached(namespace string, vms ...string) { GinkgoHelper() Eventually(func() error { for _, vmName := range vms { - vm := virtv2.VirtualMachine{} - err := GetObject(virtv2.VirtualMachineResource, vmName, &vm, kc.GetOptions{Namespace: namespace}) + vm := v1alpha2.VirtualMachine{} + err := GetObject(v1alpha2.VirtualMachineResource, vmName, &vm, kc.GetOptions{Namespace: namespace}) if err != nil { return fmt.Errorf("virtualMachine: %s\nstderr: %w", vmName, err) } @@ -292,7 +297,7 @@ func ResizeDisks(addedSize *resource.Quantity, config *cfg.Config, ns string, vi go func() { defer GinkgoRecover() defer wg.Done() - diskObject := virtv2.VirtualDisk{} + diskObject := v1alpha2.VirtualDisk{} err := GetObject(kc.ResourceVD, vd, &diskObject, kc.GetOptions{Namespace: ns}) Expect(err).NotTo(HaveOccurred(), "%v", err) newValue := resource.NewQuantity(diskObject.Spec.PersistentVolumeClaim.Size.Value()+addedSize.Value(), resource.BinarySI) @@ -306,7 +311,7 @@ func ResizeDisks(addedSize *resource.Quantity, config *cfg.Config, ns string, vi func GetSizeFromObject(vdName, namespace string) (*resource.Quantity, error) { GinkgoHelper() - vd := virtv2.VirtualDisk{} + vd := v1alpha2.VirtualDisk{} err := GetObject(kc.ResourceVD, vdName, &vd, kc.GetOptions{Namespace: namespace}) if err != nil { return nil, err @@ -372,9 +377,9 
@@ func GetDiskIDPath(vdName string, vmi *virtv1.VirtualMachineInstance) string { // Refactor this flow when `target` field will be fixed for `VirtualMachine.Status.BlockDeviceRefs` func GetVirtualMachineDisks(vmNamespace, vmName string) (VirtualMachineDisks, error) { GinkgoHelper() - var vmObject virtv2.VirtualMachine + var vmObject v1alpha2.VirtualMachine disks := make(map[string]DiskMetaData, 0) - err := GetObject(virtv2.VirtualMachineResource, vmName, &vmObject, kc.GetOptions{ + err := GetObject(v1alpha2.VirtualMachineResource, vmName, &vmObject, kc.GetOptions{ Namespace: vmNamespace, }) if err != nil { @@ -391,7 +396,7 @@ func GetVirtualMachineDisks(vmNamespace, vmName string) (VirtualMachineDisks, er for _, device := range vmObject.Status.BlockDeviceRefs { disk := DiskMetaData{} - if device.Kind != virtv2.DiskDevice { + if device.Kind != v1alpha2.DiskDevice { continue } diskIDPath := GetDiskIDPath(device.Name, intVirtVmi) diff --git a/tests/e2e/vm_evacuation_test.go b/tests/e2e/vm_evacuation_test.go index c81c07c115..df4398d644 100644 --- a/tests/e2e/vm_evacuation_test.go +++ b/tests/e2e/vm_evacuation_test.go @@ -28,7 +28,7 @@ import ( policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -58,7 +58,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), framework.CommonE2E AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } resourcesToDelete := ResourcesToDelete{ AdditionalResources: []AdditionalResource{ @@ -101,7 +101,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), framework.CommonE2E Timeout: MaxWaitTimeout, }) - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, vms, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -115,7 +115,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), framework.CommonE2E By("Waiting for all VMOPs to be finished") Eventually(func() error { - vmops := &virtv2.VirtualMachineOperationList{} + vmops := &v1alpha2.VirtualMachineOperationList{} err := GetObjects(kc.ResourceVMOP, vmops, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -130,7 +130,7 @@ var _ = Describe("VirtualMachineEvacuation", SIGMigration(), framework.CommonE2E if _, exists := vmop.GetAnnotations()["virtualization.deckhouse.io/evacuation"]; !exists { continue } - if vmop.Status.Phase == virtv2.VMOPPhaseFailed || vmop.Status.Phase == virtv2.VMOPPhaseCompleted { + if vmop.Status.Phase == v1alpha2.VMOPPhaseFailed || vmop.Status.Phase == v1alpha2.VMOPPhaseCompleted { finishedVMOPs++ } diff --git a/tests/e2e/vm_label_annotation_test.go b/tests/e2e/vm_label_annotation_test.go index 620cfdd925..7ad003bc80 100644 --- a/tests/e2e/vm_label_annotation_test.go +++ b/tests/e2e/vm_label_annotation_test.go @@ -22,9 +22,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -56,7 +56,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -94,8 +94,8 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco Context("When virtual machines are applied", func() { It("checks VMs phases", func() { - By("Virtual machine agents should be ready") - WaitVMAgentReady(kc.WaitOptions{ + By("Virtual machine phase should be Running") + WaitPhaseByLabel(kc.ResourceVM, PhaseRunning, kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -103,7 +103,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco }) }) - Context("When virtual machine agents are ready", func() { + Context("When virtual machine is running", func() { It(fmt.Sprintf("marks VMs with label %q", specialKeyValue), func() { res := kubectl.List(kc.ResourceVM, kc.GetOptions{ Labels: testCaseLabel, @@ -119,7 +119,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco It("checks VMs and pods labels after VMs labeling", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -134,7 +134,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -164,7 +164,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco It("checks VMs and pods labels after VMs unlabeling", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -179,7 +179,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -211,7 +211,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco It("checks VMs and pods annotations after VMs annotating", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -226,7 +226,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -256,7 +256,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", 
framework.CommonE2ETestDeco It("checks VMs and pods annotations after VMs unannotating", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, @@ -271,7 +271,7 @@ var _ = Describe("VirtualMachineLabelAndAnnotation", framework.CommonE2ETestDeco } activePodName := GetActiveVirtualMachinePod(&vm) - vmPod := v1.Pod{} + vmPod := corev1.Pod{} err = GetObject(kc.ResourcePod, activePodName, &vmPod, kc.GetOptions{Namespace: ns}) if err != nil { return err @@ -356,7 +356,7 @@ func RemoveAnnotation(resource kc.Resource, annotations map[string]string, ns st return nil } -func GetActiveVirtualMachinePod(vmObj *virtv2.VirtualMachine) string { +func GetActiveVirtualMachinePod(vmObj *v1alpha2.VirtualMachine) string { for _, pod := range vmObj.Status.VirtualMachinePods { if pod.Active { return pod.Name diff --git a/tests/e2e/vm_live_migration_tcp_session_test.go b/tests/e2e/vm_live_migration_tcp_session_test.go new file mode 100644 index 0000000000..e790f91408 --- /dev/null +++ b/tests/e2e/vm_live_migration_tcp_session_test.go @@ -0,0 +1,383 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "os" + "regexp" + "strings" + "sync" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/watch" + + "github.com/deckhouse/virtualization-controller/pkg/builder/vd" + "github.com/deckhouse/virtualization-controller/pkg/builder/vm" + "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/tests/e2e/d8" + "github.com/deckhouse/virtualization/tests/e2e/framework" + kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" + "github.com/deckhouse/virtualization/tests/e2e/object" +) + +var _ = Describe("VirtualMachineLiveMigrationTCPSession", SIGMigration(), framework.CommonE2ETestDecorators(), func() { + var ( + ctx context.Context + cancel context.CancelFunc + + iperfClientVM *v1alpha2.VirtualMachine + iperfServerVM *v1alpha2.VirtualMachine + + rawReport = new(string) + reportName = "iperf-client-report.json" + + testCaseLabel = "testcase" + testCaseLabelValue = "vm-live-migration-tcp-session" + + iperfClientName = "iperf-client" + iperfServerName = "iperf-server" + + alpineVirtualImageURL = "https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/alpine/alpine-3-21-uefi-perf.qcow2" + + f = framework.NewFramework(testCaseLabelValue) + storageClass = framework.GetConfig().StorageClass.TemplateStorageClass + testSkipped bool + ) + + BeforeAll(func() { + // TODO: The test is being disabled because running it with the ginkgo `--race` option detects a race condition. + // This leads to unstable test execution. Remove Skip after fixing the issue. 
+ testSkipped = true + Skip("This test case does not pass every time. Should be fixed.") + }) + + f.BeforeAll() + f.AfterAll() + + AfterEach(func() { + if !testSkipped { + if CurrentSpecReport().Failed() { + SaveTestCaseDump(map[string]string{testCaseLabel: testCaseLabelValue}, CurrentSpecReport().LeafNodeText, f.Namespace().Name) + SaveIPerfClientReport(testCaseLabelValue, rawReport) + } + + cancel() + } + }) + + It("checks TCP connection", func() { + By("Environment preparation", func() { + iperfClientDisk := newVirtualDisk(iperfClientName, f.Namespace().Name, alpineVirtualImageURL, &storageClass.Name, map[string]string{testCaseLabel: testCaseLabelValue}) + iperfServerDisk := newVirtualDisk(iperfServerName, f.Namespace().Name, alpineVirtualImageURL, &storageClass.Name, map[string]string{testCaseLabel: testCaseLabelValue}) + virtualDisks := []*v1alpha2.VirtualDisk{iperfClientDisk, iperfServerDisk} + + iperfClientVM = newVirtualMachine(iperfClientName, f.Namespace().Name, iperfClientDisk, map[string]string{testCaseLabel: testCaseLabelValue}) + iperfServerVM = newVirtualMachine(iperfServerName, f.Namespace().Name, iperfServerDisk, map[string]string{testCaseLabel: testCaseLabelValue}) + + ctx, cancel = context.WithTimeout(context.Background(), framework.MaxTimeout) + + wg := &sync.WaitGroup{} + + for _, vd := range virtualDisks { + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + _ = CreateVirtualDisk(ctx, vd) + }() + } + + iperfServerVM = CreateVirtualMachine(ctx, iperfServerVM) + iperfClientVM = CreateVirtualMachine(ctx, iperfClientVM) + + wg.Wait() + }) + + By("Wait for the iPerf server to start", func() { + WaitForIPerfServerToStart(iperfServerName, f.Namespace().Name) + }) + + By("Run the iPerf client", func() { + cmd := fmt.Sprintf("nohup iperf3 --client %s --time 0 --json > ~/%s 2>&1 < /dev/null &", iperfServerVM.Status.IPAddress, reportName) + ExecSSHCommand(f.Namespace().Name, iperfClientVM.Name, cmd) + }) + + By("Migrate the iPerf server", func() { + MigrateVirtualMachines(map[string]string{testCaseLabel: testCaseLabelValue}, f.Namespace().Name, iperfServerVM.Name) + WaitMigrationEnd(iperfServerVM.Name, f.Namespace().Name) + }) + + By("Wait for packets to be transmitted after migration", func() { + time.Sleep(10 * time.Second) + }) + + By("Check the iPerf client report", func() { + StopIPerfClient(iperfClientVM.Name, f.Namespace().Name, iperfServerVM.Status.IPAddress) + GetIPerfClientReport(iperfClientVM.Name, f.Namespace().Name, reportName, rawReport) + + report := &IPerfReport{} + err := json.Unmarshal([]byte(*rawReport), report) + Expect(err).NotTo(HaveOccurred()) + + iperfServerVMAfterMigration := &v1alpha2.VirtualMachine{} + err = GetObject(kc.ResourceVM, iperfServerVM.Name, iperfServerVMAfterMigration, kc.GetOptions{Namespace: f.Namespace().Name}) + Expect(err).NotTo(HaveOccurred()) + + iPerfClientStartTime, err := time.Parse(time.RFC1123, report.Start.Timestamp.Time) + Expect(err).NotTo(HaveOccurred()) + Expect(iPerfClientStartTime.Before(iperfServerVMAfterMigration.Status.MigrationState.StartTimestamp.Time)).To(BeTrue(), "the iPerfClient connection test should start before the virtual machine is migrated") + + // Reconstruct the client's wall-clock end time: iperf reports the epoch start in timesecs and the elapsed seconds in sum_sent.end; the fractional part is carried as nanoseconds. + iPerfClientEndTimeSec := int64(report.Start.Timestamp.Timesecs) + int64(report.End.SumSent.End) + iPerfClientEndTimeNSec := int64((report.End.SumSent.End - float64(int64(report.End.SumSent.End))) * 1e9) + iPerfClientEndTime := time.Unix(iPerfClientEndTimeSec, iPerfClientEndTimeNSec).UTC() + 
Expect(iPerfClientEndTime.After(iperfServerVMAfterMigration.Status.MigrationState.EndTimestamp.Time)).To(BeTrue(), "the iPerfClient connection test should stop after the virtual machine is migrated") + + zeroBytesIntervalCounter := 0 + for _, i := range report.Intervals { + if i.Sum.Bytes == 0 { + zeroBytesIntervalCounter++ + } + } + Expect(zeroBytesIntervalCounter).To(BeNumerically("<=", 1), "there should not be more than one zero-byte interval during the migration process") + }) + }) +}) + +func WaitMigrationEnd(vmName, namespace string) { + GinkgoHelper() + + Eventually(func() error { + vmAfterMigration := &v1alpha2.VirtualMachine{} + err := GetObject(kc.ResourceVM, vmName, vmAfterMigration, kc.GetOptions{Namespace: namespace}) + if err != nil { + return err + } + + if vmAfterMigration.Status.MigrationState != nil { + if vmAfterMigration.Status.MigrationState.Result == v1alpha2.MigrationResultSucceeded { + return nil + } + } + + return fmt.Errorf("migration of `VirtualMachine` %s has not completed successfully yet", vmName) + }).WithTimeout(LongWaitDuration).WithPolling(Interval).Should(Succeed()) +} + +func WaitForIPerfServerToStart(vmName, namespace string) { + GinkgoHelper() + + var pid string + + iPerfServerPidCmd := "ps aux | grep \"iperf3 -s\" | grep -v grep | awk \"{print \\$1}\"" + Eventually(func() error { + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, iPerfServerPidCmd, d8.SSHOptions{ + Namespace: namespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, + }) + if res.Error() != nil { + return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) + } + pid = strings.TrimSuffix(res.StdOut(), "\n") + + re := regexp.MustCompile(`^\d+$`) + if !re.MatchString(pid) { + return fmt.Errorf("failed to find iPerf server PID: %s", pid) + } + + return nil + }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred()) +} + +func StopIPerfClient(vmName, namespace, ip string) { + GinkgoHelper() + + var pid string + + iPerfClientPidCmd := fmt.Sprintf("ps aux | grep \"iperf3 --client %s\" | grep -v grep | awk \"{print \\$1}\"", ip) + Eventually(func() error { + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, iPerfClientPidCmd, d8.SSHOptions{ + Namespace: namespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, + }) + if res.Error() != nil { + return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) + } + pid = res.StdOut() + return nil + }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred()) + + stopIPerfClientCmd := fmt.Sprintf("kill %s", pid) + Eventually(func() error { + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, stopIPerfClientCmd, d8.SSHOptions{ + Namespace: namespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, + }) + if res.Error() != nil { + return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) + } + return nil + }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred()) +} + +func GetIPerfClientReport(vmName, namespace, reportName string, report *string) { + GinkgoHelper() + + cmd := fmt.Sprintf("jq . 
~/%s", reportName) + Eventually(func() error { + res := framework.GetClients().D8Virtualization().SSHCommand(vmName, cmd, d8.SSHOptions{ + Namespace: namespace, + Username: conf.TestData.SSHUser, + IdentityFile: conf.TestData.Sshkey, + }) + if res.Error() != nil { + return fmt.Errorf("cmd: %s\nstderr: %s", res.GetCmd(), res.StdErr()) + } + + *report = res.StdOut() + + return nil + }).WithTimeout(Timeout).WithPolling(Interval).ShouldNot(HaveOccurred()) +} + +func SaveIPerfClientReport(testCaseName string, rawReport *string) { + GinkgoHelper() + + tmpDir := os.Getenv("RUNNER_TEMP") + if tmpDir == "" { + tmpDir = "/tmp" + } + + var jsonObject map[string]any + err := json.Unmarshal([]byte(*rawReport), &jsonObject) + Expect(err).NotTo(HaveOccurred()) + + r, err := json.MarshalIndent(&jsonObject, "", " ") + Expect(err).NotTo(HaveOccurred()) + + name := fmt.Sprintf("%s/e2e_failed__%s__iperf_client_report.json", tmpDir, testCaseName) + err = os.WriteFile(name, r, 0o644) + Expect(err).NotTo(HaveOccurred()) +} + +type IPerfReport struct { + Start struct { + Timestamp struct { + Time string `json:"time"` + Timesecs int `json:"timesecs"` + } `json:"timestamp"` + } `json:"start"` + Intervals []IPerfInterval `json:"intervals"` + End struct { + SumSent struct { + End float64 `json:"end"` + } `json:"sum_sent"` + } `json:"end"` + Error string `json:"error,omitempty"` +} + +type IPerfInterval struct { + Sum struct { + Bytes int64 `json:"bytes"` + } `json:"sum"` +} + +func newVirtualMachine(name, namespace string, disk *v1alpha2.VirtualDisk, labels map[string]string) *v1alpha2.VirtualMachine { + cpuCount := 1 + coreFraction := "10%" + + return vm.New( + vm.WithName(name), + vm.WithNamespace(namespace), + vm.WithBootloader(v1alpha2.EFI), + vm.WithCPU(cpuCount, &coreFraction), + vm.WithMemory(*resource.NewQuantity(object.Mi256, resource.BinarySI)), + vm.WithDisks(disk), + vm.WithLiveMigrationPolicy(v1alpha2.AlwaysSafeMigrationPolicy), + vm.WithProvisioning(&v1alpha2.Provisioning{ + Type: v1alpha2.ProvisioningTypeUserData, + UserData: object.DefaultCloudInit, + }), + vm.WithLabels(labels), + ) +} + +func newVirtualDisk(name, namespace, image string, storageClass *string, labels map[string]string) *v1alpha2.VirtualDisk { + return vd.New( + vd.WithName(name), + vd.WithNamespace(namespace), + vd.WithStorageClass(storageClass), + vd.WithDataSourceHTTP(&v1alpha2.DataSourceHTTP{ + URL: image, + }), + vd.WithLabels(labels), + ) +} + +func WaitForVirtualMachine(ctx context.Context, namespace, name string, h EventHandler[*v1alpha2.VirtualMachine]) *v1alpha2.VirtualMachine { + GinkgoHelper() + + virtualMachine, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualMachines(namespace), h, metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + }) + Expect(err).NotTo(HaveOccurred()) + + return virtualMachine +} + +func WaitForVirtualDisk(ctx context.Context, namespace, name string, h EventHandler[*v1alpha2.VirtualDisk]) *v1alpha2.VirtualDisk { + GinkgoHelper() + + virtualDisk, err := WaitFor(ctx, framework.GetClients().VirtClient().VirtualDisks(namespace), h, metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + }) + Expect(err).NotTo(HaveOccurred()) + + return virtualDisk +} + +func CreateVirtualMachine(ctx context.Context, virtualMachine *v1alpha2.VirtualMachine) *v1alpha2.VirtualMachine { + GinkgoHelper() + + CreateResource(ctx, virtualMachine) + virtualMachine = WaitForVirtualMachine(ctx, virtualMachine.Namespace, 
virtualMachine.Name, func(_ watch.EventType, e *v1alpha2.VirtualMachine) (bool, error) { + return e.Status.Phase == v1alpha2.MachineRunning, nil + }) + + return virtualMachine +} + +func CreateVirtualDisk(ctx context.Context, virtualDisk *v1alpha2.VirtualDisk) *v1alpha2.VirtualDisk { + GinkgoHelper() + + CreateResource(ctx, virtualDisk) + virtualDisk = WaitForVirtualDisk(ctx, virtualDisk.Namespace, virtualDisk.Name, func(_ watch.EventType, e *v1alpha2.VirtualDisk) (bool, error) { + return e.Status.Phase == v1alpha2.DiskReady, nil + }) + + return virtualDisk +} diff --git a/tests/e2e/vm_migration_cancel_test.go b/tests/e2e/vm_migration_cancel_test.go index 21bc7a3756..bb424e97dc 100644 --- a/tests/e2e/vm_migration_cancel_test.go +++ b/tests/e2e/vm_migration_cancel_test.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/gomega" virtv1 "kubevirt.io/api/core/v1" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/d8" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -58,7 +58,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), framework.Comm AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } resourcesToDelete := ResourcesToDelete{ AdditionalResources: []AdditionalResource{ @@ -83,7 +83,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), framework.Comm Timeout: MaxWaitTimeout, }) - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} err := GetObjects(kc.ResourceVM, vms, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) Expect(err).NotTo(HaveOccurred()) @@ -112,7 +112,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), framework.Comm someCompleted := false Eventually(func() error { - vmops := &virtv2.VirtualMachineOperationList{} + vmops := &v1alpha2.VirtualMachineOperationList{} err := GetObjects(kc.ResourceVMOP, vmops, kc.GetOptions{Labels: testCaseLabel, Namespace: ns}) if err != nil { return err @@ -144,7 +144,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), framework.Comm for _, vmop := range vmops.Items { switch vmop.Status.Phase { - case virtv2.VMOPPhaseInProgress: + case v1alpha2.VMOPPhaseInProgress: _, readyToDelete := migrationReady[vmop.Name] if readyToDelete && vmop.GetDeletionTimestamp().IsZero() { @@ -157,7 +157,7 @@ var _ = Describe("VirtualMachineCancelMigration", SIGMigration(), framework.Comm return res.Error() } } - case virtv2.VMOPPhaseFailed, virtv2.VMOPPhaseCompleted: + case v1alpha2.VMOPPhaseFailed, v1alpha2.VMOPPhaseCompleted: someCompleted = true return nil } diff --git a/tests/e2e/vm_migration_test.go b/tests/e2e/vm_migration_test.go index 446429bfde..a2448f679f 100644 --- a/tests/e2e/vm_migration_test.go +++ b/tests/e2e/vm_migration_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -44,7 +44,7 @@ var _ = Describe("VirtualMachineMigration", SIGMigration(), framework.CommonE2ET AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -98,8 +98,8 @@ var _ = Describe("VirtualMachineMigration", SIGMigration(), framework.CommonE2ET Context("When VMs migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -149,5 +149,5 @@ var _ = Describe("VirtualMachineMigration", SIGMigration(), framework.CommonE2ET func MigrateVirtualMachines(label map[string]string, vmNamespace string, vmNames ...string) { GinkgoHelper() - CreateAndApplyVMOPs(label, virtv2.VMOPTypeEvict, vmNamespace, vmNames...) + CreateAndApplyVMOPs(label, v1alpha2.VMOPTypeEvict, vmNamespace, vmNames...) } diff --git a/tests/e2e/vm_restore_force_test.go b/tests/e2e/vm_restore_force_test.go index bd700801dd..bab2d3cdae 100644 --- a/tests/e2e/vm_restore_force_test.go +++ b/tests/e2e/vm_restore_force_test.go @@ -24,10 +24,10 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" vmrestorecondition "github.com/deckhouse/virtualization/api/core/v1alpha2/vm-restore-condition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -48,7 +48,7 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo namespace string testCaseLabel = map[string]string{"testcase": "vm-restore-force"} additionalDiskLabel = map[string]string{"additionalDisk": "vm-restore-force"} - originalVMNetworks map[string][]virtv2.NetworksStatus + originalVMNetworks map[string][]v1alpha2.NetworksStatus criticalError string ) @@ -70,7 +70,7 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, namespace) } cancel() @@ -80,16 +80,16 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -118,8 +118,8 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo }) By("`VirtualMachineBlockDeviceAttachment` should be attached", func() { WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentKind, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentKind, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Labels: testCaseLabel, Namespace: namespace, @@ -161,11 +161,11 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo }) By("remembering the .status.networks of each VM after patching", func() { - vms := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vms := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) - originalVMNetworks = make(map[string][]virtv2.NetworksStatus, len(vms.Items)) + originalVMNetworks = make(map[string][]v1alpha2.NetworksStatus, len(vms.Items)) for _, vm := range vms.Items { originalVMNetworks[vm.Name] = vm.Status.Networks } @@ -175,11 +175,11 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo Context("When the resources are ready to use", func() { It("restore the `VirtualMachines` with `forced` mode", func() { - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} vmBlockDeviceCountBeforeSnapshotting := 
make(map[string]int, len(vms.Items)) By("Getting `VirtualMachines`", func() { - err := GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms.Items { vmBlockDeviceCountBeforeSnapshotting[vm.Name] = len(vm.Status.BlockDeviceRefs) @@ -191,14 +191,14 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo vmsnapshot := NewVirtualMachineSnapshot( vm.Name, vm.Namespace, true, - virtv2.KeepIPAddressAlways, + v1alpha2.KeepIPAddressAlways, testCaseLabel, ) CreateResource(ctx, vmsnapshot) } WaitPhaseByLabel( - virtv2.VirtualMachineSnapshotResource, - string(virtv2.VirtualMachineSnapshotPhaseReady), + v1alpha2.VirtualMachineSnapshotResource, + string(v1alpha2.VirtualMachineSnapshotPhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -211,43 +211,43 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) CreateResource(ctx, newDisk) - newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, virtv2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) CreateResource(ctx, newVmbda) WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentResource, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Namespace: vm.Namespace, Labels: additionalDiskLabel, Timeout: LongWaitDuration, }) - err := GetObject(virtv2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) } }) By("Creating `VirtualMachineRestores`", func() { - vmsnapshots := &virtv2.VirtualMachineSnapshotList{} - err := GetObjects(virtv2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmsnapshots := &v1alpha2.VirtualMachineSnapshotList{} + err := GetObjects(v1alpha2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vmsnapshot := range vmsnapshots.Items { - vmrestore := NewVirtualMachineRestore(&vmsnapshot, virtv2.RestoreModeForced) + vmrestore := NewVirtualMachineRestore(&vmsnapshot, v1alpha2.RestoreModeForced) CreateResource(ctx, vmrestore) } - vmrestores := &virtv2.VirtualMachineRestoreList{} - err = GetObjects(virtv2.VirtualMachineRestoreResource, vmrestores, kc.GetOptions{Namespace: namespace}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err = GetObjects(v1alpha2.VirtualMachineRestoreResource, vmrestores, kc.GetOptions{Namespace: namespace}) Expect(err).NotTo(HaveOccurred()) // TODO: Remove this block when the bug with the virtual machine status phase "pending" is fixed. 
// Cause: When a virtual machine is in the restoration process, it can transition from the "stopped" phase to "pending" and the Virtualization Controller cannot complete the restoration process. for _, vmrestore := range vmrestores.Items { Eventually(func() error { - vmRestoreObj := &virtv2.VirtualMachineRestore{} - err := GetObject(virtv2.VirtualMachineRestoreResource, vmrestore.Name, vmRestoreObj, kc.GetOptions{Namespace: vmrestore.Namespace}) + vmRestoreObj := &v1alpha2.VirtualMachineRestore{} + err := GetObject(v1alpha2.VirtualMachineRestoreResource, vmrestore.Name, vmRestoreObj, kc.GetOptions{Namespace: vmrestore.Namespace}) if err != nil { return err } @@ -258,12 +258,12 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo } msg := "A virtual machine cannot be restored from the pending phase with `Forced` mode; you can delete the virtual machine and restore it with `Safe` mode." - if vmRestoreObj.Status.Phase == virtv2.VirtualMachineRestorePhaseFailed && readyCondition.Message == msg { + if vmRestoreObj.Status.Phase == v1alpha2.VirtualMachineRestorePhaseFailed && readyCondition.Message == msg { criticalError = "A bug has occurred with a virtual machine in the \"Pending\" phase." Skip(criticalError) } - if vmRestoreObj.Status.Phase != virtv2.VirtualMachineRestorePhaseReady { + if vmRestoreObj.Status.Phase != v1alpha2.VirtualMachineRestorePhaseReady { return fmt.Errorf("virtual machine restore status phase should be \"Ready\": actual status is %q", vmRestoreObj.Status.Phase) } @@ -297,17 +297,17 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo // testAnnotationValue = "test-annotation-value" // ) - vmrestores := &virtv2.VirtualMachineRestoreList{} - err := GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err := GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, restore := range vmrestores.Items { - vmsnapshot := &virtv2.VirtualMachineSnapshot{} - err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) + vmsnapshot := &v1alpha2.VirtualMachineSnapshot{} + err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) Expect(err).NotTo(HaveOccurred()) - vm := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) + vm := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) @@ -315,9 +315,9 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo // Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name])) for _, bd := range vm.Status.BlockDeviceRefs { - if bd.Kind == virtv2.DiskDevice { - vd := &virtv2.VirtualDisk{} - err := GetObject(virtv2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace}) + if bd.Kind == v1alpha2.DiskDevice { + vd := &v1alpha2.VirtualDisk{} + err := GetObject(v1alpha2.VirtualDiskKind, 
bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vd.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) @@ -328,8 +328,8 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo } if bd.VirtualMachineBlockDeviceAttachmentName != "" { - vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{} - err := GetObject(virtv2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} + err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) } @@ -344,17 +344,17 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo Skip("Module SDN is disabled. Skipping part of tests.") } - vmrestores := &virtv2.VirtualMachineRestoreList{} - err = GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err = GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, restore := range vmrestores.Items { - vmsnapshot := &virtv2.VirtualMachineSnapshot{} - err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) + vmsnapshot := &v1alpha2.VirtualMachineSnapshot{} + err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) Expect(err).NotTo(HaveOccurred()) - vm := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) + vm := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) Expect(err).NotTo(HaveOccurred()) // Skip the network checks until the issue with the virtual machine's MAC address is fixed. // Cause: Sometimes, a virtual machine has a different MAC address after restoration, causing the test to fail. 
@@ -368,19 +368,19 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo resourcesToDelete := ResourcesToDelete{ AdditionalResources: []AdditionalResource{ { - Resource: virtv2.VirtualMachineSnapshotResource, + Resource: v1alpha2.VirtualMachineSnapshotResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualMachineRestoreResource, + Resource: v1alpha2.VirtualMachineRestoreResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualDiskResource, + Resource: v1alpha2.VirtualDiskResource, Labels: additionalDiskLabel, }, { - Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource, + Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource, Labels: additionalDiskLabel, }, }, @@ -398,16 +398,16 @@ var _ = Describe("VirtualMachineRestoreForce", SIGRestoration(), framework.Commo func NewVirtualMachineSnapshot( vmName, vmNamespace string, requiredConsistency bool, - keepIPaddress virtv2.KeepIPAddress, + keepIPaddress v1alpha2.KeepIPAddress, labels map[string]string, -) *virtv2.VirtualMachineSnapshot { - return &virtv2.VirtualMachineSnapshot{ - ObjectMeta: v1.ObjectMeta{ +) *v1alpha2.VirtualMachineSnapshot { + return &v1alpha2.VirtualMachineSnapshot{ + ObjectMeta: metav1.ObjectMeta{ Name: vmName, Namespace: vmNamespace, Labels: labels, }, - Spec: virtv2.VirtualMachineSnapshotSpec{ + Spec: v1alpha2.VirtualMachineSnapshotSpec{ VirtualMachineName: vmName, RequiredConsistency: requiredConsistency, KeepIPAddress: keepIPaddress, @@ -415,30 +415,30 @@ func NewVirtualMachineSnapshot( } } -func NewVirtualMachineRestore(vmsnapshot *virtv2.VirtualMachineSnapshot, restoreMode virtv2.RestoreMode) *virtv2.VirtualMachineRestore { - return &virtv2.VirtualMachineRestore{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualMachineRestore(vmsnapshot *v1alpha2.VirtualMachineSnapshot, restoreMode v1alpha2.RestoreMode) *v1alpha2.VirtualMachineRestore { + return &v1alpha2.VirtualMachineRestore{ + ObjectMeta: metav1.ObjectMeta{ Name: vmsnapshot.Spec.VirtualMachineName, Namespace: vmsnapshot.Namespace, Labels: vmsnapshot.Labels, }, - Spec: virtv2.VirtualMachineRestoreSpec{ + Spec: v1alpha2.VirtualMachineRestoreSpec{ RestoreMode: restoreMode, VirtualMachineSnapshotName: vmsnapshot.Name, }, } } -func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, bdName string, bdKind virtv2.VMBDAObjectRefKind, labels map[string]string) *virtv2.VirtualMachineBlockDeviceAttachment { - return &virtv2.VirtualMachineBlockDeviceAttachment{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, bdName string, bdKind v1alpha2.VMBDAObjectRefKind, labels map[string]string) *v1alpha2.VirtualMachineBlockDeviceAttachment { + return &v1alpha2.VirtualMachineBlockDeviceAttachment{ + ObjectMeta: metav1.ObjectMeta{ Name: bdName, Namespace: vmNamespace, Labels: labels, }, - Spec: virtv2.VirtualMachineBlockDeviceAttachmentSpec{ + Spec: v1alpha2.VirtualMachineBlockDeviceAttachmentSpec{ VirtualMachineName: vmName, - BlockDeviceRef: virtv2.VMBDAObjectRef{ + BlockDeviceRef: v1alpha2.VMBDAObjectRef{ Kind: bdKind, Name: bdName, }, @@ -446,15 +446,15 @@ func NewVirtualMachineBlockDeviceAttachment(vmName, vmNamespace, bdName string, } } -func NewVirtualDisk(vdName, vdNamespace string, labels map[string]string, size *resource.Quantity) *virtv2.VirtualDisk { - return &virtv2.VirtualDisk{ - ObjectMeta: v1.ObjectMeta{ +func NewVirtualDisk(vdName, vdNamespace string, labels map[string]string, size *resource.Quantity) *v1alpha2.VirtualDisk { + return &v1alpha2.VirtualDisk{ + 
ObjectMeta: metav1.ObjectMeta{ Name: vdName, Namespace: vdNamespace, Labels: labels, }, - Spec: virtv2.VirtualDiskSpec{ - PersistentVolumeClaim: virtv2.VirtualDiskPersistentVolumeClaim{ + Spec: v1alpha2.VirtualDiskSpec{ + PersistentVolumeClaim: v1alpha2.VirtualDiskPersistentVolumeClaim{ Size: size, }, }, diff --git a/tests/e2e/vm_restore_safe_test.go b/tests/e2e/vm_restore_safe_test.go index 6399cb3fe8..6d775f366a 100644 --- a/tests/e2e/vm_restore_safe_test.go +++ b/tests/e2e/vm_restore_safe_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -46,7 +46,7 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common namespace string testCaseLabel = map[string]string{"testcase": "vm-restore-safe"} additionalDiskLabel = map[string]string{"additionalDisk": "vm-restore-safe"} - originalVMNetworks map[string][]virtv2.NetworksStatus + originalVMNetworks map[string][]v1alpha2.NetworksStatus ) BeforeAll(func() { @@ -64,7 +64,7 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, namespace) } cancel() @@ -74,16 +74,16 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common It("result should be succeeded", func() { if config.IsReusable() { err := CheckReusableResources(ReusableResources{ - virtv2.VirtualMachineResource: &Counter{ + v1alpha2.VirtualMachineResource: &Counter{ Expected: vmCount, }, - virtv2.VirtualDiskResource: &Counter{ + v1alpha2.VirtualDiskResource: &Counter{ Expected: vdCount, }, - virtv2.VirtualImageResource: &Counter{ + v1alpha2.VirtualImageResource: &Counter{ Expected: viCount, }, - virtv2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ + v1alpha2.VirtualMachineBlockDeviceAttachmentResource: &Counter{ Expected: vmbdaCount, }, }, kc.GetOptions{ @@ -142,11 +142,11 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common }) }) By("remembering the .status.networks of each VM after patching", func() { - vms := &virtv2.VirtualMachineList{} - err := GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vms := &v1alpha2.VirtualMachineList{} + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) - originalVMNetworks = make(map[string][]virtv2.NetworksStatus, len(vms.Items)) + originalVMNetworks = make(map[string][]v1alpha2.NetworksStatus, len(vms.Items)) for _, vm := range vms.Items { originalVMNetworks[vm.Name] = vm.Status.Networks } @@ -156,11 +156,11 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common Context("When the resources are ready to use", func() { It("restore the `VirtualMachines` with `Safe` mode", func() { - vms := &virtv2.VirtualMachineList{} + vms := &v1alpha2.VirtualMachineList{} vmBlockDeviceCountBeforeSnapshotting := make(map[string]int, len(vms.Items)) By("Getting `VirtualMachines`", func() { - err := 
GetObjects(virtv2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + err := GetObjects(v1alpha2.VirtualMachineResource, vms, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vm := range vms.Items { vmBlockDeviceCountBeforeSnapshotting[vm.Name] = len(vm.Status.BlockDeviceRefs) @@ -172,14 +172,14 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common vmsnapshot := NewVirtualMachineSnapshot( vm.Name, vm.Namespace, true, - virtv2.KeepIPAddressAlways, + v1alpha2.KeepIPAddressAlways, testCaseLabel, ) CreateResource(ctx, vmsnapshot) } WaitPhaseByLabel( - virtv2.VirtualMachineSnapshotResource, - string(virtv2.VirtualMachineSnapshotPhaseReady), + v1alpha2.VirtualMachineSnapshotResource, + string(v1alpha2.VirtualMachineSnapshotPhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -192,18 +192,18 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common vdName := fmt.Sprintf("%s-%d", "vd-attached-after-vm-snapshotting", i) newDisk := NewVirtualDisk(vdName, vm.Namespace, additionalDiskLabel, resource.NewQuantity(1*1024*1024, resource.BinarySI)) CreateResource(ctx, newDisk) - newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, virtv2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) + newVmbda := NewVirtualMachineBlockDeviceAttachment(vm.Name, vm.Namespace, newDisk.Name, v1alpha2.VMBDAObjectRefKindVirtualDisk, additionalDiskLabel) CreateResource(ctx, newVmbda) WaitPhaseByLabel( - virtv2.VirtualMachineBlockDeviceAttachmentResource, - string(virtv2.BlockDeviceAttachmentPhaseAttached), + v1alpha2.VirtualMachineBlockDeviceAttachmentResource, + string(v1alpha2.BlockDeviceAttachmentPhaseAttached), kc.WaitOptions{ Namespace: vm.Namespace, Labels: additionalDiskLabel, Timeout: LongWaitDuration, }) - err := GetObject(virtv2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) + err := GetObject(v1alpha2.VirtualMachineKind, vm.Name, &vm, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name] + 1)) } @@ -221,49 +221,49 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common AllFlag: true, IgnoreNotFound: true, Namespace: namespace, - Resource: virtv2.VirtualMachineIPAddressResource, + Resource: v1alpha2.VirtualMachineIPAddressResource, }) Expect(result.Error()).NotTo(HaveOccurred(), result.GetCmd()) result = kubectl.Delete(kc.DeleteOptions{ ExcludedLabels: []string{"additionalDisk"}, Namespace: namespace, - Resource: virtv2.VirtualDiskResource, + Resource: v1alpha2.VirtualDiskResource, }) Expect(result.Error()).NotTo(HaveOccurred(), result.GetCmd()) result = kubectl.Delete(kc.DeleteOptions{ Labels: testCaseLabel, Namespace: namespace, - Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource, + Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource, }) Expect(result.Error()).NotTo(HaveOccurred(), result.GetCmd()) vmipls, err := GetVMIPLByNamespace(namespace) Expect(err).NotTo(HaveOccurred()) WaitResourcesByPhase( - vmipls, virtv2.VirtualMachineIPAddressLeaseResource, - string(virtv2.VirtualMachineIPAddressLeasePhaseReleased), + vmipls, v1alpha2.VirtualMachineIPAddressLeaseResource, + string(v1alpha2.VirtualMachineIPAddressLeasePhaseReleased), kc.WaitOptions{Timeout: ShortTimeout}, ) Eventually(func() error { - err := 
CheckResourceCount(virtv2.VirtualMachineResource, namespace, testCaseLabel, 0) + err := CheckResourceCount(v1alpha2.VirtualMachineResource, namespace, testCaseLabel, 0) if err != nil { return err } - err = CheckResourceCount(virtv2.VirtualDiskResource, namespace, testCaseLabel, 0) + err = CheckResourceCount(v1alpha2.VirtualDiskResource, namespace, testCaseLabel, 0) if err != nil { return err } - err = CheckResourceCount(virtv2.VirtualMachineIPAddressResource, namespace, map[string]string{}, 0) + err = CheckResourceCount(v1alpha2.VirtualMachineIPAddressResource, namespace, map[string]string{}, 0) if err != nil { return err } - err = CheckResourceCount(virtv2.VirtualMachineBlockDeviceAttachmentResource, namespace, testCaseLabel, 0) + err = CheckResourceCount(v1alpha2.VirtualMachineBlockDeviceAttachmentResource, namespace, testCaseLabel, 0) if err != nil { return err } @@ -273,17 +273,17 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common }) By("Creating `VirtualMachineRestores`", func() { - vmsnapshots := &virtv2.VirtualMachineSnapshotList{} - err := GetObjects(virtv2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmsnapshots := &v1alpha2.VirtualMachineSnapshotList{} + err := GetObjects(v1alpha2.VirtualMachineSnapshotResource, vmsnapshots, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, vmsnapshot := range vmsnapshots.Items { - vmrestore := NewVirtualMachineRestore(&vmsnapshot, virtv2.RestoreModeSafe) + vmrestore := NewVirtualMachineRestore(&vmsnapshot, v1alpha2.RestoreModeSafe) CreateResource(ctx, vmrestore) } WaitPhaseByLabel( - virtv2.VirtualMachineRestoreResource, - string(virtv2.VirtualMachineRestorePhaseReady), + v1alpha2.VirtualMachineRestoreResource, + string(v1alpha2.VirtualMachineRestorePhaseReady), kc.WaitOptions{ Namespace: namespace, Labels: testCaseLabel, @@ -305,26 +305,26 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common // testAnnotationValue = "test-annotation-value" // ) - vmrestores := &virtv2.VirtualMachineRestoreList{} - err := GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err := GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, restore := range vmrestores.Items { - vmsnapshot := &virtv2.VirtualMachineSnapshot{} - err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) + vmsnapshot := &v1alpha2.VirtualMachineSnapshot{} + err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) Expect(err).NotTo(HaveOccurred()) - vm := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) + vm := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vm.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) Expect(vm.Status.BlockDeviceRefs).To(HaveLen(vmBlockDeviceCountBeforeSnapshotting[vm.Name])) for _, bd := range 
vm.Status.BlockDeviceRefs { - if bd.Kind == virtv2.DiskDevice { - vd := &virtv2.VirtualDisk{} - err := GetObject(virtv2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace}) + if bd.Kind == v1alpha2.DiskDevice { + vd := &v1alpha2.VirtualDisk{} + err := GetObject(v1alpha2.VirtualDiskKind, bd.Name, vd, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vd.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) @@ -335,8 +335,8 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common } if bd.VirtualMachineBlockDeviceAttachmentName != "" { - vmbda := &virtv2.VirtualMachineBlockDeviceAttachment{} - err := GetObject(virtv2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) + vmbda := &v1alpha2.VirtualMachineBlockDeviceAttachment{} + err := GetObject(v1alpha2.VirtualMachineBlockDeviceAttachmentKind, bd.VirtualMachineBlockDeviceAttachmentName, vmbda, kc.GetOptions{Namespace: vm.Namespace}) Expect(err).NotTo(HaveOccurred()) Expect(vmbda.Annotations).To(HaveKeyWithValue(annotations.AnnVMRestore, string(restore.UID))) } @@ -351,17 +351,17 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common Skip("Module SDN is disabled. Skipping part of tests.") } - vmrestores := &virtv2.VirtualMachineRestoreList{} - err = GetObjects(virtv2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) + vmrestores := &v1alpha2.VirtualMachineRestoreList{} + err = GetObjects(v1alpha2.VirtualMachineRestoreKind, vmrestores, kc.GetOptions{Namespace: namespace, Labels: testCaseLabel}) Expect(err).NotTo(HaveOccurred()) for _, restore := range vmrestores.Items { - vmsnapshot := &virtv2.VirtualMachineSnapshot{} - err := GetObject(virtv2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) + vmsnapshot := &v1alpha2.VirtualMachineSnapshot{} + err := GetObject(v1alpha2.VirtualMachineSnapshotKind, restore.Spec.VirtualMachineSnapshotName, vmsnapshot, kc.GetOptions{Namespace: restore.Namespace}) Expect(err).NotTo(HaveOccurred()) - vm := &virtv2.VirtualMachine{} - err = GetObject(virtv2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) + vm := &v1alpha2.VirtualMachine{} + err = GetObject(v1alpha2.VirtualMachineKind, vmsnapshot.Spec.VirtualMachineName, vm, kc.GetOptions{Namespace: vmsnapshot.Namespace}) Expect(err).NotTo(HaveOccurred()) // Skip the network checks until the issue with the virtual machine's MAC address is fixed. // Cause: Sometimes, a virtual machine has a different MAC address after restoration, causing the test to fail. 
@@ -375,19 +375,19 @@ var _ = Describe("VirtualMachineRestoreSafe", SIGRestoration(), framework.Common resourcesToDelete := ResourcesToDelete{ AdditionalResources: []AdditionalResource{ { - Resource: virtv2.VirtualMachineSnapshotResource, + Resource: v1alpha2.VirtualMachineSnapshotResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualMachineRestoreResource, + Resource: v1alpha2.VirtualMachineRestoreResource, Labels: testCaseLabel, }, { - Resource: virtv2.VirtualDiskResource, + Resource: v1alpha2.VirtualDiskResource, Labels: additionalDiskLabel, }, { - Resource: virtv2.VirtualMachineBlockDeviceAttachmentResource, + Resource: v1alpha2.VirtualMachineBlockDeviceAttachmentResource, Labels: additionalDiskLabel, }, }, @@ -425,8 +425,8 @@ func CheckResourceCount(resource, namespace string, labels map[string]string, co } func GetVMIPLByNamespace(namespace string) ([]string, error) { - vmipls := &virtv2.VirtualMachineIPAddressLeaseList{} - err := GetObjects(virtv2.VirtualMachineIPAddressLeaseResource, vmipls, kc.GetOptions{}) + vmipls := &v1alpha2.VirtualMachineIPAddressLeaseList{} + err := GetObjects(v1alpha2.VirtualMachineIPAddressLeaseResource, vmipls, kc.GetOptions{}) if err != nil { return nil, err } diff --git a/tests/e2e/vm_version_test.go b/tests/e2e/vm_version_test.go index 2c3077ac93..903a04d3ff 100644 --- a/tests/e2e/vm_version_test.go +++ b/tests/e2e/vm_version_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" kc "github.com/deckhouse/virtualization/tests/e2e/kubectl" @@ -49,7 +49,7 @@ var _ = Describe("VirtualMachineVersions", framework.CommonE2ETestDecorators(), AfterEach(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -87,7 +87,7 @@ var _ = Describe("VirtualMachineVersions", framework.CommonE2ETestDecorators(), Context("When virtual machines are ready:", func() { Eventually(func() error { - var vms virtv2.VirtualMachineList + var vms v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vms, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, diff --git a/tests/e2e/vm_vpc_test.go b/tests/e2e/vm_vpc_test.go index 54d5852be9..4a6cd4a694 100644 --- a/tests/e2e/vm_vpc_test.go +++ b/tests/e2e/vm_vpc_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - virtv2 "github.com/deckhouse/virtualization/api/core/v1alpha2" + "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vmcondition" "github.com/deckhouse/virtualization/tests/e2e/config" "github.com/deckhouse/virtualization/tests/e2e/framework" @@ -59,7 +59,7 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), fr AfterAll(func() { if CurrentSpecReport().Failed() { - SaveTestResources(testCaseLabel, CurrentSpecReport().LeafNodeText) + SaveTestCaseDump(testCaseLabel, CurrentSpecReport().LeafNodeText, ns) } }) @@ -123,8 +123,8 @@ var _ = Describe("VirtualMachineAdditionalNetworkInterfaces", SIGMigration(), fr Context("When VMs migrations are applied", func() { It("checks VMs and VMOPs phases", func() { - By(fmt.Sprintf("VMOPs should be in %s phases", virtv2.VMOPPhaseCompleted)) - WaitPhaseByLabel(kc.ResourceVMOP, string(virtv2.VMOPPhaseCompleted), kc.WaitOptions{ + By(fmt.Sprintf("VMOPs should be in %s phases", v1alpha2.VMOPPhaseCompleted)) + WaitPhaseByLabel(kc.ResourceVMOP, string(v1alpha2.VMOPPhaseCompleted), kc.WaitOptions{ Labels: testCaseLabel, Namespace: ns, Timeout: MaxWaitTimeout, @@ -196,7 +196,7 @@ func isSdnModuleEnabled() (bool, error) { } func CheckVMConnectivityToTargetIPs(ns string, testCaseLabel map[string]string) { - var vmList virtv2.VirtualMachineList + var vmList v1alpha2.VirtualMachineList err := GetObjects(kc.ResourceVM, &vmList, kc.GetOptions{ Labels: testCaseLabel, Namespace: ns, diff --git a/tests/performance/.helmignore b/tests/performance/.helmignore index f45ef12c06..db4e51df6b 100644 --- a/tests/performance/.helmignore +++ b/tests/performance/.helmignore @@ -4,4 +4,4 @@ # Match any file or path named .git .git -status-access-vms/ \ No newline at end of file +tools/ \ No newline at end of file diff --git a/tests/performance/README.md b/tests/performance/README.md new file mode 100644 index 0000000000..430c268845 --- /dev/null +++ b/tests/performance/README.md @@ -0,0 +1,447 @@ +- [Performance Testing Framework](#performance-testing-framework) + - [🏗️ Architecture](#️-architecture) + - [Core Components](#core-components) + - [Directory Structure](#directory-structure) + - [🚀 Quick Start](#-quick-start) + - [Prerequisites](#prerequisites) + - [Install Dependencies](#install-dependencies) + - [Create Test Resources](#create-test-resources) + - [Remove Resources](#remove-resources) + - [🛠️ Tools](#️-tools) + - [bootstrap](#bootstrap) + - [Taskfile Integration](#taskfile-integration) + - [Shatal - VM Wobbling Tool](#shatal---vm-wobbling-tool) + - [Evicter - Migration Tool](#evicter---migration-tool) + - [Statistics - Statistics Collection](#statistics---statistics-collection) + - [📊 Monitoring](#-monitoring) + - [Grafana Dashboards](#grafana-dashboards) + - [Prometheus Rules](#prometheus-rules) + - [⚙️ Configuration](#️-configuration) + - [values.yaml](#valuesyaml) + - [Resource Types](#resource-types) + - [🎯 Testing Scenarios](#-testing-scenarios) + - [1. Basic Performance Testing](#1-basic-performance-testing) + - [2. Migration Testing](#2-migration-testing) + - [3. 
VM Access Testing](#3-vm-access-testing) + - [📈 Metrics and Monitoring](#-metrics-and-monitoring) + - [Key Metrics](#key-metrics) + - [Dashboards](#dashboards) + - [🔧 Development](#-development) + - [Building Tools](#building-tools) + - [Adding New Tests](#adding-new-tests) + - [📝 Usage Examples](#-usage-examples) + - [Creating Test Environment](#creating-test-environment) + - [Resource Cleanup](#resource-cleanup) + - [🔧 Troubleshooting](#-troubleshooting) + - [Common Issues](#common-issues) + - [Debug Commands](#debug-commands) + - [🤝 Contributing](#-contributing) + - [📄 License](#-license) + +# Performance Testing Framework + +A comprehensive framework for virtualization performance testing, including tools for creating, migrating, and monitoring virtual machines in Kubernetes. + +## 🏗️ Architecture + +### Core Components + +- **Helm Chart**: Resource management through Helm +- **bootstrap**: Main script for creating/deleting test resources +- **Shatal**: Virtual machine "wobbling" tool +- **Evicter**: Continuous VM migration tool +- **Statistics**: Performance statistics collection +- **Monitoring**: Grafana dashboards and Prometheus rules + +### Directory Structure + +``` +performance/ +├── templates/ # Kubernetes manifests +├── tools/ # Testing tools +│ ├── evicter/ # VM migration +│ ├── shatal/ # VM migration tool via node drain +│ ├── statistic/ # Statistics collection +│ └── status-access-vms/ # VM access and monitoring +├── monitoring/ # Grafana dashboards +├── ssh/ # SSH keys for VM access +├── bootstrap.sh # Main script +├── values.yaml # Configuration +└── Taskfile.yaml # Task automation +``` + +## 🚀 Quick Start + +### Prerequisites + +- Kubernetes cluster with virtualization support +- Helm 3 +- kubectl +- Go (for building tools) + +### Install Dependencies + +```bash +task check_or_install_software +``` + +### Create Test Resources + +```bash +# Create 10 virtual machines +task apply COUNT=10 + +# Create only disks +task apply:disks COUNT=5 + +# Create only VMs +task apply:vms COUNT=5 +``` + +### Remove Resources + +```bash +# Remove all resources +task destroy + +# Remove only VMs +task destroy:vms + +# Remove only disks +task destroy:disks +``` + +## 🛠️ Tools + +### bootstrap + +Main script for managing test resources.
+ +**Available Flags:** +- `--count, -c`: Number of virtual machines to create (required for apply) +- `--namespace, -n`: Namespace for resources (default: current context namespace) +- `--storage-class, -s`: Storage class for VM disks +- `--name, -r`: Release name (default: performance) +- `--resources, -R`: Resources to manage - 'vds', 'vms', or 'all' (default: all) +- `--resources-prefix, -p`: Prefix for resource names (default: performance) + +```bash +# Create resources (using long flags) +./bootstrap.sh apply --count=10 --namespace=perf --storage-class=ceph-pool-r2-csi-rbd + +# Create resources (using short flags) +./bootstrap.sh apply -c 10 -n perf -s ceph-pool-r2-csi-rbd + +# Create only disks +./bootstrap.sh apply -c 5 -n perf -R vds -r performance-disks + +# Create only VMs (assuming disks exist) +./bootstrap.sh apply -c 5 -n perf -R vms -r performance-vms + +# Remove resources +./bootstrap.sh destroy --namespace=perf --resources=all +# or using short flags +./bootstrap.sh destroy -n perf -R all + +# Remove specific resources +./bootstrap.sh destroy -n perf -R vms -r performance-vms +``` + +### Taskfile Integration + +The framework includes comprehensive Taskfile integration for easy automation: + +**Available Tasks:** +```bash +# Basic operations +task apply COUNT=10 # Create 10 VMs +task destroy # Remove all resources +task apply:disks COUNT=5 # Create only disks +task apply:vms COUNT=5 # Create only VMs +task destroy:disks # Remove only disks +task destroy:vms # Remove only VMs + +# Two-step deployment +task apply:all COUNT=30 # Create disks first, then VMs +task destroy:all # Remove VMs first, then disks + +# Utility tasks +task render # Preview Helm templates +task help # Show bootstrap.sh help +task check_or_install_software # Install dependencies +``` + +**Environment Variables:** +```bash +# Set custom values +COUNT=50 NAMESPACE=test STORAGE_CLASS=ceph-pool-r2-csi-rbd task apply +``` + +### Shatal - VM Wobbling Tool + +Tool for continuous stress testing of virtual machines. + +**Features:** +- Node draining with VM migration +- CPU core fraction changes (10% ↔ 25%) +- VM creation/deletion +- Configurable operation weights + +**Usage:** +```bash +cd tools/shatal +KUBECONFIG=$(cat ~/.kube/config | base64 -w 0) +KUBECONFIG_BASE64=$KUBECONFIG task run +``` + +### Evicter - Migration Tool + +Continuous migration of a specified percentage of virtual machines. + +```bash +# Migrate 20% of VMs in namespace 'perf' for 1 hour +./evicter --target=20 --duration=1h --ns=perf +``` + +### Statistics - Statistics Collection + +```bash +cd tools/statistic +task run +``` + +## 📊 Monitoring + +### Grafana Dashboards + +The monitoring directory contains pre-configured Grafana dashboards: + +- **virtualization-dashboard.yaml**: General virtualization statistics +- **virtual-machine-dashboard.yaml**: Detailed VM statistics +- **ceph-dashboard.yaml**: Storage monitoring + +### SSH Access + +The `ssh/` directory contains SSH keys for VM access: +- `id_ed`: Private SSH key +- `id_ed.pub`: Public SSH key + +### Prometheus Rules + +Configured rules for performance monitoring and alerts. 
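+ +For illustration, here is a minimal sketch of the kind of rule meant here (not the shipped rule set): the metric `d8_virtualization_virtualdisk_status_phase` and its `phase`, `name`, and `namespace` labels are taken from the dashboards in this repository, while the alert name, duration, and threshold below are hypothetical. + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: +  name: performance-virtualdisk-alerts # hypothetical name +spec: +  groups: +    - name: performance.virtualdisk +      rules: +        # Fires when a VirtualDisk reports any non-Ready phase for 15 minutes. +        # Aggregating by (namespace, name) keeps the alert series stable while +        # the disk moves between Pending, Provisioning, and other phases. +        - alert: VirtualDiskNotReady +          expr: | +            count by (namespace, name) ( +              d8_virtualization_virtualdisk_status_phase{phase!="Ready"} == 1 +            ) > 0 +          for: 15m +          labels: +            severity: warning +          annotations: +            summary: "VirtualDisk {{ $labels.name }} in namespace {{ $labels.namespace }} is not Ready" +```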
+ +## ⚙️ Configuration + +### values.yaml + +Main configuration parameters: + +```yaml +# Number of resources +count: 1 + +# Resources to create +resources: + default: all # all, vms, vds, vi + prefix: "performance" + storageClassName: "ceph-pool-r2-csi-rbd" + + # VM configuration + vm: + runPolicy: AlwaysOnUnlessStoppedManually + restartApprovalMode: Dynamic + spec: + cpu: + cores: 1 + coreFraction: 10% + memory: + size: 256Mi + + # Virtual disk configuration + vd: + spec: + type: vd # vi or vd + diskSize: 300Mi + + # Virtual image configuration + vi: + spec: + type: vi # vi or pvc + baseImage: + name: alpine + url: "https://example.com/alpine.qcow2" +``` + +### Resource Types + +**VirtualDisk (vd.spec.type):** +- `vi`: creates VMs with VirtualImage in blockDeviceRefs +- `vd`: creates VMs with corresponding VirtualDisk + +**VirtualImage (vi.spec.type):** +- `vi`: creates image through ContainerRegistry +- `pvc`: creates image through PersistentVolumeClaim + +## 🎯 Testing Scenarios + +### 1. Basic Performance Testing + +```bash +# Create 100 VMs for load testing +task apply COUNT=100 + +# Start statistics collection +cd tools/statistic && task run + +# Start wobbling tool +cd tools/shatal && task run +``` + +### 2. Migration Testing + +```bash +# Start continuous migration of 30% VMs +cd tools/evicter +go run cmd/main.go --target=30 --duration=2h +``` + +### 3. VM Access Testing + +```bash +# Configure VM access through Ansible +cd tools/status-access-vms/ansible +task run + +# Start load testing +cd tools/status-access-vms/tank +task run +``` + +## 📈 Metrics and Monitoring + +### Key Metrics + +- VM creation time +- VM migration time +- Resource usage (CPU, memory, disk) +- VM availability +- Storage performance + +### Dashboards + +All dashboards are automatically deployed when creating resources and are available in Grafana. + +## 🔧 Development + +### Building Tools + +```bash +# Build evicter +cd tools/evicter +go build -o evicter cmd/main.go + +# Build shatal +cd tools/shatal +go build -o shatal cmd/shatal/main.go + +# Build statistic +cd tools/statistic +go build -o stat cmd/stat/main.go +``` + +### Adding New Tests + +1. Create a new template in `templates/` +2. Add configuration to `values.yaml` +3. Update `bootstrap.sh` if necessary +4. Add tasks to `Taskfile.yaml` + +## 📝 Usage Examples + +### Creating Test Environment + +```bash +# 1. Create namespace +kubectl create namespace perf + +# 2. Create 50 VMs with disks +task apply COUNT=50 NAMESPACE=perf + +# 3. Start monitoring +cd tools/statistic && task run + +# 4. Start stress testing +cd tools/shatal && task run +``` + +### Resource Cleanup + +```bash +# Remove all resources from namespace +task destroy NAMESPACE=perf +``` + +## 🔧 Troubleshooting + +### Common Issues + +**1. Helm Template Errors** +```bash +# If you get template errors, check the values structure +helm template test . --values values.yaml + +# Debug with verbose output +task apply COUNT=1 --verbose +``` + +**2. Resource Conflicts** +```bash +# If resources are stuck in terminating state +kubectl delete virtualmachines --all -n perf --force --grace-period=0 +kubectl delete virtualdisks --all -n perf --force --grace-period=0 + +# Clean up secrets +kubectl delete secrets --all -n perf +``` + +**3. Namespace Issues** +```bash +# Check current namespace +kubectl config view --minify -o jsonpath='{..namespace}' + +# Switch to correct namespace +kubectl config set-context --current --namespace=perf +``` + +**4. 
Storage Class Issues** +```bash +# List available storage classes +kubectl get storageclass + +# Use correct storage class +task apply COUNT=5 STORAGE_CLASS=ceph-pool-r2-csi-rbd +``` + +### Debug Commands + +```bash +# Check Helm releases +helm list -n perf + +# Check resource status +kubectl get all -n perf +kubectl get virtualmachines -n perf +kubectl get virtualdisks -n perf + +# Check logs +kubectl logs -n perf -l app=performance +``` + +## 🤝 Contributing + +1. Fork the repository +2. Create a branch for the new feature +3. Make changes +4. Add tests +5. Create a Pull Request + +## 📄 License + +Copyright 2024 Flant JSC. Licensed under the Apache License, Version 2.0. \ No newline at end of file diff --git a/tests/performance/Taskfile.yaml b/tests/performance/Taskfile.yaml index b74a1b4e7c..2e05b3d7a5 100644 --- a/tests/performance/Taskfile.yaml +++ b/tests/performance/Taskfile.yaml @@ -6,40 +6,58 @@ silent: true includes: shatal: - taskfile: ./shatal - dir: ./shatal + taskfile: tools/shatal + dir: tools/shatal tank: - taskfile: status-access-vms/tank/Taskfile.tank.yaml - dir: status-access-vms/tank + taskfile: tools/status-access-vms/tank/Taskfile.tank.yaml + dir: tools/status-access-vms/tank optional: true ansible: - taskfile: status-access-vms/ansible/Taskfile.ansible.yaml - dir: status-access-vms/ansible + taskfile: tools/status-access-vms/ansible/Taskfile.ansible.yaml + dir: tools/status-access-vms/ansible + optional: true + statistic: + taskfile: tools/statistic/Taskfile.yaml + dir: tools/statistic + optional: true + evicter: + taskfile: tools/evicter/Taskfile.yaml + dir: tools/evicter optional: true vars: COUNT: '{{ .COUNT | default "1" }}' - NAMESPACE: "{{ .NAMESPACE }}" - STORAGE_CLASS: "{{ .STORAGE_CLASS }}" + NAMESPACE: '{{ .NAMESPACE | default "perf" }}' + STORAGE_CLASS: '{{ .STORAGE_CLASS | default "linstor-thin-r1" }}' RESOURCES: '{{ .RESOURCES | default "all" }}' NAME_PREFIX: '{{ .NAME_PREFIX | default "performance" }}' RESOURCES_PREFIX: '{{ .RESOURCES_PREFIX | default "performance" }}' tasks: + tst: + cmds: + - echo "{{ .STORAGE_CLASS }}" + + render: + desc: "Render templates with default values" + cmds: + - | + helm template test . --values values.yaml + help: - desc: "Help about bootstrapper.sh." + desc: "Help about bootstrap.sh." cmds: - - ./bootstrapper.sh --help + - ./bootstrap.sh --help apply: desc: "Apply disks and virtual machines." cmds: - - ./bootstrapper.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --storage-class="{{ .STORAGE_CLASS }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="{{ .RESOURCES }}" --name="{{ .NAME_PREFIX }}" + - ./bootstrap.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --storage-class="{{ .STORAGE_CLASS }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="{{ .RESOURCES }}" --name="{{ .NAME_PREFIX }}" destroy: desc: "Destroy disks and virtual machines." cmds: - - ./bootstrapper.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="{{ .RESOURCES }}" --name="{{ .NAME_PREFIX }}" + - ./bootstrap.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="{{ .RESOURCES }}" --name="{{ .NAME_PREFIX }}" apply:all: desc: "Apply disks and virtual machines in two steps (in two different releases)." @@ -75,22 +93,22 @@ tasks: apply:disks: desc: "Apply virtual machine disks."
cmds: - - ./bootstrapper.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --storage-class="{{ .STORAGE_CLASS }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="disks" --name="{{ .NAME_PREFIX }}-disks" + - ./bootstrap.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --storage-class="{{ .STORAGE_CLASS }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vds" --name="{{ .NAME_PREFIX }}-disks" apply:vms: desc: "Apply virtual machines." cmds: - - ./bootstrapper.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vms" --name="{{ .NAME_PREFIX }}-vms" + - ./bootstrap.sh apply --count="{{ .COUNT }}" --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vms" --name="{{ .NAME_PREFIX }}-vms" destroy:disks: desc: "Destroy disks." cmds: - - ./bootstrapper.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="disks" --name="{{ .NAME_PREFIX }}-disks" + - ./bootstrap.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vds" --name="{{ .NAME_PREFIX }}-disks" destroy:vms: desc: "Destroy virtual machines." cmds: - - ./bootstrapper.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vms" --name="{{ .NAME_PREFIX }}-vms" + - ./bootstrap.sh destroy --namespace="{{ .NAMESPACE }}" --resources-prefix="{{ .RESOURCES_PREFIX }}" --resources="vms" --name="{{ .NAME_PREFIX }}-vms" check_or_install_software: desc: "Check and install Helm3, Ansible, and K9s" diff --git a/tests/performance/bootstrapper.sh b/tests/performance/bootstrap.sh similarity index 60% rename from tests/performance/bootstrapper.sh rename to tests/performance/bootstrap.sh index 8a2aebb795..c8376d55f4 100755 --- a/tests/performance/bootstrapper.sh +++ b/tests/performance/bootstrap.sh @@ -21,22 +21,24 @@ Usage: $(basename "$0") COMMAND OPTIONS Commands: apply Apply virtual machines. Arguments: - (Required) --count: count of virtual machines to create. - (Optional) --namespace: namespace for virtual machines. If not defined - using default namespace. - (Optional) --storage-class: storage-class for virtual machine disks. If not defined - using default SC. - (Optional) --resources-prefix (default: performance): prefix to be used fo resource names. + (Required) --count, -c: count of virtual machines to create. + (Optional) --namespace, -n: namespace for virtual machines. If not defined - using default namespace. + (Optional) --storage-class, -s: storage-class for virtual machine disks. If not defined - using default SC. + (Optional) --resources-prefix, -p (default: performance): prefix to be used for resource names. --- destroy Destroy set of virtual machines. Global Arguments: - --name (default: performance): name for release of virtual machine. - --resources: (default: 'all'): resources to manage. Possible values: 'disks', 'vms' or 'all'. + --name, -r (default: performance): name for release of virtual machine. + --resources, -R (default: 'all'): resources to manage. Possible values: 'vds', 'vms' or 'all'. 
Examples: Bootstrap: $(basename "$0") apply --count=1 - $(basename "$0") apply --resources=disks --count=1 --namespace=default --storage-class=default - $(basename "$0") destroy --resources=disks --namespace=default + $(basename "$0") apply -c 1 -n default -s ceph-pool-r2-csi-rbd + $(basename "$0") apply --resources=vds --count=1 --namespace=default --storage-class=default + $(basename "$0") destroy --resources=vds --namespace=default + $(basename "$0") destroy -R vds -n default EOF } @@ -45,8 +47,8 @@ function handle_exit() { } function validate_global_args() { - if [ "${RESOURCES}" != "all" ] && [ "${RESOURCES}" != "vms" ] && [ "${RESOURCES}" != "disks" ]; then - echo "ERROR: Invalid --resources flag: allowed values 'disks', 'vms' or 'all'" + if [ "${RESOURCES}" != "all" ] && [ "${RESOURCES}" != "vms" ] && [ "${RESOURCES}" != "vds" ]; then + echo "ERROR: Invalid --resources flag: allowed values 'vds', 'vms' or 'all'" usage exit 1 fi @@ -69,9 +71,9 @@ function validate_apply_args() { function apply() { echo "Apply resources: ${RESOURCES}" - args=( upgrade --install "${RELEASE_NAME}" . -n "${NAMESPACE}" --create-namespace --set "count=${COUNT}" --set "resourcesPrefix=${RESOURCES_PREFIX}" --set "resources=${RESOURCES}" ) + args=( upgrade --install "${RELEASE_NAME}" . -n "${NAMESPACE}" --create-namespace --set "count=${COUNT}" --set "resourcesPrefix=${RESOURCES_PREFIX}" --set "resources.default=${RESOURCES}" ) if [ -n "${STORAGE_CLASS}" ]; then - args+=( --set "storageClass=${STORAGE_CLASS}" ) + args+=( --set "resources.storageClassName=${STORAGE_CLASS}" ) fi helm "${args[@]}" @@ -80,27 +82,10 @@ function apply() { function destroy() { echo "Delete resources: ${RESOURCES}" + echo "$(date +"%Y-%m-%d %H:%M:%S") - Deleting release [${RELEASE_NAME}]" helm uninstall "${RELEASE_NAME}" -n "${NAMESPACE}" + echo "$(date +"%Y-%m-%d %H:%M:%S") - Release [${RELEASE_NAME}] was deleted" - case "${RESOURCES}" in - "all") - kubectl wait -n "${NAMESPACE}" --for=delete vm -l vm="${RESOURCES_PREFIX}" - kubectl wait -n "${NAMESPACE}" --for=delete vd -l vm="${RESOURCES_PREFIX}" - kubectl wait -n "${NAMESPACE}" --for=delete vi -l vm="${RESOURCES_PREFIX}" - ;; - "disks") - kubectl wait -n "${NAMESPACE}" --for=delete vd -l vm="${RESOURCES_PREFIX}" - kubectl wait -n "${NAMESPACE}" --for=delete vi -l vm="${RESOURCES_PREFIX}" - ;; - "vms") - kubectl wait -n "${NAMESPACE}" --for=delete vm -l vm="${RESOURCES_PREFIX}" - ;; - *) - echo "ERROR: Invalid argument" - usage - exit 1 - ;; - esac } if [ "$#" -eq 0 ] || [ "${1}" == "--help" ] ; then @@ -120,30 +105,54 @@ shift # Set naming variable while [[ $# -gt 0 ]]; do case "$1" in - --count=*) + --count=*|-c=*) COUNT="${1#*=}" shift ;; - --namespace=*) + -c) + COUNT="$2" + shift 2 + ;; + --namespace=*|-n=*) NAMESPACE="${1#*=}" shift ;; - --storage-class=*) + -n) + NAMESPACE="$2" + shift 2 + ;; + --storage-class=*|-s=*) STORAGE_CLASS="${1#*=}" shift ;; - --name=*) + -s) + STORAGE_CLASS="$2" + shift 2 + ;; + --name=*|-r=*) RELEASE_NAME="${1#*=}" shift ;; - --resources=*) + -r) + RELEASE_NAME="$2" + shift 2 + ;; + --resources=*|-R=*) RESOURCES="${1#*=}" shift ;; - --resources-prefix=*) + -R) + RESOURCES="$2" + shift 2 + ;; + --resources-prefix=*|-p=*) RESOURCES_PREFIX="${1#*=}" shift ;; + -p) + RESOURCES_PREFIX="$2" + shift 2 + ;; *) echo "ERROR: Invalid argument: $1" usage diff --git a/tests/performance/ceph-dashboard.yaml b/tests/performance/monitoring/ceph-dashboard.yaml similarity index 100% rename from tests/performance/ceph-dashboard.yaml rename to 
tests/performance/monitoring/ceph-dashboard.yaml diff --git a/tests/performance/virtual-machine-dashboard.yaml b/tests/performance/monitoring/virtual-machine-dashboard.yaml similarity index 100% rename from tests/performance/virtual-machine-dashboard.yaml rename to tests/performance/monitoring/virtual-machine-dashboard.yaml diff --git a/tests/performance/virtualization-dashboard.yaml b/tests/performance/monitoring/virtualization-dashboard.yaml similarity index 75% rename from tests/performance/virtualization-dashboard.yaml rename to tests/performance/monitoring/virtualization-dashboard.yaml index d1f69f1d50..96c01f5ad7 100644 --- a/tests/performance/virtualization-dashboard.yaml +++ b/tests/performance/monitoring/virtualization-dashboard.yaml @@ -187,7 +187,7 @@ spec: "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (phase) ((kube_pod_status_phase{} * on (pod) group_left(label_vm_kubevirt_internal_virtualization_deckhouse_io_name) kube_pod_labels{label_vm_kubevirt_internal_virtualization_deckhouse_io_name!=\"\"}))", + "expr": "sum by (phase) ((kube_pod_status_phase{namespace=~\"$namespace\"} * on (pod) group_left(label_vm_kubevirt_internal_virtualization_deckhouse_io_name) kube_pod_labels{label_vm_kubevirt_internal_virtualization_deckhouse_io_name!=\"\"}))", "hide": false, "instant": false, "legendFormat": "{{phase}}", @@ -442,7 +442,7 @@ spec: "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (phase) ((kube_pod_status_phase{} * on (pod) group_left(label_vm_kubevirt_internal_virtualization_deckhouse_io_name) kube_pod_labels{label_vm_kubevirt_internal_virtualization_deckhouse_io_name!=\"\"})==1)", + "expr": "sum by (phase) ((kube_pod_status_phase{namespace=~\"$namespace\"} * on (pod) group_left(label_vm_kubevirt_internal_virtualization_deckhouse_io_name) kube_pod_labels{label_vm_kubevirt_internal_virtualization_deckhouse_io_name!=\"\"})==1)", "hide": false, "instant": false, "legendFormat": "__auto", @@ -540,7 +540,7 @@ spec: "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (label_kubevirt_internal_virtualization_deckhouse_io_node_name) ((kube_pod_status_phase{} * on (pod) group_left(label_kubevirt_internal_virtualization_deckhouse_io_node_name) kube_pod_labels{label_kubevirt_internal_virtualization_deckhouse_io_node_name!=\"\"})==1)", + "expr": "sum by (label_kubevirt_internal_virtualization_deckhouse_io_node_name) ((kube_pod_status_phase{namespace=~\"$namespace\"} * on (pod) group_left(label_kubevirt_internal_virtualization_deckhouse_io_node_name) kube_pod_labels{label_kubevirt_internal_virtualization_deckhouse_io_node_name!=\"\"})==1)", "hide": false, "instant": false, "legendFormat": "__auto", @@ -873,7 +873,8 @@ spec: "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "light-red", @@ -1116,7 +1117,8 @@ spec: "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1242,7 +1244,8 @@ spec: "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1342,7 +1345,7 @@ spec: "type": "table" }, { - "collapsed": true, + "collapsed": false, "gridPos": { "h": 1, "w": 24, @@ -1350,629 +1353,632 @@ spec: "y": 42 }, "id": 42, - "panels": [ + "panels": [], + "title": "VirtualDIsks", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${ds_prometheus}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + 
"custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "always", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 46, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.5.13", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${ds_prometheus}" }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false + "editorMode": "code", + "exemplar": false, + "expr": "count(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\"}==1) by (phase)", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ phase }}", + "range": true, + "refId": "F" + } + ], + "title": "Count VirtualDISK Phases", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "left", + "axisSoftMax": 7, + "axisSoftMin": -1, + "axisWidth": 125, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 3, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "area" + } + }, + "mappings": [ + { + "options": { + "0": { + "color": "red", + "index": 1, + "text": "Unknown" }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineStyle": { - "fill": "solid" + "1": { + "color": "red", + "index": 2, + "text": "PVCLost" }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" + "2": { + "color": "red", + "index": 3, + "text": "Failed" }, - "showPoints": "always", - "spanNulls": true, - "stacking": { - "group": "A", - "mode": "none" + "3": { + "color": "red", + "index": 4, + "text": "Pending" }, - "thresholdsStyle": { - "mode": "off" + "4": { + "color": "red", + "index": 5, + "text": "WaitForUserUpload" + }, + "5": { + 
"color": "red", + "index": 6, + "text": "Provisioning" + }, + "6": { + "color": "red", + "index": 7, + "text": "Ready" + }, + "7": { + "color": "red", + "index": 8, + "text": "-" + }, + "-1": { + "color": "red", + "index": 0, + "text": "-" } }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 46, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.5.13", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${ds_prometheus}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\"}==1) by (phase)", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ phase }}", - "range": true, - "refId": "F" + "type": "value" } ], - "title": "Count VirtualDISK Phases", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "left", - "axisSoftMax": 7, - "axisSoftMin": -1, - "axisWidth": 125, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "area" - } + { + "color": "light-red", + "value": -1 }, - "mappings": [ - { - "options": { - "0": { - "color": "red", - "index": 1, - "text": "Unknown" - }, - "1": { - "color": "red", - "index": 2, - "text": "PVCLost" - }, - "2": { - "color": "red", - "index": 3, - "text": "Failed" - }, - "3": { - "color": "red", - "index": 4, - "text": "Pending" - }, - "4": { - "color": "red", - "index": 5, - "text": "WaitForUserUpload" - }, - "5": { - "color": "red", - "index": 6, - "text": "Provisioning" - }, - "6": { - "color": "red", - "index": 7, - "text": "Ready" - }, - "7": { - "color": "red", - "index": 8, - "text": "-" - }, - "-1": { - "color": "red", - "index": 0, - "text": "-" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "light-red", - "value": -1 - }, - { - "color": "light-yellow", - "value": 2.5 - }, - { - "color": "light-green", - "value": 4.5 - } - ] + { + "color": "light-yellow", + "value": 2.5 + }, + { + "color": "light-green", + "value": 4.5 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "test-complete" + ], + "prefix": "All except:", + "readOnly": true } }, - "overrides": [ + "properties": [ { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - 
"test-complete" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } } ] - }, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 21 - }, - "id": 50, - "options": { - "legend": { - "calcs": [], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.5.13", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Unknown\"} > 0) - 1 + 0", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "F" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"PVCLost\"} > 0) - 1 + 1", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Failed\"} > 0) - 1 + 2", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Pending\"} > 0) - 1 + 3", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"WaitForUserUpload\"} > 0) - 1 + 4", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "G" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Provisioning\"} > 0) - 1 + 5", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P0D6E4079E36703EB" - }, - "editorMode": "code", - "exemplar": false, - "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Ready\"} > 0) -1 + 6", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "{{ name }}", - "range": true, - "refId": "E" - } - ], - "title": "VirtualDISK Phases TimeLine", - "type": "timeseries" + } + ] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 
53 + }, + "id": 50, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.5.13", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Unknown\"} > 0) - 1 + 0", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "F" }, { "datasource": { "type": "prometheus", - "uid": "${ds_prometheus}" + "uid": "P0D6E4079E36703EB" }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "left", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.width", - "value": 377 - } - ] - } - ] + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"PVCLost\"} > 0) - 1 + 1", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" }, - "gridPos": { - "h": 11, - "w": 6, - "x": 12, - "y": 21 + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Failed\"} > 0) - 1 + 2", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" }, - "id": 47, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Pending\"} > 0) - 1 + 3", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"WaitForUserUpload\"} > 0) - 1 + 4", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\",phase=\"Provisioning\"} > 0) - 1 + 5", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P0D6E4079E36703EB" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", 
namespace=~\"$namespace\",phase=\"Ready\"} > 0) -1 + 6", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "{{ name }}", + "range": true, + "refId": "E" + } + ], + "title": "VirtualDISK Phases TimeLine", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${ds_prometheus}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" }, - "showHeader": true, - "sortBy": [ + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ { - "desc": false, - "displayName": "Name" + "color": "green", + "value": null } ] - }, - "pluginVersion": "10.4.5", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${ds_prometheus}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\",namespace=~\"$namespace\"}==1", - "format": "table", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - } - ], - "title": "VirtualDISK ALL Phases", - "transformations": [ - { - "id": "merge", - "options": {} - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "exported_namespace", - "name", - "phase" - ] - } - } + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": {}, - "renameByName": { - "exported_namespace": "Namespace", - "name": "Name", - "phase": "Phase" - } + "properties": [ + { + "id": "custom.width", + "value": 377 } - } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 12, + "y": 53 + }, + "id": 47, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" ], - "type": "table" + "show": false }, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Name" + } + ] + }, + "pluginVersion": "10.4.5", + "targets": [ { "datasource": { "type": "prometheus", "uid": "${ds_prometheus}" }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "left", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } + "editorMode": "code", + "exemplar": false, + "expr": "d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\",namespace=~\"$namespace\"}==1", + "format": "table", + "hide": false, + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "VirtualDISK ALL Phases", + "transformations": [ + { + "id": "merge", + "options": {} + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "exported_namespace", + "name", + "phase" + ] + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "exported_namespace": "Namespace", + "name": "Name", + "phase": "Phase" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${ds_prometheus}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "cellOptions": { + "type": "auto" }, - "overrides": [ + "inspect": false + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.width", - "value": 377 - } - ] + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 } ] - }, - "gridPos": { - "h": 11, - "w": 6, - "x": 18, - "y": 21 - }, - "id": 48, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" }, - "showHeader": true, - "sortBy": [ + "properties": [ { - "desc": false, - "displayName": "Namespace" + "id": "custom.width", + "value": 377 } ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 6, + "x": 18, + "y": 53 + }, + "id": 48, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Namespace" + } + ] + }, + "pluginVersion": "10.4.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${ds_prometheus}" }, - "pluginVersion": "10.4.5", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${ds_prometheus}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\", phase!=\"Ready\"} > 0", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" + "editorMode": "code", + "exemplar": false, + "expr": "d8_virtualization_virtualdisk_status_phase{prometheus!=\"deckhouse\", namespace=~\"$namespace\", phase!=\"Ready\"} > 0", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "VirtualDISK NotReady Phases", + "transformations": [ + { + "id": "merge", + "options": {} + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "exported_namespace", + "name", + "phase" + ] } - ], - "title": "VirtualDISK NotReady Phases", - "transformations": [ - { - "id": "merge", - "options": {} - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "exported_namespace", - "name", - "phase" - ] - } - } - }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": {}, - "renameByName": { - "exported_namespace": "Namespace", - "name": "Name", - "phase": "Phase" - } - } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "exported_namespace": "Namespace", + "name": "Name", + "phase": "Phase" } - ], - "type": "table" + } } ], - "title": "VirtualDIsks", - "type": "row" + "type": "table" }, { "collapsed": true, @@ -1980,7 +1986,7 @@ spec: "h": 1, "w": 24, "x": 0, - "y": 43 + "y": 64 }, "id": 54, "panels": [ @@ -2036,8 +2042,7 @@ spec: "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2168,8 +2173,7 @@ spec: "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "light-red", @@ -2292,8 +2296,7 @@ spec: "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2423,8 +2426,7 @@ spec: "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] } @@ -2524,14 +2526,14 @@ spec: "type": "row" } ], - "refresh": "", + "refresh": "1m", "schemaVersion": 39, "tags": [], "templating": { 
"list": [ { "current": { - "selected": true, + "selected": false, "text": "main", "value": "P0D6E4079E36703EB" }, @@ -2552,10 +2554,10 @@ spec: "current": { "selected": true, "text": [ - "All" + "perf" ], "value": [ - "$__all" + "perf" ] }, "datasource": { @@ -2581,7 +2583,7 @@ spec: }, { "current": { - "selected": true, + "selected": false, "text": [ "All" ], diff --git a/tests/performance/templates/_cloud-config.tpl b/tests/performance/templates/_cloud-config.tpl index c467c84e41..d8311ce8f7 100644 --- a/tests/performance/templates/_cloud-config.tpl +++ b/tests/performance/templates/_cloud-config.tpl @@ -17,6 +17,7 @@ users: {{- range .Values.sshAuthorizeKeys }} - {{.}} {{- end }} +{{- if eq .Values.resources.virtualImage.spec.template.image.name "ubuntu" }} apt: sources_list: | deb http://mirror.yandex.ru/ubuntu jammy main restricted @@ -32,10 +33,11 @@ apt: package_update: true package_upgrade: true packages: - - prometheus-node-exporter - - qemu-guest-agent - - stress-ng + # - prometheus-node-exporter + # - qemu-guest-agent + # - stress-ng - nginx +{{- end }} write_files: - path: /usr/local/bin/generate.sh permissions: "0755" diff --git a/tests/performance/templates/cloud-init-secret.yaml b/tests/performance/templates/cloud-init-secret.yaml index a5edd71896..dbd2ef0ae8 100644 --- a/tests/performance/templates/cloud-init-secret.yaml +++ b/tests/performance/templates/cloud-init-secret.yaml @@ -1,8 +1,8 @@ -{{- if or (eq .Values.resources "vms") (eq .Values.resources "all") }} +{{- if or (eq .Values.resources.default "vms") (eq .Values.resources.default "all") }} apiVersion: v1 kind: Secret metadata: - name: {{ $.Values.resourcesPrefix }}-cloud-init + name: {{ $.Values.resources.prefix }}-cloud-init namespace: {{ .Release.Namespace }} type: "provisioning.virtualization.deckhouse.io/cloud-init" data: diff --git a/tests/performance/templates/nginx-service.yaml b/tests/performance/templates/nginx-service.yaml index 260dba2a30..253daf50d3 100644 --- a/tests/performance/templates/nginx-service.yaml +++ b/tests/performance/templates/nginx-service.yaml @@ -1,4 +1,5 @@ -{{- if or (eq .Values.resources "vms") (eq .Values.resources "all") }} +{{- if eq .Values.nginx true }} +{{- if or (eq .Values.resources.default "vms") (eq .Values.resources.default "all") }} apiVersion: v1 kind: Service metadata: @@ -28,4 +29,5 @@ spec: name: {{ $.Values.resourcesPrefix }}-svc-nginx port: number: 80 +{{- end }} {{- end }} \ No newline at end of file diff --git a/tests/performance/templates/node-exporter-service.yaml b/tests/performance/templates/node-exporter-service.yaml.back similarity index 85% rename from tests/performance/templates/node-exporter-service.yaml rename to tests/performance/templates/node-exporter-service.yaml.back index 24aeb40c03..4dee23276d 100644 --- a/tests/performance/templates/node-exporter-service.yaml +++ b/tests/performance/templates/node-exporter-service.yaml.back @@ -1,4 +1,5 @@ -{{- if or (eq .Values.resources "vms") (eq .Values.resources "all") }} +{{- if eq .Values.nginx true }} +{{- if or (eq .Values.resources.default "vms") (eq .Values.resources.default "all") }} apiVersion: v1 kind: Service metadata: @@ -34,4 +35,5 @@ spec: selector: matchLabels: app: {{ $.Values.resourcesPrefix }}-svc-node-exporter -{{- end }} \ No newline at end of file +{{- end }} +{{- end }} diff --git a/tests/performance/templates/vds.yaml b/tests/performance/templates/vds.yaml new file mode 100644 index 0000000000..911192aef6 --- /dev/null +++ b/tests/performance/templates/vds.yaml @@ -0,0 +1,49 @@ +{{- 
if or (eq .Values.resources.default "vds") (eq .Values.resources.default "all") }}
+apiVersion: virtualization.deckhouse.io/v1alpha2
+kind: VirtualImage
+metadata:
+  name: {{ $.Values.resources.prefix }}
+  namespace: {{ $.Release.Namespace }}
+  labels:
+    vms: {{ $.Values.resources.prefix }}
+spec:
+{{- if eq .Values.resources.virtualImage.spec.template.type "persistentVolumeClaim" }}
+  storage: PersistentVolumeClaim
+  persistentVolumeClaim:
+    storageClassName: {{ $.Values.resources.storageClassName }}
+  dataSource:
+    type: "HTTP"
+    http:
+      url: {{ $.Values.resources.virtualImage.spec.template.image.url }}
+{{- else }}
+  storage: ContainerRegistry
+  dataSource:
+    type: "HTTP"
+    http:
+      url: {{ $.Values.resources.virtualImage.spec.template.image.url }}
+{{- end }}
+{{- if ne .Values.resources.virtualDisk.spec.template.type "virtualImage" }}
+{{- $count := (.Values.count | int) }}
+{{- range until $count }}
+---
+apiVersion: virtualization.deckhouse.io/v1alpha2
+kind: VirtualDisk
+metadata:
+  name: {{ $.Values.resources.prefix }}-{{ . }}
+  namespace: {{ $.Release.Namespace }}
+  labels:
+    vms: {{ $.Values.resources.prefix }}
+spec:
+  persistentVolumeClaim:
+    size: {{ $.Values.resources.virtualDisk.spec.template.size }}
+    {{- if $.Values.resources.storageClass }}
+    storageClassName: {{ $.Values.resources.storageClass }}
+    {{- end }}
+  dataSource:
+    type: "ObjectRef"
+    objectRef:
+      kind: "VirtualImage"
+      name: {{ $.Values.resources.prefix }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/tests/performance/templates/vmds.yaml b/tests/performance/templates/vmds.yaml
deleted file mode 100644
index 8d466050f9..0000000000
--- a/tests/performance/templates/vmds.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-{{- if or (eq .Values.resources "disks") (eq .Values.resources "all") }}
-apiVersion: virtualization.deckhouse.io/v1alpha2
-kind: VirtualImage
-metadata:
-  name: {{ $.Values.resourcesPrefix }}
-  namespace: {{ $.Release.Namespace }}
-  labels:
-    vm: {{ $.Values.resourcesPrefix }}
-spec:
-  storage: ContainerRegistry
-  dataSource:
-    type: "HTTP"
-    http:
-      url: {{ $.Values.imageURL }}
-{{- $count := (.Values.count | int) }}
-{{- range until $count }}
----
-apiVersion: virtualization.deckhouse.io/v1alpha2
-kind: VirtualDisk
-metadata:
-  name: {{ $.Values.resourcesPrefix }}-{{ . }}
-  namespace: {{ $.Release.Namespace }}
-  labels:
-    vm: {{ $.Values.resourcesPrefix }}
-spec:
-  persistentVolumeClaim:
-    size: {{ $.Values.diskSize }}
-    {{- if $.Values.storageClass }}
-    storageClassName: {{ $.Values.storageClass }}
-    {{- end }}
-  dataSource:
-    type: "ObjectRef"
-    objectRef:
-      kind: "VirtualImage"
-      name: {{ $.Values.resourcesPrefix }}
-{{- end }}
-{{- end }}
diff --git a/tests/performance/templates/vms.yaml b/tests/performance/templates/vms.yaml
index 4637386ea2..4118bf7d06 100644
--- a/tests/performance/templates/vms.yaml
+++ b/tests/performance/templates/vms.yaml
@@ -1,43 +1,52 @@
-{{- if or (eq .Values.resources "vms") (eq .Values.resources "all") }}
+{{- if or (eq .Values.resources.default "vms") (eq .Values.resources.default "all") }}
 {{- $count := (.Values.count | int) }}
 {{- range until $count }}
 ---
 apiVersion: virtualization.deckhouse.io/v1alpha2
 kind: VirtualMachine
 metadata:
-  name: {{ $.Values.resourcesPrefix }}-{{ . }}
+  name: {{ $.Values.resources.prefix }}-{{ . }}
   namespace: {{ $.Release.Namespace }}
   labels:
-    vm: {{ $.Values.resourcesPrefix }}
+    vms: {{ $.Values.resources.prefix }}
 spec:
-  runPolicy: AlwaysOn
+  runPolicy: {{ $.Values.resources.virtualMachine.spec.template.runPolicy }}
   enableParavirtualization: true
   disruptions:
     # To ensure an equal amount of virtual machines with Manual and Automatic modes during testing,
     # we create every second machine with Automatic mode.
-    {{- if eq (mod . 2) 0 }}
+    {{- if eq $.Values.resources.virtualMachine.spec.template.restartApprovalMode "Dynamic" }}
+    {{- if eq (mod . 2) 0 }}
     restartApprovalMode: Automatic
-    {{- else }}
+    {{- else }}
     restartApprovalMode: Manual
+    {{- end }}
+    {{- else }}
+    restartApprovalMode: Automatic
     {{- end }}
   osType: Generic
   bootloader: BIOS
-  {{- with $.Values.spec.cpu }}
+  {{- with $.Values.resources.virtualMachine.spec.template.cpu }}
   cpu:
     {{- toYaml . | nindent 4 }}
   {{- end }}
-  {{- with $.Values.spec.memory }}
+  {{- with $.Values.resources.virtualMachine.spec.template.memory }}
   memory:
     {{- toYaml . | nindent 4 }}
   {{- end }}
-  virtualMachineClassName: host
+  virtualMachineClassName: {{ $.Values.resources.virtualMachine.spec.template.virtualMachineClassName | default "host" }}
   blockDeviceRefs:
+    {{- if eq $.Values.resources.virtualDisk.spec.template.type "virtualImage" }}
+    - kind: VirtualImage
+      name: {{ $.Values.resources.prefix }}
+    {{- else }}
     - kind: VirtualDisk
-      name: {{ $.Values.resourcesPrefix }}-{{ . }}
+      name: {{ $.Values.resources.prefix }}-{{ . }}
+    {{- end }}
   provisioning:
     type: UserDataRef
    userDataRef:
       kind: Secret
-      name: {{ $.Values.resourcesPrefix }}-cloud-init
+      name: {{ $.Values.resources.prefix }}-cloud-init
 {{- end }}
 {{- end }}
\ No newline at end of file
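With vmds.yaml gone, the templates above consume a nested resources block instead of the old flat keys (resourcesPrefix, imageURL, diskSize). The chart's values.yaml itself is not part of this diff, so the following is only an inferred sketch of the shape these templates now expect; every concrete value is a placeholder:

count: 10                            # replicas rendered by vms.yaml / vds.yaml
nginx: false                         # gates nginx-service.yaml
resources:
  default: "all"                     # which resources to render: vms | vds | all
  prefix: "perf"                     # replaces the old resourcesPrefix
  storageClassName: ""               # read by vds.yaml for the VirtualImage PVC
  storageClass: ""                   # read by vds.yaml for the VirtualDisk PVCs
  virtualImage:
    spec:
      template:
        type: "persistentVolumeClaim"    # any other value selects ContainerRegistry storage
        image:
          name: "ubuntu"                 # gates the apt block in _cloud-config.tpl
          url: "https://example.org/jammy-server-cloudimg-amd64.img"   # placeholder URL
  virtualDisk:
    spec:
      template:
        type: "persistentVolumeClaim"    # "virtualImage" boots VMs from the image directly
        size: 10Gi
  virtualMachine:
    spec:
      template:
        runPolicy: AlwaysOn
        restartApprovalMode: "Dynamic"   # "Dynamic" alternates Automatic/Manual per VM index
        virtualMachineClassName: "host"
        cpu:
          cores: 1
        memory:
          size: 1Gi

Note that vds.yaml reads resources.storageClassName for the VirtualImage but resources.storageClass for the VirtualDisks, so a complete values file needs both keys.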
diff --git a/tests/performance/tools/evicter/Taskfile.yaml b/tests/performance/tools/evicter/Taskfile.yaml
new file mode 100644
index 0000000000..6ae4b41228
--- /dev/null
+++ b/tests/performance/tools/evicter/Taskfile.yaml
@@ -0,0 +1,14 @@
+version: "3"
+
+silent: true
+
+vars:
+  NS: '{{ .NS | default "perf" }}'
+  TARGET: "{{ .TARGET | default 10 }}"
+  DURATION: '{{ .DURATION | default "0m" }}'
+
+tasks:
+  run:migration:
+    desc: "Run migration | NS=myns TARGET=10 task run:migration"
+    cmds:
+      - go run cmd/main.go -n {{.NS}} -t {{.TARGET}} -d {{ .DURATION }}
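The task forwards NS, TARGET, and DURATION straight through to the Go entrypoint's -n, -t, and -d flags, so a bare task run:migration runs against namespace perf with a target of 10 and no duration limit. An illustrative override (the namespace and numbers here are made up):

    task run:migration NS=load-test TARGET=25 DURATION=30m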
diff --git a/tests/performance/tools/evicter/cmd/main.go b/tests/performance/tools/evicter/cmd/main.go
new file mode 100644
index 0000000000..1106dc4007
--- /dev/null
+++ b/tests/performance/tools/evicter/cmd/main.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"log/slog"
+	"os"
+
+	"evicter/pkg/command"
+)
+
+func main() {
+	// Plain-text logging by default. To emit JSON or include source
+	// locations, construct the handler with options instead, e.g.:
+	//   opts := &slog.HandlerOptions{AddSource: true}
+	//   logger := slog.New(slog.NewJSONHandler(os.Stdout, opts))
+	// The text handler keeps evicter output human-readable.
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
+	slog.SetDefault(logger)
+
+	command.Execute()
+}
diff --git a/tests/performance/tools/evicter/go.mod b/tests/performance/tools/evicter/go.mod
new file mode 100644
index 0000000000..dff7ad9eb5
--- /dev/null
+++ b/tests/performance/tools/evicter/go.mod
@@ -0,0 +1,63 @@
+module evicter
+
+go 1.24.6
+
+toolchain go1.24.7
+
+require (
+	github.com/deckhouse/virtualization/api v1.0.0
+	github.com/spf13/cobra v1.9.1
+	k8s.io/apimachinery v0.33.3
+	k8s.io/client-go v0.33.3
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 // indirect
+	github.com/openshift/custom-resource-status v1.1.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/spf13/pflag v1.0.7 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.3 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/time v0.9.0 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.33.3 // indirect
+	k8s.io/apiextensions-apiserver v0.33.3 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	kubevirt.io/api v1.3.1 // indirect
+	kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 // indirect
+	kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
+	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
+)
diff --git a/tests/performance/tools/evicter/go.sum b/tests/performance/tools/evicter/go.sum
new file mode 100644
index 0000000000..c93d2ea467
--- /dev/null
+++ 
b/tests/performance/tools/evicter/go.sum @@ -0,0 +1,394 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckhouse/virtualization/api v1.0.0 h1:q4TvC74tpjk25k0byXJCYP4HjvRexBSeI0cC8QeCMTQ= +github.com/deckhouse/virtualization/api v1.0.0/go.mod h1:meTeGulR+xwnvt0pTGsoI14YhGe0lHUVyAfhZsoQyeQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/getkin/kin-openapi v0.76.0/go.mod 
h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 h1:t/CahSnpqY46sQR01SoS+Jt0jtjgmhgE6lFmRnO4q70= +github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183/go.mod h1:4VWG+W22wrB4HfBL88P40DxLEpSOaiBVxUnfalfJo9k= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod 
h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi 
v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 h1:gAXU86Fmbr/ktY17lkHwSjw5aoThQvhnstGGIYKlKYc= +k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= +kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= +kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 h1:IWo12+ei3jltSN5jQN1xjgakfvRSF3G3Rr4GXVOOy2I= +kubevirt.io/containerized-data-importer-api v1.57.0-alpha1/go.mod h1:Y/8ETgHS1GjO89bl682DPtQOYEU/1ctPFBz6Sjxm4DM= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/tests/performance/tools/evicter/helpers/helpers.go b/tests/performance/tools/evicter/helpers/helpers.go new file mode 100644 index 0000000000..58d2a19ead --- /dev/null +++ b/tests/performance/tools/evicter/helpers/helpers.go @@ -0,0 +1,79 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package helpers
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/deckhouse/virtualization/api/client/kubeclient"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// CreateKubeConfig resolves the kubeconfig referenced by the KUBECONFIG
+// environment variable (falling back to $HOME/.kube/config) and returns a
+// virtualization API client built from it. It exits the process on failure.
+func CreateKubeConfig() kubeclient.Client {
+	kubeconfigEnv := os.Getenv("KUBECONFIG")
+	if kubeconfigEnv == "" {
+		fmt.Println("KUBECONFIG is not set, trying default path $HOME/.kube/config")
+		userHomeDir, err := os.UserHomeDir()
+		if err != nil {
+			fmt.Println("Failed to get user home directory:", err)
+			os.Exit(1)
+		}
+		kubeconfigEnv = filepath.Join(userHomeDir, ".kube", "config")
+	}
+
+	// Split the KUBECONFIG value: it may list several kubeconfig paths to merge.
+	kubeconfigPaths := strings.Split(kubeconfigEnv, string(os.PathListSeparator))
+	fmt.Printf("Using KUBECONFIG paths: %v\n", kubeconfigPaths)
+
+	// Load and merge the kubeconfig files in precedence order.
+	loadingRules := &clientcmd.ClientConfigLoadingRules{
+		Precedence: kubeconfigPaths,
+	}
+	clientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		loadingRules,
+		&clientcmd.ConfigOverrides{},
+	).ClientConfig()
+	if err != nil {
+		fmt.Printf("Failed to load kubeconfig: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Create a client for the virtualization API from the REST config.
+	client, err := kubeclient.GetClientFromRESTConfig(clientConfig)
+	if err != nil {
+		fmt.Printf("Failed to create Kubernetes client: %v\n", err)
+		os.Exit(1)
+	}
+	return client
+}
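+
+// Example (illustrative only, not part of the tool): a caller typically builds
+// the client once and then works with the virtualization API directly:
+//
+//	client := helpers.CreateKubeConfig()
+//	vms, err := client.VirtualMachines("perf").List(context.TODO(), metav1.ListOptions{})
+//	if err != nil {
+//		fmt.Printf("Failed to list VMs: %v\n", err)
+//		os.Exit(1)
+//	}
+//	fmt.Printf("Found %d VirtualMachines\n", len(vms.Items))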
diff --git a/tests/performance/tools/evicter/internal/migration.go b/tests/performance/tools/evicter/internal/migration.go
new file mode 100644
index 0000000000..ea74d1c149
--- /dev/null
+++ b/tests/performance/tools/evicter/internal/migration.go
@@ -0,0 +1,325 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"context"
+	"log/slog"
+	"math/rand"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/deckhouse/virtualization/api/client/kubeclient"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MigrationState tracks a single in-flight VM migration.
+type MigrationState struct {
+	VMName    string
+	StartTime time.Time
+	VMOPName  string
+}
+
+// ContinuousMigrator keeps a configured share of VMs migrating at all times.
+type ContinuousMigrator struct {
+	client           kubeclient.Client
+	namespace        string
+	targetPercentage int
+	runDuration      time.Duration
+	migratingVMs     map[string]*MigrationState
+	mutex            sync.RWMutex
+	stopChan         chan struct{}
+	doneChan         chan struct{}
+}
+
+// NewContinuousMigrator creates a new continuous migrator.
+func NewContinuousMigrator(client kubeclient.Client, namespace string, targetPercentage int, runDuration time.Duration) *ContinuousMigrator {
+	return &ContinuousMigrator{
+		client:           client,
+		namespace:        namespace,
+		targetPercentage: targetPercentage,
+		runDuration:      runDuration,
+		migratingVMs:     make(map[string]*MigrationState),
+		stopChan:         make(chan struct{}),
+		doneChan:         make(chan struct{}),
+	}
+}
+
+// StartContinuousMigrator starts the continuous migration process and blocks
+// until the migrator stops (signal, timeout, or completion).
+func StartContinuousMigrator(client kubeclient.Client, namespace string, targetPercentage int, runDuration time.Duration) {
+	migrator := NewContinuousMigrator(client, namespace, targetPercentage, runDuration)
+
+	// Setup signal handling for graceful shutdown.
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+
+	// Both the timeout goroutine and the signal path may request a stop; guard
+	// the channel close with sync.Once so they cannot close it twice and panic.
+	var stopOnce sync.Once
+	stop := func() { stopOnce.Do(func() { close(migrator.stopChan) }) }
+
+	// Start the migrator in a goroutine.
+	go migrator.run()
+
+	// Stop after the configured duration, if one was given.
+	if runDuration > 0 {
+		go func() {
+			time.Sleep(runDuration)
+			slog.Info("Migration timeout reached, stopping migrator")
+			stop()
+		}()
+	}
+
+	// Wait for a signal or for the migrator to finish on its own.
+	select {
+	case <-sigChan:
+		slog.Info("Received interrupt signal, stopping migrator gracefully...")
+		stop()
+	case <-migrator.doneChan:
+		slog.Info("Migration process completed")
+	}
+
+	// Wait for shutdown; receiving from the already-closed channel returns immediately.
+	<-migrator.doneChan
+	slog.Info("Migrator stopped")
+}
+
+// run is the main migration loop.
+func (m *ContinuousMigrator) run() {
+	defer close(m.doneChan)
+
+	slog.Info("Starting continuous migrator",
+		"namespace", m.namespace,
+		"targetPercentage", m.targetPercentage,
+		"duration", m.runDuration)
+
+	ticker := time.NewTicker(10 * time.Second) // Reconcile every 10 seconds.
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-m.stopChan:
+			slog.Info("Stop signal received, shutting down...")
+			return
+		case <-ticker.C:
+			m.checkAndStartMigrations()
+			m.monitorMigrations()
+		}
+	}
+}
+
+// checkAndStartMigrations starts new migrations until the target share of VMs is migrating.
+func (m *ContinuousMigrator) checkAndStartMigrations() {
+	// Get all VMs in the namespace.
+	vmList, err := m.client.VirtualMachines(m.namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		slog.Error("Failed to list VMs", "error", err)
+		return
+	}
+
+	// Filter VMs that are Running and not already tracked as migrating.
+	var availableVMs []v1alpha2.VirtualMachine
+	for _, vm := range vmList.Items {
+		if vm.Status.Phase == v1alpha2.MachineRunning {
+			m.mutex.RLock()
+			_, isMigrating := m.migratingVMs[vm.Name]
+			m.mutex.RUnlock()
+
+			if !isMigrating {
+				availableVMs = append(availableVMs, vm)
+			}
+		}
+	}
+
+	// The target is a percentage of all VMs in the namespace (integer division rounds down).
+	targetCount := (m.targetPercentage * len(vmList.Items)) / 100
+	m.mutex.RLock()
+	currentMigrating := len(m.migratingVMs)
+	m.mutex.RUnlock()
+
+	// Start new migrations if needed.
+	if currentMigrating < targetCount && len(availableVMs) > 0 {
+		needed := targetCount - currentMigrating
+		if needed > len(availableVMs) {
+			needed = len(availableVMs)
+		}
+
+		// Shuffle so the VMs to migrate are picked at random.
+		rand.Shuffle(len(availableVMs), func(i, j int) {
+			availableVMs[i], availableVMs[j] = availableVMs[j], availableVMs[i]
+		})
+
+		for i := 0; i < needed; i++ {
+			m.startMigration(availableVMs[i])
+		}
+	}
+
+	slog.Info("Migration status",
+		"totalVMs", len(vmList.Items),
+		"targetCount", targetCount,
+		"currentMigrating", currentMigrating,
+		"availableVMs", len(availableVMs))
+}
+
+// startMigration creates a migrate VMOP for the given VM and starts tracking it.
+func (m *ContinuousMigrator) startMigration(vm v1alpha2.VirtualMachine) {
+	// Double-check that the VM is still available for migration.
+	ctx := context.TODO()
+	currentVM, err := m.client.VirtualMachines(m.namespace).Get(ctx, vm.Name, metav1.GetOptions{})
+	if err != nil {
+		slog.Error("Failed to get current VM status", "vm", vm.Name, "error", err)
+		return
+	}
+
+	if currentVM.Status.Phase != v1alpha2.MachineRunning {
+		slog.Info("VM is no longer in Running state, skipping migration",
+			"vm", vm.Name,
+			"currentPhase", currentVM.Status.Phase)
+		return
+	}
+
+	// Skip if the VM is already tracked as migrating.
+	m.mutex.RLock()
+	_, isMigrating := m.migratingVMs[vm.Name]
+	m.mutex.RUnlock()
+
+	if isMigrating {
+		slog.Info("VM is already being migrated, skipping", "vm", vm.Name)
+		return
+	}
+
+	// Skip if the VM already has an active VMOP in the cluster.
+	vmopList, err := m.client.VirtualMachineOperations(m.namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		slog.Error("Failed to check existing VMOPs", "vm", vm.Name, "error", err)
+		return
+	}
+
+	for _, vmop := range vmopList.Items {
+		if vmop.Spec.VirtualMachine == vm.Name {
+			if vmop.Status.Phase == v1alpha2.VMOPPhaseInProgress ||
+				vmop.Status.Phase == v1alpha2.VMOPPhasePending {
+				slog.Info("VM already has active VMOP, skipping migration",
+					"vm", vm.Name,
+					"vmop", vmop.Name,
+					"phase", vmop.Status.Phase)
+				return
+			}
+		}
+	}
+
+	vmop := &v1alpha2.VirtualMachineOperation{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       v1alpha2.VirtualMachineOperationKind,
+			APIVersion: v1alpha2.Version,
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: vm.Name + "-migrate-",
+			Namespace:    m.namespace,
+		},
+		Spec: v1alpha2.VirtualMachineOperationSpec{
+			Type:           v1alpha2.VMOPTypeMigrate,
+			VirtualMachine: vm.Name,
+		},
+	}
+
+	createdVMOP, err := m.client.VirtualMachineOperations(m.namespace).Create(ctx, vmop, metav1.CreateOptions{})
+	if err != nil {
+		slog.Error("Failed to create VMOP", "vm", vm.Name, "error", err)
+		return
+	}
+
+	// Track the migration.
+	m.mutex.Lock()
+	m.migratingVMs[vm.Name] = &MigrationState{
+		VMName:    vm.Name,
+		StartTime: time.Now(),
+		VMOPName:  createdVMOP.Name,
+	}
+	m.mutex.Unlock()
+
+	slog.Info("Started migration", "vm", vm.Name, "vmop", createdVMOP.Name)
+}
+
+// monitorMigrations removes completed, degraded, and timed-out migrations from tracking.
+func (m *ContinuousMigrator) monitorMigrations() {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+
+	ctx := context.TODO()
+
+	for vmName, state := range m.migratingVMs {
+		// Get current VM status.
+		vm, err := m.client.VirtualMachines(m.namespace).Get(ctx, vmName, metav1.GetOptions{})
+		if err != nil {
+			slog.Error("Failed to get VM status", "vm", vmName, "error", err)
+			continue
+		}
+
+		// Drop VMs that ended up in a degraded state.
+		if vm.Status.Phase == v1alpha2.MachineDegraded {
+			slog.Warn("VM is in degraded state, removing from migration tracking",
+				"vm", vmName,
+				"vmop", state.VMOPName)
+			delete(m.migratingVMs, vmName)
+			continue
+		}
+
+		// Drop migrations that exceeded the 5-minute timeout.
+		if time.Since(state.StartTime) > 5*time.Minute {
+			slog.Warn("Migration timeout reached, removing from tracking",
+				"vm", vmName,
+				"duration", time.Since(state.StartTime),
+				"vmop", state.VMOPName)
+			delete(m.migratingVMs, vmName)
+			continue
+		}
+
+		// Clean up migrations that completed successfully.
+		if m.isMigrationComplete(vm) {
+			slog.Info("Migration completed",
+				"vm", vmName,
+				"duration", time.Since(state.StartTime),
+				"vmop", state.VMOPName)
+			delete(m.migratingVMs, vmName)
+		}
+	}
+}
+
+// isMigrationComplete checks if a VM migration is complete.
+func (m *ContinuousMigrator) isMigrationComplete(vm *v1alpha2.VirtualMachine) bool {
+	if vm.Status.Stats == nil || len(vm.Status.Stats.PhasesTransitions) < 2 {
+		return false
+	}
+
+	transitions := vm.Status.Stats.PhasesTransitions
+	last := transitions[len(transitions)-1]
+	beforeLast := transitions[len(transitions)-2]
+
+	// Migration is complete once a Migrating -> Running transition is observed.
+	return last.Phase == v1alpha2.MachineRunning && beforeLast.Phase == v1alpha2.MachineMigrating
+}
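+
+// Worked example (illustrative): with 50 VMs in the namespace and a 10% target,
+// targetCount = (10 * 50) / 100 = 5. If 2 migrations are already tracked, the
+// next tick starts up to 3 more, picking random Running VMs that have no active
+// VMOP. Integer division rounds down, so with 9 VMs and a 10% target the
+// migrator never starts a migration.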
diff --git a/tests/performance/tools/evicter/pkg/command/command.go b/tests/performance/tools/evicter/pkg/command/command.go
new file mode 100644
index 0000000000..97ce332b53
--- /dev/null
+++ b/tests/performance/tools/evicter/pkg/command/command.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"evicter/helpers"
+	"evicter/internal"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	namespace string
+	target    int
+	duration  string
+)
+
+var rootCmd = &cobra.Command{
+	Use:   "migrator",
+	Short: "continuously migrate a percentage of virtual machines",
+	Long:  `A tool that continuously migrates a specified percentage of virtual machines in a namespace.`,
+	Args:  cobra.ArbitraryArgs,
+	Run:   startMigrator,
+}
+
+func init() {
+	rootCmd.Flags().StringVarP(&namespace, "namespace", "n", "perf", "namespace to look for VMs in")
+	rootCmd.Flags().IntVarP(&target, "target", "t", 10, "target percentage of VMs to keep migrating (1-100)")
+	rootCmd.Flags().StringVarP(&duration, "duration", "d", "0", "how long to run the migrator (e.g. '30m', '1h'); '0' means run until interrupted")
+}
+
+func startMigrator(cmd *cobra.Command, args []string) {
+	// Validate the target percentage.
+	if target < 1 || target > 100 {
+		fmt.Println("Error: target percentage must be between 1 and 100")
+		os.Exit(1)
+	}
+
+	// Parse the run duration ('0' keeps runDuration at zero, i.e. no timeout).
+	var runDuration time.Duration
+	var err error
+	if duration != "0" {
+		runDuration, err = time.ParseDuration(duration)
+		if err != nil {
+			fmt.Printf("Error parsing duration '%s': %v\n", duration, err)
+			os.Exit(1)
+		}
+	}
+
+	// Create the Kubernetes client from the local kubeconfig.
+	client := helpers.CreateKubeConfig()
+
+	// Start the continuous migrator; blocks until it stops.
+	internal.StartContinuousMigrator(client, namespace, target, runDuration)
+}
+
+func Execute() {
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
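+
+// Example invocations (illustrative):
+//
+//	migrator                      // keep 10% of VMs in "perf" migrating, run until interrupted
+//	migrator -n load -t 25 -d 1h  // keep 25% of VMs in "load" migrating for one hour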
diff --git a/tests/performance/shatal/.golangci.yaml b/tests/performance/tools/shatal/.golangci.yaml
similarity index 80%
rename from tests/performance/shatal/.golangci.yaml
rename to tests/performance/tools/shatal/.golangci.yaml
index 0867b18310..1be21e2a37 100644
--- a/tests/performance/shatal/.golangci.yaml
+++ b/tests/performance/tools/shatal/.golangci.yaml
@@ -39,6 +39,34 @@ linters-settings:
     # Enable to require nolint directives to mention the specific linter being suppressed.
     # Default: false
     require-specific: true
+  importas:
+    # Do not allow unaliased imports of aliased packages.
+    # Default: false
+    no-unaliased: true
+    # Do not allow non-required aliases.
+    # Default: false
+    no-extra-aliases: false
+    # List of aliases
+    # Default: []
+    alias:
+      - pkg: github.com/deckhouse/virtualization/api/core/v1alpha2
+        alias: ""
+      - pkg: github.com/deckhouse/virtualization/api/subresources/v1alpha2
+        alias: subv1alpha2
+      - pkg: kubevirt.io/api/core/v1
+        alias: virtv1
+      - pkg: k8s.io/api/core/v1
+        alias: corev1
+      - pkg: k8s.io/api/authentication/v1
+        alias: authnv1
+      - pkg: k8s.io/api/storage/v1
+        alias: storagev1
+      - pkg: k8s.io/api/networking/v1
+        alias: netv1
+      - pkg: k8s.io/api/policy/v1
+        alias: policyv1
+      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+        alias: metav1
 
 linters:
   disable-all: true
@@ -77,3 +105,4 @@ linters:
   - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
   - whitespace # detects leading and trailing whitespace
   - wastedassign # Finds wasted assignment statements.
+  - importas # checks import aliases against the configured convention
diff --git a/tests/performance/shatal/README.md b/tests/performance/tools/shatal/README.md
similarity index 100%
rename from tests/performance/shatal/README.md
rename to tests/performance/tools/shatal/README.md
diff --git a/tests/performance/shatal/Taskfile.yaml b/tests/performance/tools/shatal/Taskfile.yaml
similarity index 100%
rename from tests/performance/shatal/Taskfile.yaml
rename to tests/performance/tools/shatal/Taskfile.yaml
diff --git a/tests/performance/shatal/cmd/shatal/main.go b/tests/performance/tools/shatal/cmd/shatal/main.go
similarity index 100%
rename from tests/performance/shatal/cmd/shatal/main.go
rename to tests/performance/tools/shatal/cmd/shatal/main.go
diff --git a/tests/performance/shatal/config.yaml b/tests/performance/tools/shatal/config.yaml
similarity index 100%
rename from tests/performance/shatal/config.yaml
rename to tests/performance/tools/shatal/config.yaml
diff --git a/tests/performance/shatal/go.mod b/tests/performance/tools/shatal/go.mod
similarity index 96%
rename from tests/performance/shatal/go.mod
rename to tests/performance/tools/shatal/go.mod
index e15f9f9f65..0767cc3034 100644
--- a/tests/performance/shatal/go.mod
+++ b/tests/performance/tools/shatal/go.mod
@@ -3,7 +3,7 @@ module github.com/deckhouse/virtualization/shatal
 go 1.24.6
 
 require (
-	github.com/deckhouse/virtualization/api v0.0.0-20240408082728-b892ddd03f9e
+	github.com/deckhouse/virtualization/api v1.0.0
 	github.com/google/uuid v1.6.0
 	github.com/ilyakaznacheev/cleanenv v1.5.0
 	k8s.io/api v0.33.3
@@ -90,6 +90,4 @@ require (
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
-replace github.com/deckhouse/virtualization/api => ./../../../api
-
 replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911
diff --git a/tests/performance/shatal/go.sum b/tests/performance/tools/shatal/go.sum
similarity index 99%
rename from tests/performance/shatal/go.sum
rename to tests/performance/tools/shatal/go.sum
index 5b40af66db..a3681a62a5 100644
--- a/tests/performance/shatal/go.sum
+++ b/tests/performance/tools/shatal/go.sum
@@ -34,6 +34,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckhouse/virtualization/api v1.0.0 h1:q4TvC74tpjk25k0byXJCYP4HjvRexBSeI0cC8QeCMTQ=
+github.com/deckhouse/virtualization/api v1.0.0/go.mod h1:meTeGulR+xwnvt0pTGsoI14YhGe0lHUVyAfhZsoQyeQ=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
diff --git a/tests/performance/shatal/internal/api/client.go b/tests/performance/tools/shatal/internal/api/client.go
similarity index 100%
rename from tests/performance/shatal/internal/api/client.go
rename to tests/performance/tools/shatal/internal/api/client.go
diff --git a/tests/performance/shatal/internal/config/config.go b/tests/performance/tools/shatal/internal/config/config.go
similarity index 100%
rename from tests/performance/shatal/internal/config/config.go
rename to tests/performance/tools/shatal/internal/config/config.go
diff --git a/tests/performance/shatal/internal/logger/logger.go b/tests/performance/tools/shatal/internal/logger/logger.go
similarity index 100%
rename from tests/performance/shatal/internal/logger/logger.go
rename to tests/performance/tools/shatal/internal/logger/logger.go
diff --git a/tests/performance/shatal/internal/logger/options.go b/tests/performance/tools/shatal/internal/logger/options.go
similarity index 100%
rename from tests/performance/shatal/internal/logger/options.go
rename to tests/performance/tools/shatal/internal/logger/options.go
diff --git a/tests/performance/shatal/internal/shatal/creator.go b/tests/performance/tools/shatal/internal/shatal/creator.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/creator.go
rename to tests/performance/tools/shatal/internal/shatal/creator.go
diff --git a/tests/performance/shatal/internal/shatal/deleter.go b/tests/performance/tools/shatal/internal/shatal/deleter.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/deleter.go
rename to tests/performance/tools/shatal/internal/shatal/deleter.go
diff --git a/tests/performance/shatal/internal/shatal/drainer.go b/tests/performance/tools/shatal/internal/shatal/drainer.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/drainer.go
rename to tests/performance/tools/shatal/internal/shatal/drainer.go
diff --git a/tests/performance/shatal/internal/shatal/modifier.go b/tests/performance/tools/shatal/internal/shatal/modifier.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/modifier.go
rename to tests/performance/tools/shatal/internal/shatal/modifier.go
diff --git a/tests/performance/shatal/internal/shatal/nothing.go b/tests/performance/tools/shatal/internal/shatal/nothing.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/nothing.go
rename to tests/performance/tools/shatal/internal/shatal/nothing.go
diff --git a/tests/performance/shatal/internal/shatal/shatal.go b/tests/performance/tools/shatal/internal/shatal/shatal.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/shatal.go
rename to tests/performance/tools/shatal/internal/shatal/shatal.go
diff --git a/tests/performance/shatal/internal/shatal/watcher.go b/tests/performance/tools/shatal/internal/shatal/watcher.go
similarity index 100%
rename from tests/performance/shatal/internal/shatal/watcher.go
rename to tests/performance/tools/shatal/internal/shatal/watcher.go
diff --git a/tests/performance/tools/statistic/Taskfile.yaml b/tests/performance/tools/statistic/Taskfile.yaml
new file mode 100644
index 0000000000..d93c113789
--- /dev/null
+++ b/tests/performance/tools/statistic/Taskfile.yaml
@@ -0,0 +1,20 @@
+version: "3"
+
+silent: true
+
+vars:
+  NS: '{{ .NS | default "perf" }}'
+
+tasks:
+  get-stat:vd:
+    desc: "Collect stats from VDs"
+    cmds:
+      - go run cmd/statistic/main.go -d -n {{.NS}}
+  get-stat:vm:
+    desc: "Collect stats from VMs"
+    cmds:
+      - go run cmd/statistic/main.go -v -n {{.NS}}
+  get-stat:all:
+    desc: "Collect stats from VDs and VMs"
+    cmds:
+      - go run cmd/statistic/main.go -n {{.NS}}
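+
+# Example (illustrative): collect VM stats from a custom namespace:
+#   task get-stat:vm NS=my-namespace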
diff --git a/tests/e2e/config/extra_env.go b/tests/performance/tools/statistic/cmd/statistic/main.go
similarity index 58%
rename from tests/e2e/config/extra_env.go
rename to tests/performance/tools/statistic/cmd/statistic/main.go
index 8cbb32b862..34129210dd 100644
--- a/tests/e2e/config/extra_env.go
+++ b/tests/performance/tools/statistic/cmd/statistic/main.go
@@ -14,16 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package config
+package main
 
-const (
-	// E2EVolumeMigrationNextStorageClassEnv is the env variable for the next storage class for volume migration tests.
-	E2EVolumeMigrationNextStorageClassEnv = "E2E_VOLUME_MIGRATION_NEXT_STORAGE_CLASS"
+import (
+	"statistic/pkg/command"
 )
 
-const (
-	E2EShortTimeoutEnv  = "E2E_SHORT_TIMEOUT"
-	E2EMiddleTimeoutEnv = "E2E_MIDDLE_TIMEOUT"
-	E2ELongTimeoutEnv   = "E2E_LONG_TIMEOUT"
-	E2EMaxTimeoutEnv    = "E2E_MAX_TIMEOUT"
-)
+func main() {
+	command.Execute()
+}
diff --git a/tests/performance/tools/statistic/go.mod b/tests/performance/tools/statistic/go.mod
new file mode 100644
index 0000000000..666f8caf48
--- /dev/null
+++ b/tests/performance/tools/statistic/go.mod
@@ -0,0 +1,65 @@
+module statistic
+
+go 1.24.6
+
+toolchain go1.24.7
+
+require (
+	github.com/deckhouse/virtualization/api v1.0.0
+	github.com/spf13/cobra v1.9.1
+	github.com/stretchr/testify v1.10.0
+	k8s.io/apimachinery v0.33.3
+	k8s.io/client-go v0.33.3
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 // indirect
+	github.com/openshift/custom-resource-status v1.1.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/spf13/pflag v1.0.7 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.3 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/time v0.9.0 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.33.3 // indirect
+	k8s.io/apiextensions-apiserver v0.33.3 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	kubevirt.io/api v1.3.1 // indirect
+	kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 // indirect
+	kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/tests/performance/tools/statistic/go.sum b/tests/performance/tools/statistic/go.sum new file mode 100644 index 0000000000..c93d2ea467 --- /dev/null +++ b/tests/performance/tools/statistic/go.sum @@ -0,0 +1,394 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckhouse/virtualization/api v1.0.0 h1:q4TvC74tpjk25k0byXJCYP4HjvRexBSeI0cC8QeCMTQ= +github.com/deckhouse/virtualization/api v1.0.0/go.mod h1:meTeGulR+xwnvt0pTGsoI14YhGe0lHUVyAfhZsoQyeQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 
h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 h1:t/CahSnpqY46sQR01SoS+Jt0jtjgmhgE6lFmRnO4q70= +github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183/go.mod h1:4VWG+W22wrB4HfBL88P40DxLEpSOaiBVxUnfalfJo9k= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
+k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 h1:gAXU86Fmbr/ktY17lkHwSjw5aoThQvhnstGGIYKlKYc= +k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= +kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= +kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 h1:IWo12+ei3jltSN5jQN1xjgakfvRSF3G3Rr4GXVOOy2I= +kubevirt.io/containerized-data-importer-api v1.57.0-alpha1/go.mod h1:Y/8ETgHS1GjO89bl682DPtQOYEU/1ctPFBz6Sjxm4DM= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/tests/performance/tools/statistic/internal/helpers/helper.go b/tests/performance/tools/statistic/internal/helpers/helper.go new file mode 100644 index 0000000000..6537e8d14c --- /dev/null +++ b/tests/performance/tools/statistic/internal/helpers/helper.go @@ -0,0 +1,118 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helpers
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/deckhouse/virtualization/api/client/kubeclient"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// ToSeconds converts a metav1.Duration to seconds; a nil duration counts as zero.
+func ToSeconds(duration *metav1.Duration) float64 {
+	if duration == nil {
+		return 0
+	}
+	return duration.Seconds()
+}
+
+// CreateKubeConfig loads the kubeconfig referenced by the KUBECONFIG
+// environment variable (falling back to $HOME/.kube/config) and returns
+// a virtualization API client.
+func CreateKubeConfig() kubeclient.Client {
+	kubeconfigEnv := os.Getenv("KUBECONFIG")
+	if kubeconfigEnv == "" {
+		fmt.Println("KUBECONFIG is not set, falling back to $HOME/.kube/config")
+		userHomeDir, err := os.UserHomeDir()
+		if err != nil {
+			fmt.Println("Failed to get user home directory:", err)
+			os.Exit(1)
+		}
+		kubeconfigEnv = userHomeDir + "/.kube/config"
+	}
+
+	// Split the KUBECONFIG environment variable (handles merged kubeconfig paths)
+	kubeconfigPaths := strings.Split(kubeconfigEnv, string(os.PathListSeparator))
+
+	fmt.Printf("Using KUBECONFIG paths: %v\n", kubeconfigPaths)
+
+	// Load the kubeconfig from the merged paths
+	loadingRules := &clientcmd.ClientConfigLoadingRules{
+		Precedence: kubeconfigPaths,
+	}
+	clientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		loadingRules,
+		&clientcmd.ConfigOverrides{},
+	).ClientConfig()
+	if err != nil {
+		fmt.Printf("Failed to load kubeconfig: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Create a Kubernetes client
+	client, err := kubeclient.GetClientFromRESTConfig(clientConfig)
+	if err != nil {
+		fmt.Printf("Failed to create Kubernetes client: %v\n", err)
+		os.Exit(1)
+	}
+	return client
+}
+
+// DurationToString renders a duration as H:M:S. This format is required for
+// processing output files in Excel.
+// Example: performance-7,0:3:1,0:0:11,0:3:34
+func DurationToString(d *metav1.Duration) string {
+	if d == nil {
+		return ""
+	}
+	return fmt.Sprintf("%d:%d:%d", int64(d.Duration.Hours()), int64(d.Duration.Minutes())%60, int64(d.Duration.Seconds())%60)
+}
+
+// SaveToFile writes content to ./<resType>-<ns>-<timestamp>.csv in the
+// current working directory.
+func SaveToFile(content string, resType string, ns string) {
+	filepath := fmt.Sprintf("/%s-%s-%s.csv", resType, ns, time.Now().Format("2006-01-02_15-04-05"))
+	execpath, err := os.Getwd()
+	if err != nil {
+		fmt.Printf("Error getting working directory: %v\n", err)
+		os.Exit(1)
+	}
+	file, err := os.Create(execpath + filepath)
+	if err != nil {
+		fmt.Printf("Error creating file: %v\n", err)
+		return
+	}
+	defer file.Close()
+
+	_, err = file.WriteString(content)
+	if err != nil {
+		fmt.Printf("Error writing to file: %v\n", err)
+		return
+	}
+}
diff --git a/tests/performance/tools/statistic/internal/helpers/helper_test.go b/tests/performance/tools/statistic/internal/helpers/helper_test.go
new file mode 100644
index 0000000000..f0936c36ac
--- /dev/null
+++ b/tests/performance/tools/statistic/internal/helpers/helper_test.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helpers
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestDurationToString(t *testing.T) {
+	tests := []struct {
+		name string
+		d    *metav1.Duration
+		want string
+	}{
+		{
+			name: "nil duration",
+			d:    nil,
+			want: "",
+		},
+		{
+			name: "zero duration",
+			d:    &metav1.Duration{},
+			want: "0:0:0",
+		},
+		{
+			name: "one hour",
+			d:    &metav1.Duration{Duration: time.Hour},
+			want: "1:0:0",
+		},
+		{
+			name: "24 hours",
+			d:    &metav1.Duration{Duration: time.Hour * 24},
+			want: "24:0:0",
+		},
+		{
+			name: "one minute",
+			d:    &metav1.Duration{Duration: time.Minute},
+			want: "0:1:0",
+		},
+		{
+			name: "one second",
+			d:    &metav1.Duration{Duration: time.Second},
+			want: "0:0:1",
+		},
+		{
+			name: "complex duration - 2 hours 30 minutes 45 seconds",
+			d:    &metav1.Duration{Duration: 2*time.Hour + 30*time.Minute + 45*time.Second},
+			want: "2:30:45",
+		},
+		{
+			name: "complex duration - 1 hour 59 minutes 59 seconds",
+			d:    &metav1.Duration{Duration: time.Hour + 59*time.Minute + 59*time.Second},
+			want: "1:59:59",
+		},
+		{
+			name: "complex duration - 0 hours 0 minutes 30 seconds",
+			d:    &metav1.Duration{Duration: 30 * time.Second},
+			want: "0:0:30",
+		},
+		{
+			name: "complex duration - 0 hours 5 minutes 0 seconds",
+			d:    &metav1.Duration{Duration: 5 * time.Minute},
+			want: "0:5:0",
+		},
+		{
+			name: "large duration - 100 hours 30 minutes 15 seconds",
+			d:    &metav1.Duration{Duration: 100*time.Hour + 30*time.Minute + 15*time.Second},
+			want: "100:30:15",
+		},
+		{
+			name: "sub-second precision - should truncate",
+			d:    &metav1.Duration{Duration: time.Hour + 30*time.Minute + 45*time.Second + 500*time.Millisecond},
+			want: "1:30:45",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := DurationToString(tt.d)
+			assert.Equal(t, tt.want, result)
+		})
+	}
+}
diff --git a/tests/performance/tools/statistic/internal/vd/get_vd_stat.go b/tests/performance/tools/statistic/internal/vd/get_vd_stat.go
new file mode 100644
index 0000000000..727129940c
--- /dev/null
+++ b/tests/performance/tools/statistic/internal/vd/get_vd_stat.go
@@ -0,0 +1,131 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vd
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"os"
+	"statistic/internal/helpers"
+	"time"
+
+	"github.com/deckhouse/virtualization/api/client/kubeclient"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type VD struct {
+	Name             string                                    `json:"name"`
+	VirtualDiskStats v1alpha2.VirtualDiskStatsCreationDuration `json:"creationDuration,omitempty"`
+}
+
+type VDs struct {
+	Items []VD `json:"items"`
+}
+
+// SaveToCSV writes the per-disk creation durations to
+// ./all-vd-<ns>-<timestamp>.csv in the current working directory.
+func (vds *VDs) SaveToCSV(ns string) {
+	filepath := fmt.Sprintf("/all-%s-%s-%s.csv", "vd", ns, time.Now().Format("2006-01-02_15-04-05"))
+	execpath, err := os.Getwd()
+	if err != nil {
+		fmt.Printf("Error getting working directory: %v\n", err)
+		os.Exit(1)
+	}
+
+	file, err := os.Create(execpath + filepath)
+	if err != nil {
+		fmt.Printf("Error creating file: %v\n", err)
+		os.Exit(1)
+	}
+	defer file.Close()
+
+	writer := csv.NewWriter(file)
+	defer writer.Flush()
+
+	header := []string{"Name", "WaitingForDependencies", "DVCRProvisioning", "TotalProvisioning"}
+	if err := writer.Write(header); err != nil {
+		fmt.Printf("Error writing header to CSV file: %v\n", err)
+		os.Exit(1)
+	}
+
+	for _, res := range vds.Items {
+		data := []string{
+			res.Name,
+			helpers.DurationToString(res.VirtualDiskStats.WaitingForDependencies),
+			helpers.DurationToString(res.VirtualDiskStats.DVCRProvisioning),
+			helpers.DurationToString(res.VirtualDiskStats.TotalProvisioning),
+		}
+		if err := writer.Write(data); err != nil {
+			fmt.Printf("Error writing data to CSV file: %v\n", err)
+			os.Exit(1)
+		}
+	}
+	fmt.Println("Data of VD saved successfully to csv", file.Name())
+}
+
+// GetStatistic collects creation-duration statistics for the Ready
+// VirtualDisks in the namespace, prints the averages, and saves them to CSV.
+func GetStatistic(client kubeclient.Client, namespace string) {
+	var (
+		vds                       VDs
+		sumWaitingForDependencies float64
+		sumDVCRProvisioning       float64
+		sumTotalProvisioning      float64
+	)
+
+	// TODO: paginate the List call with Limit & Continue for large result sets.
+	vdList, err := client.VirtualDisks(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		fmt.Printf("Failed to list VirtualDisks: %v\n", err)
+		os.Exit(1)
+	}
+
+	totalItems := len(vdList.Items)
+
+	for _, vd := range vdList.Items {
+		if string(vd.Status.Phase) == "Ready" {
+			vds.Items = append(vds.Items, VD{
+				Name: vd.Name,
+				VirtualDiskStats: v1alpha2.VirtualDiskStatsCreationDuration{
+					WaitingForDependencies: vd.Status.Stats.CreationDuration.WaitingForDependencies,
+					DVCRProvisioning:       vd.Status.Stats.CreationDuration.DVCRProvisioning,
+					TotalProvisioning:      vd.Status.Stats.CreationDuration.TotalProvisioning,
+				},
+			})
+
+			sumWaitingForDependencies += helpers.ToSeconds(vd.Status.Stats.CreationDuration.WaitingForDependencies)
+			sumDVCRProvisioning += helpers.ToSeconds(vd.Status.Stats.CreationDuration.DVCRProvisioning)
+			sumTotalProvisioning += helpers.ToSeconds(vd.Status.Stats.CreationDuration.TotalProvisioning)
+		}
+	}
+
+	// Average over the Ready disks that were actually summed, guarding
+	// against an empty namespace.
+	readyItems := len(vds.Items)
+	if readyItems == 0 {
+		fmt.Println("No Ready VirtualDisks found in namespace", namespace)
+		return
+	}
+
+	avgWaitingForDependencies := sumWaitingForDependencies / float64(readyItems)
+	avgDVCRProvisioning := sumDVCRProvisioning / float64(readyItems)
+	avgTotalProvisioning := sumTotalProvisioning / float64(readyItems)
+
+	saveData := fmt.Sprintf(
+		"Total VDs count: %d (Ready: %d)\n"+
+			"Average WaitingForDependencies in seconds: %.2f\n"+
+			"Average DVCRProvisioning in seconds: %.2f\n"+
+			"Average TotalProvisioning in seconds: %.2f\n",
+		totalItems, readyItems, avgWaitingForDependencies, avgDVCRProvisioning, avgTotalProvisioning,
+	)
+
+	helpers.SaveToFile(saveData, "vd", namespace)
+
+	fmt.Println(saveData)
+
+	vds.SaveToCSV(namespace)
+}
diff --git a/tests/performance/tools/statistic/internal/vm/get_vm_stat.go b/tests/performance/tools/statistic/internal/vm/get_vm_stat.go
new file mode 100644
index 0000000000..891194cb0c
--- /dev/null
+++ b/tests/performance/tools/statistic/internal/vm/get_vm_stat.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vm
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"os"
+	"statistic/internal/helpers"
+	"time"
+
+	"github.com/deckhouse/virtualization/api/client/kubeclient"
+	"github.com/deckhouse/virtualization/api/core/v1alpha2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type VM struct {
+	Name                             string                                    `json:"name"`
+	VirtualMachineLaunchTimeDuration v1alpha2.VirtualMachineLaunchTimeDuration `json:"launchTimeDuration"`
+	VirtualMachineStopTime           time.Duration                             `json:"stopTime,omitempty"`
+}
+
+type VMs struct {
+	Items []VM `json:"items"`
+}
+
+// SaveToCSV writes the per-VM launch durations to
+// ./all-vm-<ns>-<timestamp>.csv in the current working directory.
+func (vms *VMs) SaveToCSV(ns string) {
+	filepath := fmt.Sprintf("/all-%s-%s-%s.csv", "vm", ns, time.Now().Format("2006-01-02_15-04-05"))
+	execpath, err := os.Getwd()
+	if err != nil {
+		fmt.Printf("Error getting working directory: %v\n", err)
+		os.Exit(1)
+	}
+
+	file, err := os.Create(execpath + filepath)
+	if err != nil {
+		fmt.Printf("Error creating file: %v\n", err)
+		os.Exit(1)
+	}
+	defer file.Close()
+
+	writer := csv.NewWriter(file)
+	defer writer.Flush()
+
+	header := []string{"Name", "WaitingForDependencies", "VirtualMachineStarting", "GuestOSAgentStarting"}
+	if err := writer.Write(header); err != nil {
+		fmt.Printf("Error writing header to CSV file: %v\n", err)
+		os.Exit(1)
+	}
+
+	for _, res := range vms.Items {
+		data := []string{
+			res.Name,
+			helpers.DurationToString(res.VirtualMachineLaunchTimeDuration.WaitingForDependencies),
+			helpers.DurationToString(res.VirtualMachineLaunchTimeDuration.VirtualMachineStarting),
+			helpers.DurationToString(res.VirtualMachineLaunchTimeDuration.GuestOSAgentStarting),
+		}
+		if err := writer.Write(data); err != nil {
+			fmt.Printf("Error writing data to CSV file: %v\n", err)
+			os.Exit(1)
+		}
+	}
+	fmt.Println("Data of VM saved successfully to csv", file.Name())
+}
+
+// GetStatistic collects launch-time statistics for the Running
+// VirtualMachines in the namespace, prints the averages, and saves them to CSV.
+func GetStatistic(client kubeclient.Client, namespace string) {
+	vmList, err := client.VirtualMachines(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		fmt.Printf("Failed to list VirtualMachines: %v\n", err)
+		os.Exit(1)
+	}
+
+	var (
+		vms                       VMs
+		sumWaitingForDependencies float64
+		sumVirtualMachineStarting float64
+		sumGuestOSAgentStarting   float64
+	)
+
+	totalItems := len(vmList.Items)
+
+	for _, vm := range vmList.Items {
+		if string(vm.Status.Phase) == "Running" {
+			vms.Items = append(vms.Items, VM{
+				Name: vm.Name,
+				VirtualMachineLaunchTimeDuration: v1alpha2.VirtualMachineLaunchTimeDuration{
+					WaitingForDependencies: vm.Status.Stats.LaunchTimeDuration.WaitingForDependencies,
+					VirtualMachineStarting: vm.Status.Stats.LaunchTimeDuration.VirtualMachineStarting,
+					GuestOSAgentStarting:   vm.Status.Stats.LaunchTimeDuration.GuestOSAgentStarting,
+				},
+			})
+
+			sumWaitingForDependencies += helpers.ToSeconds(vm.Status.Stats.LaunchTimeDuration.WaitingForDependencies)
+			sumVirtualMachineStarting += helpers.ToSeconds(vm.Status.Stats.LaunchTimeDuration.VirtualMachineStarting)
+			sumGuestOSAgentStarting += helpers.ToSeconds(vm.Status.Stats.LaunchTimeDuration.GuestOSAgentStarting)
+		}
+	}
+
+	// Average over the Running VMs that were actually summed, guarding
+	// against an empty namespace.
+	runningItems := len(vms.Items)
+	if runningItems == 0 {
+		fmt.Println("No Running VirtualMachines found in namespace", namespace)
+		return
+	}
+
+	avgWaitingForDependencies := sumWaitingForDependencies / float64(runningItems)
+	avgVirtualMachineStarting := sumVirtualMachineStarting / float64(runningItems)
+	avgGuestOSAgentStarting := sumGuestOSAgentStarting / float64(runningItems)
+
+	saveData := fmt.Sprintf(
+		"Total VMs count: %d (Running: %d)\n"+
+			"Average WaitingForDependencies in seconds: %.2f\n"+
+			"Average VirtualMachineStarting in seconds: %.2f\n"+
+			"Average GuestOSAgentStarting in seconds: %.2f\n",
+		totalItems, runningItems, avgWaitingForDependencies, avgVirtualMachineStarting, avgGuestOSAgentStarting,
+	)
+
+	helpers.SaveToFile(saveData, "vm", namespace)
+
+	fmt.Println(saveData)
+
+	vms.SaveToCSV(namespace)
+}
+
+// getStoppingAndStoppedDuration returns the time a VM spent between its
+// Stopping and Stopped phase transitions.
+func getStoppingAndStoppedDuration(vm v1alpha2.VirtualMachine) time.Duration {
+	var (
+		stopping metav1.Time
+		stopped  metav1.Time
+	)
+	for _, transition := range vm.Status.Stats.PhasesTransitions {
+		if string(transition.Phase) == "Stopping" {
+			stopping = transition.Timestamp
+		}
+		if string(transition.Phase) == "Stopped" {
+			stopped = transition.Timestamp
+		}
+	}
+	return stopped.Time.Sub(stopping.Time) // `Time` is from metav1.Time
+}
+
+// GetStatStop reports how long each Stopped VM took to shut down.
+func GetStatStop(client kubeclient.Client, namespace string) {
+	var vms VMs
+
+	vmList, err := client.VirtualMachines(namespace).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		fmt.Printf("Failed to list VirtualMachines: %v\n", err)
+		os.Exit(1)
+	}
+	fmt.Println("Total VMs:", len(vmList.Items))
+
+	for _, vm := range vmList.Items {
+		if string(vm.Status.Phase) == "Stopped" {
+			vms.Items = append(vms.Items, VM{
+				Name:                   vm.Name,
+				VirtualMachineStopTime: getStoppingAndStoppedDuration(vm),
+			})
+		}
+	}
+
+	// Report the collected stop durations.
+	for _, res := range vms.Items {
+		fmt.Printf("%s: stop duration %s\n", res.Name, res.VirtualMachineStopTime)
+	}
+}
diff --git a/tests/performance/tools/statistic/pkg/command/statistic.go b/tests/performance/tools/statistic/pkg/command/statistic.go
new file mode 100644
index 0000000000..b0f57dc025
--- /dev/null
+++ b/tests/performance/tools/statistic/pkg/command/statistic.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2025 Flant JSC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+	"fmt"
+	"os"
+
+	"statistic/internal/helpers"
+	"statistic/internal/vd"
+	"statistic/internal/vm"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	namespace      string
+	virtualmachine bool
+	virtualdisk    bool
+)
+
+var rootCmd = &cobra.Command{
+	Use:   "statistic",
+	Short: "get statistics for VMs and VDs in a namespace",
+	Long: `Collect statistics for VirtualMachines and VirtualDisks in a namespace and save them to CSV files. Default namespace: 'perf'.
+
+Example output for avg statistics:
+
+Total VMs count: 30
+Average WaitingForDependencies in seconds: 107.90
+Average VirtualMachineStarting in seconds: 14.13
+Average GuestOSAgentStarting in seconds: 145.43
+
+CSV files are saved to the current directory, e.g. ./all-{vm,vd}-perf-2025-09-23_12-48-51.csv
+`,
+	Args: cobra.ArbitraryArgs,
+	Run:  getStatistic,
+}
+
+func init() {
+	rootCmd.Flags().StringVarP(&namespace, "namespace", "n", "perf", "namespace to look for VMs and VDs in")
+	rootCmd.Flags().BoolVarP(&virtualmachine, "virtualmachine", "v", false, "get virtualmachine statistics")
+	rootCmd.Flags().BoolVarP(&virtualdisk, "virtualdisk", "d", false, "get virtualdisk statistics")
+}
+
+func getStatistic(cmd *cobra.Command, args []string) {
+	client := helpers.CreateKubeConfig()
+
+	// Default is to get all stats.
+	getAll := !virtualmachine && !virtualdisk
+
+	if getAll || virtualmachine {
+		vm.GetStatistic(client, namespace)
+	}
+
+	if getAll || virtualdisk {
+		vd.GetStatistic(client, namespace)
+	}
+}
+
+func Execute() {
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/tests/performance/status-access-vms/ansible/Taskfile.ansible.yaml b/tests/performance/tools/status-access-vms/ansible/Taskfile.ansible.yaml
similarity index 100%
rename from tests/performance/status-access-vms/ansible/Taskfile.ansible.yaml
rename to tests/performance/tools/status-access-vms/ansible/Taskfile.ansible.yaml
diff --git a/tests/performance/status-access-vms/ansible/ansible.cfg b/tests/performance/tools/status-access-vms/ansible/ansible.cfg
similarity index 100%
rename from tests/performance/status-access-vms/ansible/ansible.cfg
rename to tests/performance/tools/status-access-vms/ansible/ansible.cfg
diff --git a/tests/performance/status-access-vms/ansible/playbook.yaml b/tests/performance/tools/status-access-vms/ansible/playbook.yaml
similarity index 100%
rename from tests/performance/status-access-vms/ansible/playbook.yaml
rename to tests/performance/tools/status-access-vms/ansible/playbook.yaml
diff --git a/tests/performance/status-access-vms/ansible/run.sh b/tests/performance/tools/status-access-vms/ansible/run.sh
similarity index 100%
rename from tests/performance/status-access-vms/ansible/run.sh
rename to tests/performance/tools/status-access-vms/ansible/run.sh
diff --git a/tests/performance/status-access-vms/ansible/vmops/vmops_restart.sh b/tests/performance/tools/status-access-vms/ansible/vmops/vmops_restart.sh
similarity index 100%
rename from tests/performance/status-access-vms/ansible/vmops/vmops_restart.sh
rename to tests/performance/tools/status-access-vms/ansible/vmops/vmops_restart.sh
diff --git a/tests/performance/status-access-vms/tank/Taskfile.tank.yaml b/tests/performance/tools/status-access-vms/tank/Taskfile.tank.yaml
similarity index 100%
rename from tests/performance/status-access-vms/tank/Taskfile.tank.yaml
rename to tests/performance/tools/status-access-vms/tank/Taskfile.tank.yaml
diff --git a/tests/performance/status-access-vms/tank/load.yaml b/tests/performance/tools/status-access-vms/tank/load.yaml
similarity index 100%
rename from tests/performance/status-access-vms/tank/load.yaml
rename to tests/performance/tools/status-access-vms/tank/load.yaml
diff --git a/tests/performance/status-access-vms/tank/run_tank.sh b/tests/performance/tools/status-access-vms/tank/run_tank.sh
similarity index 100%
rename from tests/performance/status-access-vms/tank/run_tank.sh
rename to tests/performance/tools/status-access-vms/tank/run_tank.sh
diff --git a/tests/performance/values.yaml b/tests/performance/values.yaml
index 4a0bb9ac17..26cc5b1227 100644
--- a/tests/performance/values.yaml
+++ b/tests/performance/values.yaml
@@ -1,13 +1,37 @@
-count: 3
-spec:
-  cpu:
-    cores: 1
-    coreFraction: 10%
-  memory:
-    size: 256Mi
-
-resources: "all"
-resourcesPrefix: "performance"
-diskSize: 300Mi
-storageClassName: ""
-imageURL: "https://0e773854-6b4e-4e76-a65b-d9d81675451a.selstorage.ru/alpine/alpine-v3-20.qcow2"
+count: 1
+nginx: false
+resources:
+  storageClassName: "ceph-pool-r2-csi-rbd"
+  default: all # all, vms, vds, vi
+  prefix: "performance"
+  virtualMachine:
+    spec:
+      template:
+        # VM startup policy:
+        # AlwaysOn - after creation, the VM is always running, even if it is shut down from within the guest OS.
+        # AlwaysOff - after creation, the VM is always off.
+        # Manual - after creation, the VM is off; its state (on/off) is controlled via subresources or from within the guest OS.
+        # AlwaysOnUnlessStoppedManually - after creation, the VM is always running, even if it is shut down from within the guest OS; it can be shut down using the corresponding subresource.
+        runPolicy: AlwaysOnUnlessStoppedManually
+        virtualMachineClassName: generic
+        # Available values: Dynamic, Manual, or Automatic.
+        # Dynamic - alternate between Manual and Automatic during VM creation to get a 50/50 distribution.
+        restartApprovalMode: Dynamic
+        cpu:
+          cores: 1
+          coreFraction: 10%
+        memory:
+          size: 256Mi
+  virtualDisk:
+    spec:
+      template:
+        type: virtualDisk # virtualImage or virtualDisk
+        size: 300Mi
+  virtualImage:
+    spec:
+      template:
+        # Virtual image type: virtualImage or persistentVolumeClaim.
+        type: virtualImage
+        image:
+          name: alpine
+          url: "https://0e773854-6b4e-4e76-a65b-d9d81675451a.selstorage.ru/alpine/alpine-v3-20.qcow2"