Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package kubevirt for openSUSE:Factory checked in at 2023-08-17 19:43:52
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubevirt (Old)
 and      /work/SRC/openSUSE:Factory/.kubevirt.new.1766 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubevirt"

Thu Aug 17 19:43:52 2023 rev:64 rq:1104344 version:1.0.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubevirt/kubevirt.changes        2023-08-08 15:55:40.561271733 +0200
+++ /work/SRC/openSUSE:Factory/.kubevirt.new.1766/kubevirt.changes      2023-08-17 19:44:07.274853986 +0200
@@ -1,0 +2,12 @@
+Thu Aug 17 09:10:31 UTC 2023 - Vasily Ulyanov <vasily.ulya...@suse.com>
+
+- Bump client-go (fix possible panic in discovery)
+  0011-Fix-Aggregated-Discovery.patch
+- Wait for new hotplug attachment pod to be ready
+  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
+- Adapt the storage tests to the new populators flow
+  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
+- Create export VM datavolumes compatible with populators
+  0014-Export-create-populator-compatible-datavolumes-from-.patch
+
+-------------------------------------------------------------------

New:
----
  0011-Fix-Aggregated-Discovery.patch
  0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
  0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
  0014-Export-create-populator-compatible-datavolumes-from-.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubevirt.spec ++++++
--- /var/tmp/diff_new_pack.mv42LT/_old  2023-08-17 19:44:08.394856081 +0200
+++ /var/tmp/diff_new_pack.mv42LT/_new  2023-08-17 19:44:08.406856104 +0200
@@ -38,6 +38,10 @@
 Patch8:         0008-fix-ticker-leak.patch
 Patch9:         0009-tests-Run-helper-pod-as-qemu-107-user.patch
 Patch10:        0010-Fix-PR-leftover-mount-and-perms.patch
+Patch11:        0011-Fix-Aggregated-Discovery.patch
+Patch12:        0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch
+Patch13:        0013-Adapt-e2e-tests-to-CDI-1.57.0.patch
+Patch14:        0014-Export-create-populator-compatible-datavolumes-from-.patch
 BuildRequires:  glibc-devel-static
 BuildRequires:  golang-packaging
 BuildRequires:  pkgconfig

++++++ 0011-Fix-Aggregated-Discovery.patch ++++++
++++ 752 lines (skipped)

++++++ 0012-Wait-for-new-hotplug-attachment-pod-to-be-ready.patch ++++++
From ee3463ae990a1776908483b182ad79c79637cd5d Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Fri, 11 Aug 2023 07:56:29 -0500
Subject: [PATCH 1/4] Wait for new attachment pod

Before deleting the old attachment pod, wait for the new attachment
pod to be ready, so that from Kubernetes' perspective there is always
a pod using the volume and it never detaches the volume from the node.

Fixed an issue where, when adding or removing a volume, the existing
volumes would still carry the UID of the old attachment pod in the
VMI status, which caused errors in the virt-handler logs about not
being able to find the device or image.

Fixed an issue where cleanup would attempt to remove a volume that
was already gone, causing errors in the virt-handler log.

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
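
For orientation, a minimal Go sketch of the flow described above, using
simplified stand-ins rather than the real KubeVirt types and helpers
(splitAttachmentPods, reconcileAttachmentPods, matches, createPod and
deletePod are hypothetical names; the actual logic lives in
getActiveAndOldAttachmentPods and handleHotplugVolumes in the diff below):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
)

// splitAttachmentPods mirrors getActiveAndOldAttachmentPods: the pod whose
// volumes match the ready hotplug volumes is "current", everything else is
// "old". matches stands in for podVolumesMatchesReadyVolumes.
func splitAttachmentPods(pods []*corev1.Pod, matches func(*corev1.Pod) bool) (current *corev1.Pod, old []*corev1.Pod) {
	for _, p := range pods {
		if matches(p) {
			current = p
		} else {
			old = append(old, p)
		}
	}
	return current, old
}

// reconcileAttachmentPods shows the ordering guarantee this commit adds: old
// attachment pods are deleted only after the new pod is Running (or once no
// ready hotplug volumes remain), so from the node's point of view there is
// always a pod using the volume and it is never detached prematurely.
func reconcileAttachmentPods(readyVolumes int, current *corev1.Pod, old []*corev1.Pod,
	createPod func() error, deletePod func(*corev1.Pod) error) error {
	if current == nil && readyVolumes > 0 {
		// Ready volumes changed: create a new attachment pod holding all of them.
		if err := createPod(); err != nil {
			return err
		}
	}
	if readyVolumes == 0 || (current != nil && current.Status.Phase == corev1.PodRunning) {
		for _, p := range old {
			if err := deletePod(p); err != nil {
				return err
			}
		}
	}
	return nil
}

The key point is the second condition: old pods are removed only once the
replacement pod is Running, or once no hotplug volumes remain at all.
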
 pkg/virt-controller/watch/vmi.go       | 60 +++++++++++++++-----------
 pkg/virt-controller/watch/vmi_test.go  | 52 ++++++++++++++++++++++
 pkg/virt-handler/hotplug-disk/mount.go |  7 ++-
 tests/storage/hotplug.go               | 10 +++++
 4 files changed, 103 insertions(+), 26 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 9afaee4f0..99af8b8cb 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -516,11 +516,7 @@ func (c *VMIController) hasOwnerVM(vmi 
*virtv1.VirtualMachineInstance) bool {
        }
 
        ownerVM := obj.(*virtv1.VirtualMachine)
-       if controllerRef.UID == ownerVM.UID {
-               return true
-       }
-
-       return false
+       return controllerRef.UID == ownerVM.UID
 }
 
 func (c *VMIController) updateStatus(vmi *virtv1.VirtualMachineInstance, pod 
*k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr syncError) error {
@@ -1816,15 +1812,29 @@ func (c *VMIController) 
waitForFirstConsumerTemporaryPods(vmi *virtv1.VirtualMac
 }
 
 func (c *VMIController) needsHandleHotplug(hotplugVolumes []*virtv1.Volume, 
hotplugAttachmentPods []*k8sv1.Pod) bool {
+       if len(hotplugAttachmentPods) > 1 {
+               return true
+       }
        // Determine if the ready volumes have changed compared to the current 
pod
        for _, attachmentPod := range hotplugAttachmentPods {
                if c.podVolumesMatchesReadyVolumes(attachmentPod, 
hotplugVolumes) {
-                       log.DefaultLogger().Infof("Don't need to handle as we 
have a matching attachment pod")
                        return false
                }
-               return true
        }
-       return len(hotplugVolumes) > 0
+       return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
+}
+
+func (c *VMIController) getActiveAndOldAttachmentPods(readyHotplugVolumes 
[]*virtv1.Volume, hotplugAttachmentPods []*k8sv1.Pod) (*k8sv1.Pod, 
[]*k8sv1.Pod) {
+       var currentPod *k8sv1.Pod
+       oldPods := make([]*k8sv1.Pod, 0)
+       for _, attachmentPod := range hotplugAttachmentPods {
+               if !c.podVolumesMatchesReadyVolumes(attachmentPod, 
readyHotplugVolumes) {
+                       oldPods = append(oldPods, attachmentPod)
+               } else {
+                       currentPod = attachmentPod
+               }
+       }
+       return currentPod, oldPods
 }
 
 func (c *VMIController) handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, 
hotplugAttachmentPods []*k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, 
virtLauncherPod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) syncError {
@@ -1855,29 +1865,25 @@ func (c *VMIController) 
handleHotplugVolumes(hotplugVolumes []*virtv1.Volume, ho
                readyHotplugVolumes = append(readyHotplugVolumes, volume)
        }
        // Determine if the ready volumes have changed compared to the current 
pod
-       currentPod := make([]*k8sv1.Pod, 0)
-       oldPods := make([]*k8sv1.Pod, 0)
-       for _, attachmentPod := range hotplugAttachmentPods {
-               if !c.podVolumesMatchesReadyVolumes(attachmentPod, 
readyHotplugVolumes) {
-                       oldPods = append(oldPods, attachmentPod)
-               } else {
-                       currentPod = append(currentPod, attachmentPod)
-               }
-       }
+       currentPod, oldPods := 
c.getActiveAndOldAttachmentPods(readyHotplugVolumes, hotplugAttachmentPods)
 
-       if len(currentPod) == 0 && len(readyHotplugVolumes) > 0 {
+       if currentPod == nil && len(readyHotplugVolumes) > 0 {
                // ready volumes have changed
                // Create new attachment pod that holds all the ready volumes
                if err := c.createAttachmentPod(vmi, virtLauncherPod, 
readyHotplugVolumes); err != nil {
                        return err
                }
        }
-       // Delete old attachment pod
-       for _, attachmentPod := range oldPods {
-               if err := c.deleteAttachmentPodForVolume(vmi, attachmentPod); 
err != nil {
-                       return &syncErrorImpl{fmt.Errorf("Error deleting 
attachment pod %v", err), FailedDeletePodReason}
+
+       if len(readyHotplugVolumes) == 0 || (currentPod != nil && 
currentPod.Status.Phase == k8sv1.PodRunning) {
+               // Delete old attachment pod
+               for _, attachmentPod := range oldPods {
+                       if err := c.deleteAttachmentPodForVolume(vmi, 
attachmentPod); err != nil {
+                               return &syncErrorImpl{fmt.Errorf("Error 
deleting attachment pod %v", err), FailedDeletePodReason}
+                       }
                }
        }
+
        return nil
 }
 
@@ -2121,6 +2127,9 @@ func (c *VMIController) updateVolumeStatus(vmi 
*virtv1.VirtualMachineInstance, v
        if err != nil {
                return err
        }
+
+       attachmentPod, _ := c.getActiveAndOldAttachmentPods(hotplugVolumes, 
attachmentPods)
+
        newStatus := make([]virtv1.VolumeStatus, 0)
        for i, volume := range vmi.Spec.Volumes {
                status := virtv1.VolumeStatus{}
@@ -2142,7 +2151,6 @@ func (c *VMIController) updateVolumeStatus(vmi 
*virtv1.VirtualMachineInstance, v
                                        ClaimName: volume.Name,
                                }
                        }
-                       attachmentPod := 
c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
                        if attachmentPod == nil {
                                if !c.volumeReady(status.Phase) {
                                        status.HotplugVolume.AttachPodUID = ""
@@ -2156,6 +2164,9 @@ func (c *VMIController) updateVolumeStatus(vmi 
*virtv1.VirtualMachineInstance, v
                                status.HotplugVolume.AttachPodName = 
attachmentPod.Name
                                if len(attachmentPod.Status.ContainerStatuses) 
== 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
                                        status.HotplugVolume.AttachPodUID = 
attachmentPod.UID
+                               } else {
+                                       // Remove UID of old pod if a new one 
is available, but not yet ready
+                                       status.HotplugVolume.AttachPodUID = ""
                                }
                                if c.canMoveToAttachedPhase(status.Phase) {
                                        status.Phase = 
virtv1.HotplugVolumeAttachedToNode
@@ -2244,8 +2255,7 @@ func (c *VMIController) getFilesystemOverhead(pvc 
*k8sv1.PersistentVolumeClaim)
 }
 
 func (c *VMIController) canMoveToAttachedPhase(currentPhase 
virtv1.VolumePhase) bool {
-       return (currentPhase == "" || currentPhase == virtv1.VolumeBound || 
currentPhase == virtv1.VolumePending ||
-               currentPhase == virtv1.HotplugVolumeAttachedToNode)
+       return (currentPhase == "" || currentPhase == virtv1.VolumeBound || 
currentPhase == virtv1.VolumePending)
 }
 
 func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, 
attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-controller/watch/vmi_test.go 
b/pkg/virt-controller/watch/vmi_test.go
index a9b173232..932326432 100644
--- a/pkg/virt-controller/watch/vmi_test.go
+++ b/pkg/virt-controller/watch/vmi_test.go
@@ -2700,6 +2700,58 @@ var _ = Describe("VirtualMachineInstance watcher", 
func() {
                                []string{SuccessfulCreatePodReason}),
                )
 
+               DescribeTable("Should properly calculate if it needs to handle 
hotplug volumes", func(hotplugVolumes []*virtv1.Volume, attachmentPods 
[]*k8sv1.Pod, match gomegaTypes.GomegaMatcher) {
+                       Expect(controller.needsHandleHotplug(hotplugVolumes, 
attachmentPods)).To(match)
+               },
+                       Entry("nil volumes, nil attachmentPods", nil, nil, 
BeFalse()),
+                       Entry("empty volumes, empty attachmentPods", 
[]*virtv1.Volume{}, []*k8sv1.Pod{}, BeFalse()),
+                       Entry("single volume, empty attachmentPods", 
[]*virtv1.Volume{
+                               {
+                                       Name: "test",
+                               },
+                       }, []*k8sv1.Pod{}, BeTrue()),
+                       Entry("no volume, single attachmentPod", 
[]*virtv1.Volume{}, makePods(0), BeTrue()),
+                       Entry("matching volume, single attachmentPod", 
[]*virtv1.Volume{
+                               {
+                                       Name: "volume0",
+                               },
+                       }, makePods(0), BeFalse()),
+                       Entry("mismatched volume, single attachmentPod", 
[]*virtv1.Volume{
+                               {
+                                       Name: "invalid",
+                               },
+                       }, makePods(0), BeTrue()),
+                       Entry("matching volume, multiple attachmentPods", 
[]*virtv1.Volume{
+                               {
+                                       Name: "volume0",
+                               },
+                       }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, 
BeTrue()),
+               )
+
+               DescribeTable("Should find active and old pods", 
func(hotplugVolumes []*virtv1.Volume, attachmentPods []*k8sv1.Pod, 
expectedActive *k8sv1.Pod, expectedOld []*k8sv1.Pod) {
+                       active, old := 
controller.getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
+                       Expect(active).To(Equal(expectedActive))
+                       Expect(old).To(ContainElements(expectedOld))
+               },
+                       Entry("nil volumes, nil attachmentPods", nil, nil, nil, 
nil),
+                       Entry("empty volumes, empty attachmentPods", 
[]*virtv1.Volume{}, []*k8sv1.Pod{}, nil, []*k8sv1.Pod{}),
+                       Entry("matching volume, single attachmentPod", 
[]*virtv1.Volume{
+                               {
+                                       Name: "volume0",
+                               },
+                       }, makePods(0), makePods(0)[0], []*k8sv1.Pod{}),
+                       Entry("matching volume, multiple attachmentPods, first 
pod matches", []*virtv1.Volume{
+                               {
+                                       Name: "volume0",
+                               },
+                       }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, 
makePods(0)[0], makePods(1)),
+                       Entry("matching volume, multiple attachmentPods, second 
pod matches", []*virtv1.Volume{
+                               {
+                                       Name: "volume1",
+                               },
+                       }, []*k8sv1.Pod{makePods(0)[0], makePods(1)[0]}, 
makePods(1)[0], makePods(0)),
+               )
+
                It("Should get default filesystem overhead if there are 
multiple CDI instances", func() {
                        cdi := cdiv1.CDI{
                                ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/virt-handler/hotplug-disk/mount.go 
b/pkg/virt-handler/hotplug-disk/mount.go
index 942301815..43504d48d 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -508,9 +508,10 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, 
allow bool, manager cg
 func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, 
deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
        if _, err := safepath.JoinNoFollow(basePath, deviceName); 
errors.Is(err, os.ErrNotExist) {
                return mknodCommand(basePath, deviceName, dev, 
blockDevicePermissions)
-       } else {
+       } else if err != nil {
                return err
        }
+       return nil
 }
 
 func (m *volumeMounter) mountFileSystemHotplugVolume(vmi 
*v1.VirtualMachineInstance, volume string, sourceUID types.UID, record 
*vmiMountTargetRecord, mountDirectory bool) error {
@@ -667,6 +668,10 @@ func (m *volumeMounter) Unmount(vmi 
*v1.VirtualMachineInstance) error {
                        var err error
                        if m.isBlockVolume(&vmi.Status, volumeStatus.Name) {
                                path, err = safepath.JoinNoFollow(basePath, 
volumeStatus.Name)
+                               if errors.Is(err, os.ErrNotExist) {
+                                       // already unmounted or never mounted
+                                       continue
+                               }
                        } else if m.isDirectoryMounted(&vmi.Status, 
volumeStatus.Name) {
                                path, err = 
m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID,
 volumeStatus.Name, false)
                                if os.IsExist(err) {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index a85976484..ba9c69100 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -724,6 +724,16 @@ var _ = SIGDescribe("Hotplug", func() {
                                for i := range testVolumes {
                                        verifyVolumeNolongerAccessible(vmi, 
targets[i])
                                }
+                               By("Verifying there are no sync errors")
+                               events, err := 
virtClient.CoreV1().Events(vmi.Namespace).List(context.Background(), 
metav1.ListOptions{})
+                               Expect(err).ToNot(HaveOccurred())
+                               for _, event := range events.Items {
+                                       if event.InvolvedObject.Kind == 
"VirtualMachineInstance" && event.InvolvedObject.UID == vmi.UID {
+                                               if event.Reason == 
string(v1.SyncFailed) {
+                                                       Fail(fmt.Sprintf("Found 
sync failed event %v", event))
+                                               }
+                                       }
+                               }
                        },
                                Entry("with VMs", addDVVolumeVM, 
removeVolumeVM, corev1.PersistentVolumeFilesystem, false),
                                Entry("with VMIs", addDVVolumeVMI, 
removeVolumeVMI, corev1.PersistentVolumeFilesystem, true),
-- 
2.41.0


From b02ab03f39e7e888c27949d24c0e9b38963d9b6c Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Fri, 11 Aug 2023 15:00:59 -0500
Subject: [PATCH 2/4] Don't generate SyncFailed events caused by a race condition.

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
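
A minimal sketch of the error handling this change adds, assuming a
hypothetical mountVolume callback in place of mountBlockHotplugVolume /
mountFileSystemHotplugVolume: a volume can disappear between the moment the
mount is requested and the moment virt-handler performs it, and that race
should not surface as a sync failure.

package sketch

import (
	"errors"
	"fmt"
	"os"
)

// mountToleratingRaces ignores "not exist" errors from the mount helper,
// treating an already-unplugged volume as benign instead of failing the sync.
func mountToleratingRaces(volumeName string, mountVolume func(string) error) error {
	if err := mountVolume(volumeName); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil // volume already gone; the next sync will reconcile
		}
		return fmt.Errorf("failed to mount hotplug volume %s: %v", volumeName, err)
	}
	return nil
}
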
 pkg/virt-handler/hotplug-disk/mount.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pkg/virt-handler/hotplug-disk/mount.go 
b/pkg/virt-handler/hotplug-disk/mount.go
index 43504d48d..9a5d24747 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -313,12 +313,16 @@ func (m *volumeMounter) mountHotplugVolume(vmi 
*v1.VirtualMachineInstance, volum
                if m.isBlockVolume(&vmi.Status, volumeName) {
                        logger.V(4).Infof("Mounting block volume: %s", 
volumeName)
                        if err := m.mountBlockHotplugVolume(vmi, volumeName, 
sourceUID, record); err != nil {
-                               return fmt.Errorf("failed to mount block 
hotplug volume %s: %v", volumeName, err)
+                               if !errors.Is(err, os.ErrNotExist) {
+                                       return fmt.Errorf("failed to mount 
block hotplug volume %s: %v", volumeName, err)
+                               }
                        }
                } else {
                        logger.V(4).Infof("Mounting file system volume: %s", 
volumeName)
                        if err := m.mountFileSystemHotplugVolume(vmi, 
volumeName, sourceUID, record, mountDirectory); err != nil {
-                               return fmt.Errorf("failed to mount filesystem 
hotplug volume %s: %v", volumeName, err)
+                               if !errors.Is(err, os.ErrNotExist) {
+                                       return fmt.Errorf("failed to mount 
filesystem hotplug volume %s: %v", volumeName, err)
+                               }
                        }
                }
        }
-- 
2.41.0


From 5012469d5179f01f5da9ae7c701949a57fb9d439 Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Fri, 11 Aug 2023 18:04:28 -0500
Subject: [PATCH 3/4] Address code review comments

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
 pkg/virt-controller/watch/vmi.go       | 6 ++----
 pkg/virt-handler/hotplug-disk/mount.go | 3 +--
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 99af8b8cb..e031c35a8 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -1816,10 +1816,8 @@ func (c *VMIController) 
needsHandleHotplug(hotplugVolumes []*virtv1.Volume, hotp
                return true
        }
        // Determine if the ready volumes have changed compared to the current 
pod
-       for _, attachmentPod := range hotplugAttachmentPods {
-               if c.podVolumesMatchesReadyVolumes(attachmentPod, 
hotplugVolumes) {
-                       return false
-               }
+       if len(hotplugAttachmentPods) == 1 && 
c.podVolumesMatchesReadyVolumes(hotplugAttachmentPods[0], hotplugVolumes) {
+               return false
        }
        return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
 }
diff --git a/pkg/virt-handler/hotplug-disk/mount.go 
b/pkg/virt-handler/hotplug-disk/mount.go
index 9a5d24747..c0b55046c 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -512,10 +512,9 @@ func (m *volumeMounter) updateBlockMajorMinor(dev uint64, 
allow bool, manager cg
 func (m *volumeMounter) createBlockDeviceFile(basePath *safepath.Path, 
deviceName string, dev uint64, blockDevicePermissions os.FileMode) error {
        if _, err := safepath.JoinNoFollow(basePath, deviceName); 
errors.Is(err, os.ErrNotExist) {
                return mknodCommand(basePath, deviceName, dev, 
blockDevicePermissions)
-       } else if err != nil {
+       } else {
                return err
        }
-       return nil
 }
 
 func (m *volumeMounter) mountFileSystemHotplugVolume(vmi 
*v1.VirtualMachineInstance, volume string, sourceUID types.UID, record 
*vmiMountTargetRecord, mountDirectory bool) error {
-- 
2.41.0


From 5abf17fef7ab5433ec7dd155a82b1575660b86d3 Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Mon, 14 Aug 2023 07:58:16 -0500
Subject: [PATCH 4/4] Update pkg/virt-handler/hotplug-disk/mount.go

Co-authored-by: Vasiliy Ulyanov <vulya...@suse.de>
Signed-off-by: Alexander Wels <aw...@redhat.com>
---
 pkg/virt-handler/hotplug-disk/mount.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/virt-handler/hotplug-disk/mount.go 
b/pkg/virt-handler/hotplug-disk/mount.go
index c0b55046c..b1a11d93f 100644
--- a/pkg/virt-handler/hotplug-disk/mount.go
+++ b/pkg/virt-handler/hotplug-disk/mount.go
@@ -677,7 +677,7 @@ func (m *volumeMounter) Unmount(vmi 
*v1.VirtualMachineInstance) error {
                                }
                        } else if m.isDirectoryMounted(&vmi.Status, 
volumeStatus.Name) {
                                path, err = 
m.hotplugDiskManager.GetFileSystemDirectoryTargetPathFromHostView(virtlauncherUID,
 volumeStatus.Name, false)
-                               if os.IsExist(err) {
+                               if errors.Is(err, os.ErrNotExist) {
                                        // already unmounted or never mounted
                                        continue
                                }
-- 
2.41.0


++++++ 0013-Adapt-e2e-tests-to-CDI-1.57.0.patch ++++++
From 61ca1e96363afe403465ed195b8cc808a4a04f06 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akale...@redhat.com>
Date: Wed, 12 Jul 2023 19:52:04 +0300
Subject: [PATCH 1/2] Don't wait for populator target PVC to be bound

Populator PVCs only reach the Bound phase once population is done,
unlike the legacy CDI flow, which worked on the target PVC directly.

Signed-off-by: Alex Kalenyuk <akale...@redhat.com>
---
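
A minimal sketch of what the tests now rely on instead of ensurePVCBound,
assuming a hypothetical getPhase helper that wraps the CDI client and returns
the DataVolume's Status.Phase as a string (the diff below additionally sets
libdv.WithForceBindAnnotation() so WaitForFirstConsumer storage binds without
a trigger pod):

package sketch

import (
	"fmt"
	"time"
)

// waitForDVSuccess polls the DataVolume phase until it reports Succeeded.
// With populators the target PVC stays Pending until population completes,
// so the DataVolume, not the PVC, is the object to watch.
func waitForDVSuccess(getPhase func() (string, error), timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if phase, err := getPhase(); err == nil && phase == "Succeeded" {
			return nil // the PVC only reaches Bound at this point
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("DataVolume did not reach Succeeded within %v", timeout)
}
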
 tests/storage/export.go | 70 ++---------------------------------------
 1 file changed, 3 insertions(+), 67 deletions(-)

diff --git a/tests/storage/export.go b/tests/storage/export.go
index d456e2fb1..4fab2aec1 100644
--- a/tests/storage/export.go
+++ b/tests/storage/export.go
@@ -48,7 +48,6 @@ import (
 
        k8sv1 "k8s.io/api/core/v1"
        networkingv1 "k8s.io/api/networking/v1"
-       storagev1 "k8s.io/api/storage/v1"
        "k8s.io/apimachinery/pkg/api/equality"
        "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/api/resource"
@@ -230,68 +229,6 @@ var _ = SIGDescribe("Export", func() {
                return tests.RunPod(pod)
        }
 
-       createTriggerPodForPvc := func(pvc *k8sv1.PersistentVolumeClaim) 
*k8sv1.Pod {
-               volumeName := pvc.GetName()
-               podName := fmt.Sprintf("bind-%s", volumeName)
-               pod := tests.RenderPod(podName, []string{"/bin/sh", "-c", 
"sleep 1"}, []string{})
-               pod.Spec.Volumes = append(pod.Spec.Volumes, k8sv1.Volume{
-                       Name: volumeName,
-                       VolumeSource: k8sv1.VolumeSource{
-                               PersistentVolumeClaim: 
&k8sv1.PersistentVolumeClaimVolumeSource{
-                                       ClaimName: pvc.GetName(),
-                               },
-                       },
-               })
-
-               volumeMode := pvc.Spec.VolumeMode
-               if volumeMode != nil && *volumeMode == 
k8sv1.PersistentVolumeBlock {
-                       addBlockVolume(pod, volumeName)
-               } else {
-                       addFilesystemVolume(pod, volumeName)
-               }
-               return tests.RunPodAndExpectCompletion(pod)
-       }
-
-       isWaitForFirstConsumer := func(storageClassName string) bool {
-               sc, err := 
virtClient.StorageV1().StorageClasses().Get(context.Background(), 
storageClassName, metav1.GetOptions{})
-               Expect(err).ToNot(HaveOccurred())
-               return sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == 
storagev1.VolumeBindingWaitForFirstConsumer
-       }
-
-       ensurePVCBound := func(pvc *k8sv1.PersistentVolumeClaim) {
-               namespace := pvc.Namespace
-               if !isWaitForFirstConsumer(*pvc.Spec.StorageClassName) {
-                       By("Checking for bound claim on non-WFFC storage")
-                       // Not WFFC, pvc will be bound
-                       Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-                               pvc, err := 
virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), 
pvc.Name, metav1.GetOptions{})
-                               Expect(err).ToNot(HaveOccurred())
-                               return pvc.Status.Phase
-                       }, 30*time.Second, 
1*time.Second).Should(Equal(k8sv1.ClaimBound))
-                       return
-               }
-               By("Checking the PVC is pending for WFFC storage")
-               Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-                       pvc, err := 
virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), 
pvc.Name, metav1.GetOptions{})
-                       Expect(err).ToNot(HaveOccurred())
-                       return pvc.Status.Phase
-               }, 15*time.Second, 
1*time.Second).Should(Equal(k8sv1.ClaimPending))
-
-               By("Creating trigger pod to bind WFFC storage")
-               triggerPod := createTriggerPodForPvc(pvc)
-               By("Checking the PVC was bound")
-               Eventually(func() k8sv1.PersistentVolumeClaimPhase {
-                       pvc, err := 
virtClient.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), 
pvc.Name, metav1.GetOptions{})
-                       Expect(err).ToNot(HaveOccurred())
-                       return pvc.Status.Phase
-               }, 30*time.Second, 
1*time.Second).Should(Equal(k8sv1.ClaimBound))
-               By("Deleting the trigger pod")
-               immediate := int64(0)
-               
Expect(virtClient.CoreV1().Pods(triggerPod.Namespace).Delete(context.Background(),
 triggerPod.Name, metav1.DeleteOptions{
-                       GracePeriodSeconds: &immediate,
-               })).To(Succeed())
-       }
-
        createExportTokenSecret := func(name, namespace string) *k8sv1.Secret {
                var err error
                secret := &k8sv1.Secret{
@@ -352,6 +289,7 @@ var _ = SIGDescribe("Export", func() {
                dv := libdv.NewDataVolume(
                        
libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
 cdiv1.RegistryPullNode),
                        libdv.WithPVC(libdv.PVCWithStorageClass(sc), 
libdv.PVCWithVolumeMode(volumeMode)),
+                       libdv.WithForceBindAnnotation(),
                )
 
                dv, err = 
virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(),
 dv, metav1.CreateOptions{})
@@ -362,7 +300,6 @@ var _ = SIGDescribe("Export", func() {
                        pvc, err = 
virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(),
 dv.Name, metav1.GetOptions{})
                        return err
                }, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent 
volume associated with DV should be created")
-               ensurePVCBound(pvc)
 
                By("Making sure the DV is successful")
                libstorage.EventuallyDV(dv, 90, HaveSucceeded())
@@ -847,6 +784,7 @@ var _ = SIGDescribe("Export", func() {
                dv := libdv.NewDataVolume(
                        
libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros),
 cdiv1.RegistryPullNode),
                        libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
+                       libdv.WithForceBindAnnotation(),
                )
 
                name := dv.Name
@@ -869,12 +807,10 @@ var _ = SIGDescribe("Export", func() {
                }, 60*time.Second, 
1*time.Second).Should(ContainElement(expectedCond), "export should report 
missing pvc")
 
                dv, err = 
virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(),
 dv, metav1.CreateOptions{})
-               var pvc *k8sv1.PersistentVolumeClaim
                Eventually(func() error {
-                       pvc, err = 
virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(),
 dv.Name, metav1.GetOptions{})
+                       _, err = 
virtClient.CoreV1().PersistentVolumeClaims(testsuite.GetTestNamespace(dv)).Get(context.Background(),
 dv.Name, metav1.GetOptions{})
                        return err
                }, 60*time.Second, 1*time.Second).Should(BeNil(), "persistent 
volume associated with DV should be created")
-               ensurePVCBound(pvc)
 
                By("Making sure the DV is successful")
                libstorage.EventuallyDV(dv, 90, HaveSucceeded())
-- 
2.41.0


From 5b44741c1ca7df3b7121dff7db6a52f6599b7144 Mon Sep 17 00:00:00 2001
From: Alex Kalenyuk <akale...@redhat.com>
Date: Wed, 12 Jul 2023 19:57:57 +0300
Subject: [PATCH 2/2] Don't check for CloneOf/CloneRequest with populator
 target PVCs

These annotations simply don't exist (and are not needed) with populators.

Signed-off-by: Alex Kalenyuk <akale...@redhat.com>
---
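
A minimal sketch of the guard introduced here, using plain client-go types;
checkCloneAnnotations is a simplified stand-in for the test helper of the
same name in the diff below:

package sketch

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// checkCloneAnnotations skips the legacy clone-annotation check for PVCs
// provisioned through the populators flow: such PVCs carry spec.dataSourceRef
// and never receive the k8s.io/CloneRequest or k8s.io/CloneOf annotations.
func checkCloneAnnotations(pvc *corev1.PersistentVolumeClaim, shouldExist bool) error {
	if pvc.Spec.DataSourceRef != nil {
		return nil // populator target PVC: nothing to verify
	}
	for _, key := range []string{"k8s.io/CloneRequest", "k8s.io/CloneOf"} {
		if _, ok := pvc.Annotations[key]; ok != shouldExist {
			return fmt.Errorf("annotation %q: present=%v, want %v", key, ok, shouldExist)
		}
	}
	return nil
}
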
 tests/storage/restore.go | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/tests/storage/restore.go b/tests/storage/restore.go
index dffd0f1fe..5a09ca839 100644
--- a/tests/storage/restore.go
+++ b/tests/storage/restore.go
@@ -1776,13 +1776,18 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", 
func() {
                                        }
                                        pvc, err := 
virtClient.CoreV1().PersistentVolumeClaims(vm.Namespace).Get(context.TODO(), 
pvcName, metav1.GetOptions{})
                                        Expect(err).ToNot(HaveOccurred())
+                                       if pvc.Spec.DataSourceRef != nil {
+                                               // These annotations only exist 
pre-k8s-populators flows
+                                               return
+                                       }
                                        for _, a := range 
[]string{"k8s.io/CloneRequest", "k8s.io/CloneOf"} {
                                                _, ok := pvc.Annotations[a]
                                                
Expect(ok).Should(Equal(shouldExist))
                                        }
                                }
 
-                               createVMFromSource := func() *v1.VirtualMachine 
{
+                               createNetworkCloneVMFromSource := func() 
*v1.VirtualMachine {
+                                       // TODO: consider ensuring network 
clone gets done here using StorageProfile CloneStrategy
                                        dataVolume := libdv.NewDataVolume(
                                                
libdv.WithPVCSource(sourceDV.Namespace, sourceDV.Name),
                                                
libdv.WithPVC(libdv.PVCWithStorageClass(snapshotStorageClass), 
libdv.PVCWithVolumeSize("1Gi")),
@@ -1796,7 +1801,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() 
{
                                }
 
                                DescribeTable("should restore a vm that boots 
from a network cloned datavolumetemplate", func(restoreToNewVM, deleteSourcePVC 
bool) {
-                                       vm, vmi = 
createAndStartVM(createVMFromSource())
+                                       vm, vmi = 
createAndStartVM(createNetworkCloneVMFromSource())
 
                                        checkCloneAnnotations(vm, true)
                                        if deleteSourcePVC {
@@ -1813,7 +1818,7 @@ var _ = SIGDescribe("VirtualMachineRestore Tests", func() 
{
                                )
 
                                DescribeTable("should restore a vm that boots 
from a network cloned datavolume (not template)", func(restoreToNewVM, 
deleteSourcePVC bool) {
-                                       vm = createVMFromSource()
+                                       vm = createNetworkCloneVMFromSource()
                                        dv := orphanDataVolumeTemplate(vm, 0)
 
                                        dv, err = 
virtClient.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Create(context.Background(),
 dv, metav1.CreateOptions{})
-- 
2.41.0


++++++ 0014-Export-create-populator-compatible-datavolumes-from-.patch ++++++
From 28f503c1417df30de3c7db8c14ced7c8985c9612 Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Thu, 13 Jul 2023 14:33:29 -0500
Subject: [PATCH] Export create populator compatible datavolumes from VM

The generated DataVolumes were not compatible with populator-populated
sources. In particular, such sources have a dataSource or dataSourceRef
set on the PVC.

This commit clears those values so that the target CDI can properly
generate PVCs from the DataVolume.

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
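
A minimal sketch of the spec sanitization performed when deriving a
DataVolume from an existing PVC; sanitizePVCSpecForExport is a hypothetical
helper, the real code sits in createExportHttpDvFromPVC. The DataSource /
DataSourceRef lines are what this commit adds; clearing VolumeName and
StorageClassName was already in place:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
)

// sanitizePVCSpecForExport strips binding- and population-specific fields so
// the importing CDI can provision and populate a fresh PVC on the target.
func sanitizePVCSpecForExport(spec *corev1.PersistentVolumeClaimSpec) {
	spec.VolumeName = ""        // don't pin the target to the source cluster's PV
	spec.StorageClassName = nil // let the target choose an appropriate class
	spec.DataSource = nil       // populator references are meaningless on the target
	spec.DataSourceRef = nil
}
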
 pkg/storage/export/export/export.go      | 3 +++
 pkg/storage/export/export/export_test.go | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/pkg/storage/export/export/export.go 
b/pkg/storage/export/export/export.go
index c1ba57174..51eb69df6 100644
--- a/pkg/storage/export/export/export.go
+++ b/pkg/storage/export/export/export.go
@@ -1429,6 +1429,9 @@ func (ctrl *VMExportController) 
createExportHttpDvFromPVC(namespace, name string
        if pvc != nil {
                pvc.Spec.VolumeName = ""
                pvc.Spec.StorageClassName = nil
+               // Don't copy datasources, will be populated by CDI with the 
datavolume
+               pvc.Spec.DataSource = nil
+               pvc.Spec.DataSourceRef = nil
                return &cdiv1.DataVolume{
                        ObjectMeta: metav1.ObjectMeta{
                                Name:      name,
diff --git a/pkg/storage/export/export/export_test.go 
b/pkg/storage/export/export/export_test.go
index 15941984d..a341bdca6 100644
--- a/pkg/storage/export/export/export_test.go
+++ b/pkg/storage/export/export/export_test.go
@@ -1310,12 +1310,16 @@ var _ = Describe("Export controller", func() {
 
        It("Should generate DataVolumes from VM", func() {
                pvc := createPVC("pvc", string(cdiv1.DataVolumeKubeVirt))
+               pvc.Spec.DataSource = &k8sv1.TypedLocalObjectReference{}
+               pvc.Spec.DataSourceRef = &k8sv1.TypedObjectReference{}
                pvcInformer.GetStore().Add(pvc)
                vm := createVMWithDVTemplateAndPVC()
                dvs := controller.generateDataVolumesFromVm(vm)
                Expect(dvs).To(HaveLen(1))
                Expect(dvs[0]).ToNot(BeNil())
                Expect(dvs[0].Name).To((Equal("pvc")))
+               Expect(dvs[0].Spec.PVC.DataSource).To(BeNil())
+               Expect(dvs[0].Spec.PVC.DataSourceRef).To(BeNil())
        })
 })
 
-- 
2.41.0
