Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package kubevirt for openSUSE:Factory 
checked in at 2023-08-01 15:38:45
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubevirt (Old)
 and      /work/SRC/openSUSE:Factory/.kubevirt.new.32662 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubevirt"

Tue Aug  1 15:38:45 2023 rev:61 rq:1101726 version:1.0.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubevirt/kubevirt.changes        2023-07-18 22:09:09.695336911 +0200
+++ /work/SRC/openSUSE:Factory/.kubevirt.new.32662/kubevirt.changes     2023-08-01 15:38:52.397947905 +0200
@@ -1,0 +2,10 @@
+Fri Jul 28 08:20:41 UTC 2023 - Vasily Ulyanov <vasily.ulya...@suse.com>
+
+- Support multiple watchdogs in the domain schema
+  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
+- Fix leaking file descriptor
+  0006-isolation-close-file-when-exits.patch
+- Fix volume detach on hotplug attachment pod delete
+  0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch
+
+-------------------------------------------------------------------

New:
----
  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
  0006-isolation-close-file-when-exits.patch
  0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubevirt.spec ++++++
--- /var/tmp/diff_new_pack.kaA7ZR/_old  2023-08-01 15:38:53.429954295 +0200
+++ /var/tmp/diff_new_pack.kaA7ZR/_new  2023-08-01 15:38:53.433954320 +0200
@@ -32,6 +32,9 @@
 Patch2:         0002-ksm-Access-sysfs-from-the-host-filesystem.patch
 Patch3:         0003-Virtiofs-Remove-duplicated-functional-tests.patch
 Patch4:         0004-tests-leave-some-space-for-metadata-on-the-backend-P.patch
+Patch5:         0005-Support-multiple-watchdogs-in-the-domain-schema.patch
+Patch6:         0006-isolation-close-file-when-exits.patch
+Patch7:         0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch
 BuildRequires:  glibc-devel-static
 BuildRequires:  golang-packaging
 BuildRequires:  pkgconfig

++++++ 0005-Support-multiple-watchdogs-in-the-domain-schema.patch ++++++
From 12cb69406a3a33a3b38c97e35014fa905858fe72 Mon Sep 17 00:00:00 2001
From: Vasiliy Ulyanov <vulya...@suse.de>
Date: Wed, 19 Jul 2023 10:36:21 +0200
Subject: [PATCH] Support multiple watchdogs in the domain schema

Since version 9.1.0, libvirt allows several watchdog devices. The
documentation now states:

Having multiple watchdogs is usually not something very common, but be
aware that this might happen, for example, when an implicit watchdog
device is added as part of another device. For example the iTCO watchdog
being part of the ich9 southbridge, which is used with the q35 machine
type.

Signed-off-by: Vasiliy Ulyanov <vulya...@suse.de>
---
 pkg/virt-launcher/virtwrap/api/deepcopy_generated.go  | 10 ++++++----
 pkg/virt-launcher/virtwrap/api/schema.go              |  2 +-
 pkg/virt-launcher/virtwrap/api/schema_test.go         | 10 ++++++----
 pkg/virt-launcher/virtwrap/converter/converter.go     |  2 +-
 pkg/virt-launcher/virtwrap/converter/pci-placement.go |  4 ++--
 5 files changed, 16 insertions(+), 12 deletions(-)
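
Note for reviewers: the schema change below relies on Go's encoding/xml
behaviour of serializing a slice field into repeated elements. A minimal,
self-contained sketch of that behaviour, using simplified stand-in types
(not the actual virtwrap API), shows how a Watchdogs slice renders as
multiple <watchdog> elements:

package main

import (
	"encoding/xml"
	"fmt"
)

// Simplified stand-ins for the virtwrap API types (illustration only).
type Watchdog struct {
	Model  string `xml:"model,attr"`
	Action string `xml:"action,attr"`
}

type Devices struct {
	Watchdogs []Watchdog `xml:"watchdog,omitempty"`
}

func main() {
	d := Devices{Watchdogs: []Watchdog{
		{Model: "i6300esb", Action: "poweroff"},
		{Model: "itco", Action: "reset"}, // e.g. an implicit iTCO watchdog on q35
	}}
	out, _ := xml.MarshalIndent(d, "", "  ")
	fmt.Println(string(out))
	// Prints:
	// <Devices>
	//   <watchdog model="i6300esb" action="poweroff"></watchdog>
	//   <watchdog model="itco" action="reset"></watchdog>
	// </Devices>
}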

diff --git a/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go b/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
index b5cb529e2..c1d3a781a 100644
--- a/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
+++ b/pkg/virt-launcher/virtwrap/api/deepcopy_generated.go
@@ -736,10 +736,12 @@ func (in *Devices) DeepCopyInto(out *Devices) {
                        (*in)[i].DeepCopyInto(&(*out)[i])
                }
        }
-       if in.Watchdog != nil {
-               in, out := &in.Watchdog, &out.Watchdog
-               *out = new(Watchdog)
-               (*in).DeepCopyInto(*out)
+       if in.Watchdogs != nil {
+               in, out := &in.Watchdogs, &out.Watchdogs
+               *out = make([]Watchdog, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
        }
        if in.Rng != nil {
                in, out := &in.Rng, &out.Rng
diff --git a/pkg/virt-launcher/virtwrap/api/schema.go b/pkg/virt-launcher/virtwrap/api/schema.go
index 465c6c6c1..ff4e6e959 100644
--- a/pkg/virt-launcher/virtwrap/api/schema.go
+++ b/pkg/virt-launcher/virtwrap/api/schema.go
@@ -473,7 +473,7 @@ type Devices struct {
        Inputs      []Input            `xml:"input"`
        Serials     []Serial           `xml:"serial"`
        Consoles    []Console          `xml:"console"`
-       Watchdog    *Watchdog          `xml:"watchdog,omitempty"`
+       Watchdogs   []Watchdog         `xml:"watchdog,omitempty"`
        Rng         *Rng               `xml:"rng,omitempty"`
        Filesystems []FilesystemDevice `xml:"filesystem,omitempty"`
        Redirs      []RedirectedDevice `xml:"redirdev,omitempty"`
diff --git a/pkg/virt-launcher/virtwrap/api/schema_test.go b/pkg/virt-launcher/virtwrap/api/schema_test.go
index 8150ea8fd..c315cf13f 100644
--- a/pkg/virt-launcher/virtwrap/api/schema_test.go
+++ b/pkg/virt-launcher/virtwrap/api/schema_test.go
@@ -348,10 +348,12 @@ var _ = ginkgo.Describe("Schema", func() {
                exampleDomain.Spec.Devices.Consoles = []Console{
                        {Type: "pty"},
                }
-               exampleDomain.Spec.Devices.Watchdog = &Watchdog{
-                       Model:  "i6300esb",
-                       Action: "poweroff",
-                       Alias:  NewUserDefinedAlias("mywatchdog"),
+               exampleDomain.Spec.Devices.Watchdogs = []Watchdog{
+                       {
+                               Model:  "i6300esb",
+                               Action: "poweroff",
+                               Alias:  NewUserDefinedAlias("mywatchdog"),
+                       },
                }
                exampleDomain.Spec.Devices.Rng = &Rng{
                        Model:   v1.VirtIO,
diff --git a/pkg/virt-launcher/virtwrap/converter/converter.go b/pkg/virt-launcher/virtwrap/converter/converter.go
index db3c0a903..531a5ea71 100644
--- a/pkg/virt-launcher/virtwrap/converter/converter.go
+++ b/pkg/virt-launcher/virtwrap/converter/converter.go
@@ -1582,7 +1582,7 @@ func Convert_v1_VirtualMachineInstance_To_api_Domain(vmi *v1.VirtualMachineInsta
                if err != nil {
                        return err
                }
-               domain.Spec.Devices.Watchdog = newWatchdog
+               domain.Spec.Devices.Watchdogs = append(domain.Spec.Devices.Watchdogs, *newWatchdog)
        }
 
        if vmi.Spec.Domain.Devices.Rng != nil {
diff --git a/pkg/virt-launcher/virtwrap/converter/pci-placement.go b/pkg/virt-launcher/virtwrap/converter/pci-placement.go
index 38ca8354e..fbe17ba6e 100644
--- a/pkg/virt-launcher/virtwrap/converter/pci-placement.go
+++ b/pkg/virt-launcher/virtwrap/converter/pci-placement.go
@@ -53,8 +53,8 @@ func PlacePCIDevicesOnRootComplex(spec *api.DomainSpec) (err error) {
                        return err
                }
        }
-       if spec.Devices.Watchdog != nil {
-               spec.Devices.Watchdog.Address, err = assigner.PlacePCIDeviceAtNextSlot(spec.Devices.Watchdog.Address)
+       for i, watchdog := range spec.Devices.Watchdogs {
+               spec.Devices.Watchdogs[i].Address, err = assigner.PlacePCIDeviceAtNextSlot(watchdog.Address)
                if err != nil {
                        return err
                }
-- 
2.41.0


++++++ 0006-isolation-close-file-when-exits.patch ++++++
From 8ae5fc3c4506c53adc5aae4cd20ad2d9ac4c035a Mon Sep 17 00:00:00 2001
From: grass-lu <284555...@qq.com>
Date: Mon, 24 Jul 2023 15:22:17 +0800
Subject: [PATCH 1/3] isolation: close file when exits
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The file is not closed, so the socket connection is never truly closed
and keeps occupying a connection to libvirtd.

Signed-off-by: grass-lu <284555...@qq.com>
---
 pkg/virt-handler/isolation/detector.go | 2 ++
 1 file changed, 2 insertions(+)
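
Note for reviewers: a hedged sketch of the descriptor-leak pattern this
patch fixes. (*net.UnixConn).File() returns a duplicated file descriptor,
so the returned *os.File has to be closed on every exit path or the socket
connection to libvirtd stays half-open. The helper below is illustrative
only (simplified, not the actual detector code):

package isolation

import (
	"net"
	"syscall"
)

// peerPID returns the PID of the process owning the other end of a unix
// socket. Illustrative only; mirrors the shape of the fixed getPid code.
func peerPID(socket string) (int, error) {
	conn, err := net.Dial("unix", socket)
	if err != nil {
		return -1, err
	}
	defer conn.Close()

	// File() dups the underlying descriptor; without the deferred Close
	// below, the duplicate (and the peer's accepted connection) leaks.
	ufile, err := conn.(*net.UnixConn).File()
	if err != nil {
		return -1, err
	}
	defer ufile.Close()

	ucreds, err := syscall.GetsockoptUcred(int(ufile.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
	if err != nil {
		return -1, err
	}
	return int(ucreds.Pid), nil
}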

diff --git a/pkg/virt-handler/isolation/detector.go b/pkg/virt-handler/isolation/detector.go
index 62f920025..9c282e231 100644
--- a/pkg/virt-handler/isolation/detector.go
+++ b/pkg/virt-handler/isolation/detector.go
@@ -231,6 +231,8 @@ func (s *socketBasedIsolationDetector) getPid(socket string) (int, error) {
        if err != nil {
                return -1, err
        }
+       defer ufile.Close()
+
        // This is the tricky part, which will give us the PID of the owning socket
        ucreds, err := syscall.GetsockoptUcred(int(ufile.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED)
        if err != nil {
-- 
2.41.0


++++++ 0007-Fix-volume-detach-on-hotplug-attachment-pod-delete.patch ++++++
From 7a2b9109d82cced1603dfbd35ec7c1afbf3473bb Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Mon, 24 Jul 2023 08:26:04 -0500
Subject: [PATCH 1/2] Fix volume detach on hotplug attachment pod delete

When the hotplug attachment pod is deleted, the VMI
volume status goes back to Bound, which triggers the
manager to detach the volume from the running VM,
interrupting any IO on that volume. The pod is then
re-created and the volume gets re-attached so operation
can continue, but if the volume is mounted, it needs
to be re-mounted in the VM.

This commit modifies the logic so that once the volume is
ready, it cannot go back to Bound when the attachment pod
disappears. This prevents the detachment and the resulting
IO disruption on the running VM.

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
 pkg/virt-controller/watch/vmi.go              |  19 +--
 .../virtwrap/converter/converter.go           |   2 +-
 tests/storage/hotplug.go                      | 122 +++++++++++++++++-
 3 files changed, 127 insertions(+), 16 deletions(-)
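
Note for reviewers: the behavioural change is easier to see in isolation.
A hedged, stripped-down sketch of the rule the patch introduces (stand-in
types, not the real virt-controller code): once a hotplug volume has
reached the Ready phase, a temporarily missing attachment pod no longer
resets the status to Bound/Pending, so the volume stays attached to the
running VM.

package volumestatus

// VolumePhase is a simplified stand-in for virtv1.VolumePhase.
type VolumePhase string

const (
	VolumeBound   VolumePhase = "Bound"
	VolumePending VolumePhase = "Pending"
	VolumeReady   VolumePhase = "Ready"
)

// reconcilePhase mimics the fixed updateVolumeStatus logic: the phase is
// recomputed from the PVC only when the attachment pod is gone AND the
// volume has not yet become Ready.
func reconcilePhase(current VolumePhase, attachmentPodExists bool, phaseFromPVC VolumePhase) VolumePhase {
	if attachmentPodExists {
		// Pod status handling happens elsewhere in the controller.
		return current
	}
	if current == VolumeReady {
		// Pod was deleted (it will be re-created); do not push the volume
		// back to Bound, which would detach it from the running VM.
		return current
	}
	return phaseFromPVC
}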

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 03725ad46..801ffd141 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,13 +2144,14 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
                        }
                        attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
                        if attachmentPod == nil {
-                               status.HotplugVolume.AttachPodName = ""
-                               status.HotplugVolume.AttachPodUID = ""
-                               // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
-                               phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
-                               status.Phase = phase
-                               status.Message = message
-                               status.Reason = reason
+                               if status.Phase != virtv1.VolumeReady {
+                                       status.HotplugVolume.AttachPodUID = ""
+                                       // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+                                       phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
+                                       status.Phase = phase
+                                       status.Message = message
+                                       status.Reason = reason
+                               }
                        } else {
                                status.HotplugVolume.AttachPodName = attachmentPod.Name
                                if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
@@ -2239,8 +2240,8 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
 }
 
 func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
-       return currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
-               currentPhase == virtv1.HotplugVolumeAttachedToNode
+       return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
+               currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
 }
 
 func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
diff --git a/pkg/virt-launcher/virtwrap/converter/converter.go b/pkg/virt-launcher/virtwrap/converter/converter.go
index db3c0a903..5c43acd74 100644
--- a/pkg/virt-launcher/virtwrap/converter/converter.go
+++ b/pkg/virt-launcher/virtwrap/converter/converter.go
@@ -1526,7 +1526,7 @@ func Convert_v1_VirtualMachineInstance_To_api_Domain(vmi *v1.VirtualMachineInsta
                }
                volume := volumes[disk.Name]
                if volume == nil {
-                       return fmt.Errorf("No matching volume with name %s found", disk.Name)
+                       return fmt.Errorf("no matching volume with name %s found", disk.Name)
                }
 
                if _, ok := c.HotplugVolumes[disk.Name]; !ok {
diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go
index 45284ed49..a85976484 100644
--- a/tests/storage/hotplug.go
+++ b/tests/storage/hotplug.go
@@ -57,7 +57,6 @@ import (
        "kubevirt.io/kubevirt/tests/flags"
        "kubevirt.io/kubevirt/tests/framework/checks"
        "kubevirt.io/kubevirt/tests/framework/matcher"
-       . "kubevirt.io/kubevirt/tests/framework/matcher"
        "kubevirt.io/kubevirt/tests/libdv"
        "kubevirt.io/kubevirt/tests/libnode"
        "kubevirt.io/kubevirt/tests/libstorage"
@@ -503,7 +502,7 @@ var _ = SIGDescribe("Hotplug", func() {
 
                dvBlock, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(dvBlock)).Create(context.Background(), dvBlock, metav1.CreateOptions{})
                Expect(err).ToNot(HaveOccurred())
-               libstorage.EventuallyDV(dvBlock, 240, HaveSucceeded())
+               libstorage.EventuallyDV(dvBlock, 240, matcher.HaveSucceeded())
                return dvBlock
        }
 
@@ -1120,7 +1119,7 @@ var _ = SIGDescribe("Hotplug", func() {
                                var err error
                                url := cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskCirros)
 
-                               storageClass, foundSC := libstorage.GetRWXFileSystemStorageClass()
+                               storageClass, foundSC := libstorage.GetRWOFileSystemStorageClass()
                                if !foundSC {
                                        Skip("Skip test when Filesystem storage is not present")
                                }
@@ -1131,7 +1130,6 @@ var _ = SIGDescribe("Hotplug", func() {
                                        libdv.WithPVC(
                                                libdv.PVCWithStorageClass(storageClass),
                                                libdv.PVCWithVolumeSize("256Mi"),
-                                               libdv.PVCWithReadWriteManyAccessMode(),
                                        ),
                                        libdv.WithForceBindAnnotation(),
                                )
@@ -1140,7 +1138,7 @@ var _ = SIGDescribe("Hotplug", func() {
                                Expect(err).ToNot(HaveOccurred())
 
                                By("waiting for the dv import to pvc to finish")
-                               libstorage.EventuallyDV(dv, 180, HaveSucceeded())
+                               libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())
 
                                By("rename disk image on PVC")
                                pvc, err := virtClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.Background(), dv.Name, metav1.GetOptions{})
@@ -1171,6 +1169,118 @@ var _ = SIGDescribe("Hotplug", func() {
                })
        })
 
+       Context("delete attachment pod several times", func() {
+               var (
+                       vm       *v1.VirtualMachine
+                       hpvolume *cdiv1.DataVolume
+               )
+
+               BeforeEach(func() {
+                       if !libstorage.HasCDI() {
+                               Skip("Skip tests when CDI is not present")
+                       }
+                       _, foundSC := libstorage.GetRWXBlockStorageClass()
+                       if !foundSC {
+                               Skip("Skip test when block RWX storage is not present")
+                       }
+               })
+
+               AfterEach(func() {
+                       if vm != nil {
+                               err := virtClient.VirtualMachine(vm.Namespace).Delete(context.Background(), vm.Name, &metav1.DeleteOptions{})
+                               Expect(err).ToNot(HaveOccurred())
+                               vm = nil
+                       }
+               })
+
+               deleteAttachmentPod := func(vmi *v1.VirtualMachineInstance) {
+                       podName := ""
+                       for _, volume := range vmi.Status.VolumeStatus {
+                               if volume.HotplugVolume != nil {
+                                       podName = volume.HotplugVolume.AttachPodName
+                                       break
+                               }
+                       }
+                       Expect(podName).ToNot(BeEmpty())
+                       foreGround := metav1.DeletePropagationForeground
+                       err := virtClient.CoreV1().Pods(vmi.Namespace).Delete(context.Background(), podName, metav1.DeleteOptions{
+                               GracePeriodSeconds: pointer.Int64(0),
+                               PropagationPolicy:  &foreGround,
+                       })
+                       Expect(err).ToNot(HaveOccurred())
+                       Eventually(func() bool {
+                               _, err := virtClient.CoreV1().Pods(vmi.Namespace).Get(context.Background(), podName, metav1.GetOptions{})
+                               return errors.IsNotFound(err)
+                       }, 300*time.Second, 1*time.Second).Should(BeTrue())
+               }
+
+               It("should remain active", func() {
+                       checkVolumeName := "checkvolume"
+                       volumeMode := corev1.PersistentVolumeBlock
+                       addVolumeFunc := addDVVolumeVMI
+                       var err error
+                       storageClass, _ := libstorage.GetRWXBlockStorageClass()
+
+                       blankDv := func() *cdiv1.DataVolume {
+                               return libdv.NewDataVolume(
+                                       libdv.WithNamespace(testsuite.GetTestNamespace(nil)),
+                                       libdv.WithBlankImageSource(),
+                                       libdv.WithPVC(
+                                               libdv.PVCWithStorageClass(storageClass),
+                                               libdv.PVCWithVolumeSize(cd.BlankVolumeSize),
+                                               libdv.PVCWithReadWriteManyAccessMode(),
+                                               libdv.PVCWithVolumeMode(volumeMode),
+                                       ),
+                                       libdv.WithForceBindAnnotation(),
+                               )
+                       }
+                       vmi := libvmi.NewCirros()
+                       vm := tests.NewRandomVirtualMachine(vmi, true)
+                       vm, err = virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
+                       Expect(err).ToNot(HaveOccurred())
+
+                       Eventually(func() bool {
+                               vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Get(context.Background(), vm.Name, &metav1.GetOptions{})
+                               Expect(err).ToNot(HaveOccurred())
+                               return vm.Status.Ready
+                       }, 300*time.Second, 1*time.Second).Should(BeTrue())
+                       By("creating blank hotplug volumes")
+                       hpvolume = blankDv()
+                       dv, err := virtClient.CdiClient().CdiV1beta1().DataVolumes(hpvolume.Namespace).Create(context.Background(), hpvolume, metav1.CreateOptions{})
+                       Expect(err).ToNot(HaveOccurred())
+                       By("waiting for the dv import to pvc to finish")
+                       libstorage.EventuallyDV(dv, 180, matcher.HaveSucceeded())
+                       vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+                       Expect(err).ToNot(HaveOccurred())
+
+                       By("hotplugging the volume check volume")
+                       addVolumeFunc(vmi.Name, vmi.Namespace, checkVolumeName, hpvolume.Name, v1.DiskBusSCSI, false, "")
+                       vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+                       Expect(err).ToNot(HaveOccurred())
+                       verifyVolumeAndDiskVMIAdded(virtClient, vmi, checkVolumeName)
+                       verifyVolumeStatus(vmi, v1.VolumeReady, "", checkVolumeName)
+                       getVmiConsoleAndLogin(vmi)
+
+                       By("verifying the volume is useable and creating some data on it")
+                       verifyHotplugAttachedAndUseable(vmi, []string{checkVolumeName})
+                       targets := getTargetsFromVolumeStatus(vmi, checkVolumeName)
+                       Expect(targets).ToNot(BeEmpty())
+                       verifyWriteReadData(vmi, targets[0])
+                       vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+                       Expect(err).ToNot(HaveOccurred())
+                       By("deleting the attachment pod a few times, try to make the currently attach volume break")
+                       for i := 0; i < 10; i++ {
+                               deleteAttachmentPod(vmi)
+                               vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+                               Expect(err).ToNot(HaveOccurred())
+                       }
+                       By("verifying the volume has not been disturbed in the VM")
+                       targets = getTargetsFromVolumeStatus(vmi, checkVolumeName)
+                       Expect(targets).ToNot(BeEmpty())
+                       verifyWriteReadData(vmi, targets[0])
+               })
+       })
+
        Context("with limit range in namespace", func() {
                var (
                        sc                         string
@@ -1195,7 +1305,7 @@ var _ = SIGDescribe("Hotplug", func() {
                        vm.Spec.Template.Spec.Domain.Resources.Limits = corev1.ResourceList{}
                        vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceMemory] = *memLimitQuantity
                        vm.Spec.Template.Spec.Domain.Resources.Limits[corev1.ResourceCPU] = *cpuLimitQuantity
-                       vm.Spec.Running = pointer.BoolPtr(true)
+                       vm.Spec.Running = pointer.Bool(true)
                        vm, err := virtClient.VirtualMachine(testsuite.GetTestNamespace(vm)).Create(context.Background(), vm)
                        Expect(err).ToNot(HaveOccurred())
                        Eventually(func() bool {
-- 
2.41.0


From 14854d800acaf6c17a487b60d28d4eb32bb8d9d2 Mon Sep 17 00:00:00 2001
From: Alexander Wels <aw...@redhat.com>
Date: Tue, 25 Jul 2023 07:20:13 -0500
Subject: [PATCH 2/2] Address code review comments

Remove the unneeded phase check, and move the other check into
its own function in case more elaborate checks are needed later.

Signed-off-by: Alexander Wels <aw...@redhat.com>
---
 pkg/virt-controller/watch/vmi.go | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
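
Note for reviewers: the removed clause was redundant, since VolumeReady is
not one of the phases the predicate enumerates, so the extra
"!= VolumeReady" guard added in the first commit could never change the
result. A minimal sketch of the simplified predicate with stand-in
constants (not the real virtv1 types):

package volumestatus

type VolumePhase string

const (
	VolumeBound                 VolumePhase = "Bound"
	VolumePending               VolumePhase = "Pending"
	HotplugVolumeAttachedToNode VolumePhase = "AttachedToNode"
)

// canMoveToAttachedPhase (simplified): since VolumeReady is not in this
// allowed set, an additional "&& currentPhase != VolumeReady" clause is
// a no-op and can be dropped.
func canMoveToAttachedPhase(currentPhase VolumePhase) bool {
	return currentPhase == "" || currentPhase == VolumeBound ||
		currentPhase == VolumePending ||
		currentPhase == HotplugVolumeAttachedToNode
}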

diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index 801ffd141..9afaee4f0 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -2144,9 +2144,9 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
                        }
                        attachmentPod := c.findAttachmentPodByVolumeName(volume.Name, attachmentPods)
                        if attachmentPod == nil {
-                               if status.Phase != virtv1.VolumeReady {
+                               if !c.volumeReady(status.Phase) {
                                        status.HotplugVolume.AttachPodUID = ""
-                                       // Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
+                                       // Volume is not hotplugged in VM and Pod is gone, or hasn't been created yet, check for the PVC associated with the volume to set phase and message
                                        phase, reason, message := c.getVolumePhaseMessageReason(&vmi.Spec.Volumes[i], vmi.Namespace)
                                        status.Phase = phase
                                        status.Message = message
@@ -2216,6 +2216,10 @@ func (c *VMIController) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, v
        return nil
 }
 
+func (c *VMIController) volumeReady(phase virtv1.VolumePhase) bool {
+       return phase == virtv1.VolumeReady
+}
+
 func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim) (cdiv1.Percent, error) {
        // To avoid conflicts, we only allow having one CDI instance
        if cdiInstances := len(c.cdiInformer.GetStore().List()); cdiInstances != 1 {
@@ -2241,7 +2245,7 @@ func (c *VMIController) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim)
 
 func (c *VMIController) canMoveToAttachedPhase(currentPhase virtv1.VolumePhase) bool {
        return (currentPhase == "" || currentPhase == virtv1.VolumeBound || currentPhase == virtv1.VolumePending ||
-               currentPhase == virtv1.HotplugVolumeAttachedToNode) && currentPhase != virtv1.VolumeReady
+               currentPhase == virtv1.HotplugVolumeAttachedToNode)
 }
 
 func (c *VMIController) findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
-- 
2.41.0
