Drop yet another unnecessary magic page value from KVM, as there's zero
reason to use a poisoned pointer to indicate "no page".  If KVM uses a
NULL page pointer, the kernel will explode just as quickly as if KVM uses
a poisoned pointer.  Never mind the fact that such usage would be a
blatant and egregious KVM bug.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 include/linux/kvm_host.h | 4 ----
 virt/kvm/kvm_main.c      | 4 ++--
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f42e030f69a4..a5dcb72bab00 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -273,16 +273,12 @@ enum {
        READING_SHADOW_PAGE_TABLES,
 };
 
-#define KVM_UNMAPPED_PAGE      ((void *) 0x500 + POISON_POINTER_DELTA)
-
 struct kvm_host_map {
        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. There is
         * a 'struct page' for it. When using mem= kernel parameter some memory
         * can be used as guest memory but they are not managed by host
         * kernel).
-        * If 'pfn' is not managed by the host kernel, this field is
-        * initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 67a50b87bb87..3d717a131906 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3088,7 +3088,7 @@ void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-       map->page = KVM_UNMAPPED_PAGE;
+       map->page = NULL;
        map->hva = NULL;
        map->gfn = gfn;
 
@@ -3114,7 +3114,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
        if (!map->hva)
                return;
 
-       if (map->page != KVM_UNMAPPED_PAGE)
+       if (map->page)
                kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
        else
-- 
2.46.0.rc1.232.g9752f9e123-goog
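
For anyone reading along outside the kernel tree, below is a minimal userspace
sketch of the pattern the patch adopts: a plain NULL pointer, rather than a
poisoned sentinel, marks "no struct page backing this mapping", and the unmap
path simply branches on that pointer.  The struct and function names here
(host_map, unmap) are hypothetical stand-ins, not KVM's actual API; the printf
calls stand in for kunmap()/memunmap().

	/* Standalone sketch; compiles as ordinary userspace C. */
	#include <stdio.h>
	#include <stddef.h>

	struct host_map {
		void *page;	/* NULL when the pfn has no struct page */
		void *hva;	/* host virtual address of the mapping */
	};

	static void unmap(struct host_map *map)
	{
		if (!map->hva)
			return;

		if (map->page)		/* page-backed: "kunmap" analogue */
			printf("kunmap(%p)\n", map->page);
		else			/* IO memory: "memunmap" analogue */
			printf("memunmap(%p)\n", map->hva);

		map->hva = NULL;
		map->page = NULL;
	}

	int main(void)
	{
		char buf[64];
		struct host_map m = { .page = NULL, .hva = buf };

		unmap(&m);	/* no page backing: takes the else branch */
		return 0;
	}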
