On 2/21/19 4:50 PM, Roger Pau Monne wrote:
> This is in preparation for also changing p2m_entry_modify to return an
> error code.
> 
> No functional change intended.
> 
> Signed-off-by: Roger Pau Monné <roger....@citrix.com>
> ---
[snip]
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 04e9d81cf6..254c5dfd19 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -184,6 +184,8 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>      l1_pgentry_t *p2m_entry, new_entry;
>      void *next;
>      unsigned int flags;
> +    int rc;
> +    mfn_t mfn = INVALID_MFN;
>  
>      if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
>                                        shift, max)) )
> @@ -194,7 +196,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>      /* PoD/paging: Not present doesn't imply empty. */
>      if ( !flags )
>      {
> -        mfn_t mfn = p2m_alloc_ptp(p2m, level);
> +        mfn = p2m_alloc_ptp(p2m, level);
>  
>          if ( mfn_eq(mfn, INVALID_MFN) )
>              return -ENOMEM;
> @@ -202,13 +204,14 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>          new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
>  
>          p2m_add_iommu_flags(&new_entry, level, 
> IOMMUF_readable|IOMMUF_writable);
> -        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> +        rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> +        if ( rc )
> +            ASSERT_UNREACHABLE();
>      }
>      else if ( flags & _PAGE_PSE )
>      {
>          /* Split superpages pages into smaller ones. */
>          unsigned long pfn = l1e_get_pfn(*p2m_entry);
> -        mfn_t mfn;
>          l1_pgentry_t *l1_entry;
>          unsigned int i;
>  
> @@ -250,18 +253,37 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>          {
>              new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * 
> PAGETABLE_ORDER)),
>                                       flags);
> -            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
> +            rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 
> level);
> +            if ( rc )
> +            {
> +                ASSERT_UNREACHABLE();
> +                break;
> +            }
>          }
>  
>          unmap_domain_page(l1_entry);
>  
> -        new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
> -        p2m_add_iommu_flags(&new_entry, level, 
> IOMMUF_readable|IOMMUF_writable);
> -        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
> +        if ( !rc )
> +        {
> +            new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
> +            p2m_add_iommu_flags(&new_entry, level,
> +                                IOMMUF_readable|IOMMUF_writable);
> +            rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
> +                                      level + 1);
> +            if ( rc )
> +                ASSERT_UNREACHABLE();
> +        }
>      }
>      else
>          ASSERT(flags & _PAGE_PRESENT);
>  
> +    if ( rc )
> +    {
> +        ASSERT(mfn_valid(mfn));
> +        p2m_free_ptp(p2m, mfn_to_page(mfn));
> +        return rc;
> +    }
> +

I think the idiomatic way to deal with this would be to have a label at
the end that everything jumps to — something like the attached?  That way
you don't have to spend mental effort making sure that nothing happens
between the error and the clean-up call.

Everything else looks good to me; thanks for doing this.

 -George
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 254c5dfd19..1eb22bd06e 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -206,7 +206,10 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
         if ( rc )
+        {
             ASSERT_UNREACHABLE();
+            goto free_ptp;
+        }
     }
     else if ( flags & _PAGE_PSE )
     {
@@ -257,39 +260,37 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
             if ( rc )
             {
                 ASSERT_UNREACHABLE();
-                break;
+                unmap_domain_page(l1_entry);
+                goto free_ptp;
             }
         }
 
         unmap_domain_page(l1_entry);
 
-        if ( !rc )
-        {
-            new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
-            p2m_add_iommu_flags(&new_entry, level,
-                                IOMMUF_readable|IOMMUF_writable);
-            rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
-                                      level + 1);
-            if ( rc )
-                ASSERT_UNREACHABLE();
+        new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
+        p2m_add_iommu_flags(&new_entry, level,
+                            IOMMUF_readable|IOMMUF_writable);
+        rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
+                                  level + 1);
+        if ( rc ) {
+            ASSERT_UNREACHABLE();
+            goto free_ptp;
         }
     }
     else
         ASSERT(flags & _PAGE_PRESENT);
 
-    if ( rc )
-    {
-        ASSERT(mfn_valid(mfn));
-        p2m_free_ptp(p2m, mfn_to_page(mfn));
-        return rc;
-    }
-
     next = map_domain_page(l1e_get_mfn(*p2m_entry));
     if ( unmap )
         unmap_domain_page(*table);
     *table = next;
 
     return 0;
+
+ free_ptp:
+    ASSERT(mfn_valid(mfn));
+    p2m_free_ptp(p2m, mfn_to_page(mfn));
+    return rc;
 }
 
 /*
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Reply via email to