[Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-05 Thread Andre Przywara
Allow a guest to provide the address and size for the memory regions
it has reserved for the GICv3 pending and property tables.
We sanitise the various fields of the respective redistributor
registers and map those pages into Xen's address space to have easy
access.
This introduces a function to read and write from and to guest memory,
to be later able to access the tables located there.
This vgic_access_guest_memory() function has been written by Vijaya Kumar
as part of an earlier series.

Signed-off-by: Andre Przywara 
---
 xen/arch/arm/vgic-v3.c   | 152 ++-
 xen/arch/arm/vgic.c  |  39 +++
 xen/include/asm-arm/domain.h |   6 +-
 xen/include/asm-arm/vgic.h   |   3 +
 4 files changed, 182 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 2a14305..0623803 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -19,12 +19,14 @@
  */
 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -228,12 +230,21 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, 
mmio_info_t *info,
 goto read_reserved;
 
 case VREG64(GICR_PROPBASER):
-/* LPI's not implemented */
-goto read_as_zero_64;
+if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+spin_lock(&v->arch.vgic.lock);
+*r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
+spin_unlock(&v->arch.vgic.lock);
+return 1;
 
 case VREG64(GICR_PENDBASER):
-/* LPI's not implemented */
-goto read_as_zero_64;
+if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+spin_lock(&v->arch.vgic.lock);
+*r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
+*r &= ~GICR_PENDBASER_PTZ;   /* WO, reads as 0 */
+spin_unlock(&v->arch.vgic.lock);
+return 1;
 
 case 0x0080:
 goto read_reserved;
@@ -301,11 +312,6 @@ bad_width:
 domain_crash_synchronous();
 return 0;
 
-read_as_zero_64:
-if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-*r = 0;
-return 1;
-
 read_as_zero_32:
 if ( dabt.size != DABT_WORD ) goto bad_width;
 *r = 0;
@@ -330,11 +336,95 @@ read_unknown:
 return 1;
 }
 
+static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
+int field_shift,
+uint64_t (*sanitise_fn)(uint64_t))
+{
+uint64_t field = (reg & field_mask) >> field_shift;
+
+field = sanitise_fn(field) << field_shift;
+
+return (reg & ~field_mask) | field;
+}
+
+/* We want to avoid outer shareable. */
+static uint64_t vgic_sanitise_shareability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_OuterShareable:
+return GIC_BASER_InnerShareable;
+default:
+return field;
+}
+}
+
+/* Avoid any inner non-cacheable mapping. */
+static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_CACHE_nCnB:
+case GIC_BASER_CACHE_nC:
+return GIC_BASER_CACHE_RaWb;
+default:
+return field;
+}
+}
+
+/* Non-cacheable or same-as-inner are OK. */
+static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_CACHE_SameAsInner:
+case GIC_BASER_CACHE_nC:
+return field;
+default:
+return GIC_BASER_CACHE_nC;
+}
+}
+
+static uint64_t sanitize_propbaser(uint64_t reg)
+{
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+  GICR_PROPBASER_SHAREABILITY_SHIFT,
+  vgic_sanitise_shareability);
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+  vgic_sanitise_inner_cacheability);
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+  vgic_sanitise_outer_cacheability);
+
+reg &= ~GICR_PROPBASER_RES0_MASK;
+
+return reg;
+}
+
+static uint64_t sanitize_pendbaser(uint64_t reg)
+{
+reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+  GICR_PENDBASER_SHAREABILITY_SHIFT,
+  vgic_sanitise_shareability);
+reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
+  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
+  vgic_sanitise_inner_cacheability);
+reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
+  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
+  vgic_sanitise_outer_cacheability);
+
+reg &= ~GICR_PE

Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-05 Thread Stefano Stabellini
On Thu, 6 Apr 2017, Andre Przywara wrote:
> Allow a guest to provide the address and size for the memory regions
> it has reserved for the GICv3 pending and property tables.
> We sanitise the various fields of the respective redistributor
> registers and map those pages into Xen's address space to have easy
> access.
> This introduces a function to read and write from and to guest memory,
> to be later able to access the tables located there.
> This vgic_access_guest_memory() function has been written by Vijaya Kumar
> as part of an earlier series.
> 
> Signed-off-by: Andre Przywara 
> ---
>  xen/arch/arm/vgic-v3.c   | 152 
> ++-
>  xen/arch/arm/vgic.c  |  39 +++
>  xen/include/asm-arm/domain.h |   6 +-
>  xen/include/asm-arm/vgic.h   |   3 +
>  4 files changed, 182 insertions(+), 18 deletions(-)
> 
> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
> index 2a14305..0623803 100644
> --- a/xen/arch/arm/vgic-v3.c
> +++ b/xen/arch/arm/vgic-v3.c
> @@ -19,12 +19,14 @@
>   */
>  
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -228,12 +230,21 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu 
> *v, mmio_info_t *info,
>  goto read_reserved;
>  
>  case VREG64(GICR_PROPBASER):
> -/* LPI's not implemented */
> -goto read_as_zero_64;
> +if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +spin_lock(&v->arch.vgic.lock);
> +*r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
> +spin_unlock(&v->arch.vgic.lock);
> +return 1;
>  
>  case VREG64(GICR_PENDBASER):
> -/* LPI's not implemented */
> -goto read_as_zero_64;
> +if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> +
> +spin_lock(&v->arch.vgic.lock);
> +*r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
> +*r &= ~GICR_PENDBASER_PTZ;   /* WO, reads as 0 */
> +spin_unlock(&v->arch.vgic.lock);
> +return 1;
>  
>  case 0x0080:
>  goto read_reserved;
> @@ -301,11 +312,6 @@ bad_width:
>  domain_crash_synchronous();
>  return 0;
>  
> -read_as_zero_64:
> -if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
> -*r = 0;
> -return 1;
> -
>  read_as_zero_32:
>  if ( dabt.size != DABT_WORD ) goto bad_width;
>  *r = 0;
> @@ -330,11 +336,95 @@ read_unknown:
>  return 1;
>  }
>  
> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
> +int field_shift,
> +uint64_t (*sanitise_fn)(uint64_t))
> +{
> +uint64_t field = (reg & field_mask) >> field_shift;
> +
> +field = sanitise_fn(field) << field_shift;
> +
> +return (reg & ~field_mask) | field;
> +}
> +
> +/* We want to avoid outer shareable. */
> +static uint64_t vgic_sanitise_shareability(uint64_t field)
> +{
> +switch ( field )
> +{
> +case GIC_BASER_OuterShareable:
> +return GIC_BASER_InnerShareable;
> +default:
> +return field;
> +}
> +}
> +
> +/* Avoid any inner non-cacheable mapping. */
> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
> +{
> +switch ( field )
> +{
> +case GIC_BASER_CACHE_nCnB:
> +case GIC_BASER_CACHE_nC:
> +return GIC_BASER_CACHE_RaWb;
> +default:
> +return field;
> +}
> +}
> +
> +/* Non-cacheable or same-as-inner are OK. */
> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
> +{
> +switch ( field )
> +{
> +case GIC_BASER_CACHE_SameAsInner:
> +case GIC_BASER_CACHE_nC:
> +return field;
> +default:
> +return GIC_BASER_CACHE_nC;
> +}
> +}
> +
> +static uint64_t sanitize_propbaser(uint64_t reg)
> +{
> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
> +  GICR_PROPBASER_SHAREABILITY_SHIFT,
> +  vgic_sanitise_shareability);
> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
> +  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
> +  vgic_sanitise_inner_cacheability);
> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
> +  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
> +  vgic_sanitise_outer_cacheability);
> +
> +reg &= ~GICR_PROPBASER_RES0_MASK;
> +
> +return reg;
> +}
> +
> +static uint64_t sanitize_pendbaser(uint64_t reg)
> +{
> +reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
> +  GICR_PENDBASER_SHAREABILITY_SHIFT,
> +  vgic_sanitise_shareability);
> +reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
> +  

Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-06 Thread Julien Grall

Hi Stefano,

On 06/04/17 00:55, Stefano Stabellini wrote:

On Thu, 6 Apr 2017, Andre Przywara wrote:

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index cd9a2a5..9b0dc3d 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -589,6 +590,44 @@ void vgic_free_virq(struct domain *d, unsigned int virq)
 clear_bit(virq, d->arch.vgic.allocated_irqs);
 }

+int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void *addr,
+ uint32_t size, bool_t is_write)


Because there are no callers of this function, I think it breaks the
build.


That would have been true if the function was not exported ;)

Cheers,

--
Julien Grall

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-06 Thread Andre Przywara
Hi,

On 06/04/17 00:55, Stefano Stabellini wrote:
> On Thu, 6 Apr 2017, Andre Przywara wrote:
>> Allow a guest to provide the address and size for the memory regions
>> it has reserved for the GICv3 pending and property tables.
>> We sanitise the various fields of the respective redistributor
>> registers and map those pages into Xen's address space to have easy
>> access.
>> This introduces a function to read and write from and to guest memory,
>> to be later able to access the tables located there.
>> This vgic_access_guest_memory() function has been written by Vijaya Kumar
>> as part of an earlier series.
>>
>> Signed-off-by: Andre Przywara 
>> ---
>>  xen/arch/arm/vgic-v3.c   | 152 
>> ++-
>>  xen/arch/arm/vgic.c  |  39 +++
>>  xen/include/asm-arm/domain.h |   6 +-
>>  xen/include/asm-arm/vgic.h   |   3 +
>>  4 files changed, 182 insertions(+), 18 deletions(-)
>>
>> diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
>> index 2a14305..0623803 100644
>> --- a/xen/arch/arm/vgic-v3.c
>> +++ b/xen/arch/arm/vgic-v3.c
>> @@ -19,12 +19,14 @@
>>   */
>>  
>>  #include 
>> +#include 
>>  #include 
>>  #include 
>>  #include 
>>  #include 
>>  #include 
>>  #include 
>> +#include 
>>  #include 
>>  #include 
>>  #include 
>> @@ -228,12 +230,21 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu 
>> *v, mmio_info_t *info,
>>  goto read_reserved;
>>  
>>  case VREG64(GICR_PROPBASER):
>> -/* LPI's not implemented */
>> -goto read_as_zero_64;
>> +if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +
>> +spin_lock(&v->arch.vgic.lock);
>> +*r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);
>> +spin_unlock(&v->arch.vgic.lock);
>> +return 1;
>>  
>>  case VREG64(GICR_PENDBASER):
>> -/* LPI's not implemented */
>> -goto read_as_zero_64;
>> +if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> +
>> +spin_lock(&v->arch.vgic.lock);
>> +*r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
>> +*r &= ~GICR_PENDBASER_PTZ;   /* WO, reads as 0 */
>> +spin_unlock(&v->arch.vgic.lock);
>> +return 1;
>>  
>>  case 0x0080:
>>  goto read_reserved;
>> @@ -301,11 +312,6 @@ bad_width:
>>  domain_crash_synchronous();
>>  return 0;
>>  
>> -read_as_zero_64:
>> -if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
>> -*r = 0;
>> -return 1;
>> -
>>  read_as_zero_32:
>>  if ( dabt.size != DABT_WORD ) goto bad_width;
>>  *r = 0;
>> @@ -330,11 +336,95 @@ read_unknown:
>>  return 1;
>>  }
>>  
>> +static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
>> +int field_shift,
>> +uint64_t (*sanitise_fn)(uint64_t))
>> +{
>> +uint64_t field = (reg & field_mask) >> field_shift;
>> +
>> +field = sanitise_fn(field) << field_shift;
>> +
>> +return (reg & ~field_mask) | field;
>> +}
>> +
>> +/* We want to avoid outer shareable. */
>> +static uint64_t vgic_sanitise_shareability(uint64_t field)
>> +{
>> +switch ( field )
>> +{
>> +case GIC_BASER_OuterShareable:
>> +return GIC_BASER_InnerShareable;
>> +default:
>> +return field;
>> +}
>> +}
>> +
>> +/* Avoid any inner non-cacheable mapping. */
>> +static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
>> +{
>> +switch ( field )
>> +{
>> +case GIC_BASER_CACHE_nCnB:
>> +case GIC_BASER_CACHE_nC:
>> +return GIC_BASER_CACHE_RaWb;
>> +default:
>> +return field;
>> +}
>> +}
>> +
>> +/* Non-cacheable or same-as-inner are OK. */
>> +static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
>> +{
>> +switch ( field )
>> +{
>> +case GIC_BASER_CACHE_SameAsInner:
>> +case GIC_BASER_CACHE_nC:
>> +return field;
>> +default:
>> +return GIC_BASER_CACHE_nC;
>> +}
>> +}
>> +
>> +static uint64_t sanitize_propbaser(uint64_t reg)
>> +{
>> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
>> +  GICR_PROPBASER_SHAREABILITY_SHIFT,
>> +  vgic_sanitise_shareability);
>> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
>> +  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
>> +  vgic_sanitise_inner_cacheability);
>> +reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
>> +  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
>> +  vgic_sanitise_outer_cacheability);
>> +
>> +reg &= ~GICR_PROPBASER_RES0_MASK;
>> +
>> +return reg;
>> +}
>> +
>> +static uint64_t sanitize_pendbaser(uint64_t reg)
>> +{
>> +reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
>> + 

Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-06 Thread Julien Grall

Hi Andre,

On 06/04/17 12:25, Andre Przywara wrote:

On 06/04/17 00:55, Stefano Stabellini wrote:

On Thu, 6 Apr 2017, Andre Przywara wrote:

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index cd9a2a5..9b0dc3d 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -589,6 +590,44 @@ void vgic_free_virq(struct domain *d, unsigned int virq)
 clear_bit(virq, d->arch.vgic.allocated_irqs);
 }

+int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void *addr,
+ uint32_t size, bool_t is_write)


Because there are no callers of this function, I think it breaks the
build.


How so? This is a non-static function in a .c file.
But indeed this function is prematurely introduced, we only need it two
patches later.
Fixed that.


I would actually prefer a separate patch for that and you likely need to 
add the Signed-off-by from Vijay as he was the author of the code.


Cheers,

--
Julien Grall

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-06 Thread Andre Przywara
Hi,

On 06/04/17 12:24, Julien Grall wrote:
> Hi Andre,
> 
> On 06/04/17 12:25, Andre Przywara wrote:
>> On 06/04/17 00:55, Stefano Stabellini wrote:
>>> On Thu, 6 Apr 2017, Andre Przywara wrote:
 diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
 index cd9a2a5..9b0dc3d 100644
 --- a/xen/arch/arm/vgic.c
 +++ b/xen/arch/arm/vgic.c
 @@ -20,6 +20,7 @@
  #include 
  #include 
  #include 
 +#include 
  #include 
  #include 
  #include 
 @@ -589,6 +590,44 @@ void vgic_free_virq(struct domain *d, unsigned
 int virq)
  clear_bit(virq, d->arch.vgic.allocated_irqs);
  }

 +int vgic_access_guest_memory(struct domain *d, paddr_t gpa, void
 *addr,
 + uint32_t size, bool_t is_write)
>>>
>>> Because there are no callers of this function, I think it breaks the
>>> build.
>>
>> How so? This is a non-static function in a .c file.
>> But indeed this function is prematurely introduced, we only need it two
>> patches later.
>> Fixed that.
> 
> I would actually prefer a separate patch for that and you likely need to
> add the Signed-off-by from Vijay as he was the author of the code.

Had just the same idea

Cheers,
Andre.

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH v5 15/30] ARM: vGICv3: handle virtual LPI pending and property tables

2017-04-06 Thread Julien Grall

Hi Andre,

On 04/06/2017 12:19 AM, Andre Przywara wrote:

Allow a guest to provide the address and size for the memory regions
it has reserved for the GICv3 pending and property tables.
We sanitise the various fields of the respective redistributor
registers and map those pages into Xen's address space to have easy
access.
This introduces a function to read and write from and to guest memory,
to be later able to access the tables located there.
This vgic_access_guest_memory() function has been written by Vijaya Kumar
as part of an earlier series.

Signed-off-by: Andre Przywara 
---
 xen/arch/arm/vgic-v3.c   | 152 ++-
 xen/arch/arm/vgic.c  |  39 +++
 xen/include/asm-arm/domain.h |   6 +-
 xen/include/asm-arm/vgic.h   |   3 +
 4 files changed, 182 insertions(+), 18 deletions(-)

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 2a14305..0623803 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -19,12 +19,14 @@
  */

 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -228,12 +230,21 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, 
mmio_info_t *info,
 goto read_reserved;

 case VREG64(GICR_PROPBASER):
-/* LPI's not implemented */
-goto read_as_zero_64;
+if ( !vgic_reg64_check_access(dabt) ) goto bad_width;


As discussed f2f, I would like to see this code gated with "if has_its" 
for now.


By that I mean:

if ( !has_its )
  goto read_as_zero_64;


+
+spin_lock(&v->arch.vgic.lock);


The locking looks wrong to me. rdist_propbase is per domain but you take 
the vCPU vgic lock.


You likely want to take the domain vgic lock. e.g:

vgic_lock(v);


+*r = vgic_reg64_extract(v->domain->arch.vgic.rdist_propbase, info);


NIT: It would simplify the code if you introduce a temporary variable d 
to store v->domain.



+spin_unlock(&v->arch.vgic.lock);
+return 1;

 case VREG64(GICR_PENDBASER):
-/* LPI's not implemented */
-goto read_as_zero_64;


Same here.


+if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
+
+spin_lock(&v->arch.vgic.lock);
+*r = vgic_reg64_extract(v->arch.vgic.rdist_pendbase, info);
+*r &= ~GICR_PENDBASER_PTZ;   /* WO, reads as 0 */
+spin_unlock(&v->arch.vgic.lock);
+return 1;

 case 0x0080:
 goto read_reserved;
@@ -301,11 +312,6 @@ bad_width:
 domain_crash_synchronous();
 return 0;

-read_as_zero_64:
-if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-*r = 0;
-return 1;
-
 read_as_zero_32:
 if ( dabt.size != DABT_WORD ) goto bad_width;
 *r = 0;
@@ -330,11 +336,95 @@ read_unknown:
 return 1;
 }

+static uint64_t vgic_sanitise_field(uint64_t reg, uint64_t field_mask,
+int field_shift,
+uint64_t (*sanitise_fn)(uint64_t))
+{
+uint64_t field = (reg & field_mask) >> field_shift;
+
+field = sanitise_fn(field) << field_shift;
+
+return (reg & ~field_mask) | field;
+}
+
+/* We want to avoid outer shareable. */
+static uint64_t vgic_sanitise_shareability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_OuterShareable:
+return GIC_BASER_InnerShareable;
+default:
+return field;
+}
+}
+
+/* Avoid any inner non-cacheable mapping. */
+static uint64_t vgic_sanitise_inner_cacheability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_CACHE_nCnB:
+case GIC_BASER_CACHE_nC:
+return GIC_BASER_CACHE_RaWb;
+default:
+return field;
+}
+}
+
+/* Non-cacheable or same-as-inner are OK. */
+static uint64_t vgic_sanitise_outer_cacheability(uint64_t field)
+{
+switch ( field )
+{
+case GIC_BASER_CACHE_SameAsInner:
+case GIC_BASER_CACHE_nC:
+return field;
+default:
+return GIC_BASER_CACHE_nC;
+}
+}
+
+static uint64_t sanitize_propbaser(uint64_t reg)
+{
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+  GICR_PROPBASER_SHAREABILITY_SHIFT,
+  vgic_sanitise_shareability);
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+  vgic_sanitise_inner_cacheability);
+reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+  vgic_sanitise_outer_cacheability);
+
+reg &= ~GICR_PROPBASER_RES0_MASK;
+
+return reg;
+}
+
+static uint64_t sanitize_pendbaser(uint64_t reg)
+{
+reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+  GICR_PENDBASER_SHAREABILITY_SHIFT,
+  vgi