Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-08-06 Thread Christoffer Dall
On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> This patch adds the following registers for access via the
> KVM_{GET,SET}_ONE_REG interface:
> 
>  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
>  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
>  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> 
> In order to adapt gracefully to future architectural extensions,
> the registers are divided up into slices as noted above:  the i
> parameter denotes the slice index.
> 
> For simplicity, bits or slices that exceed the maximum vector
> length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> read as zero for KVM_GET_ONE_REG.
> 
> For the current architecture, only slice i = 0 is significant.  The
> interface design allows i to increase to up to 31 in the future if
> required by future architectural amendments.
> 
> The registers are only visible for vcpus that have SVE enabled.
> They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> have SVE.  In all cases, surplus slices are not enumerated by
> KVM_GET_REG_LIST.
> 
> Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> redirected to access the underlying vcpu SVE register storage as
> appropriate.  In order to make this more straightforward, register
> accesses that straddle register boundaries are no longer guaranteed
> to succeed.  (Support for such use was never deliberate, and
> userspace does not currently seem to be relying on it.)

Could you add documentation to Documentation/virtual/kvm/api.txt for
this as well under the KVM_SET_ONE_REG definitions explaining the use
for arm64 ?

Thanks,
-Christoffer

> 
> Signed-off-by: Dave Martin 
> ---
>  arch/arm64/include/uapi/asm/kvm.h |  10 ++
>  arch/arm64/kvm/guest.c| 219 
> +++---
>  2 files changed, 216 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/include/uapi/asm/kvm.h 
> b/arch/arm64/include/uapi/asm/kvm.h
> index 4e76630..f54a9b0 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -213,6 +213,16 @@ struct kvm_arch_memory_slot {
>    KVM_REG_ARM_FW | ((r) & 0xffff))
>  #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
>  
> +/* SVE registers */
> +#define KVM_REG_ARM64_SVE(0x15 << KVM_REG_ARM_COPROC_SHIFT)
> +#define KVM_REG_ARM64_SVE_ZREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> +  KVM_REG_SIZE_U2048 |   \
> +  ((n) << 5) | (i))
> +#define KVM_REG_ARM64_SVE_PREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> +  KVM_REG_SIZE_U256 |\
> +  ((n) << 5) | (i) | 0x400)
> +#define KVM_REG_ARM64_SVE_FFR(i) KVM_REG_ARM64_SVE_PREG(16, i)
> +
>  /* Device Control API: ARM VGIC */
>  #define KVM_DEV_ARM_VGIC_GRP_ADDR0
>  #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS   1
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index 4a9d77c..005394b 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -23,14 +23,19 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> +#include 
> +#include 
>  
>  #include "trace.h"
>  
> @@ -57,6 +62,106 @@ static u64 core_reg_offset_from_id(u64 id)
>   return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
>  }
>  
> +static bool is_zreg(const struct kvm_one_reg *reg)
> +{
> + return  reg->id >= KVM_REG_ARM64_SVE_ZREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS, 0x1f);
> +}
> +
> +static bool is_preg(const struct kvm_one_reg *reg)
> +{
> + return  reg->id >= KVM_REG_ARM64_SVE_PREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_FFR(0x1f);
> +}
> +
> +static unsigned int sve_reg_num(const struct kvm_one_reg *reg)
> +{
> + return (reg->id >> 5) & 0x1f;
> +}
> +
> +static unsigned int sve_reg_index(const struct kvm_one_reg *reg)
> +{
> + return reg->id & 0x1f;
> +}
> +
> +struct reg_bounds_struct {
> + char *kptr;
> + size_t start_offset;
> + size_t copy_count;
> + size_t flush_count;
> +};
> +
> +static int copy_bounded_reg_to_user(void __user *uptr,
> + const struct reg_bounds_struct *b)
> +{
> + if (copy_to_user(uptr, b->kptr, b->copy_count) ||
> + clear_user((char __user *)uptr + b->copy_count, b->flush_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int copy_bounded_reg_from_user(const struct reg_bounds_struct *b,
> +   const void __user *uptr)
> +{
> + if (copy_from_user(b->kptr, uptr, b->copy_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int fpsimd_vreg_bounds(struct 

Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-08-03 Thread Dave Martin
On Fri, Aug 03, 2018 at 05:11:09PM +0200, Andrew Jones wrote:
> On Fri, Aug 03, 2018 at 03:57:59PM +0100, Dave Martin wrote:
> > On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> > > On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > > > This patch adds the following registers for access via the
> > > > KVM_{GET,SET}_ONE_REG interface:
> > > > 
> > > >  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
> > > >  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
> > > >  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> > > > 
> > > > In order to adapt gracefully to future architectural extensions,
> > > > the registers are divided up into slices as noted above:  the i
> > > > parameter denotes the slice index.
> > > > 
> > > > For simplicity, bits or slices that exceed the maximum vector
> > > > length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> > > > read as zero for KVM_GET_ONE_REG.
> > > > 
> > > > For the current architecture, only slice i = 0 is significant.  The
> > > > interface design allows i to increase to up to 31 in the future if
> > > > required by future architectural amendments.
> > > > 
> > > > The registers are only visible for vcpus that have SVE enabled.
> > > > They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> > > > have SVE.  In all cases, surplus slices are not enumerated by
> > > > KVM_GET_REG_LIST.
> > > > 
> > > > Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> > > > redirected to access the underlying vcpu SVE register storage as
> > > > appropriate.  In order to make this more straightforward, register
> > > > accesses that straddle register boundaries are no longer guaranteed
> > > > to succeed.  (Support for such use was never deliberate, and
> > > > userspace does not currently seem to be relying on it.)
> > > > 
> > > > Signed-off-by: Dave Martin 
> > 
> > [...]
> > 
> > > > diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> > 
> > [...]
> > 
> > > > +static int sve_reg_bounds(struct reg_bounds_struct *b,
> > > > + const struct kvm_vcpu *vcpu,
> > > > + const struct kvm_one_reg *reg)
> > > > +{
> > 
> > [...]
> > 
> > > > +   b->kptr += start;
> > > > +
> > > > +   if (copy_limit < start)
> > > > +   copy_limit = start;
> > > > +   else if (copy_limit > limit)
> > > > +   copy_limit = limit;
> > > 
> > >  copy_limit = clamp(copy_limit, start, limit)
> > 
> > Hmmm, having looked in detail in the definition of clamp(), I'm not sure
> > I like it that much -- it can introduce type issues that are not readily
> > apparent to the reader.
> > 
> > gcc can warn about signed/unsigned comparisons, which is the only issue
> > where clamp() genuinely helps AFAICT, but this requires -Wsign-compare
> > (which is not enabled by default, nor with -Wall).  Great.
> > 
> > I can use clamp() if you feel strongly about it, but otherwise I tend
> > to prefer my subtleties to be in plain sight rather than buried inside a
> > macro, unless there is a serious verbosity impact from not using the
> > macro (here, I would say there isn't, since it's just a single
> > instance).
> >
> 
> Would clamp_t, with an appropriate type, satisfy your concerns?

clamp_t() seems worse actually, since it replaces the typechecking
that is the main benefit of clamp() with explicit, unsafe typecasts.


To save just a few lines of code, I wasn't sure it was really worth
opening this can of worms...

Cheers
---Dave
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-08-03 Thread Andrew Jones
On Fri, Aug 03, 2018 at 03:57:59PM +0100, Dave Martin wrote:
> On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> > On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > > This patch adds the following registers for access via the
> > > KVM_{GET,SET}_ONE_REG interface:
> > > 
> > >  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
> > >  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
> > >  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> > > 
> > > In order to adapt gracefully to future architectural extensions,
> > > the registers are divided up into slices as noted above:  the i
> > > parameter denotes the slice index.
> > > 
> > > For simplicity, bits or slices that exceed the maximum vector
> > > length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> > > read as zero for KVM_GET_ONE_REG.
> > > 
> > > For the current architecture, only slice i = 0 is significant.  The
> > > interface design allows i to increase to up to 31 in the future if
> > > required by future architectural amendments.
> > > 
> > > The registers are only visible for vcpus that have SVE enabled.
> > > They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> > > have SVE.  In all cases, surplus slices are not enumerated by
> > > KVM_GET_REG_LIST.
> > > 
> > > Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> > > redirected to access the underlying vcpu SVE register storage as
> > > appropriate.  In order to make this more straightforward, register
> > > accesses that straddle register boundaries are no longer guaranteed
> > > to succeed.  (Support for such use was never deliberate, and
> > > userspace does not currently seem to be relying on it.)
> > > 
> > > Signed-off-by: Dave Martin 
> 
> [...]
> 
> > > diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> 
> [...]
> 
> > > +static int sve_reg_bounds(struct reg_bounds_struct *b,
> > > +   const struct kvm_vcpu *vcpu,
> > > +   const struct kvm_one_reg *reg)
> > > +{
> 
> [...]
> 
> > > + b->kptr += start;
> > > +
> > > + if (copy_limit < start)
> > > + copy_limit = start;
> > > + else if (copy_limit > limit)
> > > + copy_limit = limit;
> > 
> >  copy_limit = clamp(copy_limit, start, limit)
> 
> Hmmm, having looked in detail in the definition of clamp(), I'm not sure
> I like it that much -- it can introduce type issues that are not readily
> apparent to the reader.
> 
> gcc can warn about signed/unsigned comparisons, which is the only issue
> where clamp() genuinely helps AFAICT, but this requires -Wsign-compare
> (which is not enabled by default, nor with -Wall).  Great.
> 
> I can use clamp() if you feel strongly about it, but otherwise I tend
> to prefer my subtleties to be in plain sight rather than buried inside a
> macro, unless there is a serious verbosity impact from not using the
> macro (here, I would say there isn't, since it's just a single
> instance).
>

Would clamp_t, with an appropriate type, satisfy your concerns?

Thanks,
drew
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-08-03 Thread Dave Martin
On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > This patch adds the following registers for access via the
> > KVM_{GET,SET}_ONE_REG interface:
> > 
> >  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
> >  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
> >  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> > 
> > In order to adapt gracefully to future architectural extensions,
> > the registers are divided up into slices as noted above:  the i
> > parameter denotes the slice index.
> > 
> > For simplicity, bits or slices that exceed the maximum vector
> > length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> > read as zero for KVM_GET_ONE_REG.
> > 
> > For the current architecture, only slice i = 0 is significant.  The
> > interface design allows i to increase to up to 31 in the future if
> > required by future architectural amendments.
> > 
> > The registers are only visible for vcpus that have SVE enabled.
> > They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> > have SVE.  In all cases, surplus slices are not enumerated by
> > KVM_GET_REG_LIST.
> > 
> > Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> > redirected to access the underlying vcpu SVE register storage as
> > appropriate.  In order to make this more straightforward, register
> > accesses that straddle register boundaries are no longer guaranteed
> > to succeed.  (Support for such use was never deliberate, and
> > userspace does not currently seem to be relying on it.)
> > 
> > Signed-off-by: Dave Martin 

[...]

> > diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c

[...]

> > +static int sve_reg_bounds(struct reg_bounds_struct *b,
> > + const struct kvm_vcpu *vcpu,
> > + const struct kvm_one_reg *reg)
> > +{

[...]

> > +   b->kptr += start;
> > +
> > +   if (copy_limit < start)
> > +   copy_limit = start;
> > +   else if (copy_limit > limit)
> > +   copy_limit = limit;
> 
>  copy_limit = clamp(copy_limit, start, limit)

Hmmm, having looked in detail in the definition of clamp(), I'm not sure
I like it that much -- it can introduce type issues that are not readily
apparent to the reader.

gcc can warn about signed/unsigned comparisons, which is the only issue
where clamp() genuinely helps AFAICT, but this requires -Wsign-compare
(which is not enabled by default, nor with -Wall).  Great.

I can use clamp() if you feel strongly about it, but otherwise I tend
to prefer my subtleties to be in plain sight rather than buried inside a
macro, unless there is a serious verbosity impact from not using the
macro (here, I would say there isn't, since it's just a single
instance).

[...]

Cheers
---Dave
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-07-26 Thread Dave Martin
On Wed, Jul 25, 2018 at 07:20:57PM +0200, Andrew Jones wrote:
> On Wed, Jul 25, 2018 at 03:06:21PM +0100, Dave Martin wrote:
> > On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> > > On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > > > +
> > > > +   if (usize % sizeof(u32))
> > > > +   return -EINVAL;
> > > 
> > 
> > Currently we don't enforce the register size to be a multiple of 32 bits,
> > but I'm trying to establish a stronger position.  Passing different
> > register sizes feels like an abuse of the API and there is no evidence
> > that qemu or kvmtool is relying on this so far.  The ability to pass
> > a misaligned register ID and/or slurp multiple vcpu registers (or parts
> of registers) in one call really seems like it works by accident today
> > and seems not to be intentional design.  Rather, it exposes kernel
> > implementation details, which is best avoided.
> > 
> > It would be better to make this a global check for usize % 32 == 0
> > though, rather than burying it in fpsimd_vreg_bounds().
> > 
> > Opinions?
> 
> There's only one reason to not start enforcing it globally on arm/arm64,
> and that's that it's not documented that way. Changing it would be an API
> change, rather than just an API fix. It's probably a safe change, but...

I agree, though there are few direct users of this API, and I couldn't
come up with a scenario where anyone in their right mind would access
the core regs struct with access sizes <= 16 bits, and I've seen no
evidence so far of the API being used in this way.

So it would be nice to close this hole before it springs a leak.

I'll keep it for now, but flag it up for attention in the repost.
I'm happy to drop it if people care strongly enough.

> > > > +
> > > > +   usize /= sizeof(u32);
> > > > +
> > > > +   if ((uoffset <= start && usize <= start - uoffset) ||
> > > > +   uoffset >= limit)
> > > > +   return -ENOENT; /* not a vreg */
> > > > +
> > > > +   BUILD_BUG_ON(uoffset > limit);
> > > 
> > > Hmm, a build bug on uoffset can't be right, it's not a constant.
> > > 
> > > > +   if (uoffset < start || usize > limit - uoffset)
> > > > +   return -EINVAL; /* overlaps vregs[] bounds */
> > 
> > uoffset is not compile-time constant, but (uoffset > limit) is compile-
> > time constant, because the previous if() returns from the function
> > otherwise.
> > 
> > gcc seems to do the right thing here: the code compiles as-is, but
> > if the prior if() is commented out then the BUILD_BUG_ON() fires
> > because (uoffset > limit) is no longer compile-time constant.
> 
> Oh, interesting.
> 
> > 
> > 
> > This is a defensively-coded bounds check, where
> > 
> > if (A + B > C)
> > 
> > is transformed to
> > 
> > if (C >= B && A > C - B)
> > 
> > The former is susceptible to overflow in (A + B), whereas the latter is
> > not.  We might be able to hide the risk with type casts, but that trades
> > one kind of fragility for another IMHO.
> > 
> > In this patch, the C >= B part is subsumed into the previous if(), but
> > because this is non-obvious I dropped the BUILD_BUG_ON() in as a hint
> > to maintainers that we really do depend on a property of the previous
> > check, so although it may look like the checks could be swapped over
> > with no ill effects, really that is not safe.
> 
> I'm glad our maintainers can pick up on hints like that :-) Maybe you can
> add a comment for mortals like me though.

Hint taken...  I'll add a comment.  No doubt I'd eventually forget why 
the BUILD_BUG_ON() was there too.

> > Maybe the BUILD_BUG_ON() is superfluous, but I would prefer at least
> > to keep a comment here.
> > 
> > What do you think.
> >
> 
> Comment plus build-bug or just comment works for me.
> 
> > 
> > OTOH, if we can show conclusively that we can avoid overflow here
> > then the code can be simplified.  But I would want to be confident
> > that this is really safe not just now but also under future maintenance.
> > 
> 
> I agree with thoroughly checking user input. Maybe we can create/use
> some helper functions to do it. Those helpers can then get reused
> elsewhere, helping to keep ourselves sane the next time we need to
> do similar sanity checks.

It's a bit tricky to get right, because it all depends on the
combination of types being used in the expression.

I might have a think about how to do this, but for now I don't want to
introduce more churn.

Cheers
---Dave
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-07-25 Thread Andrew Jones
On Wed, Jul 25, 2018 at 03:06:21PM +0100, Dave Martin wrote:
> On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> > On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > > +
> > > + if (usize % sizeof(u32))
> > > + return -EINVAL;
> > 
> 
> Currently we don't enforce the register size to be a multiple of 32 bits,
> but I'm trying to establish a stronger position.  Passing different
> register sizes feels like an abuse of the API and there is no evidence
> that qemu or kvmtool is relying on this so far.  The ability to pass
> a misaligned register ID and/or slurp multiple vcpu registers (or parts
> of registers) in one call really seems like it works by accident today
> and seems not to be intentional design.  Rather, it exposes kernel
> implementation details, which is best avoided.
> 
> It would be better to make this a global check for usize % 32 == 0
> though, rather than burying it in fpsimd_vreg_bounds().
> 
> Opinions?

There's only one reason to not start enforcing it globally on arm/arm64,
and that's that it's not documented that way. Changing it would be an API
change, rather than just an API fix. It's probably a safe change, but...

> 
> > > +
> > > + usize /= sizeof(u32);
> > > +
> > > + if ((uoffset <= start && usize <= start - uoffset) ||
> > > + uoffset >= limit)
> > > + return -ENOENT; /* not a vreg */
> > > +
> > > + BUILD_BUG_ON(uoffset > limit);
> > 
> > Hmm, a build bug on uoffset can't be right, it's not a constant.
> > 
> > > + if (uoffset < start || usize > limit - uoffset)
> > > + return -EINVAL; /* overlaps vregs[] bounds */
> 
> uoffset is not compile-time constant, but (uoffset > limit) is compile-
> time constant, because the previous if() returns from the function
> otherwise.
> 
> gcc seems to do the right thing here: the code compiles as-is, but
> if the prior if() is commented out then the BUILD_BUG_ON() fires
> because (uoffset > limit) is no longer compile-time constant.

Oh, interesting.

> 
> 
> This is a defensively-coded bounds check, where
> 
>   if (A + B > C)
> 
> is transformed to
> 
>   if (C >= B && A > C - B)
> 
> The former is susceptible to overflow in (A + B), whereas the latter is
> not.  We might be able to hide the risk with type casts, but that trades
> one kind of fragility for another IMHO.
> 
> In this patch, the C >= B part is subsumed into the previous if(), but
> because this is non-obvious I dropped the BUILD_BUG_ON() in as a hint
> to maintainers that we really do depend on a property of the previous
> check, so although it may look like the checks could be swapped over
> with no ill effects, really that is not safe.

I'm glad our maintainers can pick up on hints like that :-) Maybe you can
add a comment for mortals like me though.

> 
> 
> Maybe the BUILD_BUG_ON() is superfluous, but I would prefer at least
> to keep a comment here.
> 
> What do you think?
>

Comment plus build-bug or just comment works for me.

> 
> OTOH, if we can show conclusively that we can avoid overflow here
> then the code can be simplified.  But I would want to be confident
> that this is really safe not just now but also under future maintenance.
> 

I agree with thoroughly checking user input. Maybe we can create/use
some helper functions to do it. Those helpers can then get reused
elsewhere, helping to keep ourselves sane the next time we need to
do similar sanity checks.

Thanks,
drew
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-07-25 Thread Dave Martin
On Thu, Jul 19, 2018 at 03:04:33PM +0200, Andrew Jones wrote:
> On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> > This patch adds the following registers for access via the
> > KVM_{GET,SET}_ONE_REG interface:
> > 
> >  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
> >  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
> >  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> > 
> > In order to adapt gracefully to future architectural extensions,
> > the registers are divided up into slices as noted above:  the i
> > parameter denotes the slice index.
> > 
> > For simplicity, bits or slices that exceed the maximum vector
> > length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> > read as zero for KVM_GET_ONE_REG.
> > 
> > For the current architecture, only slice i = 0 is significant.  The
> > interface design allows i to increase to up to 31 in the future if
> > required by future architectural amendments.
> > 
> > The registers are only visible for vcpus that have SVE enabled.
> > They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> > have SVE.  In all cases, surplus slices are not enumerated by
> > KVM_GET_REG_LIST.
> > 
> > Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> > redirected to access the underlying vcpu SVE register storage as
> > appropriate.  In order to make this more straightforward, register
> > accesses that straddle register boundaries are no longer guaranteed
> > to succeed.  (Support for such use was never deliberate, and
> > userspace does not currently seem to be relying on it.)
> > 
> > Signed-off-by: Dave Martin 
> > ---
> >  arch/arm64/include/uapi/asm/kvm.h |  10 ++
> >  arch/arm64/kvm/guest.c| 219 
> > +++---
> >  2 files changed, 216 insertions(+), 13 deletions(-)
> > 
> > diff --git a/arch/arm64/include/uapi/asm/kvm.h 
> > b/arch/arm64/include/uapi/asm/kvm.h
> > index 4e76630..f54a9b0 100644
> > --- a/arch/arm64/include/uapi/asm/kvm.h
> > +++ b/arch/arm64/include/uapi/asm/kvm.h
> > @@ -213,6 +213,16 @@ struct kvm_arch_memory_slot {
> >  KVM_REG_ARM_FW | ((r) & 0xffff))
> >  #define KVM_REG_ARM_PSCI_VERSION   KVM_REG_ARM_FW_REG(0)
> >  
> > +/* SVE registers */
> > +#define KVM_REG_ARM64_SVE  (0x15 << KVM_REG_ARM_COPROC_SHIFT)
> > +#define KVM_REG_ARM64_SVE_ZREG(n, i)   (KVM_REG_ARM64 | 
> > KVM_REG_ARM64_SVE | \
> > +KVM_REG_SIZE_U2048 |   \
> > +((n) << 5) | (i))
> > +#define KVM_REG_ARM64_SVE_PREG(n, i)   (KVM_REG_ARM64 | 
> > KVM_REG_ARM64_SVE | \
> > +KVM_REG_SIZE_U256 |\
> > +((n) << 5) | (i) | 0x400)
> > +#define KVM_REG_ARM64_SVE_FFR(i)   KVM_REG_ARM64_SVE_PREG(16, i)
> > +
> >  /* Device Control API: ARM VGIC */
> >  #define KVM_DEV_ARM_VGIC_GRP_ADDR  0
> >  #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
> > diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> > index 4a9d77c..005394b 100644
> > --- a/arch/arm64/kvm/guest.c
> > +++ b/arch/arm64/kvm/guest.c
> > @@ -23,14 +23,19 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> >  #include 
> >  #include 
> > +#include 
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> > +#include 
> >  
> >  #include "trace.h"
> >  
> > @@ -57,6 +62,106 @@ static u64 core_reg_offset_from_id(u64 id)
> > return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
> >  }
> >  
> > +static bool is_zreg(const struct kvm_one_reg *reg)
> > +{
> > +   return  reg->id >= KVM_REG_ARM64_SVE_ZREG(0, 0) &&
> > +   reg->id <= KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS, 0x1f);
> > +}
> > +
> > +static bool is_preg(const struct kvm_one_reg *reg)
> > +{
> > +   return  reg->id >= KVM_REG_ARM64_SVE_PREG(0, 0) &&
> > +   reg->id <= KVM_REG_ARM64_SVE_FFR(0x1f);
> > +}
> > +
> > +static unsigned int sve_reg_num(const struct kvm_one_reg *reg)
> > +{
> > +   return (reg->id >> 5) & 0x1f;
> > +}
> > +
> > +static unsigned int sve_reg_index(const struct kvm_one_reg *reg)
> > +{
> > +   return reg->id & 0x1f;
> > +}
> > +
> > +struct reg_bounds_struct {
> > +   char *kptr;
> > +   size_t start_offset;
> 
> Maybe start_offset gets used in a later patch, but it doesn't seem
> to be used here.

Good spot.  It looks like I was originally going to have kptr point to
the base of the thing to be copied, rather than the exact start
location, with start_offset providing the necessary offset from the
base... then at some point I changed my mind.

I need to check through the code to see whether the code is consistent
now.  fpsimd_vreg_bounds() assigns this member, but nothing uses it
subsequently :/

[...]

> > +static int fpsimd_vreg_bounds(struct reg_bounds_struct *b,
> > + struct kvm_vcpu 

Re: [RFC PATCH 14/16] KVM: arm64/sve: Add SVE support to register access ioctl interface

2018-07-19 Thread Andrew Jones
On Thu, Jun 21, 2018 at 03:57:38PM +0100, Dave Martin wrote:
> This patch adds the following registers for access via the
> KVM_{GET,SET}_ONE_REG interface:
> 
>  * KVM_REG_ARM64_SVE_ZREG(n, i) (n = 0..31) (in 2048-bit slices)
>  * KVM_REG_ARM64_SVE_PREG(n, i) (n = 0..15) (in 256-bit slices)
>  * KVM_REG_ARM64_SVE_FFR(i) (in 256-bit slices)
> 
> In order to adapt gracefully to future architectural extensions,
> the registers are divided up into slices as noted above:  the i
> parameter denotes the slice index.
> 
> For simplicity, bits or slices that exceed the maximum vector
> length supported for the vcpu are ignored for KVM_SET_ONE_REG, and
> read as zero for KVM_GET_ONE_REG.
> 
> For the current architecture, only slice i = 0 is significant.  The
> interface design allows i to increase to up to 31 in the future if
> required by future architectural amendments.
> 
> The registers are only visible for vcpus that have SVE enabled.
> They are not enumerated by KVM_GET_REG_LIST on vcpus that do not
> have SVE.  In all cases, surplus slices are not enumerated by
> KVM_GET_REG_LIST.
> 
> Accesses to the FPSIMD registers via KVM_REG_ARM_CORE are
> redirected to access the underlying vcpu SVE register storage as
> appropriate.  In order to make this more straightforward, register
> accesses that straddle register boundaries are no longer guaranteed
> to succeed.  (Support for such use was never deliberate, and
> userspace does not currently seem to be relying on it.)
> 
> Signed-off-by: Dave Martin 
> ---
>  arch/arm64/include/uapi/asm/kvm.h |  10 ++
>  arch/arm64/kvm/guest.c| 219 
> +++---
>  2 files changed, 216 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/include/uapi/asm/kvm.h 
> b/arch/arm64/include/uapi/asm/kvm.h
> index 4e76630..f54a9b0 100644
> --- a/arch/arm64/include/uapi/asm/kvm.h
> +++ b/arch/arm64/include/uapi/asm/kvm.h
> @@ -213,6 +213,16 @@ struct kvm_arch_memory_slot {
>    KVM_REG_ARM_FW | ((r) & 0xffff))
>  #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
>  
> +/* SVE registers */
> +#define KVM_REG_ARM64_SVE(0x15 << KVM_REG_ARM_COPROC_SHIFT)
> +#define KVM_REG_ARM64_SVE_ZREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> +  KVM_REG_SIZE_U2048 |   \
> +  ((n) << 5) | (i))
> +#define KVM_REG_ARM64_SVE_PREG(n, i) (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
> +  KVM_REG_SIZE_U256 |\
> +  ((n) << 5) | (i) | 0x400)
> +#define KVM_REG_ARM64_SVE_FFR(i) KVM_REG_ARM64_SVE_PREG(16, i)
> +
>  /* Device Control API: ARM VGIC */
>  #define KVM_DEV_ARM_VGIC_GRP_ADDR0
>  #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS   1
> diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
> index 4a9d77c..005394b 100644
> --- a/arch/arm64/kvm/guest.c
> +++ b/arch/arm64/kvm/guest.c
> @@ -23,14 +23,19 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> +#include 
> +#include 
>  
>  #include "trace.h"
>  
> @@ -57,6 +62,106 @@ static u64 core_reg_offset_from_id(u64 id)
>   return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
>  }
>  
> +static bool is_zreg(const struct kvm_one_reg *reg)
> +{
> + return  reg->id >= KVM_REG_ARM64_SVE_ZREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS, 0x1f);
> +}
> +
> +static bool is_preg(const struct kvm_one_reg *reg)
> +{
> + return  reg->id >= KVM_REG_ARM64_SVE_PREG(0, 0) &&
> + reg->id <= KVM_REG_ARM64_SVE_FFR(0x1f);
> +}
> +
> +static unsigned int sve_reg_num(const struct kvm_one_reg *reg)
> +{
> + return (reg->id >> 5) & 0x1f;
> +}
> +
> +static unsigned int sve_reg_index(const struct kvm_one_reg *reg)
> +{
> + return reg->id & 0x1f;
> +}
> +
> +struct reg_bounds_struct {
> + char *kptr;
> + size_t start_offset;

Maybe start_offset gets used in a later patch, but it doesn't seem
to be used here.

> + size_t copy_count;
> + size_t flush_count;
> +};
> +
> +static int copy_bounded_reg_to_user(void __user *uptr,
> + const struct reg_bounds_struct *b)
> +{
> + if (copy_to_user(uptr, b->kptr, b->copy_count) ||
> + clear_user((char __user *)uptr + b->copy_count, b->flush_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int copy_bounded_reg_from_user(const struct reg_bounds_struct *b,
> +   const void __user *uptr)
> +{
> + if (copy_from_user(b->kptr, uptr, b->copy_count))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int fpsimd_vreg_bounds(struct reg_bounds_struct *b,
> +   struct kvm_vcpu *vcpu,
> +