[PATCH] kvmppc: Implement H_LOGICAL_CI_{LOAD,STORE} in KVM

2015-02-01 Thread David Gibson
On POWER, storage caching is usually configured via the MMU - attributes
such as cache-inhibited are stored in the TLB and the hashed page table.

This makes correctly performing cache-inhibited IO accesses awkward when
the MMU is turned off (real mode).  Some CPU models provide special
registers to control the cache attributes of real mode loads and stores, but
this is not at all consistent.  This is a problem in particular for SLOF,
the firmware used on KVM guests, which runs entirely in real mode, but
which needs to do IO to load the kernel.

To simplify this, qemu implements two special hypercalls, H_LOGICAL_CI_LOAD
and H_LOGICAL_CI_STORE, which simulate a cache-inhibited load or store to
a logical address (aka guest physical address).  SLOF uses these for IO.

However, because these are implemented within qemu, not the host kernel,
they bypass any IO devices emulated within KVM itself.  The simplest way
to see this problem is to attempt to boot a KVM guest from a virtio-blk
device with iothread / dataplane enabled.  The iothread code relies on an
in-kernel implementation of the virtio queue notification, which is not
triggered by the IO hcalls, and so the guest will stall in SLOF, unable to
load the guest OS.

This patch addresses this by providing in-kernel implementations of the
two hypercalls, which correctly scan the KVM IO bus.  Any access to an
address not handled by the KVM IO bus will cause a VM exit, hitting the
qemu implementation as before.

Note that a userspace change is also required, in order to enable these
new hcall implementations with KVM_CAP_PPC_ENABLE_HCALL.
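
As a rough userspace sketch (not part of this patch; the hcall numbers are
assumptions taken from the PAPR / qemu spapr headers), enabling the in-kernel
handling boils down to one KVM_ENABLE_CAP vm ioctl per hcall:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable in-kernel handling of the two hcalls on a VM fd.
 * The 0x3c / 0x40 hcall numbers are assumed from the spapr headers. */
static int enable_ci_hcalls(int vmfd)
{
        const unsigned long hcalls[] = { 0x3c /* H_LOGICAL_CI_LOAD */,
                                         0x40 /* H_LOGICAL_CI_STORE */ };
        int i;

        for (i = 0; i < 2; i++) {
                struct kvm_enable_cap cap = {
                        .cap = KVM_CAP_PPC_ENABLE_HCALL,
                        .args = { hcalls[i], 1 /* enable in-kernel handling */ },
                };

                if (ioctl(vmfd, KVM_ENABLE_CAP, &cap) < 0)
                        return -1;
        }
        return 0;
}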

Signed-off-by: David Gibson 
---
 arch/powerpc/include/asm/kvm_book3s.h |  3 ++
 arch/powerpc/kvm/book3s.c | 76 +++
 arch/powerpc/kvm/book3s_hv.c  | 16 
 arch/powerpc/kvm/book3s_pr_papr.c | 28 +
 4 files changed, 123 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 942c7b1..578e550 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -292,6 +292,9 @@ static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
return !is_kvmppc_hv_enabled(vcpu->kvm);
 }
 
+extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3	0x113724FA
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 888bf46..792c7cf 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -820,6 +820,82 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 #endif
 }
 
+int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
+{
+   unsigned long size = kvmppc_get_gpr(vcpu, 4);
+   unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+   u64 buf;
+   int ret;
+
+   if (!is_power_of_2(size) || (size > sizeof(buf)))
+   return H_TOO_HARD;
+
+   ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, size, &buf);
+   if (ret != 0)
+   return H_TOO_HARD;
+
+   switch (size) {
+   case 1:
+   kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
+   break;
+
+   case 2:
+   kvmppc_set_gpr(vcpu, 4, *(u16 *)&buf);
+   break;
+
+   case 4:
+   kvmppc_set_gpr(vcpu, 4, *(u32 *)&buf);
+   break;
+
+   case 8:
+   kvmppc_set_gpr(vcpu, 4, *(u64 *)&buf);
+   break;
+
+   default:
+   BUG();
+   }
+
+   return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load); /* For use by the kvm-pr module */
+
+int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
+{
+   unsigned long size = kvmppc_get_gpr(vcpu, 4);
+   unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+   unsigned long val = kvmppc_get_gpr(vcpu, 6);
+   u64 buf;
+   int ret;
+
+   switch (size) {
+   case 1:
+   *(u8 *)&buf = val;
+   break;
+
+   case 2:
+   *(u16 *)&buf = val;
+   break;
+
+   case 4:
+   *(u32 *)&buf = val;
+   break;
+
+   case 8:
+   *(u64 *)&buf = val;
+   break;
+
+   default:
+   return H_TOO_HARD;
+   }
+
+   ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, size, &buf);
+   if (ret != 0)
+   return H_TOO_HARD;
+
+   return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store); /* For use by the kvm-pr module */
+
 int kvmppc_core_check_processor_compat(void)
 {
/*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a..1013019 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -706,6 +706,20 @@ int kvmppc_pseries_do_hcall(struct kvm_vcp

Re: [PATCH 1/4] stubs for xsavec support

2015-02-01 Thread Jan Kiszka
On 2015-01-08 11:32, Paolo Bonzini wrote:
> These are needed for KVM changes in 3.18.
> 
> Recent kernels added a separate feature word for XSAVE features, and KVM's
> CPUID code is relying on the new definition.  Except for cpu_has_xsaves,
> it's never accessing the feature itself: wrap cpu_has_xsaves with
> kvm_cpu_has_xsaves, and then there is no problem with out-of-bounds
> accesses.
> 
> Signed-off-by: Paolo Bonzini 
> ---
>  external-module-compat-comm.h |  4 
>  external-module-compat.c  | 11 +++
>  sync  | 14 --
>  x86/external-module-compat.h  | 37 +
>  4 files changed, 64 insertions(+), 2 deletions(-)
> 

...

> diff --git a/x86/external-module-compat.h b/x86/external-module-compat.h
> index dec53b6..87cf76a 100644
> --- a/x86/external-module-compat.h
> +++ b/x86/external-module-compat.h
> @@ -428,6 +428,23 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
>  #define X86_FEATURE_MPX  (9*32+14) /* Memory Protection Extension */
>  #endif
>  
> +#if X86_FEATURE_XSAVEOPT < 10 * 32
> +#undef X86_FEATURE_XSAVEOPT
> +#endif
> +#define X86_FEATURE_XSAVEOPT (10*32+0) /* XSAVEOPT instruction */

This causes redefinition warnings if the condition is not met. Was the
plan to put the define before the #endif?
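
I.e. something along these lines (a sketch of the suggested arrangement,
not the actual compat header):

#if X86_FEATURE_XSAVEOPT < 10 * 32
#undef X86_FEATURE_XSAVEOPT
/* define inside the conditional, so nothing is redefined when it doesn't hold */
#define X86_FEATURE_XSAVEOPT (10*32+0) /* XSAVEOPT instruction */
#endif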

Jan

-- 
Siemens AG, Corporate Technology, CT RTC ITP SES-DE
Corporate Competence Center Embedded Linux


Re: [PATCH target] vhost/scsi: vhost_skip_iovec_bytes() can be static

2015-02-01 Thread Fam Zheng
On Mon, 02/02 14:25, kbuild test robot wrote:
> drivers/vhost/scsi.c:1081:5: sparse: symbol 'vhost_skip_iovec_bytes' was not 
> declared. Should it be static?
> 
> Signed-off-by: Fengguang Wu 
> ---
>  scsi.c |2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
> index d888bd9..8ac003f 100644
> --- a/drivers/vhost/scsi.c
> +++ b/drivers/vhost/scsi.c
> @@ -1078,7 +1078,7 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
>   pr_err("Faulted on virtio_scsi_cmd_resp\n");
>  }
>  
> -int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
> +static int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
>  struct iovec *iov_in, size_t off_in,
>  struct iovec **iov_out, size_t *off_out)

Probably keep the parameter list lines aligned?

Fam


[target:for-next 16/21] drivers/vhost/scsi.c:1081:5: sparse: symbol 'vhost_skip_iovec_bytes' was not declared. Should it be static?

2015-02-01 Thread kbuild test robot
tree:   git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git 
for-next
head:   2936f1d4f3e8247bd519feba7892371d5e4c6603
commit: 105acf608f25d5e0d9fef669299a5438b7b114ee [16/21] vhost/scsi: Add 
ANY_LAYOUT vhost_skip_iovec_bytes helper
reproduce:
  # apt-get install sparse
  git checkout 105acf608f25d5e0d9fef669299a5438b7b114ee
  make ARCH=x86_64 allmodconfig
  make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

>> drivers/vhost/scsi.c:1081:5: sparse: symbol 'vhost_skip_iovec_bytes' was not 
>> declared. Should it be static?
   drivers/vhost/scsi.c:969:1: warning: 'vhost_scsi_mapal' defined but not used 
[-Wunused-function]
vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, int max_niov,
^

Please review and possibly fold the followup patch.

---
0-DAY kernel test infrastructureOpen Source Technology Center
http://lists.01.org/mailman/listinfo/kbuild Intel Corporation


[PATCH target] vhost/scsi: vhost_skip_iovec_bytes() can be static

2015-02-01 Thread kbuild test robot
drivers/vhost/scsi.c:1081:5: sparse: symbol 'vhost_skip_iovec_bytes' was not 
declared. Should it be static?

Signed-off-by: Fengguang Wu 
---
 scsi.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d888bd9..8ac003f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1078,7 +1078,7 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
-int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
+static int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
   struct iovec *iov_in, size_t off_in,
   struct iovec **iov_out, size_t *off_out)
 {


Re: [PATCH-v2 01/11] lib/iovec: Add memcpy_fromiovec_out library function

2015-02-01 Thread Al Viro
On Mon, Feb 02, 2015 at 04:44:12AM +, Al Viro wrote:
> On Mon, Feb 02, 2015 at 04:06:24AM +, Nicholas A. Bellinger wrote:
> > From: Nicholas Bellinger 
> > 
> > This patch adds a new memcpy_fromiovec_out() library function which modifies
> > the passed *iov following memcpy_fromiovec(), but also returns the next 
> > current
> > iovec pointer via **iov_out.
> > 
> > This is useful for vhost ANY_LAYOUT support when guests are allowed to 
> > generate
> > incoming virtio request headers combined with subsequent SGL payloads into a
> > single iovec.
> 
> Please, don't.  Just use copy_from_iter(); you are open-coding an uglier
> variant of such.

PS: see vfs.git#for-davem (or postings on netdev with the same stuff).
I really hope to bury memcpy_...iovec...() crap for good; please, don't
reintroduce more of it.


Re: [PATCH-v2 01/11] lib/iovec: Add memcpy_fromiovec_out library function

2015-02-01 Thread Al Viro
On Mon, Feb 02, 2015 at 04:06:24AM +, Nicholas A. Bellinger wrote:
> From: Nicholas Bellinger 
> 
> This patch adds a new memcpy_fromiovec_out() library function which modifies
> the passed *iov following memcpy_fromiovec(), but also returns the next 
> current
> iovec pointer via **iov_out.
> 
> This is useful for vhost ANY_LAYOUT support when guests are allowed to 
> generate
> incoming virtio request headers combined with subsequent SGL payloads into a
> single iovec.

Please, don't.  Just use copy_from_iter(); you are open-coding an uglier
variant of such.
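
A minimal sketch of that, assuming the 3.19-era iov_iter helpers
(iov_iter_init() + copy_from_iter()):

#include <linux/fs.h>   /* READ/WRITE */
#include <linux/uio.h>

/* Wrap the iovec array once; the iterator then tracks the current position
 * instead of mutating the iovecs themselves. */
static int copy_req_header(void *hdr, size_t hdr_len,
                           const struct iovec *iov, unsigned long nr_segs,
                           size_t total_len, struct iov_iter *iter)
{
        iov_iter_init(iter, WRITE, iov, nr_segs, total_len);
        if (copy_from_iter(hdr, hdr_len, iter) != hdr_len)
                return -EFAULT;
        /* *iter now points at the start of the remaining payload */
        return 0;
}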


[PATCH-v2 06/11] vhost/scsi: Add ANY_LAYOUT vhost_skip_iovec_bytes helper

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch adds a vhost_skip_iovec_bytes() helper for skipping ahead
a number of bytes into the passed *iov_in + off_in, saving the current
**iov_out + off_out so it may be used by the caller.

This is useful for virtio-scsi READs when needing to skip ahead of
the starting response header bytes, and when T10_PI is enabled to
skip ahead of any preceding protection payload to the start of the
data payload.

It also checks max_niov to ensure the passed number of bytes does
not exceed what vhost_get_vq_desc() reports as the total number of
iovecs into vhost_virtqueue->iov[] for an individual request.
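
As a usage sketch (hypothetical caller, not part of this patch), a
virtio-scsi READ would locate the start of the data payload roughly as:

/* Hypothetical helper: skip the fixed-size response header within the
 * incoming iovecs and record where the data payload begins. */
static int locate_data_payload(struct vhost_virtqueue *vq,
                               unsigned int out, unsigned int in,
                               struct iovec **data_iov, size_t *data_off)
{
        return vhost_skip_iovec_bytes(sizeof(struct virtio_scsi_cmd_resp),
                                      in, &vq->iov[out], 0,
                                      data_iov, data_off);
}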

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 40 
 1 file changed, 40 insertions(+)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ecbd567..d888bd9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1078,6 +1078,46 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
+int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
+  struct iovec *iov_in, size_t off_in,
+  struct iovec **iov_out, size_t *off_out)
+{
+   int i = 0;
+
+   *off_out = 0;
+
+   if (!bytes)
+   return 0;
+
+   while (bytes) {
+   size_t iov_len = iov_in[i].iov_len - off_in;
+   size_t len = min(iov_len, bytes);
+
+   if (bytes -= len) {
+   if (++i == max_niov) {
+   pr_err("%s exceeded max_niov: %d\n",
+  __func__, max_niov);
+   return -EINVAL;
+   }
+   off_in = 0;
+   continue;
+   }
+   if (iov_len > len) {
+   *iov_out = &iov_in[i];
+   *off_out = len;
+   } else if (iov_len == len) {
+   if (++i == max_niov) {
+   pr_err("%s exceeded max_niov: %d\n",
+   __func__, max_niov);
+   return -EINVAL;
+   }
+   *iov_out = &iov_in[i];
+   *off_out = 0;
+   }
+   }
+   return i;
+}
+
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-- 
1.9.1



[PATCH-v2 11/11] vhost/scsi: Global tcm_vhost -> vhost_scsi rename

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

There is a large amount of code that still references the original
'tcm_vhost' naming conventions, instead of modern 'vhost_scsi'.

Go ahead and do a global rename to make the usage consistent.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 658 +--
 1 file changed, 329 insertions(+), 329 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6a785d8..57a6f0a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -51,13 +51,13 @@
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION  "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION  "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
@@ -66,7 +66,7 @@ struct vhost_scsi_inflight {
struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
int tvc_vq_desc;
/* virtio-scsi initiator task attribute */
@@ -80,7 +80,7 @@ struct tcm_vhost_cmd {
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
u32 tvc_prot_sgl_count;
-   /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+   /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
u32 tvc_lun;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist *tvc_sgl;
@@ -93,13 +93,13 @@ struct tcm_vhost_cmd {
/* Pointer to vhost_virtqueue for the cmd */
struct vhost_virtqueue *tvc_vq;
/* Pointer to vhost nexus memory */
-   struct tcm_vhost_nexus *tvc_nexus;
+   struct vhost_scsi_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd;
-   /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+   /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
struct work_struct work;
/* Copy of the incoming SCSI command descriptor block (CDB) */
-   unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+   unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/* Completed commands list, serviced from vhost worker thread */
@@ -108,53 +108,53 @@ struct tcm_vhost_cmd {
struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
/* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
/* Binary World Wide unique Port Name for Vhost Initiator port */
u64 iport_wwpn;
/* ASCII formatted WWPN for Sas Initiator port */
-   char iport_name[TCM_VHOST_NAMELEN];
-   /* Returned by tcm_vhost_make_nodeacl() */
+   char iport_name[VHOST_SCSI_NAMELEN];
+   /* Returned by vhost_scsi_make_nodeacl() */
struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus 
shutdown */
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by 
tv_tpg_mutex */
int tv_tpg_vhost_count;
-   /* list for tcm_vhost_list */
+   /* list for vhost_scsi_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
struct mutex tv_tpg_mutex;
/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-   struct tcm_vhost_nexus *tpg_nexus;
-   /* Pointer back to tcm_vhost_tport */
-   struct tcm_vhost_tport *tport;
-   /* Returned by tcm_vhost_make_tpg() */
+   struct vhost_scsi_nexus *tpg_nexus;
+   /* Pointer back to vhost_scsi_tport */
+   struct vhost_scsi_tport *tport;
+   /* Returned by vhost_scsi_make_tpg() */
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
/* SCSI protocol the tport is providing */
u8 tport_proto_id;
/* Binary World Wide unique Port Name for Vhost Target port */
u64 tport_wwpn;
/* ASCII formatted WWPN for Vhos

[PATCH-v2 08/11] vhost/scsi: Set VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

Signal support for the VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits
required by the virtio-scsi 1.0 spec layout.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a20a5fd..25a07a9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -171,7 +171,9 @@ enum {
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-  (1ULL << VIRTIO_SCSI_F_T10_PI)
+  (1ULL << VIRTIO_SCSI_F_T10_PI) |
+  (1ULL << VIRTIO_F_ANY_LAYOUT) |
+  (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET  256
@@ -1693,7 +1695,10 @@ static void vhost_scsi_handle_kick(struct vhost_work 
*work)
poll.work);
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
 
-   vhost_scsi_handle_vq(vs, vq);
+   if (vhost_has_feature(vq, VIRTIO_F_ANY_LAYOUT))
+   vhost_scsi_handle_vqal(vs, vq);
+   else
+   vhost_scsi_handle_vq(vs, vq);
 }
 
 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-- 
1.9.1



[PATCH-v2 07/11] vhost/scsi: Add ANY_LAYOUT vhost_virtqueue callback

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch adds ANY_LAYOUT support with a new vqs[].vq.handle_kick()
callback in vhost_scsi_handle_vqal().

It calculates data_direction + exp_data_len for the new tcm_vhost_cmd
descriptor by walking both outgoing + incoming iovecs, assuming the
layout of outgoing request header + T10_PI + Data payload comes first.

It also uses memcpy_fromiovec_out() to copy the leading virtio-scsi
request header, which may or may not include the SCSI CDB, and which
returns a re-calculated iovec pointing to the start of T10_PI or Data
SGL memory.

v2 changes:
  - Fix up vhost_scsi_handle_vqal comments
  - Minor vhost_scsi_handle_vqal simplifications
  - Add missing minimum virtio-scsi response buffer size check
  - Fix pi_bytes* error message typo
  - Convert to use vhost_skip_iovec_bytes() common code
  - Add max_niov sanity checks vs. out + in offset into vq

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 271 +++
 1 file changed, 271 insertions(+)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d888bd9..a20a5fd 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1119,6 +1119,277 @@ int vhost_skip_iovec_bytes(size_t bytes, int max_niov,
 }
 
 static void
+vhost_scsi_handle_vqal(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+   struct tcm_vhost_tpg **vs_tpg, *tpg;
+   struct virtio_scsi_cmd_req v_req;
+   struct virtio_scsi_cmd_req_pi v_req_pi;
+   struct tcm_vhost_cmd *cmd;
+   struct iovec *iov_out, *prot_iov, *data_iov;
+   u64 tag;
+   u32 exp_data_len, data_direction;
+   unsigned out, in, i;
+   int head, ret, prot_bytes, max_niov;
+   size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+   size_t out_size, in_size, data_off, prot_off;
+   u16 lun;
+   u8 *target, *lunp, task_attr;
+   bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
+   void *req, *cdb;
+
+   mutex_lock(&vq->mutex);
+   /*
+* We can handle the vq only after the endpoint is setup by calling the
+* VHOST_SCSI_SET_ENDPOINT ioctl.
+*/
+   vs_tpg = vq->private_data;
+   if (!vs_tpg)
+   goto out;
+
+   vhost_disable_notify(&vs->dev, vq);
+
+   for (;;) {
+   head = vhost_get_vq_desc(vq, vq->iov,
+ARRAY_SIZE(vq->iov), &out, &in,
+NULL, NULL);
+   pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+head, out, in);
+   /* On error, stop handling until the next kick. */
+   if (unlikely(head < 0))
+   break;
+   /* Nothing new?  Wait for eventfd to tell us they refilled. */
+   if (head == vq->num) {
+   if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+   vhost_disable_notify(&vs->dev, vq);
+   continue;
+   }
+   break;
+   }
+   /*
+* Check for a sane response buffer so we can report early
+* errors back to the guest.
+*/
+   if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+   vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+   " size, got %zu bytes\n", vq->iov[out].iov_len);
+   break;
+   }
+   /*
+* Setup pointers and values based upon different virtio-scsi
+* request header if T10_PI is enabled in KVM guest.
+*/
+   if (t10_pi) {
+   req = &v_req_pi;
+   req_size = sizeof(v_req_pi);
+   lunp = &v_req_pi.lun[0];
+   target = &v_req_pi.lun[1];
+   } else {
+   req = &v_req;
+   req_size = sizeof(v_req);
+   lunp = &v_req.lun[0];
+   target = &v_req.lun[1];
+   }
+   /*
+* Determine data_direction for ANY_LAYOUT by calculating the
+* total outgoing iovec sizes / incoming iovec sizes vs.
+* virtio-scsi request / response headers respectively.
+*
+* FIXME: Not correct for BIDI operation
+*/
+   out_size = in_size = 0;
+   for (i = 0; i < out; i++)
+   out_size += vq->iov[i].iov_len;
+   for (i = out; i < out + in; i++)
+   in_size += vq->iov[i].iov_len;
+   /*
+* Any associated T10_PI bytes for the outgoing / incoming
+* payloads are included in calculation of exp_data_len here.
+*/
+   if (

[PATCH-v2 04/11] vhost/scsi: Change vhost_scsi_map_to_sgl to accept iov ptr + len

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch changes vhost_scsi_map_to_sgl() parameters to accept virtio
iovec ptr + len when determining pages_nr.

This is currently done with iov_num_pages() -> PAGE_ALIGN, so allow
the same parameters as well.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 37 +++--
 1 file changed, 15 insertions(+), 22 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 9c5ac23..049e603 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -220,10 +220,10 @@ static struct workqueue_struct *tcm_vhost_workqueue;
 static DEFINE_MUTEX(tcm_vhost_mutex);
 static LIST_HEAD(tcm_vhost_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-   return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-  ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+   return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+  ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
 static void tcm_vhost_done_inflight(struct kref *kref)
@@ -777,25 +777,18 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct 
tcm_vhost_tpg *tpg,
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int
-vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd,
+ void __user *ptr,
+ size_t len,
  struct scatterlist *sgl,
- unsigned int sgl_count,
- struct iovec *iov,
- struct page **pages,
  bool write)
 {
-   unsigned int npages = 0, pages_nr, offset, nbytes;
+   unsigned int npages = 0, offset, nbytes;
+   unsigned int pages_nr = iov_num_pages(ptr, len);
struct scatterlist *sg = sgl;
-   void __user *ptr = iov->iov_base;
-   size_t len = iov->iov_len;
+   struct page **pages = cmd->tvc_upages;
int ret, i;
 
-   pages_nr = iov_num_pages(iov);
-   if (pages_nr > sgl_count) {
-   pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-  " sgl_count: %u\n", pages_nr, sgl_count);
-   return -ENOBUFS;
-   }
if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
   " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
@@ -840,7 +833,7 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
int ret, i;
 
for (i = 0; i < niov; i++)
-   sgl_count += iov_num_pages(&iov[i]);
+   sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);
 
if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
@@ -856,8 +849,8 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
 
for (i = 0; i < niov; i++) {
-   ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-   cmd->tvc_upages, write);
+   ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, 
iov[i].iov_len,
+   sg, write);
if (ret < 0) {
for (i = 0; i < cmd->tvc_sgl_count; i++) {
struct page *page = sg_page(&cmd->tvc_sgl[i]);
@@ -884,7 +877,7 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
int ret, i;
 
for (i = 0; i < niov; i++)
-   prot_sgl_count += iov_num_pages(&iov[i]);
+   prot_sgl_count += iov_num_pages(iov[i].iov_base, 
iov[i].iov_len);
 
if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
@@ -899,8 +892,8 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
cmd->tvc_prot_sgl_count = prot_sgl_count;
 
for (i = 0; i < niov; i++) {
-   ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, 
&iov[i],
-   cmd->tvc_upages, write);
+   ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, 
iov[i].iov_len,
+   prot_sg, write);
if (ret < 0) {
for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
struct page *page = 
sg_page(&cmd->tvc_prot_sgl[i]);
-- 
1.9.1



[PATCH-v2 01/11] lib/iovec: Add memcpy_fromiovec_out library function

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch adds a new memcpy_fromiovec_out() library function which modifies
the passed *iov following memcpy_fromiovec(), but also returns the new current
iovec pointer via **iov_out.

This is useful for vhost ANY_LAYOUT support when guests are allowed to generate
incoming virtio request headers combined with subsequent SGL payloads into a
single iovec.
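
As a usage sketch (hypothetical caller), copying a header that may span
iovec entries looks roughly like:

/* Hypothetical wrapper: copy hdr_len header bytes out of the iovecs and
 * get back the iovec entry where the remaining payload starts. */
static int copy_header_fromiovec(unsigned char *hdr, int hdr_len,
                                 struct iovec *iov,
                                 struct iovec **payload_iov)
{
        int ret = memcpy_fromiovec_out(hdr, iov, payload_iov, hdr_len);

        if (ret)
                return ret;     /* -EFAULT on a bad guest address */
        /* *payload_iov is the re-calculated current entry */
        return 0;
}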

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 include/linux/uio.h |  2 ++
 lib/iovec.c | 35 +++
 2 files changed, 37 insertions(+)

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1c5e453..3e4473d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -136,6 +136,8 @@ size_t csum_and_copy_to_iter(void *addr, size_t bytes, 
__wsum *csum, struct iov_
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct 
iov_iter *i);
 
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+int memcpy_fromiovec_out(unsigned char *kdata, struct iovec *iov,
+struct iovec **iov_out, int len);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
 int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/lib/iovec.c b/lib/iovec.c
index 2d99cb4..24c8148 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -28,6 +28,41 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec 
*iov, int len)
 EXPORT_SYMBOL(memcpy_fromiovec);
 
 /*
+ * Copy iovec to kernel, modifying the passed *iov entries.
+ *
+ * Save **iov_out for the caller to use upon return, that may either
+ * contain the current entry with a re-calculated iov_base + iov_len
+ * or next unmodified entry.
+ *
+ * Also note that any iovec entries preceeding the final *iov_out are
+ * zeroed by copy_from_user().
+ *
+ * Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovec_out(unsigned char *kdata, struct iovec *iov,
+struct iovec **iov_out, int len)
+{
+   while (len > 0) {
+   if (iov->iov_len) {
+   int copy = min_t(unsigned int, len, iov->iov_len);
+   if (copy_from_user(kdata, iov->iov_base, copy))
+   return -EFAULT;
+   len -= copy;
+   kdata += copy;
+   iov->iov_base += copy;
+   iov->iov_len -= copy;
+   }
+   if (!iov->iov_len)
+   iov++;
+   }
+   *iov_out = iov;
+
+   return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovec_out);
+
+/*
  * Copy kernel to iovec. Returns -EFAULT on error.
  */
 
-- 
1.9.1



[PATCH-v2 10/11] vhost/scsi: Drop left-over scsi_tcq.h include

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

With the recent removal of MSG_*_TAG defines in commit 68d81f40,
vhost-scsi is now using TCM_*_TAG and doesn't depend upon host
side scsi_tcq.h definitions anymore.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b17b5a0..6a785d8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -38,7 +38,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
-- 
1.9.1



[PATCH-v2 03/11] vhost/scsi: Fix incorrect early vhost_scsi_handle_vq failures

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch fixes vhost_scsi_handle_vq() failure cases that result in BUG_ON()
getting triggered when vhost_scsi_free_cmd() is called, and ->tvc_se_cmd has
not been initialized by target_submit_cmd_map_sgls().

It changes tcm_vhost_release_cmd() to use tcm_vhost_cmd->tvc_nexus for obtaining
the se_session pointer reference.  Also, avoid calling put_page() on NULL sg->page
entries in the vhost_scsi_map_to_sgl() failure path.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 52 +---
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a03ac41..9c5ac23 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -460,7 +460,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
 {
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd);
-   struct se_session *se_sess = se_cmd->se_sess;
+   struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
int i;
 
if (tv_cmd->tvc_sgl_count) {
@@ -859,9 +859,11 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
cmd->tvc_upages, write);
if (ret < 0) {
-   for (i = 0; i < cmd->tvc_sgl_count; i++)
-   put_page(sg_page(&cmd->tvc_sgl[i]));
-
+   for (i = 0; i < cmd->tvc_sgl_count; i++) {
+   struct page *page = sg_page(&cmd->tvc_sgl[i]);
+   if (page)
+   put_page(page);
+   }
cmd->tvc_sgl_count = 0;
return ret;
}
@@ -900,9 +902,11 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, 
&iov[i],
cmd->tvc_upages, write);
if (ret < 0) {
-   for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-   put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
+   for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
+   struct page *page = 
sg_page(&cmd->tvc_prot_sgl[i]);
+   if (page)
+   put_page(page);
+   }
cmd->tvc_prot_sgl_count = 0;
return ret;
}
@@ -1060,12 +1064,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct 
vhost_virtqueue *vq)
if (unlikely(vq->iov[0].iov_len < req_size)) {
pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
   req_size, vq->iov[0].iov_len);
-   break;
+   vhost_scsi_send_bad_target(vs, vq, head, out);
+   continue;
}
ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
if (unlikely(ret)) {
vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-   break;
+   vhost_scsi_send_bad_target(vs, vq, head, out);
+   continue;
}
 
/* virtio-scsi spec requires byte 0 of the lun to be 1 */
@@ -1096,14 +1102,16 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct 
vhost_virtqueue *vq)
if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero 
do_pi_niov"
", but wrong data_direction\n");
-   goto err_cmd;
+   vhost_scsi_send_bad_target(vs, vq, 
head, out);
+   continue;
}
prot_bytes = vhost32_to_cpu(vq, 
v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero 
di_pi_niov"
", but wrong data_direction\n");
-   goto err_cmd;
+   vhost_scsi_send_bad_target(vs, vq, 
head, out);
+   continue;
}
prot_bytes = vhost32_to_cpu(vq, 
v_req_pi.pi_bytesin);
}
@@ -1143,7 +1151,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct 
vhost_virtqueue *vq)
  

[PATCH-v2 00/11] vhost/scsi: Add ANY_LAYOUT + VERSION_1 support

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

Hi MST, Paolo, & Co,

This -v2 series adds vhost/scsi ANY_LAYOUT + VERSION_1 host feature bit
support.

It adds a new vhost_virtqueue ->handle_kick() callback to determine the
start of the protection and data payload iovecs past the starting virtio-scsi
request and response headers, based upon data_direction.

It assumes request/CDB and response/sense_buffer headers may span more
than a single iovec using lib/iovec.c logic, and adds a new
memcpy_fromiovec_out() to return the current re-calculated **iov_out.

It also allows virtio-scsi headers + T10_PI + Data SGL payloads to span
the same iovec when pinning user-space memory via get_user_pages_fast()
code.  (Not tested yet)

It's currently functioning against the v3.19-rc1 virtio-scsi LLD in T10_PI
mode with ANY_LAYOUT -> VERSION_1 guest feature bits enabled, using a
layout following the existing convention, with protection/data SGL payloads
residing within separate iovecs.

Here's how the changelog is looking:

v2 changes:
  - Update memcpy_fromiovec_out usage comment
  - Clear ->tvc_sgl_count for vhost_scsi_mapal failure
  - Make vhost_scsi_mapal + vhost_scsi_calc_sgls accept max_niov
  - Update vhost_scsi_handle_vqal comments
  - Minor vhost_scsi_handle_vqal simplifications
  - Add missing minimum virtio-scsi response buffer size check
  - Fix pi_bytes* error message typo
  - Convert to use vhost_skip_iovec_bytes() common code
  - Add max_niov sanity checks vs. out + in offset into vq
  - Drop legacy pre virtio v1.0 !ANY_LAYOUT code

Also included in patch #11 is an overdue change to rename code in scsi.c
to line up with the modern vhost_scsi naming convention.

Please review.

Thank you,

--nab

Nicholas Bellinger (11):
  lib/iovec: Add memcpy_fromiovec_out library function
  vhost/scsi: Convert completion path to use memcpy_toiovecend
  vhost/scsi: Fix incorrect early vhost_scsi_handle_vq failures
  vhost/scsi: Change vhost_scsi_map_to_sgl to accept iov ptr + len
  vhost/scsi: Add ANY_LAYOUT iov -> sgl mapping prerequisites
  vhost/scsi: Add ANY_LAYOUT vhost_skip_iovec_bytes helper
  vhost/scsi: Add ANY_LAYOUT vhost_virtqueue callback
  vhost/scsi: Set VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits
  vhost/scsi: Drop legacy pre virtio v1.0 !ANY_LAYOUT logic
  vhost/scsi: Drop left-over scsi_tcq.h include
  vhost/scsi: Global tcm_vhost -> vhost_scsi rename

 drivers/vhost/scsi.c | 1125 +++---
 include/linux/uio.h  |2 +
 lib/iovec.c  |   35 ++
 3 files changed, 646 insertions(+), 516 deletions(-)

-- 
1.9.1



[PATCH-v2 05/11] vhost/scsi: Add ANY_LAYOUT iov -> sgl mapping prerequisites

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

This patch adds ANY_LAYOUT prerequisite logic for accepting a set of
protection + data payloads via iovec + offset.  It also includes helpers
for calculating SGLs + invoking vhost_scsi_map_to_sgl() with a known
number of iovecs.

Required by ANY_LAYOUT processing when struct iovec may be offset into
the first outgoing virtio-scsi request header.

v2 changes:
  - Clear ->tvc_sgl_count for vhost_scsi_mapal failure
  - Make vhost_scsi_mapal + vhost_scsi_calc_sgls accept max_niov
  - Minor cleanups

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 115 +++
 1 file changed, 115 insertions(+)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 049e603..ecbd567 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -909,6 +909,121 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
return 0;
 }
 
+static int
+vhost_scsi_calc_sgls(struct iovec *iov, size_t off, size_t bytes,
+int *niov, int max_niov, int max_sgls)
+{
+   size_t tmp = 0;
+   int sgl_count = 0;
+
+   *niov = 0;
+
+   while (tmp < bytes) {
+   void __user *base = iov[*niov].iov_base + off;
+   size_t len = iov[(*niov)++].iov_len - off;
+
+   if (*niov > max_niov) {
+   pr_err("%s: current *niov %d exceeds max_niov: %d\n",
+  __func__, *niov, max_niov);
+   return -EINVAL;
+   }
+   sgl_count += iov_num_pages(base, len);
+   tmp += min(len, bytes);
+   off = 0;
+   }
+   if (sgl_count > max_sgls) {
+   pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+  " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+   return -ENOBUFS;
+   }
+   return sgl_count;
+}
+
+static int
+vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write,
+ struct iovec *iov, size_t iov_off, int niov,
+ struct scatterlist *sg, int sg_count)
+{
+   int i, ret;
+
+   for (i = 0; i < niov; i++) {
+   void __user *base = iov[i].iov_base + iov_off;
+   size_t len = iov[i].iov_len - iov_off;
+
+   ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
+   if (ret < 0) {
+   for (i = 0; i < sg_count; i++) {
+   struct page *page = sg_page(&sg[i]);
+   if (page)
+   put_page(page);
+   }
+   return ret;
+   }
+   sg += ret;
+   iov_off = 0;
+   }
+   return 0;
+}
+
+static int
+vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, int max_niov,
+size_t prot_bytes, struct iovec *prot_iov, size_t prot_off,
+size_t data_bytes, struct iovec *data_iov, size_t data_off)
+{
+   int sgl_count = 0, niov, ret;
+   bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
+
+   if (prot_bytes) {
+   if (!prot_iov) {
+   pr_err("%s: prot_iov is NULL, but prot_bytes: %zu"
+  "present\n", __func__, prot_bytes);
+   return -EINVAL;
+   }
+   sgl_count = vhost_scsi_calc_sgls(prot_iov, prot_off, prot_bytes,
+&niov, max_niov,
+TCM_VHOST_PREALLOC_PROT_SGLS);
+   if (sgl_count < 0)
+   return sgl_count;
+
+   sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+   cmd->tvc_prot_sgl_count = sgl_count;
+   pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+
+   ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iov, prot_off,
+   niov, cmd->tvc_prot_sgl,
+   cmd->tvc_prot_sgl_count);
+   if (ret < 0) {
+   cmd->tvc_prot_sgl_count = 0;
+   return ret;
+   }
+   max_niov -= niov;
+   }
+   if (!data_iov) {
+   pr_err("%s: data_iov is NULL, but data_bytes: %zu present\n",
+  __func__, data_bytes);
+   return -EINVAL;
+   }
+   sgl_count = vhost_scsi_calc_sgls(data_iov, data_off, data_bytes,
+&niov, max_niov,
+TCM_VHOST_PREALLOC_SGLS);
+   if (sgl_count < 0)
+   return sgl_count;
+
+   sg_init_table(cmd->tvc_sgl, sgl_count);
+   cmd->tvc_sgl_count = sgl_count;
+   pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+   

[PATCH-v2 02/11] vhost/scsi: Convert completion path to use memcpy_toiovecend

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

Required for ANY_LAYOUT support when the incoming virtio-scsi response
header + fixed size sense buffer payload may span more than a single
iovec entry.

This changes existing code to save cmd->tvc_resp_iod instead of the
first single iovec base pointer from &vq->iov[out].

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb..a03ac41 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -87,8 +87,8 @@ struct tcm_vhost_cmd {
struct scatterlist *tvc_sgl;
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
-   /* Pointer to response */
-   struct virtio_scsi_cmd_resp __user *tvc_resp;
+   /* Pointer to response header iovec */
+   struct iovec *tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -703,7 +703,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work 
*work)
 se_cmd->scsi_sense_length);
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
   se_cmd->scsi_sense_length);
-   ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+   ret = memcpy_toiovecend(cmd->tvc_resp_iov, (unsigned char 
*)&v_rsp,
+   0, sizeof(v_rsp));
if (likely(ret == 0)) {
struct vhost_scsi_virtqueue *q;
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
@@ -1159,7 +1160,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct 
vhost_virtqueue *vq)
 
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
-   cmd->tvc_resp = vq->iov[out].iov_base;
+   cmd->tvc_resp_iov = &vq->iov[out];
 
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cmd->tvc_cdb[0], cmd->tvc_lun);
-- 
1.9.1



[PATCH-v2 09/11] vhost/scsi: Drop legacy pre virtio v1.0 !ANY_LAYOUT logic

2015-02-01 Thread Nicholas A. Bellinger
From: Nicholas Bellinger 

With the new ANY_LAYOUT logic in place for vhost_scsi_handle_vqal(),
there is no longer a reason to keep around the legacy code with
!ANY_LAYOUT assumptions.

Go ahead and drop the pre virtio 1.0 logic in vhost_scsi_handle_vq()
and associated helpers.

Cc: Michael S. Tsirkin 
Cc: Paolo Bonzini 
Signed-off-by: Nicholas Bellinger 
---
 drivers/vhost/scsi.c | 339 +--
 1 file changed, 1 insertion(+), 338 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 25a07a9..b17b5a0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -825,93 +825,6 @@ out:
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
- struct iovec *iov,
- int niov,
- bool write)
-{
-   struct scatterlist *sg = cmd->tvc_sgl;
-   unsigned int sgl_count = 0;
-   int ret, i;
-
-   for (i = 0; i < niov; i++)
-   sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len);
-
-   if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-   pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-   " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-   sgl_count, TCM_VHOST_PREALLOC_SGLS);
-   return -ENOBUFS;
-   }
-
-   pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-   sg_init_table(sg, sgl_count);
-   cmd->tvc_sgl_count = sgl_count;
-
-   pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
-
-   for (i = 0; i < niov; i++) {
-   ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, 
iov[i].iov_len,
-   sg, write);
-   if (ret < 0) {
-   for (i = 0; i < cmd->tvc_sgl_count; i++) {
-   struct page *page = sg_page(&cmd->tvc_sgl[i]);
-   if (page)
-   put_page(page);
-   }
-   cmd->tvc_sgl_count = 0;
-   return ret;
-   }
-   sg += ret;
-   sgl_count -= ret;
-   }
-   return 0;
-}
-
-static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-  struct iovec *iov,
-  int niov,
-  bool write)
-{
-   struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-   unsigned int prot_sgl_count = 0;
-   int ret, i;
-
-   for (i = 0; i < niov; i++)
-   prot_sgl_count += iov_num_pages(iov[i].iov_base, 
iov[i].iov_len);
-
-   if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-   pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-   " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-   prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-   return -ENOBUFS;
-   }
-
-   pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-prot_sg, prot_sgl_count);
-   sg_init_table(prot_sg, prot_sgl_count);
-   cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-   for (i = 0; i < niov; i++) {
-   ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, 
iov[i].iov_len,
-   prot_sg, write);
-   if (ret < 0) {
-   for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
-   struct page *page = 
sg_page(&cmd->tvc_prot_sgl[i]);
-   if (page)
-   put_page(page);
-   }
-   cmd->tvc_prot_sgl_count = 0;
-   return ret;
-   }
-   prot_sg += ret;
-   prot_sgl_count -= ret;
-   }
-   return 0;
-}
-
-static int
 vhost_scsi_calc_sgls(struct iovec *iov, size_t off, size_t bytes,
 int *niov, int max_niov, int max_sgls)
 {
@@ -1391,253 +1304,6 @@ out:
mutex_unlock(&vq->mutex);
 }
 
-static void
-vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
-{
-   struct tcm_vhost_tpg **vs_tpg;
-   struct virtio_scsi_cmd_req v_req;
-   struct virtio_scsi_cmd_req_pi v_req_pi;
-   struct tcm_vhost_tpg *tpg;
-   struct tcm_vhost_cmd *cmd;
-   u64 tag;
-   u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-   unsigned out, in, i;
-   int head, ret, data_niov, prot_niov, prot_bytes;
-   size_t req_size;
-   u16 lun;
-   u8 *target, *lunp, task_attr;
-   bool hdr_pi;
-   void *req, *cdb;
-
-   mutex_lock(&vq->mutex);
-   /*
-* We can handle the vq only after the endpoint is setup by calling the
-* VHOST_SCSI_SET_ENDPOINT ioctl.
-*/
-   vs_tpg = vq->private_data;
-   if (!vs_tpg)
- 

RE: [v3 21/26] x86, irq: Define a global vector for VT-d Posted-Interrupts

2015-02-01 Thread Wu, Feng


> -Original Message-
> From: H. Peter Anvin [mailto:h...@zytor.com]
> Sent: Saturday, January 31, 2015 2:19 AM
> To: Wu, Feng; t...@linutronix.de; mi...@redhat.com; x...@kernel.org;
> g...@kernel.org; pbonz...@redhat.com; dw...@infradead.org;
> j...@8bytes.org; alex.william...@redhat.com; jiang@linux.intel.com
> Cc: eric.au...@linaro.org; linux-ker...@vger.kernel.org;
> io...@lists.linux-foundation.org; kvm@vger.kernel.org
> Subject: Re: [v3 21/26] x86, irq: Define a global vector for VT-d
> Posted-Interrupts
> 
> On 12/12/2014 07:14 AM, Feng Wu wrote:
> > Currently, we use a global vector as the Posted-Interrupts
> > Notification Event for all the vCPUs in the system. We need
> > to introduce another global vector for VT-d Posted-Interrtups,
> > which will be used to wakeup the sleep vCPU when an external
> > interrupt from a direct-assigned device happens for that vCPU.
> >
> > Signed-off-by: Feng Wu 
> >
> 
> >  #ifdef CONFIG_HAVE_KVM
> > +void (*wakeup_handler_callback)(void) = NULL;
> > +EXPORT_SYMBOL_GPL(wakeup_handler_callback);
> > +
> 
> Stylistic nitpick: we generally don't explicitly initialize
> global/static pointer variables to NULL (that happens automatically anyway.)
> 
> Other than that,
> 
> Acked-by: H. Peter Anvin 

Thanks a lot for your review, Peter!

Thanks,
Feng

[PATCH 06/18] arm/arm64: get rid of get_sp()

2015-02-01 Thread Andrew Jones
get_sp() only worked by accident, because gcc inlined calls
to it. It should have always been explicitly inlined. It was
also only added for debugging, and is not in any use now.
Furthermore, while we will have need for a "get_sp" again, we'll
add it back with a new name, current_stack_pointer, in order
to be consistent with Linux.
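
For reference, the Linux-style current_stack_pointer is essentially a
named register variable along these lines (its exact placement is left
to a later patch):

register unsigned long current_stack_pointer asm("sp");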

Signed-off-by: Andrew Jones 
---
 lib/arm/asm/processor.h   | 1 -
 lib/arm/processor.c   | 6 --
 lib/arm64/asm/processor.h | 1 -
 lib/arm64/processor.c | 6 --
 4 files changed, 14 deletions(-)

diff --git a/lib/arm/asm/processor.h b/lib/arm/asm/processor.h
index a56f8d1fc9797..9c37db66640e8 100644
--- a/lib/arm/asm/processor.h
+++ b/lib/arm/asm/processor.h
@@ -23,7 +23,6 @@ typedef void (*exception_fn)(struct pt_regs *);
 extern void install_exception_handler(enum vector v, exception_fn fn);
 
 extern void show_regs(struct pt_regs *regs);
-extern void *get_sp(void);
 
 static inline unsigned long current_cpsr(void)
 {
diff --git a/lib/arm/processor.c b/lib/arm/processor.c
index 5ad999c6b378b..d2fd597fcd139 100644
--- a/lib/arm/processor.c
+++ b/lib/arm/processor.c
@@ -64,12 +64,6 @@ void show_regs(struct pt_regs *regs)
}
 }
 
-void *get_sp(void)
-{
-   register unsigned long sp asm("sp");
-   return (void *)sp;
-}
-
 static exception_fn exception_handlers[EXCPTN_MAX];
 
 void install_exception_handler(enum vector v, exception_fn fn)
diff --git a/lib/arm64/asm/processor.h b/lib/arm64/asm/processor.h
index a33f70afb3a3e..d287f55b8dac6 100644
--- a/lib/arm64/asm/processor.h
+++ b/lib/arm64/asm/processor.h
@@ -51,7 +51,6 @@ extern void default_vector_handler(enum vector v, struct 
pt_regs *regs,
   unsigned int esr);
 
 extern void show_regs(struct pt_regs *regs);
-extern void *get_sp(void);
 extern bool get_far(unsigned int esr, unsigned long *far);
 
 static inline unsigned long current_level(void)
diff --git a/lib/arm64/processor.c b/lib/arm64/processor.c
index efc78353d7ec0..7f61b3fff281f 100644
--- a/lib/arm64/processor.c
+++ b/lib/arm64/processor.c
@@ -78,12 +78,6 @@ void show_regs(struct pt_regs *regs)
printf("\n");
 }
 
-void *get_sp(void)
-{
-   register unsigned long sp asm("sp");
-   return (void *)sp;
-}
-
 bool get_far(unsigned int esr, unsigned long *far)
 {
unsigned int ec = esr >> ESR_EL1_EC_SHIFT;
-- 
1.9.3



[PATCH 09/18] arm/arm64: maintain per thread exception handlers

2015-02-01 Thread Andrew Jones
Add exception handlers to thread info. And, since we allow threads
running in user mode to install exception handlers too (a convenience
for unit test developers), check for handlers on the user mode stack
thread info too. But, unit test developers will likely also expect the
installation of exception handlers done in kernel mode, before switching
to user mode, to work. So, if there's no handler in the thread info
hanging off the user mode stack, then still check the kernel mode stack
thread info for one.

Use THREAD_SIZE == PAGE_SIZE when PAGE_SIZE is larger than 16K.
This is for arm64, which uses 64K pages: the exception handler
arrays together take 8K, making the stack too small with
THREAD_SIZE == 16K.

Signed-off-by: Andrew Jones 
---
 arm/flat.lds  |  2 +-
 lib/arm/asm/thread_info.h | 17 ++--
 lib/arm/processor.c   | 21 ++-
 lib/arm64/asm/processor.h |  1 +
 lib/arm64/processor.c | 52 ++-
 5 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/arm/flat.lds b/arm/flat.lds
index df80d3678e556..c01f01e7b8682 100644
--- a/arm/flat.lds
+++ b/arm/flat.lds
@@ -19,7 +19,7 @@ SECTIONS
 . += 64K;
 . = ALIGN(64K);
 /*
- * stack depth is ~16K, see THREAD_SIZE
+ * stack depth is 16K for arm and PAGE_SIZE for arm64, see THREAD_SIZE
  * sp must be 16 byte aligned for arm64, and 8 byte aligned for arm
  * sp must always be strictly less than the true stacktop
  */
diff --git a/lib/arm/asm/thread_info.h b/lib/arm/asm/thread_info.h
index 17997e21d1274..5f7104f7c234f 100644
--- a/lib/arm/asm/thread_info.h
+++ b/lib/arm/asm/thread_info.h
@@ -7,8 +7,15 @@
  *
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
-
-#define THREAD_SIZE16384
+#include 
+#include 
+
+#define __MIN_THREAD_SIZE  16384
+#if PAGE_SIZE > __MIN_THREAD_SIZE
+#define THREAD_SIZEPAGE_SIZE
+#else
+#define THREAD_SIZE__MIN_THREAD_SIZE
+#endif
 #define THREAD_START_SP(THREAD_SIZE - 16)
 
 #define TIF_USER_MODE  (1U << 0)
@@ -16,6 +23,12 @@
 struct thread_info {
int cpu;
unsigned int flags;
+#ifdef __arm__
+   exception_fn exception_handlers[EXCPTN_MAX];
+#else
+   vector_fn vector_handlers[VECTOR_MAX];
+   exception_fn exception_handlers[VECTOR_MAX][EC_MAX];
+#endif
char ext[0];/* allow unit tests to add extended info */
 };
 
diff --git a/lib/arm/processor.c b/lib/arm/processor.c
index 8a514a29c063b..1cef46ab28647 100644
--- a/lib/arm/processor.c
+++ b/lib/arm/processor.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static const char *processor_modes[] = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" ,
@@ -64,18 +65,28 @@ void show_regs(struct pt_regs *regs)
}
 }
 
-static exception_fn exception_handlers[EXCPTN_MAX];
-
 void install_exception_handler(enum vector v, exception_fn fn)
 {
+   struct thread_info *ti = current_thread_info();
+
if (v < EXCPTN_MAX)
-   exception_handlers[v] = fn;
+   ti->exception_handlers[v] = fn;
 }
 
 void do_handle_exception(enum vector v, struct pt_regs *regs)
 {
-   if (v < EXCPTN_MAX && exception_handlers[v]) {
-   exception_handlers[v](regs);
+   struct thread_info *ti = thread_info_sp(regs->ARM_sp);
+
+   if (ti->flags & TIF_USER_MODE) {
+   if (v < EXCPTN_MAX && ti->exception_handlers[v]) {
+   ti->exception_handlers[v](regs);
+   return;
+   }
+   ti = current_thread_info();
+   }
+
+   if (v < EXCPTN_MAX && ti->exception_handlers[v]) {
+   ti->exception_handlers[v](regs);
return;
}
 
diff --git a/lib/arm64/asm/processor.h b/lib/arm64/asm/processor.h
index c1326351d201f..228a21c7f8856 100644
--- a/lib/arm64/asm/processor.h
+++ b/lib/arm64/asm/processor.h
@@ -49,6 +49,7 @@ extern void install_exception_handler(enum vector v, unsigned 
int ec,
  exception_fn fn);
 extern void default_vector_handler(enum vector v, struct pt_regs *regs,
   unsigned int esr);
+extern void vector_handlers_default_init(vector_fn *handlers);
 
 extern void show_regs(struct pt_regs *regs);
 extern bool get_far(unsigned int esr, unsigned long *far);
diff --git a/lib/arm64/processor.c b/lib/arm64/processor.c
index 152767eecf062..c240ce33c3d0c 100644
--- a/lib/arm64/processor.c
+++ b/lib/arm64/processor.c
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static const char *vector_names[] = {
"el1t_sync",
@@ -128,44 +129,66 @@ static void bad_exception(enum vector v, struct pt_regs 
*regs,
abort();
 }
 
-static exception_fn exception_handlers[VECTOR_MAX][EC_MAX];
-
 void install_exception_handler(enum vector v, unsigned int ec, exception_fn fn)
 {
+   struct thread_inf

[PATCH 03/18] arm: fixups: add barriers, actually set MAIR

2015-02-01 Thread Andrew Jones
Sprinkle in some more isbs after context-changing operations,
as the ARM ARM states we should. I haven't seen any problems
without them, but we should do it right. Also, *actually* set
the MAIR in asm_mmu_enable. We were reading, not writing...
Luckily this was just spotted while adding the isbs, rather
than leading to a nightmare debug session some day...

Signed-off-by: Andrew Jones 
---
 arm/cstart.S| 8 ++--
 lib/arm/processor.c | 1 +
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arm/cstart.S b/arm/cstart.S
index da496e9eae7e0..39fac8f1e1bd8 100644
--- a/arm/cstart.S
+++ b/arm/cstart.S
@@ -50,6 +50,7 @@ start:
 .macro set_mode_stack mode, stack
add \stack, #S_FRAME_SIZE
msr cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
+   isb
mov sp, \stack
 .endm
 
@@ -70,6 +71,7 @@ exceptions_init:
set_mode_stack  FIQ_MODE, r1
 
msr cpsr_cxsf, r2   @ back to svc mode
+   isb
mov pc, lr
 
 .text
@@ -96,12 +98,13 @@ asm_mmu_enable:
 
/* MAIR */
ldr r2, =PRRR
-   mrc p15, 0, r2, c10, c2, 0
+   mcr p15, 0, r2, c10, c2, 0
ldr r2, =NMRR
-   mrc p15, 0, r2, c10, c2, 1
+   mcr p15, 0, r2, c10, c2, 1
 
/* TTBR0 */
mcrrp15, 0, r0, r1, c2
+   isb
 
/* SCTLR */
mrc p15, 0, r2, c1, c0, 0
@@ -109,6 +112,7 @@ asm_mmu_enable:
orr r2, #CR_I
orr r2, #CR_M
mcr p15, 0, r2, c1, c0, 0
+   isb
 
mov pc, lr
 
diff --git a/lib/arm/processor.c b/lib/arm/processor.c
index da4163664a835..f8bd94cbb8bc6 100644
--- a/lib/arm/processor.c
+++ b/lib/arm/processor.c
@@ -115,6 +115,7 @@ void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr)
"bicr0, #" xstr(MODE_MASK) "\n"
"orrr0, #" xstr(USR_MODE) "\n"
"msrcpsr_c, r0\n"
+   "isb\n"
"movr0, %0\n"
"movsp, %1\n"
"movpc, %2\n"
-- 
1.9.3



[PATCH 10/18] arm/arm64: add simple cpumask API

2015-02-01 Thread Andrew Jones
With smp, cpumasks become quite useful. Add a simple implementation,
along with implementations of the bitops it needs.
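
For example, a unit test that wants to track which cpus have checked
in could do something like this (illustrative sketch only, not part of
the patch; the function names are made up):

#include <libcflat.h>
#include <asm/cpumask.h>

static cpumask_t checked_in;

static void cpu_check_in(int cpu)
{
        /* atomically set this cpu's bit; warn if it was already set */
        if (cpumask_test_and_set_cpu(cpu, &checked_in))
                printf("cpu%d checked in twice?\n", cpu);
}

static bool cpu_has_checked_in(int cpu)
{
        return cpumask_test_cpu(cpu, &checked_in);
}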

Signed-off-by: Andrew Jones 
---
 config/config-arm-common.mak |   1 +
 lib/arm/asm/bitops.h |  53 +++
 lib/arm/asm/cpumask.h| 118 +++
 lib/arm/bitops.c |  81 +
 lib/arm64/asm/bitops.h   |  51 +++
 lib/arm64/asm/cpumask.h  |   1 +
 6 files changed, 305 insertions(+)
 create mode 100644 lib/arm/asm/bitops.h
 create mode 100644 lib/arm/asm/cpumask.h
 create mode 100644 lib/arm/bitops.c
 create mode 100644 lib/arm64/asm/bitops.h
 create mode 100644 lib/arm64/asm/cpumask.h

diff --git a/config/config-arm-common.mak b/config/config-arm-common.mak
index b01e9ab836b2d..94eac8967e234 100644
--- a/config/config-arm-common.mak
+++ b/config/config-arm-common.mak
@@ -34,6 +34,7 @@ cflatobjs += lib/chr-testdev.o
 cflatobjs += lib/arm/io.o
 cflatobjs += lib/arm/setup.o
 cflatobjs += lib/arm/mmu.o
+cflatobjs += lib/arm/bitops.o
 
 libeabi = lib/arm/libeabi.a
 eabiobjs = lib/arm/eabi_compat.o
diff --git a/lib/arm/asm/bitops.h b/lib/arm/asm/bitops.h
new file mode 100644
index 0..8049634be0485
--- /dev/null
+++ b/lib/arm/asm/bitops.h
@@ -0,0 +1,53 @@
+#ifndef _ASMARM_BITOPS_H_
+#define _ASMARM_BITOPS_H_
+/*
+ * Adapted from
+ *   include/linux/bitops.h
+ *   arch/arm/lib/bitops.h
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones 
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+
+#define BITS_PER_LONG  32
+#define BIT(nr)(1UL << (nr))
+#define BIT_MASK(nr)   (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)   ((nr) / BITS_PER_LONG)
+
+#define ATOMIC_BITOP(insn, mask, word) \
+({ \
+   unsigned long tmp1, tmp2;   \
+   asm volatile(   \
+   "1: ldrex   %0, [%2]\n" \
+   insn"   %0, %0, %3\n"   \
+   "   strex   %1, %0, [%2]\n" \
+   "   cmp %1, #0\n"   \
+   "   bne 1b\n"   \
+   : "=&r" (tmp1), "=&r" (tmp2)\
+   : "r" (word), "r" (mask)\
+   : "cc");\
+})
+
+#define ATOMIC_TESTOP(insn, mask, word, old)   \
+({ \
+   unsigned long tmp1, tmp2;   \
+   asm volatile(   \
+   "1: ldrex   %0, [%3]\n" \
+   "   and %1, %0, %4\n"   \
+   insn"   %0, %0, %4\n"   \
+   "   strex   %2, %0, [%3]\n" \
+   "   cmp %2, #0\n"   \
+   "   bne 1b\n"   \
+   : "=&r" (tmp1), "=&r" (old), "=&r" (tmp2)   \
+   : "r" (word), "r" (mask)\
+   : "cc");\
+})
+
+extern void set_bit(int nr, volatile unsigned long *addr);
+extern void clear_bit(int nr, volatile unsigned long *addr);
+extern int test_bit(int nr, const volatile unsigned long *addr);
+extern int test_and_set_bit(int nr, volatile unsigned long *addr);
+extern int test_and_clear_bit(int nr, volatile unsigned long *addr);
+
+#endif /* _ASMARM_BITOPS_H_ */
diff --git a/lib/arm/asm/cpumask.h b/lib/arm/asm/cpumask.h
new file mode 100644
index 0..85b8e4b51a403
--- /dev/null
+++ b/lib/arm/asm/cpumask.h
@@ -0,0 +1,118 @@
+#ifndef _ASMARM_CPUMASK_H_
+#define _ASMARM_CPUMASK_H_
+/*
+ * Simple cpumask implementation
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones 
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include 
+#include 
+
+#define CPUMASK_NR_LONGS ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+typedef struct cpumask {
+   unsigned long bits[CPUMASK_NR_LONGS];
+} cpumask_t;
+
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+static inline void cpumask_set_cpu(int cpu, cpumask_t *mask)
+{
+   set_bit(cpu, cpumask_bits(mask));
+}
+
+static inline void cpumask_clear_cpu(int cpu, cpumask_t *mask)
+{
+   clear_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_cpu(int cpu, const cpumask_t *mask)
+{
+   return test_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *mask)
+{
+   return test_and_set_bit(cpu, cpumask_bits(mask));
+}
+
+static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *mask)
+{
+   return test_and_clear_bit(cpu, cpumask_bits(mask));
+}

[PATCH 14/18] arm/arm64: add some PSCI API

2015-02-01 Thread Andrew Jones
Signed-off-by: Andrew Jones 
---
 config/config-arm-common.mak |  1 +
 lib/arm/asm/psci.h   | 13 +
 lib/arm/psci.c   | 30 ++
 lib/arm64/asm/psci.h | 13 +
 4 files changed, 57 insertions(+)
 create mode 100644 lib/arm/asm/psci.h
 create mode 100644 lib/arm/psci.c
 create mode 100644 lib/arm64/asm/psci.h

diff --git a/config/config-arm-common.mak b/config/config-arm-common.mak
index 94eac8967e234..13f5338a35a02 100644
--- a/config/config-arm-common.mak
+++ b/config/config-arm-common.mak
@@ -35,6 +35,7 @@ cflatobjs += lib/arm/io.o
 cflatobjs += lib/arm/setup.o
 cflatobjs += lib/arm/mmu.o
 cflatobjs += lib/arm/bitops.o
+cflatobjs += lib/arm/psci.o
 
 libeabi = lib/arm/libeabi.a
 eabiobjs = lib/arm/eabi_compat.o
diff --git a/lib/arm/asm/psci.h b/lib/arm/asm/psci.h
new file mode 100644
index 0..e2e66b47de480
--- /dev/null
+++ b/lib/arm/asm/psci.h
@@ -0,0 +1,13 @@
+#ifndef _ASMARM_PSCI_H_
+#define _ASMARM_PSCI_H_
+#include 
+#include 
+
+#define PSCI_INVOKE_ARG_TYPE   u32
+#define PSCI_FN_CPU_ON PSCI_0_2_FN_CPU_ON
+
+extern int psci_invoke(u32 function_id, u32 arg0, u32 arg1, u32 arg2);
+extern int psci_cpu_on(unsigned long cpuid, unsigned long entry_point);
+extern void psci_sys_reset(void);
+
+#endif /* _ASMARM_PSCI_H_ */
diff --git a/lib/arm/psci.c b/lib/arm/psci.c
new file mode 100644
index 0..027c4f66f1815
--- /dev/null
+++ b/lib/arm/psci.c
@@ -0,0 +1,30 @@
+/*
+ * PSCI API
+ * From arch/arm[64]/kernel/psci.c
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones 
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include 
+
+#define T PSCI_INVOKE_ARG_TYPE
+__attribute__((noinline))
+int psci_invoke(T function_id, T arg0, T arg1, T arg2)
+{
+   asm volatile(
+   "hvc #0"
+   : "+r" (function_id)
+   : "r" (arg0), "r" (arg1), "r" (arg2));
+   return function_id;
+}
+
+int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+   return psci_invoke(PSCI_FN_CPU_ON, cpuid, entry_point, 0);
+}
+
+void psci_sys_reset(void)
+{
+   psci_invoke(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
diff --git a/lib/arm64/asm/psci.h b/lib/arm64/asm/psci.h
new file mode 100644
index 0..c481be4bd6bab
--- /dev/null
+++ b/lib/arm64/asm/psci.h
@@ -0,0 +1,13 @@
+#ifndef _ASMARM64_PSCI_H_
+#define _ASMARM64_PSCI_H_
+#include 
+#include 
+
+#define PSCI_INVOKE_ARG_TYPE   u64
+#define PSCI_FN_CPU_ON PSCI_0_2_FN64_CPU_ON
+
+extern int psci_invoke(u64 function_id, u64 arg0, u64 arg1, u64 arg2);
+extern int psci_cpu_on(unsigned long cpuid, unsigned long entry_point);
+extern void psci_sys_reset(void);
+
+#endif /* _ASMARM64_PSCI_H_ */
-- 
1.9.3



[PATCH 11/18] arm/arm64: make mmu_on per cpu

2015-02-01 Thread Andrew Jones
We introduced a variable called mmu_on to the mmu implementation
because unit tests may want to run with the MMU off, yet still
call into common code that could execute {Load,Store}-Exclusive
instructions - which is not allowed with the MMU off. So the mmu_on
variable was added and made query-able (through mmu_enabled()) in
order to guard those paths. But mmu_on is really a per cpu concept,
so for smp we need to change it. As it's just a bool, we can easily
make it per cpu by changing it into a cpumask. We also rename it
more appropriately.
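
The kind of guard this enables looks roughly like the following
(hypothetical helper, for illustration only; not part of the patch):

#include <asm/mmu-api.h>
#include <asm/bitops.h>

/* only use exclusives once this cpu's MMU is on */
static void set_ready_flag(unsigned long *flag)
{
        if (mmu_enabled())
                test_and_set_bit(0, flag);      /* ldrex/strex based */
        else
                *flag = 1;                      /* plain store, MMU still off */
}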

Signed-off-by: Andrew Jones 
---
 lib/arm/asm/mmu-api.h |  1 +
 lib/arm/mmu.c | 15 ---
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/lib/arm/asm/mmu-api.h b/lib/arm/asm/mmu-api.h
index f2511e3dc7dee..68dc707d67241 100644
--- a/lib/arm/asm/mmu-api.h
+++ b/lib/arm/asm/mmu-api.h
@@ -2,6 +2,7 @@
 #define __ASMARM_MMU_API_H_
 extern pgd_t *mmu_idmap;
 extern bool mmu_enabled(void);
+extern void mmu_set_enabled(void);
 extern void mmu_enable(pgd_t *pgtable);
 extern void mmu_enable_idmap(void);
 extern void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset);
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index 1c024538663ce..732000a8eb088 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -6,16 +6,25 @@
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
 #include 
+#include 
+#include 
 #include 
 
 extern unsigned long etext;
 
 pgd_t *mmu_idmap;
 
-static bool mmu_on;
+static cpumask_t mmu_enabled_cpumask;
 bool mmu_enabled(void)
 {
-   return mmu_on;
+   struct thread_info *ti = current_thread_info();
+   return cpumask_test_cpu(ti->cpu, &mmu_enabled_cpumask);
+}
+
+void mmu_set_enabled(void)
+{
+   struct thread_info *ti = current_thread_info();
+   cpumask_set_cpu(ti->cpu, &mmu_enabled_cpumask);
 }
 
 extern void asm_mmu_enable(phys_addr_t pgtable);
@@ -23,7 +32,7 @@ void mmu_enable(pgd_t *pgtable)
 {
asm_mmu_enable(__pa(pgtable));
flush_tlb_all();
-   mmu_on = true;
+   mmu_set_enabled();
 }
 
 void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
-- 
1.9.3



[PATCH 12/18] arm64: implement spinlocks

2015-02-01 Thread Andrew Jones
We put this off, as it wasn't necessary without smp. Now it
is. We only need to do this for arm64, as we've already done it
for arm.
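
The interface is unchanged, so callers look the same as on arm,
e.g. (illustrative only, not part of the patch):

#include <asm/spinlock.h>

static struct spinlock counter_lock;
static int counter;

static void counter_inc(void)
{
        spin_lock(&counter_lock);
        counter++;                      /* protected by counter_lock */
        spin_unlock(&counter_lock);
}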

Signed-off-by: Andrew Jones 
---
 config/config-arm64.mak  |  1 +
 lib/arm64/asm/spinlock.h |  8 ++--
 lib/arm64/spinlock.c | 43 +++
 3 files changed, 46 insertions(+), 6 deletions(-)
 create mode 100644 lib/arm64/spinlock.c

diff --git a/config/config-arm64.mak b/config/config-arm64.mak
index 5f8550eb511e8..d61b703c8140e 100644
--- a/config/config-arm64.mak
+++ b/config/config-arm64.mak
@@ -9,6 +9,7 @@ kernel_offset = 0x8
 
 cstart.o = $(TEST_DIR)/cstart64.o
 cflatobjs += lib/arm64/processor.o
+cflatobjs += lib/arm64/spinlock.o
 
 # arm64 specific tests
 tests =
diff --git a/lib/arm64/asm/spinlock.h b/lib/arm64/asm/spinlock.h
index 36b7b44fa4edf..43b2634b46459 100644
--- a/lib/arm64/asm/spinlock.h
+++ b/lib/arm64/asm/spinlock.h
@@ -5,11 +5,7 @@ struct spinlock {
int v;
 };
 
-static inline void spin_lock(struct spinlock *lock __unused)
-{
-}
-static inline void spin_unlock(struct spinlock *lock __unused)
-{
-}
+extern void spin_lock(struct spinlock *lock);
+extern void spin_unlock(struct spinlock *lock);
 
 #endif /* _ASMARM64_SPINLOCK_H_ */
diff --git a/lib/arm64/spinlock.c b/lib/arm64/spinlock.c
new file mode 100644
index 0..68b68b75ba60d
--- /dev/null
+++ b/lib/arm64/spinlock.c
@@ -0,0 +1,43 @@
+/*
+ * spinlocks
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones 
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include 
+#include 
+#include 
+
+void spin_lock(struct spinlock *lock)
+{
+   u32 val, fail;
+
+   smp_mb();
+
+   if (!mmu_enabled()) {
+   lock->v = 1;
+   return;
+   }
+
+   do {
+   asm volatile(
+   "1: ldaxr   %w0, [%2]\n"
+   "   cbnz%w0, 1b\n"
+   "   mov %0, #1\n"
+   "   stxr%w1, %w0, [%2]\n"
+   : "=&r" (val), "=&r" (fail)
+   : "r" (&lock->v)
+   : "cc" );
+   } while (fail);
+   smp_mb();
+}
+
+void spin_unlock(struct spinlock *lock)
+{
+   if (mmu_enabled())
+   asm volatile("stlrh wzr, [%0]" :: "r" (&lock->v));
+   else
+   lock->v = 0;
+   smp_mb();
+}
-- 
1.9.3



[PATCH 17/18] arm/arm64: add smp_boot_secondary

2015-02-01 Thread Andrew Jones
Add a common entry point, present/online cpu masks, and
smp_boot_secondary() to support booting secondary cpus.
Add a bit more PSCI API that we need too. We also
adjust THREAD_START_SP for arm to make some room for
exception stacks.
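
Usage then ends up looking something like this (sketch only;
secondary_fn and its body are made up, see the selftest at the end
of the series for the real thing):

#include <libcflat.h>
#include <asm/smp.h>

static void secondary_fn(void)
{
        printf("cpu%d is up\n", smp_processor_id());
        halt();
}

static void boot_others(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (cpu == 0)
                        continue;
                smp_boot_secondary(cpu, secondary_fn);
        }
}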

Signed-off-by: Andrew Jones 
---
 arm/cstart.S | 32 +
 arm/cstart64.S   | 25 +++
 config/config-arm-common.mak |  1 +
 lib/arm/asm-offsets.c|  3 +++
 lib/arm/asm/psci.h   |  2 ++
 lib/arm/asm/smp.h| 56 +++
 lib/arm/asm/thread_info.h| 11 +
 lib/arm/psci.c   | 19 +++
 lib/arm/setup.c  |  8 +--
 lib/arm/smp.c| 57 
 lib/arm64/asm-offsets.c  |  2 ++
 lib/arm64/asm/psci.h |  2 ++
 lib/arm64/asm/smp.h  |  1 +
 13 files changed, 212 insertions(+), 7 deletions(-)
 create mode 100644 lib/arm/asm/smp.h
 create mode 100644 lib/arm/smp.c
 create mode 100644 lib/arm64/asm/smp.h

diff --git a/arm/cstart.S b/arm/cstart.S
index 08a0b3ecc61f6..574802670ee17 100644
--- a/arm/cstart.S
+++ b/arm/cstart.S
@@ -32,6 +32,7 @@ start:
push{r0-r1}
 
/* set up vector table and mode stacks */
+   ldr r0, =exception_stacks
bl  exceptions_init
 
/* complete setup */
@@ -62,13 +63,12 @@ exceptions_init:
mcr p15, 0, r2, c12, c0, 0  @ write VBAR
 
mrs r2, cpsr
-   ldr r1, =exception_stacks
 
/* first frame reserved for svc mode */
-   set_mode_stack  UND_MODE, r1
-   set_mode_stack  ABT_MODE, r1
-   set_mode_stack  IRQ_MODE, r1
-   set_mode_stack  FIQ_MODE, r1
+   set_mode_stack  UND_MODE, r0
+   set_mode_stack  ABT_MODE, r0
+   set_mode_stack  IRQ_MODE, r0
+   set_mode_stack  FIQ_MODE, r0
 
msr cpsr_cxsf, r2   @ back to svc mode
isb
@@ -76,6 +76,28 @@ exceptions_init:
 
 .text
 
+.global secondary_entry
+secondary_entry:
+   /* set up vector table and mode stacks */
+   ldr r4, =secondary_data
+   ldr r0, [r4, #SECONDARY_DATA_ESTACKS]
+   bl  exceptions_init
+
+   /* enable the MMU */
+   mov r1, #0
+   ldr r0, =mmu_idmap
+   ldr r0, [r0]
+   bl  asm_mmu_enable
+
+   /* set the stack */
+   ldr sp, [r4, #SECONDARY_DATA_STACK]
+
+   /* finish init in C code */
+   bl  secondary_cinit
+
+   /* r0 is now the entry function, run it */
+   mov pc, r0
+
 .globl halt
 halt:
 1: wfi
diff --git a/arm/cstart64.S b/arm/cstart64.S
index 58e4040cfb40f..b4d7f1939793b 100644
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -55,6 +55,31 @@ exceptions_init:
 
 .text
 
+.globl secondary_entry
+secondary_entry:
+   /* Enable FP/ASIMD */
+   mov x0, #(3 << 20)
+   msr cpacr_el1, x0
+
+   /* set up exception handling */
+   bl  exceptions_init
+
+   /* enable the MMU */
+   adr x0, mmu_idmap
+   ldr x0, [x0]
+   bl  asm_mmu_enable
+
+   /* set the stack */
+   adr x1, secondary_data
+   ldr x0, [x1, #SECONDARY_DATA_STACK]
+   mov sp, x0
+
+   /* finish init in C code */
+   bl  secondary_cinit
+
+   /* x0 is now the entry function, run it */
+   br  x0
+
 .globl halt
 halt:
 1: wfi
diff --git a/config/config-arm-common.mak b/config/config-arm-common.mak
index 13f5338a35a02..314261ef60cf7 100644
--- a/config/config-arm-common.mak
+++ b/config/config-arm-common.mak
@@ -36,6 +36,7 @@ cflatobjs += lib/arm/setup.o
 cflatobjs += lib/arm/mmu.o
 cflatobjs += lib/arm/bitops.o
 cflatobjs += lib/arm/psci.o
+cflatobjs += lib/arm/smp.o
 
 libeabi = lib/arm/libeabi.a
 eabiobjs = lib/arm/eabi_compat.o
diff --git a/lib/arm/asm-offsets.c b/lib/arm/asm-offsets.c
index 1ee9da070f609..b0932c62703b8 100644
--- a/lib/arm/asm-offsets.c
+++ b/lib/arm/asm-offsets.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 
 int main(void)
 {
@@ -30,5 +31,7 @@ int main(void)
OFFSET(S_PSR, pt_regs, ARM_cpsr);
OFFSET(S_OLD_R0, pt_regs, ARM_ORIG_r0);
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+   OFFSET(SECONDARY_DATA_STACK, secondary_data, stack);
+   OFFSET(SECONDARY_DATA_ESTACKS, secondary_data, exception_stacks);
return 0;
 }
diff --git a/lib/arm/asm/psci.h b/lib/arm/asm/psci.h
index e2e66b47de480..c5fe78184b5ac 100644
--- a/lib/arm/asm/psci.h
+++ b/lib/arm/asm/psci.h
@@ -9,5 +9,7 @@
 extern int psci_invoke(u32 function_id, u32 arg0, u32 arg1, u32 arg2);
 extern int psci_cpu_on(unsigned long cpuid, unsigned long entry_point);
 extern void psci_sys_reset(void);
+extern int cpu_psci_cpu_boot(unsigned int cpu);
+extern void cpu_psci_cpu_die(unsigned int cpu);
 
 #endif /* _ASMARM_PSCI_H_ */
diff --git a/lib/arm/asm/smp.h b/lib/arm/asm/smp.h
new file mode 100644
index 0..46c05a3f67

[PATCH 04/18] arm64: fixup: use id_aa64mmfr0_el1 to set tcr

2015-02-01 Thread Andrew Jones
We shouldn't assume we can set tcr_el1.ips to 42 bits. Set it
based on the supported PA range we read from id_aa64mmfr0_el1
instead. I didn't see a problem, but we might as well be correct.
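
In C terms, the bfi now does roughly the following (sketch only; the
helpers below are made up, the real code stays in assembly):

static inline unsigned long get_id_aa64mmfr0(void)
{
        unsigned long val;

        asm volatile("mrs %0, id_aa64mmfr0_el1" : "=r" (val));
        return val;
}

/* copy the supported PA range field into TCR_EL1.IPS (bits [34:32]) */
static unsigned long tcr_set_ips(unsigned long tcr)
{
        unsigned long parange = get_id_aa64mmfr0() & 0x7;

        tcr &= ~(0x7ul << 32);
        return tcr | (parange << 32);
}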

Signed-off-by: Andrew Jones 
---
 arm/cstart64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arm/cstart64.S b/arm/cstart64.S
index 5151f4c77d745..9047e7ef14646 100644
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -92,7 +92,7 @@ asm_mmu_enable:
 TCR_TG0_64K | TCR_TG1_64K |\
 TCR_IRGN_WBWA | TCR_ORGN_WBWA |\
 TCR_SHARED
-   mov x2, #3  // 011 is 42 bits
+   mrs x2, id_aa64mmfr0_el1
bfi x1, x2, #32, #3
msr tcr_el1, x1
 
-- 
1.9.3



[PATCH 18/18] arm/arm64: Add smp selftest

2015-02-01 Thread Andrew Jones
Signed-off-by: Andrew Jones 
---
 arm/selftest.c| 60 +++
 arm/unittests.cfg | 11 --
 2 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/arm/selftest.c b/arm/selftest.c
index d77495747b08a..fc9ec609d875e 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -7,11 +7,16 @@
  */
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+#include 
 
 static void assert_args(int num_args, int needed_args)
 {
@@ -297,6 +302,45 @@ static void check_vectors(void *arg __unused)
exit(report_summary());
 }
 
+static bool psci_check(void)
+{
+   const struct fdt_property *method;
+   int node, len, ver;
+
+   node = fdt_node_offset_by_compatible(dt_fdt(), -1, "arm,psci-0.2");
+   if (node < 0) {
+   printf("PSCI v0.2 compatibility required\n");
+   return false;
+   }
+
+   method = fdt_get_property(dt_fdt(), node, "method", &len);
+   if (method == NULL) {
+   printf("bad psci device tree node\n");
+   return false;
+   }
+
+   if (len < 4 || strcmp(method->data, "hvc") != 0) {
+   printf("psci method must be hvc\n");
+   return false;
+   }
+
+   ver = psci_invoke(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+   printf("PSCI version %d.%d\n", PSCI_VERSION_MAJOR(ver),
+  PSCI_VERSION_MINOR(ver));
+
+   return true;
+}
+
+static cpumask_t smp_reported;
+static void cpu_report(void)
+{
+   int cpu = smp_processor_id();
+
+   report("CPU%d online", true, cpu);
+   cpumask_set_cpu(cpu, &smp_reported);
+   halt();
+}
+
 int main(int argc, char **argv)
 {
report_prefix_push("selftest");
@@ -316,6 +360,22 @@ int main(int argc, char **argv)
void *sp = memalign(THREAD_SIZE, THREAD_SIZE);
start_usr(check_vectors, NULL,
(unsigned long)sp + THREAD_START_SP);
+
+   } else if (strcmp(argv[0], "smp") == 0) {
+
+   int cpu;
+
+   report("PSCI version", psci_check());
+
+   for_each_present_cpu(cpu) {
+   if (cpu == 0)
+   continue;
+   smp_boot_secondary(cpu, cpu_report);
+   }
+
+   cpumask_set_cpu(0, &smp_reported);
+   while (!cpumask_full(&smp_reported))
+   cpu_relax();
}
 
return report_summary();
diff --git a/arm/unittests.cfg b/arm/unittests.cfg
index efcca6bf24af6..ee655b2678a4e 100644
--- a/arm/unittests.cfg
+++ b/arm/unittests.cfg
@@ -13,8 +13,8 @@
 #
 [selftest::setup]
 file = selftest.flat
-smp  = 1
-extra_params = -m 256 -append 'setup smp=1 mem=256'
+smp = 2
+extra_params = -m 256 -append 'setup smp=2 mem=256'
 groups = selftest
 
 # Test vector setup and exception handling (kernel mode).
@@ -28,3 +28,10 @@ groups = selftest
 file = selftest.flat
 extra_params = -append 'vectors-user'
 groups = selftest
+
+# Test SMP support
+[selftest::smp]
+file = selftest.flat
+smp = $(getconf _NPROCESSORS_CONF)
+extra_params = -append 'smp'
+groups = selftest
-- 
1.9.3



[PATCH 08/18] arm/arm64: add per thread user_mode flag

2015-02-01 Thread Andrew Jones
While current_mode() == USR_MODE works on armv7 from PL0 to check
if we're in user mode, current_mode() would require reading a
privileged register on armv8. To work around this, on arm64 we
introduced a 'user_mode' variable. This variable needs to be per
thread now. Rather than starting to pollute thread_info with a
bunch of bools, create a flags field and a TIF_USER_MODE flag to
replace it. Use it on armv7 too for consistency. Also, now that
we need to create a thread_info initializer, add mpidr utilities
for setting thread_info->cpu.
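
With the flag in place, is_user() reduces to something like this
(sketch of the idea only; see the diff for the actual code):

#include <libcflat.h>
#include <asm/thread_info.h>

bool is_user(void)
{
        return current_thread_info()->flags & TIF_USER_MODE;
}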

Signed-off-by: Andrew Jones 
---
 arm/cstart64.S| 16 +---
 arm/selftest.c|  2 +-
 lib/arm/asm/processor.h   | 11 +++
 lib/arm/asm/thread_info.h | 13 +++--
 lib/arm/processor.c   | 14 ++
 lib/arm/setup.c   |  3 +++
 lib/arm64/asm/processor.h | 14 +-
 lib/arm64/processor.c | 15 +--
 8 files changed, 67 insertions(+), 21 deletions(-)

diff --git a/arm/cstart64.S b/arm/cstart64.S
index 2fe15eb1d3972..58e4040cfb40f 100644
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -156,13 +156,7 @@ asm_mmu_enable:
mrs x2, spsr_el1
stp x1, x2, [sp, #S_PC]
 
-   and x2, x2, #PSR_MODE_MASK
-   cmp x2, #PSR_MODE_EL0t
-   b.ne1f
-   adr x2, user_mode
-   str xzr, [x2]   /* we're in kernel mode now */
-
-1: mov x0, \vec
+   mov x0, \vec
mov x1, sp
mrs x2, esr_el1
bl  do_handle_exception
@@ -171,14 +165,6 @@ asm_mmu_enable:
msr spsr_el1, x2
msr elr_el1, x1
 
-   and x2, x2, #PSR_MODE_MASK
-   cmp x2, #PSR_MODE_EL0t
-   b.ne1f
-   adr x2, user_mode
-   mov x1, #1
-   str x1, [x2]/* we're going back to user mode */
-
-1:
.if \vec >= 8
ldr x1, [sp, #S_SP]
msr sp_el0, x1
diff --git a/arm/selftest.c b/arm/selftest.c
index 05ca7efe95f83..d77495747b08a 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -240,7 +240,7 @@ static enum vector check_vector_prep(void)
 {
unsigned long daif;
 
-   if (user_mode)
+   if (is_user())
return EL0_SYNC_64;
 
asm volatile("mrs %0, daif" : "=r" (daif) ::);
diff --git a/lib/arm/asm/processor.h b/lib/arm/asm/processor.h
index 9c37db66640e8..f25e7eee3666c 100644
--- a/lib/arm/asm/processor.h
+++ b/lib/arm/asm/processor.h
@@ -33,6 +33,17 @@ static inline unsigned long current_cpsr(void)
 
 #define current_mode() (current_cpsr() & MODE_MASK)
 
+static inline unsigned int get_mpidr(void)
+{
+   unsigned int mpidr;
+   asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+   return mpidr;
+}
+
+/* Only support Aff0 for now, up to 4 cpus */
+#define mpidr_to_cpu(mpidr) ((int)((mpidr) & 0xff))
+
 extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr);
+extern bool is_user(void);
 
 #endif /* _ASMARM_PROCESSOR_H_ */
diff --git a/lib/arm/asm/thread_info.h b/lib/arm/asm/thread_info.h
index ea86f142a7d93..17997e21d1274 100644
--- a/lib/arm/asm/thread_info.h
+++ b/lib/arm/asm/thread_info.h
@@ -11,17 +11,26 @@
 #define THREAD_SIZE		16384
 #define THREAD_START_SP	(THREAD_SIZE - 16)
 
+#define TIF_USER_MODE  (1U << 0)
+
 struct thread_info {
int cpu;
+   unsigned int flags;
char ext[0];/* allow unit tests to add extended info */
 };
 
+static inline struct thread_info *thread_info_sp(unsigned long sp)
+{
+   return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
 register unsigned long current_stack_pointer asm("sp");
 
 static inline struct thread_info *current_thread_info(void)
 {
-   return (struct thread_info *)
-   (current_stack_pointer & ~(THREAD_SIZE - 1));
+   return thread_info_sp(current_stack_pointer);
 }
 
+extern void thread_info_init(struct thread_info *ti, unsigned int flags);
+
 #endif /* _ASMARM_THREAD_INFO_H_ */
diff --git a/lib/arm/processor.c b/lib/arm/processor.c
index d2fd597fcd139..8a514a29c063b 100644
--- a/lib/arm/processor.c
+++ b/lib/arm/processor.c
@@ -100,10 +100,19 @@ void do_handle_exception(enum vector v, struct pt_regs *regs)
abort();
 }
 
+void thread_info_init(struct thread_info *ti, unsigned int flags)
+{
+   memset(ti, 0, sizeof(struct thread_info));
+   ti->cpu = mpidr_to_cpu(get_mpidr());
+   ti->flags = flags;
+}
+
 void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr)
 {
sp_usr &= (~7UL); /* stack ptr needs 8-byte alignment */
 
+   thread_info_init(thread_info_sp(sp_usr), TIF_USER_MODE);
+
asm volatile(
"mrsr0, cpsr\n"
"bicr0, #" xstr(MODE_MASK) "\n"
@@ -115,3 +124,8 @@ void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr)
"movpc, %2\n"
:: "r" (arg), "r" (sp_usr), "r" (func) : "r0

[PATCH 07/18] arm/arm64: introduce thread_info

2015-02-01 Thread Andrew Jones
For smp we need a way to maintain thread local state. The bottom
of the thread stack is a good place, and is where Linux puts it.
So we just steal the concept of the thread_info structure that
lives at the bottom of the stack in Linux, and introduce it to
kvm-unit-tests/arm[64]. For starters we just have cpu index for
state, and that's implicitly initialized to zero for CPU0 already.
So, as we don't have secondary cpus yet, there's not much to do.

Additionally, sneak a small fixup into the initial stack setup
for arm64. We were assuming that SPSel selects SP_EL1 after reset,
which has been true so far, but let's not assume.
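
As an aside, the ext[] member is there so a unit test can hang its
own per-thread data off the bottom of the stack, e.g. (illustrative
only, names made up):

#include <asm/thread_info.h>

struct my_test_data {
        int irqs_seen;
};

static inline struct my_test_data *my_test_data(void)
{
        return (struct my_test_data *)current_thread_info()->ext;
}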

Signed-off-by: Andrew Jones 
---
 arm/cstart.S|  2 +-
 arm/cstart64.S  |  5 -
 arm/flat.lds|  6 ++
 arm/selftest.c  |  8 
 lib/arm/asm/thread_info.h   | 27 +++
 lib/arm64/asm/thread_info.h |  1 +
 6 files changed, 43 insertions(+), 6 deletions(-)
 create mode 100644 lib/arm/asm/thread_info.h
 create mode 100644 lib/arm64/asm/thread_info.h

diff --git a/arm/cstart.S b/arm/cstart.S
index 39fac8f1e1bd8..fd02aaab268d7 100644
--- a/arm/cstart.S
+++ b/arm/cstart.S
@@ -27,7 +27,7 @@ start:
 * put the dtb in r0. This allows setup to be consistent
 * with arm64.
 */
-   ldr sp, =stacktop
+   ldr sp, =stackptr
mov r0, r2
push{r0-r1}
 
diff --git a/arm/cstart64.S b/arm/cstart64.S
index 9047e7ef14646..2fe15eb1d3972 100644
--- a/arm/cstart64.S
+++ b/arm/cstart64.S
@@ -21,7 +21,10 @@ start:
 * The physical address of the dtb is in x0, x1-x3 are reserved
 * See the kernel doc Documentation/arm64/booting.txt
 */
-   adr x4, stacktop
+   mov x4, #1
+   msr spsel, x4
+   isb
+   adr x4, stackptr
mov sp, x4
stp x0, x1, [sp, #-16]!
 
diff --git a/arm/flat.lds b/arm/flat.lds
index a8849ee0939a8..df80d3678e556 100644
--- a/arm/flat.lds
+++ b/arm/flat.lds
@@ -18,6 +18,12 @@ SECTIONS
 edata = .;
 . += 64K;
 . = ALIGN(64K);
+/*
+ * stack depth is ~16K, see THREAD_SIZE
+ * sp must be 16 byte aligned for arm64, and 8 byte aligned for arm
+ * sp must always be strictly less than the true stacktop
+ */
+stackptr = . - 16;
 stacktop = .;
 }
 
diff --git a/arm/selftest.c b/arm/selftest.c
index de816f8142c54..05ca7efe95f83 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -11,7 +11,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 
 static void assert_args(int num_args, int needed_args)
 {
@@ -313,9 +313,9 @@ int main(int argc, char **argv)
 
} else if (strcmp(argv[0], "vectors-user") == 0) {
 
-   void *sp = memalign(PAGE_SIZE, PAGE_SIZE);
-   memset(sp, 0, PAGE_SIZE);
-   start_usr(check_vectors, NULL, (unsigned long)sp + PAGE_SIZE);
+   void *sp = memalign(THREAD_SIZE, THREAD_SIZE);
+   start_usr(check_vectors, NULL,
+   (unsigned long)sp + THREAD_START_SP);
}
 
return report_summary();
diff --git a/lib/arm/asm/thread_info.h b/lib/arm/asm/thread_info.h
new file mode 100644
index 0..ea86f142a7d93
--- /dev/null
+++ b/lib/arm/asm/thread_info.h
@@ -0,0 +1,27 @@
+#ifndef _ASMARM_THREAD_INFO_H_
+#define _ASMARM_THREAD_INFO_H_
+/*
+ * Adapted from arch/arm64/include/asm/thread_info.h
+ *
+ * Copyright (C) 2015, Red Hat Inc, Andrew Jones 
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+
+#define THREAD_SIZE		16384
+#define THREAD_START_SP	(THREAD_SIZE - 16)
+
+struct thread_info {
+   int cpu;
+   char ext[0];/* allow unit tests to add extended info */
+};
+
+register unsigned long current_stack_pointer asm("sp");
+
+static inline struct thread_info *current_thread_info(void)
+{
+   return (struct thread_info *)
+   (current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
+#endif /* _ASMARM_THREAD_INFO_H_ */
diff --git a/lib/arm64/asm/thread_info.h b/lib/arm64/asm/thread_info.h
new file mode 100644
index 0..b01fa8f34b3bf
--- /dev/null
+++ b/lib/arm64/asm/thread_info.h
@@ -0,0 +1 @@
+#include "../../arm/asm/thread_info.h"
-- 
1.9.3



[PATCH 16/18] arm: clarify comment about exception stack use

2015-02-01 Thread Andrew Jones
Each mode has its own stack pointer, but we only use it as a base
pointer to a private memory region. That region, which is
sizeof(struct pt_regs) bytes, is used to stash some registers during
the switch to svc mode, which then uses the normal svc stack. We
switch to svc mode because we handle all exceptions in svc.

Signed-off-by: Andrew Jones 
---
 arm/cstart.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arm/cstart.S b/arm/cstart.S
index fd02aaab268d7..08a0b3ecc61f6 100644
--- a/arm/cstart.S
+++ b/arm/cstart.S
@@ -121,8 +121,9 @@ asm_mmu_enable:
  * Simplified version of the Linux kernel implementation
  *   arch/arm/kernel/entry-armv.S
  *
- * Each mode has an S_FRAME_SIZE sized stack initialized
- * in exceptions_init
+ * Each mode has an S_FRAME_SIZE sized memory region,
+ * and the mode's stack pointer has been initialized
+ * to the base of that region in exceptions_init.
  */
 .macro vector_stub, name, vec, mode, correction=0
 .align 5
-- 
1.9.3



[PATCH 13/18] arm/arm64: import include/uapi/linux/psci.h

2015-02-01 Thread Andrew Jones
Signed-off-by: Andrew Jones 
---
 lib/arm/asm/uapi-psci.h   | 73 +++
 lib/arm64/asm/uapi-psci.h |  1 +
 2 files changed, 74 insertions(+)
 create mode 100644 lib/arm/asm/uapi-psci.h
 create mode 100644 lib/arm64/asm/uapi-psci.h

diff --git a/lib/arm/asm/uapi-psci.h b/lib/arm/asm/uapi-psci.h
new file mode 100644
index 0..5c6fada2b5105
--- /dev/null
+++ b/lib/arm/asm/uapi-psci.h
@@ -0,0 +1,73 @@
+#ifndef _ASMARM_UAPI_PSCI_H_
+#define _ASMARM_UAPI_PSCI_H_
+/*
+ * From include/uapi/linux/psci.h
+ */
+
+/* PSCI v0.2 interface */
+#define PSCI_0_2_FN_BASE		0x84000000
+#define PSCI_0_2_FN(n)			(PSCI_0_2_FN_BASE + (n))
+#define PSCI_0_2_64BIT			0x40000000
+#define PSCI_0_2_FN64_BASE		\
+					(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
+#define PSCI_0_2_FN64(n)		(PSCI_0_2_FN64_BASE + (n))
+
+#define PSCI_0_2_FN_PSCI_VERSION	PSCI_0_2_FN(0)
+#define PSCI_0_2_FN_CPU_SUSPEND		PSCI_0_2_FN(1)
+#define PSCI_0_2_FN_CPU_OFF		PSCI_0_2_FN(2)
+#define PSCI_0_2_FN_CPU_ON		PSCI_0_2_FN(3)
+#define PSCI_0_2_FN_AFFINITY_INFO	PSCI_0_2_FN(4)
+#define PSCI_0_2_FN_MIGRATE		PSCI_0_2_FN(5)
+#define PSCI_0_2_FN_MIGRATE_INFO_TYPE	PSCI_0_2_FN(6)
+#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU	PSCI_0_2_FN(7)
+#define PSCI_0_2_FN_SYSTEM_OFF		PSCI_0_2_FN(8)
+#define PSCI_0_2_FN_SYSTEM_RESET	PSCI_0_2_FN(9)
+
+#define PSCI_0_2_FN64_CPU_SUSPEND	PSCI_0_2_FN64(1)
+#define PSCI_0_2_FN64_CPU_ON		PSCI_0_2_FN64(3)
+#define PSCI_0_2_FN64_AFFINITY_INFO	PSCI_0_2_FN64(4)
+#define PSCI_0_2_FN64_MIGRATE		PSCI_0_2_FN64(5)
+#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU	PSCI_0_2_FN64(7)
+
+/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
+#define PSCI_0_2_POWER_STATE_ID_MASK		0xffff
+#define PSCI_0_2_POWER_STATE_ID_SHIFT		0
+#define PSCI_0_2_POWER_STATE_TYPE_SHIFT		16
+#define PSCI_0_2_POWER_STATE_TYPE_MASK		\
+				(0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+#define PSCI_0_2_POWER_STATE_AFFL_SHIFT		24
+#define PSCI_0_2_POWER_STATE_AFFL_MASK		\
+				(0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+
+/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
+#define PSCI_0_2_AFFINITY_LEVEL_ON		0
+#define PSCI_0_2_AFFINITY_LEVEL_OFF		1
+#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING	2
+
+/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
+#define PSCI_0_2_TOS_UP_MIGRATE			0
+#define PSCI_0_2_TOS_UP_NO_MIGRATE		1
+#define PSCI_0_2_TOS_MP				2
+
+/* PSCI version decoding (independent of PSCI version) */
+#define PSCI_VERSION_MAJOR_SHIFT		16
+#define PSCI_VERSION_MINOR_MASK			\
+			((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
+#define PSCI_VERSION_MAJOR_MASK			~PSCI_VERSION_MINOR_MASK
+#define PSCI_VERSION_MAJOR(ver)			\
+			(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
+#define PSCI_VERSION_MINOR(ver)			\
+			((ver) & PSCI_VERSION_MINOR_MASK)
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define PSCI_RET_SUCCESS			0
+#define PSCI_RET_NOT_SUPPORTED			-1
+#define PSCI_RET_INVALID_PARAMS			-2
+#define PSCI_RET_DENIED				-3
+#define PSCI_RET_ALREADY_ON			-4
+#define PSCI_RET_ON_PENDING			-5
+#define PSCI_RET_INTERNAL_FAILURE  -6
+#define PSCI_RET_NOT_PRESENT   -7
+#define PSCI_RET_DISABLED  -8
+
+#endif /* _ASMARM_UAPI_PSCI_H_ */
diff --git a/lib/arm64/asm/uapi-psci.h b/lib/arm64/asm/uapi-psci.h
new file mode 100644
index 0..83d018f954e4c
--- /dev/null
+++ b/lib/arm64/asm/uapi-psci.h
@@ -0,0 +1 @@
+#include "../../arm/asm/uapi-psci.h"
-- 
1.9.3



[PATCH 02/18] lib/report: guard access to counters

2015-02-01 Thread Andrew Jones
Use a lock to avoid exposing the counters, and any other
global data, to potential races.

Signed-off-by: Andrew Jones 
---
 lib/report.c | 16 
 1 file changed, 16 insertions(+)

diff --git a/lib/report.c b/lib/report.c
index dc30250c676d3..35e664108a921 100644
--- a/lib/report.c
+++ b/lib/report.c
@@ -11,20 +11,26 @@
  */
 
 #include "libcflat.h"
+#include "asm/spinlock.h"
 
 static unsigned int tests, failures, xfailures;
 static char prefixes[256];
+static struct spinlock lock;
 
 void report_prefix_push(const char *prefix)
 {
+   spin_lock(&lock);
strcat(prefixes, prefix);
strcat(prefixes, ": ");
+   spin_unlock(&lock);
 }
 
 void report_prefix_pop(void)
 {
char *p, *q;
 
+   spin_lock(&lock);
+
 	if (!*prefixes)
-		return;
+		goto out;
 
@@ -33,6 +39,8 @@ void report_prefix_pop(void)
p = q, q = strstr(p, ": ") + 2)
;
*p = '\0';
+out:
+	spin_unlock(&lock);
 }
 
 void va_report_xfail(const char *msg_fmt, bool xfail, bool cond, va_list va)
@@ -41,6 +49,8 @@ void va_report_xfail(const char *msg_fmt, bool xfail, bool cond, va_list va)
char *fail = xfail ? "XFAIL" : "FAIL";
char buf[2000];
 
+   spin_lock(&lock);
+
tests++;
printf("%s: ", cond ? pass : fail);
puts(prefixes);
@@ -53,6 +63,8 @@ void va_report_xfail(const char *msg_fmt, bool xfail, bool cond, va_list va)
xfailures++;
else if (!cond)
failures++;
+
+   spin_unlock(&lock);
 }
 
 void report(const char *msg_fmt, bool pass, ...)
@@ -73,10 +85,14 @@ void report_xfail(const char *msg_fmt, bool xfail, bool pass, ...)
 
 int report_summary(void)
 {
+   spin_lock(&lock);
+
printf("\nSUMMARY: %d tests, %d unexpected failures", tests, failures);
if (xfailures)
printf(", %d expected failures\n", xfailures);
else
printf("\n");
+
+	spin_unlock(&lock);
 	return failures > 0 ? 1 : 0;
 }
-- 
1.9.3



[PATCH 15/18] arm/arm64: add cpu_relax() and friends

2015-02-01 Thread Andrew Jones
Signed-off-by: Andrew Jones 
---
 lib/arm/asm/barrier.h   | 5 +
 lib/arm64/asm/barrier.h | 5 +
 2 files changed, 10 insertions(+)

diff --git a/lib/arm/asm/barrier.h b/lib/arm/asm/barrier.h
index acaeab5123431..394a4a2da26f1 100644
--- a/lib/arm/asm/barrier.h
+++ b/lib/arm/asm/barrier.h
@@ -4,6 +4,11 @@
  * Adapted form arch/arm/include/asm/barrier.h
  */
 
+#define sev()  asm volatile("sev" : : : "memory")
+#define wfe()  asm volatile("wfe" : : : "memory")
+#define wfi()  asm volatile("wfi" : : : "memory")
+#define cpu_relax()asm volatile("": : : "memory")
+
 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
diff --git a/lib/arm64/asm/barrier.h b/lib/arm64/asm/barrier.h
index 8ebdfdf7f1037..dbdac9d339c7e 100644
--- a/lib/arm64/asm/barrier.h
+++ b/lib/arm64/asm/barrier.h
@@ -4,6 +4,11 @@
  * From Linux arch/arm64/include/asm/barrier.h
  */
 
+#define sev()  asm volatile("sev" : : : "memory")
+#define wfe()  asm volatile("wfe" : : : "memory")
+#define wfi()  asm volatile("wfi" : : : "memory")
+#define cpu_relax()asm volatile("": : : "memory")
+
 #define isb()  asm volatile("isb" : : : "memory")
 #define dmb(opt)   asm volatile("dmb " #opt : : : "memory")
 #define dsb(opt)   asm volatile("dsb " #opt : : : "memory")
-- 
1.9.3



[PATCH 05/18] arm/arm64: processor.[ch] cleanups

2015-02-01 Thread Andrew Jones
Add 'const' to a few global arrays that are constant. Also, there's
no need for default_vector_handler to be static; unit tests may want
to reset vector handlers to it.
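
e.g. a test can temporarily install its own handler and then restore
the default (sketch only; the vector value and handler below are
made up):

#include <libcflat.h>
#include <asm/processor.h>

static void my_sync_handler(enum vector v, struct pt_regs *regs,
                            unsigned int esr)
{
        /* test-specific handling */
}

static void run_test(void)
{
        install_vector_handler(EL1H_SYNC, my_sync_handler);
        /* ... trigger and check the exception ... */
        install_vector_handler(EL1H_SYNC, default_vector_handler);
}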

Signed-off-by: Andrew Jones 
---
 lib/arm/processor.c   | 2 +-
 lib/arm64/asm/processor.h | 2 ++
 lib/arm64/processor.c | 8 
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/lib/arm/processor.c b/lib/arm/processor.c
index f8bd94cbb8bc6..5ad999c6b378b 100644
--- a/lib/arm/processor.c
+++ b/lib/arm/processor.c
@@ -20,7 +20,7 @@ static const char *processor_modes[] = {
"UK12_32", "UK13_32", "UK14_32", "SYS_32"
 };
 
-static char *vector_names[] = {
+static const char *vector_names[] = {
"rst", "und", "svc", "pabt", "dabt", "addrexcptn", "irq", "fiq"
 };
 
diff --git a/lib/arm64/asm/processor.h b/lib/arm64/asm/processor.h
index f73ffb5e4bc95..a33f70afb3a3e 100644
--- a/lib/arm64/asm/processor.h
+++ b/lib/arm64/asm/processor.h
@@ -47,6 +47,8 @@ typedef void (*exception_fn)(struct pt_regs *regs, unsigned 
int esr);
 extern void install_vector_handler(enum vector v, vector_fn fn);
 extern void install_exception_handler(enum vector v, unsigned int ec,
  exception_fn fn);
+extern void default_vector_handler(enum vector v, struct pt_regs *regs,
+  unsigned int esr);
 
 extern void show_regs(struct pt_regs *regs);
 extern void *get_sp(void);
diff --git a/lib/arm64/processor.c b/lib/arm64/processor.c
index 7dc0b2e026134..efc78353d7ec0 100644
--- a/lib/arm64/processor.c
+++ b/lib/arm64/processor.c
@@ -10,7 +10,7 @@
 #include 
 #include 
 
-static char *vector_names[] = {
+static const char *vector_names[] = {
"el1t_sync",
"el1t_irq",
"el1t_fiq",
@@ -29,7 +29,7 @@ static char *vector_names[] = {
"el0_error_32",
 };
 
-static char *ec_names[EC_MAX] = {
+static const char *ec_names[EC_MAX] = {
[ESR_EL1_EC_UNKNOWN]= "UNKNOWN",
[ESR_EL1_EC_WFI]= "WFI",
[ESR_EL1_EC_CP15_32]= "CP15_32",
@@ -142,8 +142,8 @@ void install_exception_handler(enum vector v, unsigned int 
ec, exception_fn fn)
exception_handlers[v][ec] = fn;
 }
 
-static void default_vector_handler(enum vector v, struct pt_regs *regs,
-  unsigned int esr)
+void default_vector_handler(enum vector v, struct pt_regs *regs,
+   unsigned int esr)
 {
unsigned int ec = esr >> ESR_EL1_EC_SHIFT;
 
-- 
1.9.3



[kvm-unit-tests PATCH 00/18] arm/arm64: add smp support

2015-02-01 Thread Andrew Jones
This series extends the kvm-unit-tests/arm[64] framework to support smp.
A breakdown of the patches is as follows:

01-02: prepare general framework for smp use
03-06: arm/arm64 fixups not 100% related to this series,
   but need to post some time...
07-09: add thread_info (for per-thread data) and suck some global
   data into it
10-11: add cpumask support (for per-cpu data) and suck some more
   global data in
   12: add arm64 simple spinlock implementation
13-14: add some PSCI support
15-16: further prep for smp_boot_secondary
   17: finally add smp_boot_secondary
   18: as usual, add a selftest to make sure it all works

These patches are also available here:
https://github.com/rhdrjones/kvm-unit-tests/tree/arm/smp

Thanks in advance for reviews!


Andrew Jones (18):
  x86: expose spin_lock/unlock to lib code
  lib/report: guard access to counters
  arm: fixups: add barriers, actually set MAIR
  arm64: fixup: use id_aa64mmfr0_el1 to set tcr
  arm/arm64: processor.[ch] cleanups
  arm/arm64: get rid of get_sp()
  arm/arm64: introduce thread_info
  arm/arm64: add per thread user_mode flag
  arm/arm64: maintain per thread exception handlers
  arm/arm64: add simple cpumask API
  arm/arm64: make mmu_on per cpu
  arm64: implement spinlocks
  arm/arm64: import include/uapi/linux/psci.h
  arm/arm64: add some PSCI API
  arm/arm64: add cpu_relax() and friends
  arm: clarify comment about exception stack use
  arm/arm64: add smp_boot_secondary
  arm/arm64: Add smp selftest

 arm/cstart.S |  47 +
 arm/cstart64.S   |  48 +++---
 arm/flat.lds |   6 +++
 arm/selftest.c   |  70 +++--
 arm/unittests.cfg|  11 +++-
 config/config-arm-common.mak |   3 ++
 config/config-arm64.mak  |   1 +
 lib/arm/asm-offsets.c|   3 ++
 lib/arm/asm/barrier.h|   5 ++
 lib/arm/asm/bitops.h |  53 +++
 lib/arm/asm/cpumask.h| 118 +++
 lib/arm/asm/mmu-api.h|   1 +
 lib/arm/asm/processor.h  |  12 -
 lib/arm/asm/psci.h   |  15 ++
 lib/arm/asm/smp.h|  56 
 lib/arm/asm/thread_info.h|  60 ++
 lib/arm/asm/uapi-psci.h  |  73 ++
 lib/arm/bitops.c |  81 +
 lib/arm/mmu.c|  15 --
 lib/arm/processor.c  |  44 +++-
 lib/arm/psci.c   |  49 ++
 lib/arm/setup.c  |  11 +++-
 lib/arm/smp.c|  57 +
 lib/arm64/asm-offsets.c  |   2 +
 lib/arm64/asm/barrier.h  |   5 ++
 lib/arm64/asm/bitops.h   |  51 +++
 lib/arm64/asm/cpumask.h  |   1 +
 lib/arm64/asm/processor.h|  18 ++-
 lib/arm64/asm/psci.h |  15 ++
 lib/arm64/asm/smp.h  |   1 +
 lib/arm64/asm/spinlock.h |   8 +--
 lib/arm64/asm/thread_info.h  |   1 +
 lib/arm64/asm/uapi-psci.h|   1 +
 lib/arm64/processor.c|  81 +++--
 lib/arm64/spinlock.c |  43 
 lib/report.c |  16 ++
 lib/x86/asm/spinlock.h   |  11 
 lib/x86/smp.h|   7 +--
 38 files changed, 1008 insertions(+), 92 deletions(-)
 create mode 100644 lib/arm/asm/bitops.h
 create mode 100644 lib/arm/asm/cpumask.h
 create mode 100644 lib/arm/asm/psci.h
 create mode 100644 lib/arm/asm/smp.h
 create mode 100644 lib/arm/asm/thread_info.h
 create mode 100644 lib/arm/asm/uapi-psci.h
 create mode 100644 lib/arm/bitops.c
 create mode 100644 lib/arm/psci.c
 create mode 100644 lib/arm/smp.c
 create mode 100644 lib/arm64/asm/bitops.h
 create mode 100644 lib/arm64/asm/cpumask.h
 create mode 100644 lib/arm64/asm/psci.h
 create mode 100644 lib/arm64/asm/smp.h
 create mode 100644 lib/arm64/asm/thread_info.h
 create mode 100644 lib/arm64/asm/uapi-psci.h
 create mode 100644 lib/arm64/spinlock.c
 create mode 100644 lib/x86/asm/spinlock.h

-- 
1.9.3



[PATCH 01/18] x86: expose spin_lock/unlock to lib code

2015-02-01 Thread Andrew Jones
Move the spin_lock/unlock declarations to lib/x86/asm/spinlock.h,
allowing lib code, e.g. lib/report.c, to use spinlocks.

Signed-off-by: Andrew Jones 
---
 lib/x86/asm/spinlock.h | 11 +++
 lib/x86/smp.h  |  7 +--
 2 files changed, 12 insertions(+), 6 deletions(-)
 create mode 100644 lib/x86/asm/spinlock.h

diff --git a/lib/x86/asm/spinlock.h b/lib/x86/asm/spinlock.h
new file mode 100644
index 0..4b0cb331c048b
--- /dev/null
+++ b/lib/x86/asm/spinlock.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+struct spinlock {
+int v;
+};
+
+void spin_lock(struct spinlock *lock);
+void spin_unlock(struct spinlock *lock);
+
+#endif
diff --git a/lib/x86/smp.h b/lib/x86/smp.h
index df5fdba9b9288..566018f49ba31 100644
--- a/lib/x86/smp.h
+++ b/lib/x86/smp.h
@@ -1,21 +1,16 @@
 #ifndef __SMP_H
 #define __SMP_H
+#include 
 
 #define mb()   asm volatile("mfence":::"memory")
 #define rmb()  asm volatile("lfence":::"memory")
 #define wmb()  asm volatile("sfence" ::: "memory")
 
-struct spinlock {
-int v;
-};
-
 void smp_init(void);
 
 int cpu_count(void);
 int smp_id(void);
 void on_cpu(int cpu, void (*function)(void *data), void *data);
 void on_cpu_async(int cpu, void (*function)(void *data), void *data);
-void spin_lock(struct spinlock *lock);
-void spin_unlock(struct spinlock *lock);
 
 #endif
-- 
1.9.3
