Re: [PATCH 1/5] crypto: sha256 - remove duplicate generic hash init function

2021-12-22 Thread Tianjia Zhang

Hi Julian,

On 12/23/21 6:35 AM, Julian Calaby wrote:

Hi Tianjia,

On Mon, Dec 20, 2021 at 8:25 PM Tianjia Zhang
 wrote:


crypto_sha256_init() and sha256_base_init() are duplicate
implementations of the same function, so remove crypto_sha256_init()
from the generic implementation; the same applies to sha224.

Signed-off-by: Tianjia Zhang 
---
  crypto/sha256_generic.c | 16 ++--
  1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 3b377197236e..bf147b01e313 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -72,7 +60,7 @@ EXPORT_SYMBOL(crypto_sha256_finup);

  static struct shash_alg sha256_algs[2] = { {
 .digestsize =   SHA256_DIGEST_SIZE,
-   .init   =   crypto_sha256_init,
+   .init   =   sha256_base_init,
 .update =   crypto_sha256_update,
 .final  =   crypto_sha256_final,
 .finup  =   crypto_sha256_finup,
@@ -86,7 +74,7 @@ static struct shash_alg sha256_algs[2] = { {
 }
  }, {
 .digestsize =   SHA224_DIGEST_SIZE,
-   .init   =   crypto_sha224_init,
+   .init   =   sha224_base_init,
 .update =   crypto_sha256_update,
 .final  =   crypto_sha256_final,
 .finup  =   crypto_sha256_finup,


Aren't these two functions defined as static inline functions? It
appears that these crypto_ wrappers were added so there are "actual"
referenceable functions for these structs.

Did this actually compile?

Thanks,



Judging from the compilation results, there is really no difference, but 
the modification made by this patch is still necessary, because the 
crypto_sha256_init() wrapper and sha256_base_init() are completely 
duplicated implementations of the same function.


Best regards,
Tianjia


[PATCH 5/5] crypto: s390/sha512 - Use macros instead of direct IV numbers

2021-12-20 Thread Tianjia Zhang
In the init functions of sha512 and sha384, use macros for the initial
hash values instead of direct numeric literals.

Signed-off-by: Tianjia Zhang 
---
 arch/s390/crypto/sha512_s390.c | 32 
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 29a6bd404c59..43ce4956df73 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -22,14 +22,14 @@ static int sha512_init(struct shash_desc *desc)
 {
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
-   *(__u64 *)>state[0] = 0x6a09e667f3bcc908ULL;
-   *(__u64 *)>state[2] = 0xbb67ae8584caa73bULL;
-   *(__u64 *)>state[4] = 0x3c6ef372fe94f82bULL;
-   *(__u64 *)>state[6] = 0xa54ff53a5f1d36f1ULL;
-   *(__u64 *)>state[8] = 0x510e527fade682d1ULL;
-   *(__u64 *)>state[10] = 0x9b05688c2b3e6c1fULL;
-   *(__u64 *)>state[12] = 0x1f83d9abfb41bd6bULL;
-   *(__u64 *)>state[14] = 0x5be0cd19137e2179ULL;
+   *(__u64 *)>state[0] = SHA512_H0;
+   *(__u64 *)>state[2] = SHA512_H1;
+   *(__u64 *)>state[4] = SHA512_H2;
+   *(__u64 *)>state[6] = SHA512_H3;
+   *(__u64 *)>state[8] = SHA512_H4;
+   *(__u64 *)>state[10] = SHA512_H5;
+   *(__u64 *)>state[12] = SHA512_H6;
+   *(__u64 *)>state[14] = SHA512_H7;
ctx->count = 0;
ctx->func = CPACF_KIMD_SHA_512;
 
@@ -87,14 +87,14 @@ static int sha384_init(struct shash_desc *desc)
 {
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 
-   *(__u64 *)>state[0] = 0xcbbb9d5dc1059ed8ULL;
-   *(__u64 *)>state[2] = 0x629a292a367cd507ULL;
-   *(__u64 *)>state[4] = 0x9159015a3070dd17ULL;
-   *(__u64 *)>state[6] = 0x152fecd8f70e5939ULL;
-   *(__u64 *)>state[8] = 0x67332667ffc00b31ULL;
-   *(__u64 *)>state[10] = 0x8eb44a8768581511ULL;
-   *(__u64 *)>state[12] = 0xdb0c2e0d64f98fa7ULL;
-   *(__u64 *)>state[14] = 0x47b5481dbefa4fa4ULL;
+   *(__u64 *)>state[0] = SHA384_H0;
+   *(__u64 *)>state[2] = SHA384_H1;
+   *(__u64 *)>state[4] = SHA384_H2;
+   *(__u64 *)>state[6] = SHA384_H3;
+   *(__u64 *)>state[8] = SHA384_H4;
+   *(__u64 *)>state[10] = SHA384_H5;
+   *(__u64 *)>state[12] = SHA384_H6;
+   *(__u64 *)>state[14] = SHA384_H7;
ctx->count = 0;
ctx->func = CPACF_KIMD_SHA_512;
 
-- 
2.32.0



[PATCH 1/5] crypto: sha256 - remove duplicate generic hash init function

2021-12-20 Thread Tianjia Zhang
crypto_sha256_init() and sha256_base_init() are duplicate
implementations of the same function, so remove crypto_sha256_init()
from the generic implementation; the same applies to sha224.

Signed-off-by: Tianjia Zhang 
---
 crypto/sha256_generic.c | 16 ++--
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 3b377197236e..bf147b01e313 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -33,18 +33,6 @@ const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
 };
 EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
 
-static int crypto_sha256_init(struct shash_desc *desc)
-{
-   sha256_init(shash_desc_ctx(desc));
-   return 0;
-}
-
-static int crypto_sha224_init(struct shash_desc *desc)
-{
-   sha224_init(shash_desc_ctx(desc));
-   return 0;
-}
-
 int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
  unsigned int len)
 {
@@ -72,7 +60,7 @@ EXPORT_SYMBOL(crypto_sha256_finup);
 
 static struct shash_alg sha256_algs[2] = { {
.digestsize =   SHA256_DIGEST_SIZE,
-   .init   =   crypto_sha256_init,
+   .init   =   sha256_base_init,
.update =   crypto_sha256_update,
.final  =   crypto_sha256_final,
.finup  =   crypto_sha256_finup,
@@ -86,7 +74,7 @@ static struct shash_alg sha256_algs[2] = { {
}
 }, {
.digestsize =   SHA224_DIGEST_SIZE,
-   .init   =   crypto_sha224_init,
+   .init   =   sha224_base_init,
.update =   crypto_sha256_update,
.final  =   crypto_sha256_final,
.finup  =   crypto_sha256_finup,
-- 
2.32.0



[PATCH 0/5] Remove duplicate context init function for sha algorithm

2021-12-20 Thread Tianjia Zhang
This series of patches mainly cleans up repetitive code. The sha
algorithm already provides a generic context initialization
implementation, so the duplicated context initialization code in each
platform's optimized implementation can be deleted. The
sha*_base_init() family of functions is used uniformly instead.

Tianjia Zhang (5):
  crypto: sha256 - remove duplicate generic hash init function
  crypto: mips/sha - remove duplicate hash init function
  crypto: powerpc/sha - remove duplicate hash init function
  crypto: sparc/sha - remove duplicate hash init function
  crypto: s390/sha512 - Use macros instead of direct IV numbers

 arch/mips/cavium-octeon/crypto/octeon-sha1.c  | 17 +---
 .../mips/cavium-octeon/crypto/octeon-sha256.c | 39 ++-
 .../mips/cavium-octeon/crypto/octeon-sha512.c | 39 ++-
 arch/powerpc/crypto/sha1-spe-glue.c   | 17 +---
 arch/powerpc/crypto/sha1.c| 14 +--
 arch/powerpc/crypto/sha256-spe-glue.c | 39 ++-
 arch/s390/crypto/sha512_s390.c| 32 +++
 arch/sparc/crypto/sha1_glue.c | 14 +--
 arch/sparc/crypto/sha256_glue.c   | 37 ++
 arch/sparc/crypto/sha512_glue.c   | 37 ++
 crypto/sha256_generic.c   | 16 +---
 11 files changed, 41 insertions(+), 260 deletions(-)

-- 
2.32.0



[PATCH 4/5] crypto: sparc/sha - remove duplicate hash init function

2021-12-20 Thread Tianjia Zhang
The sha*_base_init() family of functions already implements the
initialization of the hash context; this commit uses the
sha*_base_init() functions to replace the repeated implementations.

Signed-off-by: Tianjia Zhang 
---
 arch/sparc/crypto/sha1_glue.c   | 14 ++---
 arch/sparc/crypto/sha256_glue.c | 37 +++--
 arch/sparc/crypto/sha512_glue.c | 37 +++--
 3 files changed, 8 insertions(+), 80 deletions(-)

diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 86a654cce5ab..06b7becfcb21 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -26,17 +27,6 @@
 asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
   unsigned int rounds);
 
-static int sha1_sparc64_init(struct shash_desc *desc)
-{
-   struct sha1_state *sctx = shash_desc_ctx(desc);
-
-   *sctx = (struct sha1_state){
-   .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
-   };
-
-   return 0;
-}
-
 static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
  unsigned int len, unsigned int partial)
 {
@@ -128,7 +118,7 @@ static int sha1_sparc64_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg alg = {
.digestsize =   SHA1_DIGEST_SIZE,
-   .init   =   sha1_sparc64_init,
+   .init   =   sha1_base_init,
.update =   sha1_sparc64_update,
.final  =   sha1_sparc64_final,
.export =   sha1_sparc64_export,
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 60ec524cf9ca..285561a1cde5 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -26,38 +27,6 @@
 asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data,
 unsigned int rounds);
 
-static int sha224_sparc64_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-   sctx->state[0] = SHA224_H0;
-   sctx->state[1] = SHA224_H1;
-   sctx->state[2] = SHA224_H2;
-   sctx->state[3] = SHA224_H3;
-   sctx->state[4] = SHA224_H4;
-   sctx->state[5] = SHA224_H5;
-   sctx->state[6] = SHA224_H6;
-   sctx->state[7] = SHA224_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
-static int sha256_sparc64_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-   sctx->state[0] = SHA256_H0;
-   sctx->state[1] = SHA256_H1;
-   sctx->state[2] = SHA256_H2;
-   sctx->state[3] = SHA256_H3;
-   sctx->state[4] = SHA256_H4;
-   sctx->state[5] = SHA256_H5;
-   sctx->state[6] = SHA256_H6;
-   sctx->state[7] = SHA256_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
 static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
unsigned int len, unsigned int partial)
 {
@@ -158,7 +127,7 @@ static int sha256_sparc64_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg sha256_alg = {
.digestsize =   SHA256_DIGEST_SIZE,
-   .init   =   sha256_sparc64_init,
+   .init   =   sha256_base_init,
.update =   sha256_sparc64_update,
.final  =   sha256_sparc64_final,
.export =   sha256_sparc64_export,
@@ -176,7 +145,7 @@ static struct shash_alg sha256_alg = {
 
 static struct shash_alg sha224_alg = {
.digestsize =   SHA224_DIGEST_SIZE,
-   .init   =   sha224_sparc64_init,
+   .init   =   sha224_base_init,
.update =   sha256_sparc64_update,
.final  =   sha224_sparc64_final,
.descsize   =   sizeof(struct sha256_state),
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 273ce21918c1..d66efa4ec59a 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -25,38 +26,6 @@
 asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
 unsigned int rounds);
 
-static int sha512_sparc64_init(struct shash_desc *desc)
-{
-   struct sha512_state *sctx = shash_desc_ctx(desc);
-   sctx->state[0] = SHA512_H0;
-   sctx->state[1] = SHA512_H1;
-   sctx->state[2] = SHA512_H2;
-   sctx->state[3] = SHA512_H3;
-   sctx->state[4] = SHA512_H4;
-   sctx->state[5] = SHA512_H5;
-   sctx->state[6] = S

[PATCH 3/5] crypto: powerpc/sha - remove duplicate hash init function

2021-12-20 Thread Tianjia Zhang
The sha*_base_init() family of functions already implements the
initialization of the hash context; this commit uses the
sha*_base_init() functions to replace the repeated implementations.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/crypto/sha1-spe-glue.c   | 17 ++--
 arch/powerpc/crypto/sha1.c| 14 ++
 arch/powerpc/crypto/sha256-spe-glue.c | 39 +++
 3 files changed, 7 insertions(+), 63 deletions(-)

diff --git a/arch/powerpc/crypto/sha1-spe-glue.c 
b/arch/powerpc/crypto/sha1-spe-glue.c
index 88e8ea73bfa7..9170892a8557 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -55,20 +56,6 @@ static inline void ppc_sha1_clear_context(struct sha1_state 
*sctx)
do { *ptr++ = 0; } while (--count);
 }
 
-static int ppc_spe_sha1_init(struct shash_desc *desc)
-{
-   struct sha1_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA1_H0;
-   sctx->state[1] = SHA1_H1;
-   sctx->state[2] = SHA1_H2;
-   sctx->state[3] = SHA1_H3;
-   sctx->state[4] = SHA1_H4;
-   sctx->count = 0;
-
-   return 0;
-}
-
 static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
 {
@@ -168,7 +155,7 @@ static int ppc_spe_sha1_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg alg = {
.digestsize =   SHA1_DIGEST_SIZE,
-   .init   =   ppc_spe_sha1_init,
+   .init   =   sha1_base_init,
.update =   ppc_spe_sha1_update,
.final  =   ppc_spe_sha1_final,
.export =   ppc_spe_sha1_export,
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index 7a55d790cdb1..f283bbd3f121 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -18,21 +18,11 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 void powerpc_sha_transform(u32 *state, const u8 *src);
 
-static int powerpc_sha1_init(struct shash_desc *desc)
-{
-   struct sha1_state *sctx = shash_desc_ctx(desc);
-
-   *sctx = (struct sha1_state){
-   .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
-   };
-
-   return 0;
-}
-
 static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
   unsigned int len)
 {
@@ -114,7 +104,7 @@ static int powerpc_sha1_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg alg = {
.digestsize =   SHA1_DIGEST_SIZE,
-   .init   =   powerpc_sha1_init,
+   .init   =   sha1_base_init,
.update =   powerpc_sha1_update,
.final  =   powerpc_sha1_final,
.export =   powerpc_sha1_export,
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c 
b/arch/powerpc/crypto/sha256-spe-glue.c
index ffedea7e4bef..2997d13236e0 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -56,40 +57,6 @@ static inline void ppc_sha256_clear_context(struct 
sha256_state *sctx)
do { *ptr++ = 0; } while (--count);
 }
 
-static int ppc_spe_sha256_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA256_H0;
-   sctx->state[1] = SHA256_H1;
-   sctx->state[2] = SHA256_H2;
-   sctx->state[3] = SHA256_H3;
-   sctx->state[4] = SHA256_H4;
-   sctx->state[5] = SHA256_H5;
-   sctx->state[6] = SHA256_H6;
-   sctx->state[7] = SHA256_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
-static int ppc_spe_sha224_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA224_H0;
-   sctx->state[1] = SHA224_H1;
-   sctx->state[2] = SHA224_H2;
-   sctx->state[3] = SHA224_H3;
-   sctx->state[4] = SHA224_H4;
-   sctx->state[5] = SHA224_H5;
-   sctx->state[6] = SHA224_H6;
-   sctx->state[7] = SHA224_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
 static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
 {
@@ -214,7 +181,7 @@ static int ppc_spe_sha256_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg algs[2] = { {
.digestsize =   SHA256_DIGEST_SIZE,
-   .init   =   ppc_spe_sha256_init,
+   .init   =   sha256_base_init,
.update =   ppc_spe_sha256_update,
.final  =   ppc_spe_sha256_final,
.export =   ppc_spe_sha256_export,
@@ -230,7 +197,7 @@ static struct shash_alg algs[2] = { {
}

[PATCH 2/5] crypto: mips/sha - remove duplicate hash init function

2021-12-20 Thread Tianjia Zhang
The sha*_base_init() family of functions already implements the
initialization of the hash context; this commit uses the
sha*_base_init() functions to replace the repeated implementations.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/cavium-octeon/crypto/octeon-sha1.c  | 17 +---
 .../mips/cavium-octeon/crypto/octeon-sha256.c | 39 ++-
 .../mips/cavium-octeon/crypto/octeon-sha512.c | 39 ++-
 3 files changed, 8 insertions(+), 87 deletions(-)

diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c 
b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 30f1d75208a5..37a07b3c4568 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -15,6 +15,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -71,20 +72,6 @@ static void octeon_sha1_transform(const void *_block)
octeon_sha1_start(block[7]);
 }
 
-static int octeon_sha1_init(struct shash_desc *desc)
-{
-   struct sha1_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA1_H0;
-   sctx->state[1] = SHA1_H1;
-   sctx->state[2] = SHA1_H2;
-   sctx->state[3] = SHA1_H3;
-   sctx->state[4] = SHA1_H4;
-   sctx->count = 0;
-
-   return 0;
-}
-
 static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
 unsigned int len)
 {
@@ -200,7 +187,7 @@ static int octeon_sha1_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg octeon_sha1_alg = {
.digestsize =   SHA1_DIGEST_SIZE,
-   .init   =   octeon_sha1_init,
+   .init   =   sha1_base_init,
.update =   octeon_sha1_update,
.final  =   octeon_sha1_final,
.export =   octeon_sha1_export,
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c 
b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 36cb92895d72..435e4a6e7f13 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -16,6 +16,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -63,40 +64,6 @@ static void octeon_sha256_transform(const void *_block)
octeon_sha256_start(block[7]);
 }
 
-static int octeon_sha224_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA224_H0;
-   sctx->state[1] = SHA224_H1;
-   sctx->state[2] = SHA224_H2;
-   sctx->state[3] = SHA224_H3;
-   sctx->state[4] = SHA224_H4;
-   sctx->state[5] = SHA224_H5;
-   sctx->state[6] = SHA224_H6;
-   sctx->state[7] = SHA224_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
-static int octeon_sha256_init(struct shash_desc *desc)
-{
-   struct sha256_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA256_H0;
-   sctx->state[1] = SHA256_H1;
-   sctx->state[2] = SHA256_H2;
-   sctx->state[3] = SHA256_H3;
-   sctx->state[4] = SHA256_H4;
-   sctx->state[5] = SHA256_H5;
-   sctx->state[6] = SHA256_H6;
-   sctx->state[7] = SHA256_H7;
-   sctx->count = 0;
-
-   return 0;
-}
-
 static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
   unsigned int len)
 {
@@ -224,7 +191,7 @@ static int octeon_sha256_import(struct shash_desc *desc, 
const void *in)
 
 static struct shash_alg octeon_sha256_algs[2] = { {
.digestsize =   SHA256_DIGEST_SIZE,
-   .init   =   octeon_sha256_init,
+   .init   =   sha256_base_init,
.update =   octeon_sha256_update,
.final  =   octeon_sha256_final,
.export =   octeon_sha256_export,
@@ -240,7 +207,7 @@ static struct shash_alg octeon_sha256_algs[2] = { {
}
 }, {
.digestsize =   SHA224_DIGEST_SIZE,
-   .init   =   octeon_sha224_init,
+   .init   =   sha224_base_init,
.update =   octeon_sha256_update,
.final  =   octeon_sha224_final,
.descsize   =   sizeof(struct sha256_state),
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c 
b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index 359f039820d8..2dee9354e33f 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -15,6 +15,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -74,40 +75,6 @@ static void octeon_sha512_transform(const void *_block)
octeon_sha512_start(block[15]);
 }
 
-static int octeon_sha512_init(struct shash_desc *desc)
-{
-   struct sha512_state *sctx = shash_desc_ctx(desc);
-
-   sctx->state[0] = SHA512_H0;
-   sctx->state[1] = SHA512_H1;
-   sctx->state[2] = SHA512_H2;
-   sctx->state[3]

Re: [PATCH v4 5/7] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-07-12 Thread Tianjia Zhang




On 2020/5/26 13:59, Paul Mackerras wrote:

On Mon, Apr 27, 2020 at 12:35:12PM +0800, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.


Some of these changes don't look completely correct to me, see below.
If you're expecting these patches to go through my tree, I can fix up
the patch and commit it (with you as author), noting the changes I
made in the commit message.  Do you want me to do that?



I would be very glad for you to do so. Although I have submitted a new 
version of the patch, I would still prefer that you fix it up and commit it.


Thanks and best,
Tianjia


diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..0eff749d8027 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
   /
  
  /* Registers:

- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
   */
  _GLOBAL(__kvmppc_vcpu_run)
  
@@ -68,8 +67,8 @@ kvm_start_entry:

/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
  
-	/* Save r3 (kvm_run) and r4 (vcpu) */

-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
  
  	/* Save non-volatile registers (r14 - r31) */

SAVE_NVGPRS(r1)
@@ -82,11 +81,11 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
  
  	/* Load non-volatile guest state from the vcpu */

-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
  
  kvm_start_lightweight:

/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
+   mr  r4, r3


This mr doesn't seem necessary.


bl  FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)


This should be loading r4 from GPR3(r1), not GPR4(r1) - which is what
REST_GPR(4, r1) will do.

Then, in the file but not in the patch context, there is this line:

PPC_LL  r3, GPR4(r1)/* vcpu pointer */

where once again GPR4 needs to be GPR3.


@@ -191,10 +190,10 @@ after_sprg3_load:
PPC_STL r31, VCPU_GPR(R31)(r7)
  
  	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */


The comment should be modified to say "2nd" instead of "3rd",
otherwise it is confusing.

The rest of the patch looks OK.

Paul.



Re: [PATCH v6 0/5] clean up redundant 'kvm_run' parameters

2020-07-10 Thread Tianjia Zhang

Hi Paolo,

Any opinion on this series patches? Can I help with this patchset ?

Thanks and best,
Tianjia

On 2020/6/23 21:14, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v6 changes:
   Rearrange patch sets, only keep the unmerged patch.
   rebase on mainline.

v5 change:
   ppc: fix for review.

v4 change:
   mips: fixes two errors in entry.c.

v3 change:
   Keep the existing `vcpu->run` in the function body unchanged.

v2 change:
   s390 retains the original variable name and minimizes modification.

Tianjia Zhang (5):
   KVM: s390: clean up redundant 'kvm_run' parameters
   KVM: arm64: clean up redundant 'kvm_run' parameters
   KVM: PPC: clean up redundant kvm_run parameters in assembly
   KVM: MIPS: clean up redundant 'kvm_run' parameters
   KVM: MIPS: clean up redundant kvm_run parameters in assembly

  arch/arm64/include/asm/kvm_coproc.h   |  12 +--
  arch/arm64/include/asm/kvm_host.h |  11 +--
  arch/arm64/include/asm/kvm_mmu.h  |   2 +-
  arch/arm64/kvm/arm.c  |   6 +-
  arch/arm64/kvm/handle_exit.c  |  36 
  arch/arm64/kvm/mmio.c |  11 +--
  arch/arm64/kvm/mmu.c  |   5 +-
  arch/arm64/kvm/sys_regs.c |  13 ++-
  arch/mips/include/asm/kvm_host.h  |  32 ++--
  arch/mips/kvm/emulate.c   |  59 +
  arch/mips/kvm/entry.c |  21 ++---
  arch/mips/kvm/mips.c  |  14 ++--
  arch/mips/kvm/trap_emul.c | 114 +++---
  arch/mips/kvm/vz.c|  26 +++---
  arch/powerpc/include/asm/kvm_ppc.h|   2 +-
  arch/powerpc/kvm/book3s_interrupts.S  |  22 +++--
  arch/powerpc/kvm/book3s_pr.c  |   9 +-
  arch/powerpc/kvm/booke.c  |   9 +-
  arch/powerpc/kvm/booke_interrupts.S   |   9 +-
  arch/powerpc/kvm/bookehv_interrupts.S |  10 +--
  arch/s390/kvm/kvm-s390.c  |  23 --
  21 files changed, 188 insertions(+), 258 deletions(-)



Re: [PATCH v6 1/5] KVM: s390: clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang




On 2020/6/23 23:31, Christian Borntraeger wrote:



On 23.06.20 15:14, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
Reviewed-by: Vitaly Kuznetsov 
---
  arch/s390/kvm/kvm-s390.c | 23 +++
  1 file changed, 15 insertions(+), 8 deletions(-)


Tinajia,

I have trouble seeing value in this particular patch. We add LOCs
without providing any noticable benefit. All other patches in this series at
least reduce the amount of code. So I would defer this to Paolo if he prefers
to have this way across all architectures.


Yes, this is an optimization applied across all architectures. Some of 
the per-architecture optimizations have already been merged into the 
mainline. I think it is necessary to apply this optimization uniformly; 
this is also what Paolo intended.

You can refer to the email of the previous version:
https://lkml.org/lkml/2020/4/27/16

Thanks,
Tianjia


[PATCH v6 3/5] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 22 ++
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 28 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..a3674f6b8d3d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,14 +81,13 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
bl  FUNC(kvmppc_copy_to_svcpu)
nop
-   REST_GPR(4, r1)
+   REST_GPR(3, r1)
 
 #ifdef CONFIG_PPC_BOOK3S_64
/* Get the dcbz32 flag */
@@ -146,7 +144,7 @@ after_sprg3_load:
 *
 */
 
-   PPC_LL  r3, GPR4(r1)/* vcpu pointer */
+   PPC_LL  r3, GPR3(r1)/* vcpu pointer */
 
/*
 * kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -190,11 +188,11 @@ after_sprg3_load:
PPC_STL r30, VCPU_GPR(R30)(r7)
PPC_STL r31, VCPU_GPR(R31)(r7)
 
-   /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   /* Pass the exit number as 2nd argument to kvmppc_handle_exit */
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c0d62a917e20..3e1c9f08e302 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -731,12 +731,11 @@ int kvmppc_core_che

[PATCH v6 0/5] clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v6 changes:
  Rearrange patch sets, only keep the unmerged patch.
  rebase on mainline.

v5 change:
  ppc: fix for review.

v4 change:
  mips: fixes two errors in entry.c.

v3 change:
  Keep the existing `vcpu->run` in the function body unchanged.

v2 change:
  s390 retains the original variable name and minimizes modification.

Tianjia Zhang (5):
  KVM: s390: clean up redundant 'kvm_run' parameters
  KVM: arm64: clean up redundant 'kvm_run' parameters
  KVM: PPC: clean up redundant kvm_run parameters in assembly
  KVM: MIPS: clean up redundant 'kvm_run' parameters
  KVM: MIPS: clean up redundant kvm_run parameters in assembly

 arch/arm64/include/asm/kvm_coproc.h   |  12 +--
 arch/arm64/include/asm/kvm_host.h |  11 +--
 arch/arm64/include/asm/kvm_mmu.h  |   2 +-
 arch/arm64/kvm/arm.c  |   6 +-
 arch/arm64/kvm/handle_exit.c  |  36 
 arch/arm64/kvm/mmio.c |  11 +--
 arch/arm64/kvm/mmu.c  |   5 +-
 arch/arm64/kvm/sys_regs.c |  13 ++-
 arch/mips/include/asm/kvm_host.h  |  32 ++--
 arch/mips/kvm/emulate.c   |  59 +
 arch/mips/kvm/entry.c |  21 ++---
 arch/mips/kvm/mips.c  |  14 ++--
 arch/mips/kvm/trap_emul.c | 114 +++---
 arch/mips/kvm/vz.c|  26 +++---
 arch/powerpc/include/asm/kvm_ppc.h|   2 +-
 arch/powerpc/kvm/book3s_interrupts.S  |  22 +++--
 arch/powerpc/kvm/book3s_pr.c  |   9 +-
 arch/powerpc/kvm/booke.c  |   9 +-
 arch/powerpc/kvm/booke_interrupts.S   |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S |  10 +--
 arch/s390/kvm/kvm-s390.c  |  23 --
 21 files changed, 188 insertions(+), 258 deletions(-)

-- 
2.17.1



[PATCH v6 1/5] KVM: s390: clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
Reviewed-by: Vitaly Kuznetsov 
---
 arch/s390/kvm/kvm-s390.c | 23 +++
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d47c19718615..f5f96dc33712 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4175,8 +4175,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
 
@@ -4242,8 +4243,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
/* SIE will load etoken directly from SDNX and therefore kvm_run */
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
@@ -4272,7 +4275,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
-   sync_regs_fmt2(vcpu, kvm_run);
+   sync_regs_fmt2(vcpu);
} else {
/*
 * In several places we have to modify our internal view to
@@ -4291,8 +4294,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
@@ -4312,8 +4317,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
@@ -4332,7 +4339,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
-   store_regs_fmt2(vcpu, kvm_run);
+   store_regs_fmt2(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -4370,7 +4377,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
 
-   sync_regs(vcpu, kvm_run);
+   sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu);
 
might_fault();
@@ -4392,7 +4399,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
 
disable_cpu_timer_accounting(vcpu);
-   store_regs(vcpu, kvm_run);
+   store_regs(vcpu);
 
kvm_sigset_deactivate(vcpu);
 
-- 
2.17.1



[PATCH v6 5/5] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
Reviewed-by: Huacai Chen 
---
 arch/mips/include/asm/kvm_host.h |  4 ++--
 arch/mips/kvm/entry.c| 21 -
 arch/mips/kvm/mips.c |  3 ++-
 arch/mips/kvm/trap_emul.c|  2 +-
 arch/mips/kvm/vz.c   |  2 +-
 5 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 157fc876feca..01efa635fa73 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -352,7 +352,7 @@ struct kvm_mmu_memory_cache {
 #define KVM_MIPS_GUEST_TLB_SIZE64
 struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -863,7 +863,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index fd716942e302..832475bf2055 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -205,7 +205,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
  * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -218,8 +218,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
 
/*
-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
 
/* k0/k1 not being used in host kernel context */
@@ -238,10 +237,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
	kvm_mips_build_save_scratch(&p, V1, K1);
 
/* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Offset into vcpu->arch */
-   UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
 
/*
 * Save the host stack to VCPU, used for exception processing
@@ -645,10 +644,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
 
/* Restore vcpu */
-   UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/*
 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -810,7 +806,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
	uasm_i_move(&p, A0, S0);
-   uasm_i_move(&p, A1, S1);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
@@ -852,7 +847,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
 * guest, reload k1
 */
 
-   uasm_i_move(&p, K1, S1);
+   uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
/*
@@ -886,8 +881,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 {
u32 *p = addr;
 
-   /* Put the saved pointer to vcpu (s1) back into the scratch register */
-   UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+   /* Put the saved pointer to vcpu (s0) back into the scratch register */
+   UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f5ba393472e3..21bfbf414d2c 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1195,8 +1195,9 @@ static void kvm_mips_set_c0_status(void)
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run

[PATCH v6 2/5] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
Reviewed-by: Vitaly Kuznetsov 
---
 arch/arm64/include/asm/kvm_coproc.h | 12 +-
 arch/arm64/include/asm/kvm_host.h   | 11 -
 arch/arm64/include/asm/kvm_mmu.h|  2 +-
 arch/arm64/kvm/arm.c|  6 ++---
 arch/arm64/kvm/handle_exit.c| 36 ++---
 arch/arm64/kvm/mmio.c   | 11 +
 arch/arm64/kvm/mmu.c|  5 ++--
 arch/arm64/kvm/sys_regs.c   | 13 +--
 8 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..454373704b8a 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
   struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..5c9db5767ba4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -481,18 +481,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-   int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-  int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b12bfc1f051a..40be8f6c7351 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -139,7 +139,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 90cb90561446..985ede7bcca0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -658,7 +658,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
 
if (run->exit_reason == KVM_EXIT_MMIO) {
-   ret = kvm_handle_mmio_return(vcpu, run);
+   ret = kvm_handle_mmio_return(vcpu);
if (ret)
return ret;
}
@@ -810,11 +810,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), 
*vcpu_pc(vcpu));
 
/* Exit types that need handling before we can be preempted */
-   handle_exit_early(vcpu, run, ret);
+   handle_exit_early(vcpu, ret);
 
preempt_enable();
 
-   ret = handle_exit(vcpu, run, ret);
+   ret = handle_exit(vcpu, ret);
}
 
/* Tell userspace about in-kernel device output levels */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5a02d4c90

[PATCH v6 4/5] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
Reviewed-by: Huacai Chen 
---
 arch/mips/include/asm/kvm_host.h |  28 +---
 arch/mips/kvm/emulate.c  |  59 ++--
 arch/mips/kvm/mips.c |  11 ++-
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 +++
 5 files changed, 87 insertions(+), 151 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 363e7a89d173..157fc876feca 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -854,8 +854,8 @@ struct kvm_mips_callbacks {
   const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-   void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
+   void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -910,7 +910,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct 
kvm_vcpu *vcpu,
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu,
 bool write_fault);
 
@@ -1021,83 +1020,67 @@ static inline bool kvm_is_ifetch_fault(struct 
kvm_vcpu_arch *vcpu)
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
   u32 *opc,
-  struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause

Re: [PATCH v4 0/7] clean up redundant 'kvm_run' parameters

2020-06-23 Thread Tianjia Zhang




On 2020/6/23 17:42, Paolo Bonzini wrote:

On 27/04/20 06:35, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.


Tianjia, can you please refresh the patches so that each architecture
maintainer can pick them up?  Thanks very much for this work!

Paolo



No problem, this is what I should do.
After I update, do I submit separately for each architecture or submit 
them together in a patchset?


Thanks,
Tianjia


Re: [PATCH v4 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-06-16 Thread Tianjia Zhang




On 2020/5/29 17:48, Paolo Bonzini wrote:

On 27/05/20 08:24, Tianjia Zhang wrote:





Hi Huacai,

These two patches(6/7 and 7/7) should be merged into the tree of the
mips architecture separately. At present, there seems to be no good way
to merge the whole architecture patchs.

For this series of patches, some architectures have been merged, some
need to update the patch.


Hi Tianjia, I will take care of this during the merge window.

Thanks,

Paolo



Hi Paolo,

The following individual patch is the v5 version of 5/7 in this group of 
patches.


https://lkml.org/lkml/2020/5/28/106
([v5] KVM: PPC: clean up redundant kvm_run parameters in assembly)

Thanks and best,
Tianjia


[PATCH v5] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-05-28 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 22 ++
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 28 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..a3674f6b8d3d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,14 +81,13 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
bl  FUNC(kvmppc_copy_to_svcpu)
nop
-   REST_GPR(4, r1)
+   REST_GPR(3, r1)
 
 #ifdef CONFIG_PPC_BOOK3S_64
/* Get the dcbz32 flag */
@@ -146,7 +144,7 @@ after_sprg3_load:
 *
 */
 
-   PPC_LL  r3, GPR4(r1)/* vcpu pointer */
+   PPC_LL  r3, GPR3(r1)/* vcpu pointer */
 
/*
 * kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -190,11 +188,11 @@ after_sprg3_load:
PPC_STL r30, VCPU_GPR(R30)(r7)
PPC_STL r31, VCPU_GPR(R31)(r7)
 
-   /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   /* Pass the exit number as 2nd argument to kvmppc_handle_exit */
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c0d62a917e20..3e1c9f08e302 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -731,12 +731,11 @@ int kvmppc_core_che

Re: [PATCH v4 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-05-27 Thread Tianjia Zhang




On 2020/4/27 13:40, Huacai Chen wrote:

Reviewed-by: Huacai Chen 

On Mon, Apr 27, 2020 at 12:35 PM Tianjia Zhang
 wrote:


In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/mips/include/asm/kvm_host.h |  28 +---
  arch/mips/kvm/emulate.c  |  59 ++--
  arch/mips/kvm/mips.c |  11 ++-
  arch/mips/kvm/trap_emul.c| 114 ++-
  arch/mips/kvm/vz.c   |  26 +++
  5 files changed, 87 insertions(+), 151 deletions(-)



Hi Huacai,

These two patches(6/7 and 7/7) should be merged into the tree of the 
mips architecture separately. At present, there seems to be no good way 
to merge the whole architecture patchs.


For this series of patches, some architectures have been merged, some 
need to update the patch.


Thanks and best,
Tianjia


Re: [PATCH v4 3/7] KVM: PPC: Remove redundant kvm_run from vcpu_arch

2020-05-26 Thread Tianjia Zhang




On 2020/5/27 12:20, Paul Mackerras wrote:

On Mon, Apr 27, 2020 at 12:35:10PM +0800, Tianjia Zhang wrote:

The 'kvm_run' field already exists in the 'vcpu' structure, which
is the same structure as the 'kvm_run' in the 'vcpu_arch' and
should be deleted.

Signed-off-by: Tianjia Zhang 


Thanks, patches 3 and 4 of this series applied to my kvm-ppc-next branch.

Paul.



Thanks for your suggestion, for 5/7, I will submit a new version patch.

Thanks,
Tianjia


Re: [PATCH v4 2/7] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-05-07 Thread Tianjia Zhang




On 2020/5/5 16:39, Marc Zyngier wrote:

Hi Tianjia,

On 2020-04-27 05:35, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 


On the face of it, this looks OK, but I haven't tried to run the
resulting kernel. I'm not opposed to taking this patch *if* there
is an agreement across architectures to take the series (I value
consistency over the janitorial exercise).

Another thing is that this is going to conflict with the set of
patches that move the KVM/arm code back where it belongs (arch/arm64/kvm),
so I'd probably cherry-pick that one directly.

Thanks,

     M.



Do I need to submit this set of patches separately for each 
architecture? Could it be merged at once, if necessary, I will

resubmit based on the latest mainline.

Thanks,
Tianjia


Re: [PATCH v4 0/7] clean up redundant 'kvm_run' parameters

2020-05-04 Thread Tianjia Zhang

Paolo Bonzini, any opinion on this?

Thanks and best,
Tianjia

On 2020/4/27 12:35, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v4 change:
   mips: fixes two errors in entry.c.

v3 change:
   Keep the existing `vcpu->run` in the function body unchanged.

v2 change:
   s390 retains the original variable name and minimizes modification.

Tianjia Zhang (7):
   KVM: s390: clean up redundant 'kvm_run' parameters
   KVM: arm64: clean up redundant 'kvm_run' parameters
   KVM: PPC: Remove redundant kvm_run from vcpu_arch
   KVM: PPC: clean up redundant 'kvm_run' parameters
   KVM: PPC: clean up redundant kvm_run parameters in assembly
   KVM: MIPS: clean up redundant 'kvm_run' parameters
   KVM: MIPS: clean up redundant kvm_run parameters in assembly

  arch/arm64/include/asm/kvm_coproc.h  |  12 +--
  arch/arm64/include/asm/kvm_host.h|  11 +--
  arch/arm64/include/asm/kvm_mmu.h |   2 +-
  arch/arm64/kvm/handle_exit.c |  36 +++
  arch/arm64/kvm/sys_regs.c|  13 ++-
  arch/mips/include/asm/kvm_host.h |  32 +--
  arch/mips/kvm/emulate.c  |  59 
  arch/mips/kvm/entry.c|  21 ++---
  arch/mips/kvm/mips.c |  14 +--
  arch/mips/kvm/trap_emul.c| 114 ++-
  arch/mips/kvm/vz.c   |  26 ++
  arch/powerpc/include/asm/kvm_book3s.h|  16 ++--
  arch/powerpc/include/asm/kvm_host.h  |   1 -
  arch/powerpc/include/asm/kvm_ppc.h   |  27 +++---
  arch/powerpc/kvm/book3s.c|   4 +-
  arch/powerpc/kvm/book3s.h|   2 +-
  arch/powerpc/kvm/book3s_64_mmu_hv.c  |  12 +--
  arch/powerpc/kvm/book3s_64_mmu_radix.c   |   4 +-
  arch/powerpc/kvm/book3s_emulate.c|  10 +-
  arch/powerpc/kvm/book3s_hv.c |  64 ++---
  arch/powerpc/kvm/book3s_hv_nested.c  |  12 +--
  arch/powerpc/kvm/book3s_interrupts.S |  17 ++--
  arch/powerpc/kvm/book3s_paired_singles.c |  72 +++---
  arch/powerpc/kvm/book3s_pr.c |  33 ---
  arch/powerpc/kvm/booke.c |  39 
  arch/powerpc/kvm/booke.h |   8 +-
  arch/powerpc/kvm/booke_emulate.c |   2 +-
  arch/powerpc/kvm/booke_interrupts.S  |   9 +-
  arch/powerpc/kvm/bookehv_interrupts.S|  10 +-
  arch/powerpc/kvm/e500_emulate.c  |  15 ++-
  arch/powerpc/kvm/emulate.c   |  10 +-
  arch/powerpc/kvm/emulate_loadstore.c |  32 +++
  arch/powerpc/kvm/powerpc.c   |  72 +++---
  arch/powerpc/kvm/trace_hv.h  |   6 +-
  arch/s390/kvm/kvm-s390.c |  23 +++--
  virt/kvm/arm/arm.c   |   6 +-
  virt/kvm/arm/mmio.c  |  11 ++-
  virt/kvm/arm/mmu.c   |   5 +-
  38 files changed, 392 insertions(+), 470 deletions(-)



Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-28 Thread Tianjia Zhang




On 2020/4/26 20:59, Thomas Huth wrote:

On 23/04/2020 13.00, Christian Borntraeger wrote:



On 23.04.20 12:58, Tianjia Zhang wrote:



On 2020/4/23 18:39, Cornelia Huck wrote:

On Thu, 23 Apr 2020 11:01:43 +0800
Tianjia Zhang  wrote:


On 2020/4/23 0:04, Cornelia Huck wrote:

On Wed, 22 Apr 2020 17:58:04 +0200
Christian Borntraeger  wrote:
   

On 22.04.20 15:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:
  

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?
  

parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
    arch/s390/kvm/kvm-s390.c | 37 ++---
    1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
    return rc;
    }
    -static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
    {
+    struct kvm_run *kvm_run = vcpu->run;
    struct runtime_instr_cb *riccb;
    struct gs_cb *gscb;
    @@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
    }
    if (vcpu->arch.gs_enabled) {
    current->thread.gs_cb = (struct gs_cb *)
-    &vcpu->run->s.regs.gscb;
+    &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?


Agreed. It feels kind of random. Maybe just do the first line (move kvm_run 
from the
function parameter list into the variable declaration)? Not sure if this is 
better.
   


There's more in this patch that I cut... but I think just moving
kvm_run from the parameter list would be much less disruptive.



I think there are two kinds of code(`vcpu->run->` and `kvm_run->`), but
there will be more disruptive, not less.


I just fail to see the benefit; sure, kvm_run-> is convenient, but the
current code is just fine, and any rework should be balanced against
the cost (e.g. cluttering git annotate).



cluttering git annotate ? Does it mean Fix <commit> ("comment"). Is it possible to 
solve this problem by splitting this patch?


No its about breaking git blame (and bugfix backports) for just a cosmetic 
improvement.


It could be slightly more than a cosmetic improvement (depending on the
smartness of the compiler): vcpu->run-> are two dereferences, while
kvm_run-> is only one dereference. So it could be slightly more compact
and faster code.

  Thomas



If the compiler is smart enough, this place can be automatically 
optimized, but we can't just rely on the compiler, if not? This requires 
a trade-off between code cleanliness readability and breaking git blame.
In addition, I have removed the changes here and sent a v4 patch. Please 
also help review it.


Thanks and best,
Tianjia


[PATCH v4 4/7] KVM: PPC: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_book3s.h| 16 +++---
 arch/powerpc/include/asm/kvm_ppc.h   | 27 +
 arch/powerpc/kvm/book3s.c|  4 +-
 arch/powerpc/kvm/book3s.h|  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 12 ++--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |  4 +-
 arch/powerpc/kvm/book3s_emulate.c| 10 ++--
 arch/powerpc/kvm/book3s_hv.c | 60 ++--
 arch/powerpc/kvm/book3s_hv_nested.c  | 11 ++--
 arch/powerpc/kvm/book3s_paired_singles.c | 72 
 arch/powerpc/kvm/book3s_pr.c | 30 +-
 arch/powerpc/kvm/booke.c | 36 ++--
 arch/powerpc/kvm/booke.h |  8 +--
 arch/powerpc/kvm/booke_emulate.c |  2 +-
 arch/powerpc/kvm/e500_emulate.c  | 15 +++--
 arch/powerpc/kvm/emulate.c   | 10 ++--
 arch/powerpc/kvm/emulate_loadstore.c | 32 +--
 arch/powerpc/kvm/powerpc.c   | 72 
 arch/powerpc/kvm/trace_hv.h  |  6 +-
 19 files changed, 212 insertions(+), 217 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 506e4df2d730..66dbb1f85d59 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, 
struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong 
seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu, unsigned long addr,
-   unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+   unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache 
*pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu 
*vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu 
*vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 
dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
   struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28 +58,28

[PATCH v4 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  4 ++--
 arch/mips/kvm/entry.c| 21 -
 arch/mips/kvm/mips.c |  3 ++-
 arch/mips/kvm/trap_emul.c|  2 +-
 arch/mips/kvm/vz.c   |  2 +-
 5 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
 #define KVM_MIPS_GUEST_TLB_SIZE64
 struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..1083f35361ea 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
  * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
 
/*
-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
 
/* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
	kvm_mips_build_save_scratch(&p, V1, K1);
 
/* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Offset into vcpu->arch */
-   UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
 
/*
 * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
 
/* Restore vcpu */
-   UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/*
 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
	uasm_i_move(&p, A0, S0);
-   uasm_i_move(&p, A1, S1);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
@@ -835,7 +830,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
 * guest, reload k1
 */
 
-   uasm_i_move(&p, K1, S1);
+   uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
/*
@@ -869,8 +864,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 {
u32 *p = addr;
 
-   /* Put the saved pointer to vcpu (s1) back into the scratch register */
-   UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+   /* Put the saved pointer to vcpu (s0) back into the scratch register */
+   UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
  

[PATCH v4 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  28 +---
 arch/mips/kvm/emulate.c  |  59 ++--
 arch/mips/kvm/mips.c |  11 ++-
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 +++
 5 files changed, 87 insertions(+), 151 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2c343c346b79..971439297cea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -812,8 +812,8 @@ struct kvm_mips_callbacks {
   const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-   void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
+   void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -868,7 +868,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct 
kvm_vcpu *vcpu,
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu,
 bool write_fault);
 
@@ -975,83 +974,67 @@ static inline bool kvm_is_ifetch_fault(struct 
kvm_vcpu_arch *vcpu)
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
   u32 *opc,
-  struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause

[PATCH v4 5/7] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 17 -
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..0eff749d8027 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,11 +81,11 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
+   mr  r4, r3
bl  FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -191,10 +190,10 @@ after_sprg3_load:
PPC_STL r31, VCPU_GPR(R31)(r7)
 
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 26b3f5900b72..942039aae598 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -732,12 +732,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
 
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
 
@@ -779,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_vcp

[PATCH v4 2/7] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/arm64/include/asm/kvm_coproc.h | 12 +-
 arch/arm64/include/asm/kvm_host.h   | 11 -
 arch/arm64/include/asm/kvm_mmu.h|  2 +-
 arch/arm64/kvm/handle_exit.c| 36 ++---
 arch/arm64/kvm/sys_regs.c   | 13 +--
 virt/kvm/arm/arm.c  |  6 ++---
 virt/kvm/arm/mmio.c | 11 +
 virt/kvm/arm/mmu.c  |  5 ++--
 8 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..454373704b8a 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
   struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..3fab32e4948c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -481,18 +481,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-   int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-  int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d6b895..2ec7b9bb25d3 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -159,7 +159,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index aacfc55de44c..ec3a66642ea5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -25,7 +25,7 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -33,7 +33,7 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, 
u32 esr)
kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
/*
 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
  * Guest access to FP/ASIMD regis

[PATCH v4 3/7] KVM: PPC: Remove redundant kvm_run from vcpu_arch

2020-04-26 Thread Tianjia Zhang
The 'kvm_run' field already exists in the 'vcpu' structure, which
is the same structure as the 'kvm_run' in the 'vcpu_arch' and
should be deleted.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/kvm/book3s_hv.c| 6 ++
 arch/powerpc/kvm/book3s_hv_nested.c | 3 +--
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 1dc63101ffe1..2745ff8faa01 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
 
struct task_struct *run_task;
-   struct kvm_run *kvm_run;
 
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 93493f0cbfe8..413ea2dcb10c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2934,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, 
bool is_master)
 
ret = RESUME_GUEST;
if (vcpu->arch.trap)
-   ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+   ret = kvmppc_handle_exit_hv(vcpu->run, vcpu,
vcpu->arch.run_task);
 
vcpu->arch.ret = ret;
@@ -3920,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
	spin_lock(&vc->lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3973,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
-   v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+   v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
	wake_up(&v->arch.cpu_run);
}
@@ -4049,7 +4048,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c 
b/arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5be76f6..5a3987f3ebf3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
-   r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+   r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
 
/* save L2 state for return */
-- 
2.17.1



[PATCH v4 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/s390/kvm/kvm-s390.c | 23 +++
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..c0d94eaa00d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
 
@@ -4243,8 +4244,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
/* SIE will load etoken directly from SDNX and therefore kvm_run */
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
@@ -4273,7 +4276,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
-   sync_regs_fmt2(vcpu, kvm_run);
+   sync_regs_fmt2(vcpu);
} else {
/*
 * In several places we have to modify our internal view to
@@ -4292,8 +4295,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
@@ -4313,8 +4318,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
@@ -4333,7 +4340,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
-   store_regs_fmt2(vcpu, kvm_run);
+   store_regs_fmt2(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -4371,7 +4378,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
 
-   sync_regs(vcpu, kvm_run);
+   sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu);
 
might_fault();
@@ -4393,7 +4400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
 
disable_cpu_timer_accounting(vcpu);
-   store_regs(vcpu, kvm_run);
+   store_regs(vcpu);
 
kvm_sigset_deactivate(vcpu);
 
-- 
2.17.1



[PATCH v4 0/7] clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v4 change:
  mips: fixes two errors in entry.c.

v3 change:
  Keep the existing `vcpu->run` in the function body unchanged.

v2 change:
  s390 retains the original variable name and minimizes modification.

Tianjia Zhang (7):
  KVM: s390: clean up redundant 'kvm_run' parameters
  KVM: arm64: clean up redundant 'kvm_run' parameters
  KVM: PPC: Remove redundant kvm_run from vcpu_arch
  KVM: PPC: clean up redundant 'kvm_run' parameters
  KVM: PPC: clean up redundant kvm_run parameters in assembly
  KVM: MIPS: clean up redundant 'kvm_run' parameters
  KVM: MIPS: clean up redundant kvm_run parameters in assembly

 arch/arm64/include/asm/kvm_coproc.h  |  12 +--
 arch/arm64/include/asm/kvm_host.h|  11 +--
 arch/arm64/include/asm/kvm_mmu.h |   2 +-
 arch/arm64/kvm/handle_exit.c |  36 +++
 arch/arm64/kvm/sys_regs.c|  13 ++-
 arch/mips/include/asm/kvm_host.h |  32 +--
 arch/mips/kvm/emulate.c  |  59 
 arch/mips/kvm/entry.c|  21 ++---
 arch/mips/kvm/mips.c |  14 +--
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 ++
 arch/powerpc/include/asm/kvm_book3s.h|  16 ++--
 arch/powerpc/include/asm/kvm_host.h  |   1 -
 arch/powerpc/include/asm/kvm_ppc.h   |  27 +++---
 arch/powerpc/kvm/book3s.c|   4 +-
 arch/powerpc/kvm/book3s.h|   2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  12 +--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |   4 +-
 arch/powerpc/kvm/book3s_emulate.c|  10 +-
 arch/powerpc/kvm/book3s_hv.c |  64 ++---
 arch/powerpc/kvm/book3s_hv_nested.c  |  12 +--
 arch/powerpc/kvm/book3s_interrupts.S |  17 ++--
 arch/powerpc/kvm/book3s_paired_singles.c |  72 +++---
 arch/powerpc/kvm/book3s_pr.c |  33 ---
 arch/powerpc/kvm/booke.c |  39 
 arch/powerpc/kvm/booke.h |   8 +-
 arch/powerpc/kvm/booke_emulate.c |   2 +-
 arch/powerpc/kvm/booke_interrupts.S  |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S|  10 +-
 arch/powerpc/kvm/e500_emulate.c  |  15 ++-
 arch/powerpc/kvm/emulate.c   |  10 +-
 arch/powerpc/kvm/emulate_loadstore.c |  32 +++
 arch/powerpc/kvm/powerpc.c   |  72 +++---
 arch/powerpc/kvm/trace_hv.h  |   6 +-
 arch/s390/kvm/kvm-s390.c |  23 +++--
 virt/kvm/arm/arm.c   |   6 +-
 virt/kvm/arm/mmio.c  |  11 ++-
 virt/kvm/arm/mmu.c   |   5 +-
 38 files changed, 392 insertions(+), 470 deletions(-)

-- 
2.17.1



Re: [PATCH v3 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-26 Thread Tianjia Zhang




On 2020/4/27 11:51, Huacai Chen wrote:

Hi, Tianjia,

On Sun, Apr 26, 2020 at 8:40 PM Tianjia Zhang
 wrote:


In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/mips/include/asm/kvm_host.h |  4 ++--
  arch/mips/kvm/entry.c| 15 +--
  arch/mips/kvm/mips.c |  3 ++-
  arch/mips/kvm/trap_emul.c|  2 +-
  arch/mips/kvm/vz.c   |  2 +-
  5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
  #define KVM_MIPS_GUEST_TLB_SIZE64
  struct kvm_vcpu_arch {
 void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);

 /* Host registers preserved across guest mode execution */
 unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
  /* Debug: dump vcpu state */
  int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

  /* Building of entry/exception code */
  int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
   * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
   * conforms to the following prototype:
   *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
   *
   * The exit from the guest and return to the caller is handled by the code
   * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
 unsigned int i;

 /*
-* A0: run
-* A1: vcpu
+* A0: vcpu
  */

 /* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
 kvm_mips_build_save_scratch(, V1, K1);

 /* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(, A0, scratch_vcpu[0], scratch_vcpu[1]);

 /* Offset into vcpu->arch */
-   UASM_i_ADDIU(, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(, K1, A0, offsetof(struct kvm_vcpu, arch));

 /*
  * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
 /* Now that context has been saved, we can use other registers */

 /* Restore vcpu */
-   UASM_i_MFC0(, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(, S0, scratch_vcpu[0], scratch_vcpu[1]);

 /*
  * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
  * with this in the kernel
  */
 uasm_i_move(, A0, S0);
-   uasm_i_move(, A1, S1);
 UASM_i_LA(, T9, (unsigned long)kvm_mips_handle_exit);
 uasm_i_jalr(, RA, T9);
  UASM_i_ADDIU(, SP, SP, -CALLFRAME_SIZ);

I think uasm_i_move(, K1, S1) in kvm_mips_build_ret_from_exit() and
UASM_i_MTC0(, S1, scratch_vcpu[0], scratch_vcpu[1]) in
kvm_mips_build_ret_to_guest() should also be changed.



Your statement is correct, this is my omission, I will fix it in the 
next version of the patch, thanks for your review.


Thanks and best,
Tianjia


diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
  /*
   * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | 
RESUME_FLAG_NV)
   */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *run = vcpu->run;
 u32 cause = vcpu->arch.host_cp0_cause;
 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index d822f3aee3dc..04c864cc356a 100644
--- a/arch/mips/kvm/

[PATCH v3 4/7] KVM: PPC: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_book3s.h| 16 +++---
 arch/powerpc/include/asm/kvm_ppc.h   | 27 +
 arch/powerpc/kvm/book3s.c|  4 +-
 arch/powerpc/kvm/book3s.h|  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 12 ++--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |  4 +-
 arch/powerpc/kvm/book3s_emulate.c| 10 ++--
 arch/powerpc/kvm/book3s_hv.c | 60 ++--
 arch/powerpc/kvm/book3s_hv_nested.c  | 11 ++--
 arch/powerpc/kvm/book3s_paired_singles.c | 72 
 arch/powerpc/kvm/book3s_pr.c | 30 +-
 arch/powerpc/kvm/booke.c | 36 ++--
 arch/powerpc/kvm/booke.h |  8 +--
 arch/powerpc/kvm/booke_emulate.c |  2 +-
 arch/powerpc/kvm/e500_emulate.c  | 15 +++--
 arch/powerpc/kvm/emulate.c   | 10 ++--
 arch/powerpc/kvm/emulate_loadstore.c | 32 +--
 arch/powerpc/kvm/powerpc.c   | 72 
 arch/powerpc/kvm/trace_hv.h  |  6 +-
 19 files changed, 212 insertions(+), 217 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 506e4df2d730..66dbb1f85d59 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, 
struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong 
seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu, unsigned long addr,
-   unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+   unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache 
*pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu 
*vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu 
*vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 
dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
   struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28

[PATCH v3 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  28 +---
 arch/mips/kvm/emulate.c  |  59 ++--
 arch/mips/kvm/mips.c |  11 ++-
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 +++
 5 files changed, 87 insertions(+), 151 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2c343c346b79..971439297cea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -812,8 +812,8 @@ struct kvm_mips_callbacks {
   const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-   void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
+   void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -868,7 +868,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct 
kvm_vcpu *vcpu,
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu,
 bool write_fault);
 
@@ -975,83 +974,67 @@ static inline bool kvm_is_ifetch_fault(struct 
kvm_vcpu_arch *vcpu)
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
   u32 *opc,
-  struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause

[PATCH v3 3/7] KVM: PPC: Remove redundant kvm_run from vcpu_arch

2020-04-26 Thread Tianjia Zhang
The 'kvm_run' field already exists in the 'vcpu' structure; the
'kvm_run' pointer kept in 'vcpu_arch' duplicates it, so the
redundant copy in 'vcpu_arch' should be deleted.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/kvm/book3s_hv.c| 6 ++
 arch/powerpc/kvm/book3s_hv_nested.c | 3 +--
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 1dc63101ffe1..2745ff8faa01 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
 
struct task_struct *run_task;
-   struct kvm_run *kvm_run;
 
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 93493f0cbfe8..413ea2dcb10c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2934,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, 
bool is_master)
 
ret = RESUME_GUEST;
if (vcpu->arch.trap)
-   ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+   ret = kvmppc_handle_exit_hv(vcpu->run, vcpu,
vcpu->arch.run_task);
 
vcpu->arch.ret = ret;
@@ -3920,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
spin_lock(>lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3973,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
-   v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+   v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(>arch.cpu_run);
}
@@ -4049,7 +4048,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c 
b/arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5be76f6..5a3987f3ebf3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
-   r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+   r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
 
/* save L2 state for return */
-- 
2.17.1



[PATCH v3 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/s390/kvm/kvm-s390.c | 23 +++
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..c0d94eaa00d7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
 
@@ -4243,8 +4244,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
/* SIE will load etoken directly from SDNX and therefore kvm_run */
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
@@ -4273,7 +4276,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
-   sync_regs_fmt2(vcpu, kvm_run);
+   sync_regs_fmt2(vcpu);
} else {
/*
 * In several places we have to modify our internal view to
@@ -4292,8 +4295,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
@@ -4313,8 +4318,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
@@ -4333,7 +4340,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
-   store_regs_fmt2(vcpu, kvm_run);
+   store_regs_fmt2(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
@@ -4371,7 +4378,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
 
-   sync_regs(vcpu, kvm_run);
+   sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu);
 
might_fault();
@@ -4393,7 +4400,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
 
disable_cpu_timer_accounting(vcpu);
-   store_regs(vcpu, kvm_run);
+   store_regs(vcpu);
 
kvm_sigset_deactivate(vcpu);
 
-- 
2.17.1



[PATCH v3 0/7] clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v3 change:
  Keep the existing `vcpu->run` in the function body unchanged.

v2 change:
  s390 retains the original variable name and minimizes modification.

Tianjia Zhang (7):
  KVM: s390: clean up redundant 'kvm_run' parameters
  KVM: arm64: clean up redundant 'kvm_run' parameters
  KVM: PPC: Remove redundant kvm_run from vcpu_arch
  KVM: PPC: clean up redundant 'kvm_run' parameters
  KVM: PPC: clean up redundant kvm_run parameters in assembly
  KVM: MIPS: clean up redundant 'kvm_run' parameters
  KVM: MIPS: clean up redundant kvm_run parameters in assembly

 arch/arm64/include/asm/kvm_coproc.h  |  12 +--
 arch/arm64/include/asm/kvm_host.h|  11 +--
 arch/arm64/include/asm/kvm_mmu.h |   2 +-
 arch/arm64/kvm/handle_exit.c |  36 +++
 arch/arm64/kvm/sys_regs.c|  13 ++-
 arch/mips/include/asm/kvm_host.h |  32 +--
 arch/mips/kvm/emulate.c  |  59 
 arch/mips/kvm/entry.c|  15 +--
 arch/mips/kvm/mips.c |  14 +--
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 ++
 arch/powerpc/include/asm/kvm_book3s.h|  16 ++--
 arch/powerpc/include/asm/kvm_host.h  |   1 -
 arch/powerpc/include/asm/kvm_ppc.h   |  27 +++---
 arch/powerpc/kvm/book3s.c|   4 +-
 arch/powerpc/kvm/book3s.h|   2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  12 +--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |   4 +-
 arch/powerpc/kvm/book3s_emulate.c|  10 +-
 arch/powerpc/kvm/book3s_hv.c |  64 ++---
 arch/powerpc/kvm/book3s_hv_nested.c  |  12 +--
 arch/powerpc/kvm/book3s_interrupts.S |  17 ++--
 arch/powerpc/kvm/book3s_paired_singles.c |  72 +++---
 arch/powerpc/kvm/book3s_pr.c |  33 ---
 arch/powerpc/kvm/booke.c |  39 
 arch/powerpc/kvm/booke.h |   8 +-
 arch/powerpc/kvm/booke_emulate.c |   2 +-
 arch/powerpc/kvm/booke_interrupts.S  |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S|  10 +-
 arch/powerpc/kvm/e500_emulate.c  |  15 ++-
 arch/powerpc/kvm/emulate.c   |  10 +-
 arch/powerpc/kvm/emulate_loadstore.c |  32 +++
 arch/powerpc/kvm/powerpc.c   |  72 +++---
 arch/powerpc/kvm/trace_hv.h  |   6 +-
 arch/s390/kvm/kvm-s390.c |  23 +++--
 virt/kvm/arm/arm.c   |   6 +-
 virt/kvm/arm/mmio.c  |  11 ++-
 virt/kvm/arm/mmu.c   |   5 +-
 38 files changed, 389 insertions(+), 467 deletions(-)

-- 
2.17.1



[PATCH v3 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  4 ++--
 arch/mips/kvm/entry.c| 15 +--
 arch/mips/kvm/mips.c |  3 ++-
 arch/mips/kvm/trap_emul.c|  2 +-
 arch/mips/kvm/vz.c   |  2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
 #define KVM_MIPS_GUEST_TLB_SIZE64
 struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
  * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
 
/*
-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
 
/* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(, V1, K1);
 
/* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Offset into vcpu->arch */
-   UASM_i_ADDIU(, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(, K1, A0, offsetof(struct kvm_vcpu, arch));
 
/*
 * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
 
/* Restore vcpu */
-   UASM_i_MFC0(, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/*
 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
uasm_i_move(, A0, S0);
-   uasm_i_move(, A1, S1);
UASM_i_LA(, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(, RA, T9);
 UASM_i_ADDIU(, SP, SP, -CALLFRAME_SIZ);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index d822f3aee3dc..04c864cc356a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1238,7 +1238,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 */
kvm_mips_suspend_mm(cpu);
 
-   r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
+   r = vcpu->arch.vcpu_run(vcpu);
 
/* We may have migrated while handling guest exits */
cpu = smp_processor_id();
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 94f1d23828e3..c5878fa0636d 100644
--- a/arch/mips/kvm/vz.c
+++

[PATCH v3 5/7] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 17 -
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..0eff749d8027 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,11 +81,11 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
+   mr  r4, r3
bl  FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -191,10 +190,10 @@ after_sprg3_load:
PPC_STL r31, VCPU_GPR(R31)(r7)
 
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 26b3f5900b72..942039aae598 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -732,12 +732,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
 
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
 
@@ -779,7 +778,7 @@ int kvmppc_vcpu_run(st

[PATCH v3 2/7] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-04-26 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/arm64/include/asm/kvm_coproc.h | 12 +-
 arch/arm64/include/asm/kvm_host.h   | 11 -
 arch/arm64/include/asm/kvm_mmu.h|  2 +-
 arch/arm64/kvm/handle_exit.c| 36 ++---
 arch/arm64/kvm/sys_regs.c   | 13 +--
 virt/kvm/arm/arm.c  |  6 ++---
 virt/kvm/arm/mmio.c | 11 +
 virt/kvm/arm/mmu.c  |  5 ++--
 8 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..454373704b8a 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
   struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..3fab32e4948c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -481,18 +481,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-   int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-  int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d6b895..2ec7b9bb25d3 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -159,7 +159,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index aacfc55de44c..ec3a66642ea5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -25,7 +25,7 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -33,7 +33,7 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, 
u32 esr)
kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
/*
 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
  * Guest access to FP/ASIMD regis

Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-23 Thread Tianjia Zhang




On 2020/4/23 19:00, Christian Borntraeger wrote:



On 23.04.20 12:58, Tianjia Zhang wrote:



On 2020/4/23 18:39, Cornelia Huck wrote:

On Thu, 23 Apr 2020 11:01:43 +0800
Tianjia Zhang  wrote:


On 2020/4/23 0:04, Cornelia Huck wrote:

On Wed, 22 Apr 2020 17:58:04 +0200
Christian Borntraeger  wrote:
   

On 22.04.20 15:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:
  

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?
  

parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
    arch/s390/kvm/kvm-s390.c | 37 ++---
    1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
    return rc;
    }
    -static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
    {
+    struct kvm_run *kvm_run = vcpu->run;
    struct runtime_instr_cb *riccb;
    struct gs_cb *gscb;
    @@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
    }
    if (vcpu->arch.gs_enabled) {
    current->thread.gs_cb = (struct gs_cb *)
-    &vcpu->run->s.regs.gscb;
+    &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?


Agreed. It feels kind of random. Maybe just do the first line (move kvm_run 
from the
function parameter list into the variable declaration)? Not sure if this is 
better.
   


There's more in this patch that I cut... but I think just moving
kvm_run from the parameter list would be much less disruptive.



I think there are two kinds of code(`vcpu->run->` and `kvm_run->`), but
there will be more disruptive, not less.


I just fail to see the benefit; sure, kvm_run-> is convenient, but the
current code is just fine, and any rework should be balanced against
the cost (e.g. cluttering git annotate).



cluttering git annotate ? Does it mean Fixes: <commit> ("comment"). Is it possible to 
solve this problem by splitting this patch?


No its about breaking git blame (and bugfix backports) for just a cosmetic 
improvement.
And I agree with Conny: the cost is higher than the benefit.



I will make a fix in the v3 version. Help to see if there are problems 
with the next few patches.


Thanks,
Tianjia


Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-23 Thread Tianjia Zhang




On 2020/4/23 18:39, Cornelia Huck wrote:

On Thu, 23 Apr 2020 11:01:43 +0800
Tianjia Zhang  wrote:


On 2020/4/23 0:04, Cornelia Huck wrote:

On Wed, 22 Apr 2020 17:58:04 +0200
Christian Borntraeger  wrote:
   

On 22.04.20 15:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:
  

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?
  

parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
   arch/s390/kvm/kvm-s390.c | 37 ++---
   1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
   }
   
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
   {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
   
@@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   &vcpu->run->s.regs.gscb;
+   &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?


Agreed. It feels kind of random. Maybe just do the first line (move kvm_run 
from the
function parameter list into the variable declaration)? Not sure if this is 
better.
  


There's more in this patch that I cut... but I think just moving
kvm_run from the parameter list would be much less disruptive.
   


I think there are two kinds of code(`vcpu->run->` and `kvm_run->`), but
there will be more disruptive, not less.


I just fail to see the benefit; sure, kvm_run-> is convenient, but the
current code is just fine, and any rework should be balanced against
the cost (e.g. cluttering git annotate).



cluttering git annotate ? Does it mean Fixes: <commit> ("comment"). Is it 
possible to solve this problem by splitting this patch?


Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang




On 2020/4/22 23:58, Christian Borntraeger wrote:



On 22.04.20 15:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:


In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?


parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/s390/kvm/kvm-s390.c | 37 ++---
  1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
  }
  
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
  
@@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   &vcpu->run->s.regs.gscb;
+   &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?


Agreed. It feels kind of random. Maybe just do the first line (move kvm_run 
from the
function parameter list into the variable declaration)? Not sure if this is 
better.



Why not, `kvm_run` is equivalent to `vcpu->run`, which is also part of 
the cleanup, or do you mean to put this change in another patch?


Thanks,
Tianjia


Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang




On 2020/4/23 0:04, Cornelia Huck wrote:

On Wed, 22 Apr 2020 17:58:04 +0200
Christian Borntraeger  wrote:


On 22.04.20 15:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:
   

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?
   

parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/s390/kvm/kvm-s390.c | 37 ++---
  1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
  }
  
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
  
@@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   &vcpu->run->s.regs.gscb;
+   &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?


Agreed. It feels kind of random. Maybe just do the first line (move kvm_run 
from the
function parameter list into the variable declaration)? Not sure if this is 
better.



There's more in this patch that I cut... but I think just moving
kvm_run from the parameter list would be much less disruptive.



I think there are two kinds of code(`vcpu->run->` and `kvm_run->`), but 
there will be more disruptive, not less.


Thanks,
Tianjia


Re: [PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang




On 2020/4/22 21:45, Cornelia Huck wrote:

On Wed, 22 Apr 2020 20:58:04 +0800
Tianjia Zhang  wrote:


In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. Earlier than historical reasons, many kvm-related function


s/Earlier than/For/ ?



Yes, it should be replaced like this.


parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/s390/kvm/kvm-s390.c | 37 ++---
  1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
  }
  
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
  
@@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   &vcpu->run->s.regs.gscb;
+   &kvm_run->s.regs.gscb;


Not sure if these changes (vcpu->run-> => kvm_run->) are really worth
it. (It seems they amount to at least as much as the changes advertised
in the patch description.)

Other opinions?



Why not replace `vcpu->run->` to `kvm_run->` ? If not, there will be 
both styles of code, which is confusing. I will be confused and think 
that this is something different.


Thanks,
Tianjia


restore_gs_cb(current->thread.gs_cb);
}
preempt_enable();


[PATCH v2 4/7] KVM: PPC: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_book3s.h| 16 +++---
 arch/powerpc/include/asm/kvm_ppc.h   | 27 +
 arch/powerpc/kvm/book3s.c|  4 +-
 arch/powerpc/kvm/book3s.h|  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 12 ++--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |  4 +-
 arch/powerpc/kvm/book3s_emulate.c| 10 ++--
 arch/powerpc/kvm/book3s_hv.c | 60 ++--
 arch/powerpc/kvm/book3s_hv_nested.c  | 11 ++--
 arch/powerpc/kvm/book3s_paired_singles.c | 72 
 arch/powerpc/kvm/book3s_pr.c | 30 +-
 arch/powerpc/kvm/booke.c | 36 ++--
 arch/powerpc/kvm/booke.h |  8 +--
 arch/powerpc/kvm/booke_emulate.c |  2 +-
 arch/powerpc/kvm/e500_emulate.c  | 15 +++--
 arch/powerpc/kvm/emulate.c   | 10 ++--
 arch/powerpc/kvm/emulate_loadstore.c | 32 +--
 arch/powerpc/kvm/powerpc.c   | 72 
 arch/powerpc/kvm/trace_hv.h  |  6 +-
 19 files changed, 212 insertions(+), 217 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 506e4df2d730..66dbb1f85d59 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, 
struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong 
seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu, unsigned long addr,
-   unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+   unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache 
*pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu 
*vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu 
*vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 
dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
   struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28

[PATCH v2 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  28 +---
 arch/mips/kvm/emulate.c  |  59 ++--
 arch/mips/kvm/mips.c |  11 ++-
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 +++
 5 files changed, 87 insertions(+), 151 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2c343c346b79..971439297cea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -812,8 +812,8 @@ struct kvm_mips_callbacks {
   const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-   void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
+   void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -868,7 +868,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct 
kvm_vcpu *vcpu,
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu,
 bool write_fault);
 
@@ -975,83 +974,67 @@ static inline bool kvm_is_ifetch_fault(struct 
kvm_vcpu_arch *vcpu)
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
   u32 *opc,
-  struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause

[PATCH v2 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/s390/kvm/kvm-s390.c | 37 ++---
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e335a7e5ead7..d7bb2e7a07ff 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4176,8 +4176,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
 
@@ -4235,7 +4236,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   &vcpu->run->s.regs.gscb;
+   &kvm_run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
}
preempt_enable();
@@ -4243,8 +4244,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
/* SIE will load etoken directly from SDNX and therefore kvm_run */
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
@@ -4257,23 +4260,23 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
}
save_access_regs(vcpu->arch.host_acrs);
-   restore_access_regs(vcpu->run->s.regs.acrs);
+   restore_access_regs(kvm_run->s.regs.acrs);
/* save host (userspace) fprs/vrs */
save_fpu_regs();
vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
if (MACHINE_HAS_VX)
-   current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+   current->thread.fpu.regs = kvm_run->s.regs.vrs;
else
-   current->thread.fpu.regs = vcpu->run->s.regs.fprs;
-   current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+   current->thread.fpu.regs = kvm_run->s.regs.fprs;
+   current->thread.fpu.fpc = kvm_run->s.regs.fpc;
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
 
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
-   sync_regs_fmt2(vcpu, kvm_run);
+   sync_regs_fmt2(vcpu);
} else {
/*
 * In several places we have to modify our internal view to
@@ -4292,8 +4295,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
@@ -4313,8 +4318,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
+
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
@@ -4324,16 +4331,16 @@ static void store_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
-   save_access_regs(vcpu->run->s.regs.acrs);
+   save_access_regs(kvm_run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
   

[PATCH v2 3/7] KVM: PPC: Remove redundant kvm_run from vcpu_arch

2020-04-22 Thread Tianjia Zhang
The 'kvm_run' field already exists in the 'vcpu' structure, which
is the same structure as the 'kvm_run' in the 'vcpu_arch' and
should be deleted.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/kvm/book3s_hv.c| 6 ++
 arch/powerpc/kvm/book3s_hv_nested.c | 3 +--
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 1dc63101ffe1..2745ff8faa01 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
 
struct task_struct *run_task;
-   struct kvm_run *kvm_run;
 
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 93493f0cbfe8..413ea2dcb10c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2934,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, 
bool is_master)
 
ret = RESUME_GUEST;
if (vcpu->arch.trap)
-   ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+   ret = kvmppc_handle_exit_hv(vcpu->run, vcpu,
vcpu->arch.run_task);
 
vcpu->arch.ret = ret;
@@ -3920,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
	spin_lock(&vc->lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3973,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
-   v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+   v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(>arch.cpu_run);
}
@@ -4049,7 +4048,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c 
b/arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5be76f6..5a3987f3ebf3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
-   r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+   r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
 
/* save L2 state for return */
-- 
2.17.1



[PATCH v2 2/7] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/arm64/include/asm/kvm_coproc.h | 12 +-
 arch/arm64/include/asm/kvm_host.h   | 11 -
 arch/arm64/include/asm/kvm_mmu.h|  2 +-
 arch/arm64/kvm/handle_exit.c| 36 ++---
 arch/arm64/kvm/sys_regs.c   | 13 +--
 virt/kvm/arm/arm.c  |  6 ++---
 virt/kvm/arm/mmio.c | 11 +
 virt/kvm/arm/mmu.c  |  5 ++--
 8 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..454373704b8a 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
   struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..3fab32e4948c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -481,18 +481,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-   int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-  int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d6b895..2ec7b9bb25d3 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -159,7 +159,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index aacfc55de44c..ec3a66642ea5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -25,7 +25,7 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -33,7 +33,7 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, 
u32 esr)
kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
/*
 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
  * Guest access to FP/ASIMD regis

[PATCH v2 0/7] clean up redundant 'kvm_run' parameters

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

---
v2 change:
  s390 retains the original variable name and minimizes modification.

Tianjia Zhang (7):
  KVM: s390: clean up redundant 'kvm_run' parameters
  KVM: arm64: clean up redundant 'kvm_run' parameters
  KVM: PPC: Remove redundant kvm_run from vcpu_arch
  KVM: PPC: clean up redundant 'kvm_run' parameters
  KVM: PPC: clean up redundant kvm_run parameters in assembly
  KVM: MIPS: clean up redundant 'kvm_run' parameters
  KVM: MIPS: clean up redundant kvm_run parameters in assembly

 arch/arm64/include/asm/kvm_coproc.h  |  12 +--
 arch/arm64/include/asm/kvm_host.h|  11 +--
 arch/arm64/include/asm/kvm_mmu.h |   2 +-
 arch/arm64/kvm/handle_exit.c |  36 +++
 arch/arm64/kvm/sys_regs.c|  13 ++-
 arch/mips/include/asm/kvm_host.h |  32 +--
 arch/mips/kvm/emulate.c  |  59 
 arch/mips/kvm/entry.c|  15 +--
 arch/mips/kvm/mips.c |  14 +--
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 ++
 arch/powerpc/include/asm/kvm_book3s.h|  16 ++--
 arch/powerpc/include/asm/kvm_host.h  |   1 -
 arch/powerpc/include/asm/kvm_ppc.h   |  27 +++---
 arch/powerpc/kvm/book3s.c|   4 +-
 arch/powerpc/kvm/book3s.h|   2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  12 +--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |   4 +-
 arch/powerpc/kvm/book3s_emulate.c|  10 +-
 arch/powerpc/kvm/book3s_hv.c |  64 ++---
 arch/powerpc/kvm/book3s_hv_nested.c  |  12 +--
 arch/powerpc/kvm/book3s_interrupts.S |  17 ++--
 arch/powerpc/kvm/book3s_paired_singles.c |  72 +++---
 arch/powerpc/kvm/book3s_pr.c |  33 ---
 arch/powerpc/kvm/booke.c |  39 
 arch/powerpc/kvm/booke.h |   8 +-
 arch/powerpc/kvm/booke_emulate.c |   2 +-
 arch/powerpc/kvm/booke_interrupts.S  |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S|  10 +-
 arch/powerpc/kvm/e500_emulate.c  |  15 ++-
 arch/powerpc/kvm/emulate.c   |  10 +-
 arch/powerpc/kvm/emulate_loadstore.c |  32 +++
 arch/powerpc/kvm/powerpc.c   |  72 +++---
 arch/powerpc/kvm/trace_hv.h  |   6 +-
 arch/s390/kvm/kvm-s390.c |  37 +---
 virt/kvm/arm/arm.c   |   6 +-
 virt/kvm/arm/mmio.c  |  11 ++-
 virt/kvm/arm/mmu.c   |   5 +-
 38 files changed, 396 insertions(+), 474 deletions(-)

-- 
2.17.1



[PATCH v2 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  4 ++--
 arch/mips/kvm/entry.c| 15 +--
 arch/mips/kvm/mips.c |  3 ++-
 arch/mips/kvm/trap_emul.c|  2 +-
 arch/mips/kvm/vz.c   |  2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
 #define KVM_MIPS_GUEST_TLB_SIZE64
 struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
  * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
 
/*
-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
 
/* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(, V1, K1);
 
/* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Offset into vcpu->arch */
-   UASM_i_ADDIU(, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(, K1, A0, offsetof(struct kvm_vcpu, arch));
 
/*
 * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
 
/* Restore vcpu */
-   UASM_i_MFC0(, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/*
 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
uasm_i_move(, A0, S0);
-   uasm_i_move(, A1, S1);
UASM_i_LA(, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(, RA, T9);
 UASM_i_ADDIU(, SP, SP, -CALLFRAME_SIZ);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index d822f3aee3dc..04c864cc356a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1238,7 +1238,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 */
kvm_mips_suspend_mm(cpu);
 
-   r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
+   r = vcpu->arch.vcpu_run(vcpu);
 
/* We may have migrated while handling guest exits */
cpu = smp_processor_id();
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 94f1d23828e3..c5878fa0636d 100644
--- a/arch/mips/kvm/vz.c
+++

[PATCH v2 5/7] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-04-22 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 17 -
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..0eff749d8027 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,11 +81,11 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
+   mr  r4, r3
bl  FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -191,10 +190,10 @@ after_sprg3_load:
PPC_STL r31, VCPU_GPR(R31)(r7)
 
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 26b3f5900b72..942039aae598 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -732,12 +732,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
 
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
 
@@ -779,7 +778,7 @@ int kvmppc_vcpu_run(st

Re: [PATCH 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-20 Thread Tianjia Zhang




On 2020/4/20 18:32, maobibo wrote:



On 04/19/2020 03:51 PM, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/mips/include/asm/kvm_host.h |  4 ++--
  arch/mips/kvm/entry.c| 15 +--
  arch/mips/kvm/mips.c |  3 ++-
  arch/mips/kvm/trap_emul.c|  2 +-
  arch/mips/kvm/vz.c   |  2 +-
  5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
  #define KVM_MIPS_GUEST_TLB_SIZE   64
  struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
  
  	/* Host registers preserved across guest mode execution */

unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
  /* Debug: dump vcpu state */
  int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
  
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
  
  /* Building of entry/exception code */

  int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
   * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
   * conforms to the following prototype:
   *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
   *
   * The exit from the guest and return to the caller is handled by the code
   * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
  
  	/*

-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
  
  	/* k0/k1 not being used in host kernel context */

@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(, V1, K1);
  
  	/* VCPU scratch register has pointer to vcpu */

-   UASM_i_MTC0(, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(, A0, scratch_vcpu[0], scratch_vcpu[1]);
  
  	/* Offset into vcpu->arch */

-   UASM_i_ADDIU(, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(, K1, A0, offsetof(struct kvm_vcpu, arch));
  
  	/*

 * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
  
  	/* Restore vcpu */

-   UASM_i_MFC0(, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(, S0, scratch_vcpu[0], scratch_vcpu[1]);
  
  	/*

 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
uasm_i_move(, A0, S0);
-   uasm_i_move(, A1, S1);
UASM_i_LA(, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(, RA, T9);
 UASM_i_ADDIU(, SP, SP, -CALLFRAME_SIZ);


I suggest keeping asm code untouched, the change for c code is much easier to 
understand, however I do not see obvious advantage to remove one redundant 
function parameter :)


regards
bibo,mao



This is acceptable; the asm code change is kept as an independent patch
to make this cleanup easier to review.





diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
  /*
   * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | 
RESUME_FLAG_NV)
   */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index d822f3aee3dc..04c864cc356a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1238,7 +1238,7 @@ static int kvm_trap_emul

Re: [PATCH 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-20 Thread Tianjia Zhang




On 2020/4/20 15:07, Christian Borntraeger wrote:



On 19.04.20 09:51, Tianjia Zhang wrote:

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
  arch/s390/kvm/kvm-s390.c | 127 +--
  1 file changed, 67 insertions(+), 60 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 443af3ead739..cf420d013ba3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4173,24 +4173,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
  }
  
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
  {
+   struct kvm_run *run = vcpu->run;


Please use kvm_run as variable name. This makes all of the changes below go 
away.



It's OK, I will fix it in v2 patch.

Thanks,
Tianjia




struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
  
-	riccb = (struct runtime_instr_cb *) _run->s.regs.riccb;

-   gscb = (struct gs_cb *) _run->s.regs.gscb;
-   vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
-   vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-   if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-   vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
-   vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
-   vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
-   }
-   if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
-   vcpu->arch.pfault_token = kvm_run->s.regs.pft;
-   vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
-   vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+   riccb = (struct runtime_instr_cb *) >s.regs.riccb;
+   gscb = (struct gs_cb *) >s.regs.gscb;
+   vcpu->arch.sie_block->gpsw.mask = run->psw_mask;
+   vcpu->arch.sie_block->gpsw.addr = run->psw_addr;
+   if (run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+   vcpu->arch.sie_block->todpr = run->s.regs.todpr;
+   vcpu->arch.sie_block->pp = run->s.regs.pp;
+   vcpu->arch.sie_block->gbea = run->s.regs.gbea;
+   }
+   if (run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
+   vcpu->arch.pfault_token = run->s.regs.pft;
+   vcpu->arch.pfault_select = run->s.regs.pfs;
+   vcpu->arch.pfault_compare = run->s.regs.pfc;
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
@@ -4198,7 +4199,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 * If userspace sets the riccb (e.g. after migration) to a valid state,
 * we should enable RI here instead of doing the lazy enablement.
 */
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64) &&
riccb->v &&
!(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
@@ -4209,7 +4210,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 * If userspace sets the gscb (e.g. after migration) to non-zero,
 * we should enable GS here instead of doing the lazy enablement.
 */
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
test_kvm_facility(vcpu->kvm, 133) &&
gscb->gssm &&
!vcpu->arch.gs_enabled) {
@@ -4218,10 +4219,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
}
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
test_kvm_facility(vcpu->kvm, 82)) {
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
-   vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 
0;
+   vcpu->arch.sie_block->fpf |= run->s.regs.bpbc ? FPF_BPBC : 0;
}
if (MACHINE_HAS_GS) {
preempt_disable();
@@ -4232,45 +4233,47 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
}
if (vcpu->arch.gs_enabled) {
  

Re: [PATCH] KVM: X86: Fix compile error in svm/sev.c

2020-04-20 Thread Tianjia Zhang




On 2020/4/19 16:24, Xiaoyao Li wrote:

On 4/19/2020 3:30 PM, Tianjia Zhang wrote:

The compiler reported the following compilation errors:

arch/x86/kvm/svm/sev.c: In function ‘sev_pin_memory’:
arch/x86/kvm/svm/sev.c:361:3: error: implicit declaration of function
‘release_pages’ [-Werror=implicit-function-declaration]
    release_pages(pages, npinned);
    ^

The reason is that the 'pagemap.h' header file is not included.



FYI.

Boris has sent the Patch:
https://lkml.kernel.org/r/20200411160927.27954-1...@alien8.de

and it's already in kvm master/queue branch




This is the same fix, please ignore this patch.

Thanks and best,
Tianjia


[PATCH 7/7] KVM: MIPS: clean up redundant kvm_run parameters in assembly

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  4 ++--
 arch/mips/kvm/entry.c| 15 +--
 arch/mips/kvm/mips.c |  3 ++-
 arch/mips/kvm/trap_emul.c|  2 +-
 arch/mips/kvm/vz.c   |  2 +-
 5 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 971439297cea..db915c55166d 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
 #define KVM_MIPS_GUEST_TLB_SIZE64
 struct kvm_vcpu_arch {
void *guest_ebase;
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
@@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks 
**install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..e3f29af3b6cd 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int 
reg)
  * Assemble the start of the vcpu_run function to run a guest VCPU. The 
function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
 
/*
-* A0: run
-* A1: vcpu
+* A0: vcpu
 */
 
/* k0/k1 not being used in host kernel context */
@@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(, V1, K1);
 
/* VCPU scratch register has pointer to vcpu */
-   UASM_i_MTC0(, A1, scratch_vcpu[0], scratch_vcpu[1]);
+   UASM_i_MTC0(, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
/* Offset into vcpu->arch */
-   UASM_i_ADDIU(, K1, A1, offsetof(struct kvm_vcpu, arch));
+   UASM_i_ADDIU(, K1, A0, offsetof(struct kvm_vcpu, arch));
 
/*
 * Save the host stack to VCPU, used for exception processing
@@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
 
/* Restore vcpu */
-   UASM_i_MFC0(, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-   /* Restore run (vcpu->run) */
-   UASM_i_LW(, S0, offsetof(struct kvm_vcpu, run), S1);
+   UASM_i_MFC0(, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
/*
 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
 * with this in the kernel
 */
uasm_i_move(, A0, S0);
-   uasm_i_move(, A1, S1);
UASM_i_LA(, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(, RA, T9);
 UASM_i_ADDIU(, SP, SP, -CALLFRAME_SIZ);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9710477a9827..32850470c037 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index d822f3aee3dc..04c864cc356a 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1238,7 +1238,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 */
kvm_mips_suspend_mm(cpu);
 
-   r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
+   r = vcpu->arch.vcpu_run(vcpu);
 
/* We may have migrated while handling guest exits */
cpu = smp_processor_id();
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 94f1d23828e3..c5878fa0636d 100644
--- a/arch/mips/kvm/vz.c
+++

[PATCH 5/7] KVM: PPC: clean up redundant kvm_run parameters in assembly

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_ppc.h|  2 +-
 arch/powerpc/kvm/book3s_interrupts.S  | 17 -
 arch/powerpc/kvm/book3s_pr.c  |  9 -
 arch/powerpc/kvm/booke.c  |  9 -
 arch/powerpc/kvm/booke_interrupts.S   |  9 -
 arch/powerpc/kvm/bookehv_interrupts.S | 10 +-
 6 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index ccf66b3a4c1d..0a056c64c317 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -59,7 +59,7 @@ enum xlate_readwrite {
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_interrupts.S 
b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..0eff749d8027 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -55,8 +55,7 @@
  /
 
 /* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
+ *  r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-   /* Save r3 (kvm_run) and r4 (vcpu) */
-   SAVE_2GPRS(3, r1)
+   /* Save r3 (vcpu) */
+   SAVE_GPR(3, r1)
 
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,11 +81,11 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
 
/* Load non-volatile guest state from the vcpu */
-   VCPU_LOAD_NVGPRS(r4)
+   VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
-   mr  r3, r4
+   mr  r4, r3
bl  FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -191,10 +190,10 @@ after_sprg3_load:
PPC_STL r31, VCPU_GPR(R31)(r7)
 
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-   lwz r5, VCPU_TRAP(r7)
+   lwz r4, VCPU_TRAP(r7)
 
-   /* Restore r3 (kvm_run) and r4 (vcpu) */
-   REST_2GPRS(3, r1)
+   /* Restore r3 (vcpu) */
+   REST_GPR(3, r1)
bl  FUNC(kvmppc_handle_exit_pr)
 
/* If RESUME_GUEST, get back in the loop */
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..01c8fe5abe0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1151,9 +1151,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, 
unsigned int exit_nr)
return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+   struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
 
@@ -1826,7 +1826,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu 
*vcpu)
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret;
 #ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1833,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1860,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 
kvmppc_fix_ee_before_entry();
 
-   ret = __kvmppc_vcpu_run(run, vcpu);
+   ret = __kvmppc_vcpu_run(vcpu);
 
kvmppc_clear_debug(vcpu);
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 26b3f5900b72..942039aae598 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -732,12 +732,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 
 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-   struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
 
if (!vcpu->arch.sane) {
-   run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+   vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
 
@@ -779,7 +778,7 @@ int kvmppc_vcpu_run(st

[PATCH 6/7] KVM: MIPS: clean up redundant 'kvm_run' parameters

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/mips/include/asm/kvm_host.h |  28 +---
 arch/mips/kvm/emulate.c  |  59 ++--
 arch/mips/kvm/mips.c |  11 ++-
 arch/mips/kvm/trap_emul.c| 114 ++-
 arch/mips/kvm/vz.c   |  26 +++
 5 files changed, 87 insertions(+), 151 deletions(-)

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2c343c346b79..971439297cea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -812,8 +812,8 @@ struct kvm_mips_callbacks {
   const struct kvm_one_reg *reg, s64 v);
int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-   int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-   void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+   int (*vcpu_run)(struct kvm_vcpu *vcpu);
+   void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -868,7 +868,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct 
kvm_vcpu *vcpu,
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu,
 bool write_fault);
 
@@ -975,83 +974,67 @@ static inline bool kvm_is_ifetch_fault(struct 
kvm_vcpu_arch *vcpu)
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
   u32 *opc,
-  struct kvm_run *run,
   struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
  u32 *opc,
- struct kvm_run *run,
  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
u32 *opc,
-   struct kvm_run *run,
struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 u32 *opc,
-struct kvm_run *run,
 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause

[PATCH 4/7] KVM: PPC: clean up redundant 'kvm_run' parameters

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_book3s.h| 16 +++---
 arch/powerpc/include/asm/kvm_ppc.h   | 27 +
 arch/powerpc/kvm/book3s.c|  4 +-
 arch/powerpc/kvm/book3s.h|  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 12 ++--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |  4 +-
 arch/powerpc/kvm/book3s_emulate.c| 10 ++--
 arch/powerpc/kvm/book3s_hv.c | 60 ++--
 arch/powerpc/kvm/book3s_hv_nested.c  | 11 ++--
 arch/powerpc/kvm/book3s_paired_singles.c | 72 
 arch/powerpc/kvm/book3s_pr.c | 30 +-
 arch/powerpc/kvm/booke.c | 36 ++--
 arch/powerpc/kvm/booke.h |  8 +--
 arch/powerpc/kvm/booke_emulate.c |  2 +-
 arch/powerpc/kvm/e500_emulate.c  | 15 +++--
 arch/powerpc/kvm/emulate.c   | 10 ++--
 arch/powerpc/kvm/emulate_loadstore.c | 32 +--
 arch/powerpc/kvm/powerpc.c   | 72 
 arch/powerpc/kvm/trace_hv.h  |  6 +-
 19 files changed, 212 insertions(+), 217 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h 
b/arch/powerpc/include/asm/kvm_book3s.h
index 506e4df2d730..66dbb1f85d59 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, 
struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong 
seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu, unsigned long addr,
-   unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+   unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache 
*pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-   struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu 
*vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu 
*vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 
dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
   struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h 
b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28

[PATCH 0/7] clean up redundant 'kvm_run' parameters

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

This series of patches has completely cleaned the architecture of
arm64, mips, ppc, and s390 (no such redundant code on x86). Due to
the large number of modified codes, a separate patch is made for each
platform. On the ppc platform, there is also a redundant structure
pointer of 'kvm_run' in 'vcpu_arch', which has also been cleaned
separately.

Thanks and best.

Tianjia Zhang (7):
  KVM: s390: clean up redundant 'kvm_run' parameters
  KVM: arm64: clean up redundant 'kvm_run' parameters
  KVM: PPC: Remove redundant kvm_run from vcpu_arch
  KVM: PPC: clean up redundant 'kvm_run' parameters
  KVM: PPC: clean up redundant kvm_run parameters in assembly
  KVM: MIPS: clean up redundant 'kvm_run' parameters
  KVM: MIPS: clean up redundant kvm_run parameters in assembly

 arch/arm64/include/asm/kvm_coproc.h  |  12 +--
 arch/arm64/include/asm/kvm_host.h|  11 +-
 arch/arm64/include/asm/kvm_mmu.h |   2 +-
 arch/arm64/kvm/handle_exit.c |  36 +++
 arch/arm64/kvm/sys_regs.c|  13 ++-
 arch/mips/include/asm/kvm_host.h |  32 +-
 arch/mips/kvm/emulate.c  |  59 ---
 arch/mips/kvm/entry.c|  15 +--
 arch/mips/kvm/mips.c |  14 +--
 arch/mips/kvm/trap_emul.c| 114 +---
 arch/mips/kvm/vz.c   |  26 ++---
 arch/powerpc/include/asm/kvm_book3s.h|  16 ++-
 arch/powerpc/include/asm/kvm_host.h  |   1 -
 arch/powerpc/include/asm/kvm_ppc.h   |  27 +++--
 arch/powerpc/kvm/book3s.c|   4 +-
 arch/powerpc/kvm/book3s.h|   2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  |  12 +--
 arch/powerpc/kvm/book3s_64_mmu_radix.c   |   4 +-
 arch/powerpc/kvm/book3s_emulate.c|  10 +-
 arch/powerpc/kvm/book3s_hv.c |  64 ++--
 arch/powerpc/kvm/book3s_hv_nested.c  |  12 +--
 arch/powerpc/kvm/book3s_interrupts.S |  17 ++-
 arch/powerpc/kvm/book3s_paired_singles.c |  72 ++---
 arch/powerpc/kvm/book3s_pr.c |  33 +++---
 arch/powerpc/kvm/booke.c |  39 +++
 arch/powerpc/kvm/booke.h |   8 +-
 arch/powerpc/kvm/booke_emulate.c |   2 +-
 arch/powerpc/kvm/booke_interrupts.S  |   9 +-
 arch/powerpc/kvm/bookehv_interrupts.S|  10 +-
 arch/powerpc/kvm/e500_emulate.c  |  15 ++-
 arch/powerpc/kvm/emulate.c   |  10 +-
 arch/powerpc/kvm/emulate_loadstore.c |  32 +++---
 arch/powerpc/kvm/powerpc.c   |  72 ++---
 arch/powerpc/kvm/trace_hv.h  |   6 +-
 arch/s390/kvm/kvm-s390.c | 127 ---
 virt/kvm/arm/arm.c   |   6 +-
 virt/kvm/arm/mmio.c  |  11 +-
 virt/kvm/arm/mmu.c   |   5 +-
 38 files changed, 441 insertions(+), 519 deletions(-)

-- 
2.17.1



[PATCH 2/7] KVM: arm64: clean up redundant 'kvm_run' parameters

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/arm64/include/asm/kvm_coproc.h | 12 +-
 arch/arm64/include/asm/kvm_host.h   | 11 -
 arch/arm64/include/asm/kvm_mmu.h|  2 +-
 arch/arm64/kvm/handle_exit.c| 36 ++---
 arch/arm64/kvm/sys_regs.c   | 13 +--
 virt/kvm/arm/arm.c  |  6 ++---
 virt/kvm/arm/mmio.c | 11 +
 virt/kvm/arm/mmu.c  |  5 ++--
 8 files changed, 46 insertions(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_coproc.h 
b/arch/arm64/include/asm/kvm_coproc.h
index 0185ee8b8b5e..454373704b8a 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
   struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..3fab32e4948c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -481,18 +481,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-   int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-  int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d6b895..2ec7b9bb25d3 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -159,7 +159,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index aacfc55de44c..ec3a66642ea5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -25,7 +25,7 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -33,7 +33,7 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, 
u32 esr)
kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
/*
 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run 
*run)
  * Guest access to FP/ASIMD regis

[PATCH 3/7] KVM: PPC: Remove redundant kvm_run from vcpu_arch

2020-04-19 Thread Tianjia Zhang
The 'kvm_run' field already exists in the 'vcpu' structure, which
is the same structure as the 'kvm_run' in the 'vcpu_arch' and
should be deleted.

Signed-off-by: Tianjia Zhang 
---
 arch/powerpc/include/asm/kvm_host.h | 1 -
 arch/powerpc/kvm/book3s_hv.c| 6 ++
 arch/powerpc/kvm/book3s_hv_nested.c | 3 +--
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 1dc63101ffe1..2745ff8faa01 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
 
struct task_struct *run_task;
-   struct kvm_run *kvm_run;
 
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 93493f0cbfe8..413ea2dcb10c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2934,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, 
bool is_master)
 
ret = RESUME_GUEST;
if (vcpu->arch.trap)
-   ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+   ret = kvmppc_handle_exit_hv(vcpu->run, vcpu,
vcpu->arch.run_task);
 
vcpu->arch.ret = ret;
@@ -3920,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
spin_lock(>lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3973,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, 
struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
-   v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+   v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(>arch.cpu_run);
}
@@ -4049,7 +4048,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
-   vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c 
b/arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5be76f6..5a3987f3ebf3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
-   r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+   r = kvmhv_run_single_vcpu(vcpu->run, vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
 
/* save L2 state for return */
-- 
2.17.1



[PATCH 1/7] KVM: s390: clean up redundant 'kvm_run' parameters

2020-04-19 Thread Tianjia Zhang
In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function
parameters retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time.
This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang 
---
 arch/s390/kvm/kvm-s390.c | 127 +--
 1 file changed, 67 insertions(+), 60 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 443af3ead739..cf420d013ba3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4173,24 +4173,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
 }
 
-static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
struct runtime_instr_cb *riccb;
struct gs_cb *gscb;
 
-   riccb = (struct runtime_instr_cb *) _run->s.regs.riccb;
-   gscb = (struct gs_cb *) _run->s.regs.gscb;
-   vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
-   vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-   if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-   vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
-   vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
-   vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
-   }
-   if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
-   vcpu->arch.pfault_token = kvm_run->s.regs.pft;
-   vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
-   vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
+   riccb = (struct runtime_instr_cb *) >s.regs.riccb;
+   gscb = (struct gs_cb *) >s.regs.gscb;
+   vcpu->arch.sie_block->gpsw.mask = run->psw_mask;
+   vcpu->arch.sie_block->gpsw.addr = run->psw_addr;
+   if (run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+   vcpu->arch.sie_block->todpr = run->s.regs.todpr;
+   vcpu->arch.sie_block->pp = run->s.regs.pp;
+   vcpu->arch.sie_block->gbea = run->s.regs.gbea;
+   }
+   if (run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
+   vcpu->arch.pfault_token = run->s.regs.pft;
+   vcpu->arch.pfault_select = run->s.regs.pfs;
+   vcpu->arch.pfault_compare = run->s.regs.pfc;
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
@@ -4198,7 +4199,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 * If userspace sets the riccb (e.g. after migration) to a valid state,
 * we should enable RI here instead of doing the lazy enablement.
 */
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64) &&
riccb->v &&
!(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
@@ -4209,7 +4210,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 * If userspace sets the gscb (e.g. after migration) to non-zero,
 * we should enable GS here instead of doing the lazy enablement.
 */
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
test_kvm_facility(vcpu->kvm, 133) &&
gscb->gssm &&
!vcpu->arch.gs_enabled) {
@@ -4218,10 +4219,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu->arch.gs_enabled = 1;
}
-   if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+   if ((run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
test_kvm_facility(vcpu->kvm, 82)) {
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
-   vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 
0;
+   vcpu->arch.sie_block->fpf |= run->s.regs.bpbc ? FPF_BPBC : 0;
}
if (MACHINE_HAS_GS) {
preempt_disable();
@@ -4232,45 +4233,47 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
}
if (vcpu->arch.gs_enabled) {
current->thread.gs_cb = (struct gs_cb *)
-   >run->s.regs.gscb;
+   >s.regs.gscb;
restore_gs_cb(curr

[PATCH] KVM: X86: Fix compile error in svm/sev.c

2020-04-19 Thread Tianjia Zhang
The compiler reported the following compilation errors:

arch/x86/kvm/svm/sev.c: In function ‘sev_pin_memory’:
arch/x86/kvm/svm/sev.c:361:3: error: implicit declaration of function
‘release_pages’ [-Werror=implicit-function-declaration]
   release_pages(pages, npinned);
   ^

The reason is that the 'pagemap.h' header file is not included.

Signed-off-by: Tianjia Zhang 
---
 arch/x86/kvm/svm/sev.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0e3fc311d7da..3ef99e87c1db 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "x86.h"
 #include "svm.h"
-- 
2.17.1



Re: [PATCH v2] KVM: Optimize kvm_arch_vcpu_ioctl_run function

2020-04-16 Thread Tianjia Zhang




On 2020/4/16 16:50, Cornelia Huck wrote:

On Thu, 16 Apr 2020 16:45:33 +0800
Tianjia Zhang  wrote:


On 2020/4/16 16:28, Marc Zyngier wrote:

On 2020-04-16 08:03, Vitaly Kuznetsov wrote:

Tianjia Zhang  writes:
  

In earlier versions of kvm, 'kvm_run' is an independent structure
and is not included in the vcpu structure. At present, 'kvm_run'
is already included in the vcpu structure, so the parameter
'kvm_run' is redundant.

This patch simplifies the function definition, removes the extra
'kvm_run' parameter, and extracts it from the 'kvm_vcpu' structure
if necessary.

Signed-off-by: Tianjia Zhang 
---

v2 change:
   remove 'kvm_run' parameter and extract it from 'kvm_vcpu'

  arch/mips/kvm/mips.c   |  3 ++-
  arch/powerpc/kvm/powerpc.c |  3 ++-
  arch/s390/kvm/kvm-s390.c   |  3 ++-
  arch/x86/kvm/x86.c | 11 ++-
  include/linux/kvm_host.h   |  2 +-
  virt/kvm/arm/arm.c |  6 +++---
  virt/kvm/kvm_main.c    |  2 +-
  7 files changed, 17 insertions(+), 13 deletions(-)



Overall, there is a large set of cleanups to be done when both the vcpu
and the run
structures are passed as parameters at the same time. Just grepping the
tree for
kvm_run is pretty instructive.

      M.


Sorry, it's my mistake, I only compiled the x86 platform, I will submit
patch again.


I think it's completely fine (and even preferable) to do cleanups like
that on top.

[FWIW, I compiled s390 here.]



Very good, I will do a comprehensive cleanup of this type of code.

Thanks,
Tianjia


Re: [PATCH v2] KVM: Optimize kvm_arch_vcpu_ioctl_run function

2020-04-16 Thread Tianjia Zhang




On 2020/4/16 16:28, Marc Zyngier wrote:

On 2020-04-16 08:03, Vitaly Kuznetsov wrote:

Tianjia Zhang  writes:


In earlier versions of kvm, 'kvm_run' is an independent structure
and is not included in the vcpu structure. At present, 'kvm_run'
is already included in the vcpu structure, so the parameter
'kvm_run' is redundant.

This patch simplifies the function definition, removes the extra
'kvm_run' parameter, and extracts it from the 'kvm_vcpu' structure
if necessary.

Signed-off-by: Tianjia Zhang 
---

v2 change:
  remove 'kvm_run' parameter and extract it from 'kvm_vcpu'

 arch/mips/kvm/mips.c   |  3 ++-
 arch/powerpc/kvm/powerpc.c |  3 ++-
 arch/s390/kvm/kvm-s390.c   |  3 ++-
 arch/x86/kvm/x86.c | 11 ++-
 include/linux/kvm_host.h   |  2 +-
 virt/kvm/arm/arm.c |  6 +++---
 virt/kvm/kvm_main.c    |  2 +-
 7 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8f05dd0a0f4e..ec24adf4857e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -439,8 +439,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct 
kvm_vcpu *vcpu,

 return -ENOIOCTLCMD;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
 int r = -EINTR;

 vcpu_load(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index e15166b0a16d..7e24691e138a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1764,8 +1764,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu 
*vcpu, struct kvm_one_reg *reg)

 return r;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
 int r;

 vcpu_load(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 19a81024fe16..443af3ead739 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4333,8 +4333,9 @@ static void store_regs(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)

 store_regs_fmt2(vcpu, kvm_run);
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run 
*kvm_run)

+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *kvm_run = vcpu->run;
 int rc;

 if (kvm_run->immediate_exit)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3bf2ecafd027..a0338e86c90f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8707,8 +8707,9 @@ static void kvm_put_guest_fpu(struct kvm_vcpu 
*vcpu)

 trace_kvm_fpu(0);
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run 
*kvm_run)

+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *kvm_run = vcpu->run;
 int r;

 vcpu_load(vcpu);
@@ -8726,18 +8727,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu 
*vcpu, struct kvm_run *kvm_run)

 r = -EAGAIN;
 if (signal_pending(current)) {
 r = -EINTR;
-    vcpu->run->exit_reason = KVM_EXIT_INTR;
+    kvm_run->exit_reason = KVM_EXIT_INTR;
 ++vcpu->stat.signal_exits;
 }
 goto out;
 }

-    if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+    if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
 r = -EINVAL;
 goto out;
 }

-    if (vcpu->run->kvm_dirty_regs) {
+    if (kvm_run->kvm_dirty_regs) {
 r = sync_regs(vcpu);
 if (r != 0)
 goto out;
@@ -8767,7 +8768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu 
*vcpu, struct kvm_run *kvm_run)


 out:
 kvm_put_guest_fpu(vcpu);
-    if (vcpu->run->kvm_valid_regs)
+    if (kvm_run->kvm_valid_regs)
 store_regs(vcpu);
 post_kvm_run_save(vcpu);
 kvm_sigset_deactivate(vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d58beb65454..1e17ef719595 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct 
kvm_vcpu *vcpu,

 struct kvm_mp_state *mp_state);
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run 
*kvm_run);

+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 48d0ec44ad77..f5390ac2165b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -639,7 +639,6 @@ static void check_vcpu_requests(struct kvm_vcpu 
*vcpu)

 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute 
guest code

  * @vcpu:    The VCPU pointer
- * @run:    The kvm_run structure pointer used for userspace state 
exchange

  *
  * This function is called through the 

[PATCH v2] KVM: Optimize kvm_arch_vcpu_ioctl_run function

2020-04-15 Thread Tianjia Zhang
In earlier versions of kvm, 'kvm_run' was an independent structure
and was not included in the vcpu structure. At present, 'kvm_run'
is already included in the vcpu structure, so the parameter
'kvm_run' is redundant.

This patch simplifies the function definition, removes the extra
'kvm_run' parameter, and extracts it from the 'kvm_vcpu' structure
if necessary.

Signed-off-by: Tianjia Zhang 
---

v2 change:
  remove 'kvm_run' parameter and extract it from 'kvm_vcpu'

 arch/mips/kvm/mips.c   |  3 ++-
 arch/powerpc/kvm/powerpc.c |  3 ++-
 arch/s390/kvm/kvm-s390.c   |  3 ++-
 arch/x86/kvm/x86.c | 11 ++-
 include/linux/kvm_host.h   |  2 +-
 virt/kvm/arm/arm.c |  6 +++---
 virt/kvm/kvm_main.c|  2 +-
 7 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8f05dd0a0f4e..ec24adf4857e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -439,8 +439,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu 
*vcpu,
return -ENOIOCTLCMD;
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
int r = -EINTR;
 
vcpu_load(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index e15166b0a16d..7e24691e138a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1764,8 +1764,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
return r;
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *run = vcpu->run;
int r;
 
vcpu_load(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 19a81024fe16..443af3ead739 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4333,8 +4333,9 @@ static void store_regs(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
store_regs_fmt2(vcpu, kvm_run);
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
int rc;
 
if (kvm_run->immediate_exit)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3bf2ecafd027..a0338e86c90f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8707,8 +8707,9 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
trace_kvm_fpu(0);
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+   struct kvm_run *kvm_run = vcpu->run;
int r;
 
vcpu_load(vcpu);
@@ -8726,18 +8727,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
r = -EAGAIN;
if (signal_pending(current)) {
r = -EINTR;
-   vcpu->run->exit_reason = KVM_EXIT_INTR;
+   kvm_run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
}
goto out;
}
 
-   if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+   if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
r = -EINVAL;
goto out;
}
 
-   if (vcpu->run->kvm_dirty_regs) {
+   if (kvm_run->kvm_dirty_regs) {
r = sync_regs(vcpu);
if (r != 0)
goto out;
@@ -8767,7 +8768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct 
kvm_run *kvm_run)
 
 out:
kvm_put_guest_fpu(vcpu);
-   if (vcpu->run->kvm_valid_regs)
+   if (kvm_run->kvm_valid_regs)
store_regs(vcpu);
post_kvm_run_save(vcpu);
kvm_sigset_deactivate(vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d58beb65454..1e17ef719595 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
 
 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 48d0ec44ad77..f5390ac2165b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -639,7 +639,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:  The VCPU pointer
-