From: Grygorii Strashko <[email protected]>

Xen uses the pattern below for the raw_*_guest() functions:

#define raw_copy_to_guest(dst, src, len)        \
    (is_hvm_vcpu(current) ?                     \
     copy_to_user_hvm((dst), (src), (len)) :    \
     copy_to_guest_pv(dst, src, len))

This pattern works, depending on CONFIG_PV/CONFIG_HVM, as follows (see the
simplified sketch after this list):
- PV=y and HVM=y
  The proper guest access function is selected depending on the domain type.
- PV=y and HVM=n
  Only PV domains are possible. is_hvm_domain/vcpu() will constant-fold to
  "false" and the compiler will optimize the code and skip the HVM-specific
  part.
- PV=n and HVM=y
  Only HVM domains are possible. is_hvm_domain/vcpu() will not be constant,
  so no PV-specific code will be optimized out by the compiler.
- PV=n and HVM=n
  No guests should be possible. The code will still follow the PV path.
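
A minimal sketch of why the macro folds differently per configuration. This
is a simplified, hypothetical stand-in for the real predicate, not the
actual Xen definition:

    /* Hypothetical, simplified is_hvm_vcpu() for illustration only. */
    #ifdef CONFIG_HVM
    #define is_hvm_vcpu(v) ((v)->domain->is_hvm)  /* runtime check */
    #else
    #define is_hvm_vcpu(v) false                  /* constant-folds away */
    #endif

With HVM=n the HVM branch of the macro is dead code and gets dropped, but
with PV=n there is no equivalent constant on the PV side, so the PV branch
is always emitted.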

Rework the raw_*_guest() code to use static inline functions which account
for the above PV/HVM configurations, with the main intention of optimizing
the code for the (PV=n and HVM=y) case.

For the (PV=n and HVM=n) case, return "len", indicating a failure (no
guests should be possible in this case, which means no access to guest
memory should ever happen).

The measured (bloat-o-meter) improvement for the (PV=n and HVM=y) case is
about -11K.

[[email protected]: suggested using static inline functions instead of
macro combinations]
Suggested-by: Teddy Astie <[email protected]>
Signed-off-by: Grygorii Strashko <[email protected]>
---
changes in v5:
- rebase
- drop moving usercopy.c as it is not needed since commit 7370966d1cb7 
  ("x86: move / split usercopy.c to / into arch-specific library")

changes in v4:
- move usercopy.c into arch/x86/pv/
- rework to always dynamically check for HVM vcpu(domain) by using is_hvm_vcpu()
  as requested by Jan Beulich

changes in v3:
- add raw_use_hvm_access() wrapper

changes in v2:
- use static inline functions instead of macro combinations

 xen/arch/x86/include/asm/guest_access.h | 78 ++++++++++++++++++-------
 1 file changed, 58 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/include/asm/guest_access.h b/xen/arch/x86/include/asm/guest_access.h
index 69716c8b41bb..f0e56b112e14 100644
--- a/xen/arch/x86/include/asm/guest_access.h
+++ b/xen/arch/x86/include/asm/guest_access.h
@@ -13,26 +13,64 @@
 #include <asm/hvm/guest_access.h>
 
 /* Raw access functions: no type checking. */
-#define raw_copy_to_guest(dst, src, len)        \
-    (is_hvm_vcpu(current) ?                     \
-     copy_to_user_hvm((dst), (src), (len)) :    \
-     copy_to_guest_pv(dst, src, len))
-#define raw_copy_from_guest(dst, src, len)      \
-    (is_hvm_vcpu(current) ?                     \
-     copy_from_user_hvm((dst), (src), (len)) :  \
-     copy_from_guest_pv(dst, src, len))
-#define raw_clear_guest(dst,  len)              \
-    (is_hvm_vcpu(current) ?                     \
-     clear_user_hvm((dst), (len)) :             \
-     clear_guest_pv(dst, len))
-#define __raw_copy_to_guest(dst, src, len)      \
-    (is_hvm_vcpu(current) ?                     \
-     copy_to_user_hvm((dst), (src), (len)) :    \
-     __copy_to_guest_pv(dst, src, len))
-#define __raw_copy_from_guest(dst, src, len)    \
-    (is_hvm_vcpu(current) ?                     \
-     copy_from_user_hvm((dst), (src), (len)) :  \
-     __copy_from_guest_pv(dst, src, len))
+static inline unsigned int raw_copy_to_guest(void *dst, const void *src,
+                                             unsigned int len)
+{
+    if ( is_hvm_vcpu(current) )
+        return copy_to_user_hvm(dst, src, len);
+
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return len;
+
+    return copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_copy_from_guest(void *dst, const void *src,
+                                               unsigned int len)
+{
+    if ( is_hvm_vcpu(current) )
+        return copy_from_user_hvm(dst, src, len);
+
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return len;
+
+    return copy_from_guest_pv(dst, src, len);
+}
+
+static inline unsigned int raw_clear_guest(void *dst, unsigned int len)
+{
+    if ( is_hvm_vcpu(current) )
+        return clear_user_hvm(dst, len);
+
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return len;
+
+    return clear_guest_pv(dst, len);
+}
+
+static inline unsigned int __raw_copy_to_guest(void *dst, const void *src,
+                                               unsigned int len)
+{
+    if ( is_hvm_vcpu(current) )
+        return copy_to_user_hvm(dst, src, len);
+
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return len;
+
+    return __copy_to_guest_pv(dst, src, len);
+}
+
+static inline unsigned int __raw_copy_from_guest(void *dst, const void *src,
+                                                 unsigned int len)
+{
+    if ( is_hvm_vcpu(current) )
+        return copy_from_user_hvm(dst, src, len);
+
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return len;
+
+    return __copy_from_guest_pv(dst, src, len);
+}
 
 /*
  * Pre-validate a guest handle.
-- 
2.34.1
