The following commit has been merged into the x86/cleanups branch of tip:

Commit-ID:     32b1cbe380417f2ed80f758791179de6b05795ab
Gitweb:        https://git.kernel.org/tip/32b1cbe380417f2ed80f758791179de6b05795ab
Author:        Marco Ammon <[email protected]>
AuthorDate:    Mon, 02 Sep 2019 14:02:59 +02:00
Committer:     Borislav Petkov <[email protected]>
CommitterDate: Mon, 02 Sep 2019 14:02:59 +02:00

x86: Correct misc typos

Correct spelling typos in comments in different files under arch/x86/.

 [ bp: Merge into a single patch, massage. ]

Signed-off-by: Marco Ammon <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Cc: Daniel Bristot de Oliveira <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Pu Wen <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Cc: "Steven Rostedt (VMware)" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: x86-ml <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
 arch/x86/include/asm/text-patching.h | 4 ++--
 arch/x86/kernel/alternative.c        | 6 +++---
 arch/x86/kernel/kprobes/opt.c        | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 70c0996..5e8319b 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -45,8 +45,8 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
  * no thread can be preempted in the instructions being modified (no iret to an
  * invalid instruction possible) or if the instructions are changed from a
  * consistent state to another consistent state atomically.
- * On the local CPU you need to be protected again NMI or MCE handlers seeing an
- * inconsistent instruction while you patch.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing
+ * an inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ccd3201..9d3a971 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -713,7 +713,7 @@ void __init alternative_instructions(void)
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
-        * Ignoring it is worse than a unlikely patching race.
+        * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during to code
@@ -753,8 +753,8 @@ void __init alternative_instructions(void)
  * When you use this code to patch more than one byte of an instruction
  * you need to make sure that other CPUs cannot execute this code in parallel.
  * Also no thread must be currently preempted in the middle of these
- * instructions. And on the local CPU you need to be protected again NMI or MCE
- * handlers seeing an inconsistent instruction while you patch.
+ * instructions. And on the local CPU you need to be protected against NMI or
+ * MCE handlers seeing an inconsistent instruction while you patch.
  */
 void __init_or_module text_poke_early(void *addr, const void *opcode,
                                      size_t len)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 9d4aede..b348dd5 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -403,7 +403,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                           (u8 *)op->kp.addr + op->optinsn.size);
        len += RELATIVEJUMP_SIZE;
 
-       /* We have to use text_poke for instuction buffer because it is RO */
+       /* We have to use text_poke() for instruction buffer because it is RO */
        text_poke(slot, buf, len);
        ret = 0;
 out:
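
For illustration only (not part of the patch): a minimal sketch of the text_poke() contract that the corrected comments describe. The signature comes from the hunk in text-patching.h above; the function name example_patch_nop and the patch_site pointer are hypothetical, and the caller is assumed to already satisfy the locking and exclusion rules spelled out in the comments (no concurrent execution of the patched code, protection against NMI/MCE handlers observing a half-written instruction).

    #include <linux/types.h>
    #include <asm/text-patching.h>

    /* Hypothetical example: overwrite 5 bytes of kernel text with a NOP. */
    static void example_patch_nop(void *patch_site)
    {
            /* 5-byte x86 NOP (nopl 0x0(%rax,%rax,1)). */
            static const u8 nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

            /*
             * text_poke() deals with the read-only mapping of kernel text;
             * per the comment fixed above, the caller must ensure no other
             * CPU executes this code and that local NMI/MCE handlers cannot
             * see an inconsistent instruction while patching.
             */
            text_poke(patch_site, nop5, sizeof(nop5));
    }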
