Make arch_specific_insn.boostable a boolean, since it has
only 2 states: boostable or not. Using bool instead of int
improves code readability.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
---
 arch/x86/include/asm/kprobes.h   |    7 +++----
 arch/x86/kernel/kprobes/core.c   |   12 ++++++------
 arch/x86/kernel/kprobes/ftrace.c |    2 +-
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 2005816..34b984c 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -72,14 +72,13 @@ struct arch_specific_insn {
        /* copy of the original instruction */
        kprobe_opcode_t *insn;
        /*
-        * boostable = -1: This instruction type is not boostable.
-        * boostable = 0: This instruction type is boostable.
-        * boostable = 1: This instruction has been boosted: we have
+        * boostable = false: This instruction type is not boostable.
+        * boostable = true: This instruction has been boosted: we have
         * added a relative jump after the instruction copy in insn,
         * so no single-step and fixup are needed (unless there's
         * a post_handler or break_handler).
         */
-       int boostable;
+       bool boostable;
        bool if_modifier;
 };
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a654054..3f084a0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -409,9 +409,9 @@ static void prepare_boost(struct kprobe *p, int length)
                 * jumps back to correct address.
                 */
                synthesize_reljump(p->ainsn.insn + length, p->addr + length);
-               p->ainsn.boostable = 1;
+               p->ainsn.boostable = true;
        } else {
-               p->ainsn.boostable = -1;
+               p->ainsn.boostable = false;
        }
 }
 
@@ -467,7 +467,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 void arch_remove_kprobe(struct kprobe *p)
 {
        if (p->ainsn.insn) {
-               free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+               free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
                p->ainsn.insn = NULL;
        }
 }
@@ -539,7 +539,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                return;
 
 #if !defined(CONFIG_PREEMPT)
-       if (p->ainsn.boostable == 1 && !p->post_handler) {
+       if (p->ainsn.boostable && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
                if (!reenter)
                        reset_current_kprobe();
@@ -859,7 +859,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
        case 0xcf:
        case 0xea:      /* jmp absolute -- ip is correct */
                /* ip is already adjusted, no more changes required */
-               p->ainsn.boostable = 1;
+               p->ainsn.boostable = true;
                goto no_change;
        case 0xe8:      /* call relative - Fix return addr */
                *tos = orig_ip + (*tos - copy_ip);
@@ -884,7 +884,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
                         * jmp near and far, absolute indirect
                         * ip is correct. And this is boostable
                         */
-                       p->ainsn.boostable = 1;
+                       p->ainsn.boostable = true;
                        goto no_change;
                }
        default:
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3..041f7b6 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -94,6 +94,6 @@ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
        p->ainsn.insn = NULL;
-       p->ainsn.boostable = -1;
+       p->ainsn.boostable = false;
        return 0;
 }

Reply via email to