Clean up a handful of checkpatch warnings:
- WARNING: Prefer 'long' over 'long int' as the int is unnecessary
- WARNING: Missing a blank line after declarations
- WARNING: Prefer 'unsigned int *' to bare use of 'unsigned *'
- WARNING: unnecessary whitespace before a quoted newline
- WARNING: please, no spaces at the start of a line
- WARNING: Comparisons should place the constant on the right
side of the test

Signed-off-by: XueBing Chen <chenxueb...@jari.cn>
---
 arch/powerpc/kvm/book3s_hv_nested.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 9d373f8963ee..af56958b0a28 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -288,7 +288,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
 
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 {
-       long int err, r;
+       long err, r;
        struct kvm_nested_guest *l2;
        struct pt_regs l2_regs, saved_l1_regs;
        struct hv_guest_state l2_hv = {0}, saved_l1_hv;
@@ -430,7 +430,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 
 long kvmhv_nested_init(void)
 {
-       long int ptb_order;
+       long ptb_order;
        unsigned long ptcr;
        long rc;
 
@@ -646,6 +646,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
        ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
        if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
                int srcu_idx = srcu_read_lock(&kvm->srcu);
+
                ret = kvm_read_guest(kvm, ptbl_addr,
                                     &ptbl_entry, sizeof(ptbl_entry));
                srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -849,7 +850,7 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
 }
 
 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
-                                unsigned long ea, unsigned *hshift)
+                                unsigned long ea, unsigned int *hshift)
 {
        struct kvm_nested_guest *gp;
        pte_t *pte;
@@ -859,7 +860,7 @@ pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
                return NULL;
 
        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
-               "%s called with kvm mmu_lock not held \n", __func__);
+               "%s called with kvm mmu_lock not held\n", __func__);
        pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
 
        return pte;
@@ -1003,6 +1004,7 @@ void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
 
        for (; gfn < end_gfn; gfn++) {
                unsigned long *rmap = &memslot->arch.rmap[gfn];
+
                kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
        }
 }
@@ -1475,7 +1477,7 @@ static inline int kvmppc_radix_shift_to_level(int shift)
 }
 
 /* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
+static long __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
                                          struct kvm_nested_guest *gp)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1491,7 +1493,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        unsigned int shift, l1_shift, level;
        bool writing = !!(dsisr & DSISR_ISSTORE);
        bool kvm_ro = false;
-       long int ret;
+       long ret;
 
        if (!gp->l1_gr_to_hr) {
                kvmhv_update_ptbl_cache(gp);
@@ -1614,7 +1616,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        if (shift > l1_shift) {
                u64 mask;
                unsigned int actual_shift = PAGE_SHIFT;
-               if (PMD_SHIFT < l1_shift)
+
+               if (l1_shift > PMD_SHIFT)
                        actual_shift = PMD_SHIFT;
                mask = (1UL << shift) - (1UL << actual_shift);
                pte = __pte(pte_val(pte) | (gpa & mask));
@@ -1644,10 +1647,10 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        return RESUME_GUEST;
 }
 
-long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
+long kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
 {
        struct kvm_nested_guest *gp = vcpu->arch.nested;
-       long int ret;
+       long ret;
 
        mutex_lock(&gp->tlb_lock);
        ret = __kvmhv_nested_page_fault(vcpu, gp);
-- 
2.36.1

Reply via email to