Module Name:    src
Committed By:   msaitoh
Date:           Tue Apr 11 10:07:12 UTC 2023

Modified Files:
        src/sys/dev/tprof: tprof.c tprof_armv7.c tprof_armv8.c tprof_types.h
            tprof_x86_amd.c tprof_x86_intel.c

Log Message:
KNF. No functional change.
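
(Editor's note: KNF is Kernel Normal Form, the NetBSD kernel coding style
documented in /usr/share/misc/style. A minimal before/after sketch of the
conventions applied throughout this commit: braces dropped around
single-statement bodies, comments starting with a capital letter, a blank
line opening function bodies that have no local declarations, hex constants
in lowercase, and long lines wrapped at 80 columns. The functions below are
hypothetical illustrations, not taken from the diff:

    #include <errno.h>

    /* Before the cleanup: */
    static int
    example_old(int version)
    {
            if (version != 1) {
                    return EINVAL;  /* braces around a single statement */
            }
            /* comment in lowercase */
            return 0;
    }

    /* After the cleanup, in KNF: */
    static int
    example_new(int version)
    {

            if (version != 1)
                    return EINVAL;

            /* Comment capitalized */
            return 0;
    })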


To generate a diff of this commit:
cvs rdiff -u -r1.22 -r1.23 src/sys/dev/tprof/tprof.c
cvs rdiff -u -r1.12 -r1.13 src/sys/dev/tprof/tprof_armv7.c
cvs rdiff -u -r1.19 -r1.20 src/sys/dev/tprof/tprof_armv8.c
cvs rdiff -u -r1.6 -r1.7 src/sys/dev/tprof/tprof_types.h
cvs rdiff -u -r1.7 -r1.8 src/sys/dev/tprof/tprof_x86_amd.c \
    src/sys/dev/tprof/tprof_x86_intel.c
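
(Editor's note: the rdiff output is a unified diff with paths rooted at
src/, so it can be applied to a checked-out tree with patch(1), run from
the directory containing src/, e.g.:

    cvs rdiff -u -r1.22 -r1.23 src/sys/dev/tprof/tprof.c | patch -p0)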

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/tprof/tprof.c
diff -u src/sys/dev/tprof/tprof.c:1.22 src/sys/dev/tprof/tprof.c:1.23
--- src/sys/dev/tprof/tprof.c:1.22	Fri Dec 16 17:38:56 2022
+++ src/sys/dev/tprof/tprof.c	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: tprof.c,v 1.22 2022/12/16 17:38:56 ryo Exp $	*/
+/*	$NetBSD: tprof.c,v 1.23 2023/04/11 10:07:12 msaitoh Exp $	*/
 
 /*-
  * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.22 2022/12/16 17:38:56 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.23 2023/04/11 10:07:12 msaitoh Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -213,7 +213,7 @@ tprof_worker(struct work *wk, void *dumm
 	KASSERT(dummy == NULL);
 
 	/*
-	 * get a per cpu buffer.
+	 * Get a per cpu buffer.
 	 */
 	buf = tprof_buf_refresh();
 
@@ -245,12 +245,11 @@ tprof_worker(struct work *wk, void *dumm
 		tprof_stat.ts_dropbuf++;
 	}
 	mutex_exit(&tprof_lock);
-	if (buf) {
+	if (buf)
 		tprof_buf_free(buf);
-	}
-	if (!shouldstop) {
+
+	if (!shouldstop)
 		callout_schedule(&c->c_callout, hz / 8);
-	}
 }
 
 static void
@@ -276,9 +275,9 @@ tprof_stop1(void)
 		tprof_buf_t *old;
 
 		old = tprof_buf_switch(c, NULL);
-		if (old != NULL) {
+		if (old != NULL)
 			tprof_buf_free(old);
-		}
+
 		callout_destroy(&c->c_callout);
 	}
 	workqueue_destroy(tprof_wq);
@@ -293,9 +292,8 @@ tprof_getinfo(struct tprof_info *info)
 
 	memset(info, 0, sizeof(*info));
 	info->ti_version = TPROF_VERSION;
-	if ((tb = tprof_backend) != NULL) {
+	if ((tb = tprof_backend) != NULL)
 		info->ti_ident = tb->tb_ops->tbo_ident();
-	}
 }
 
 static int
@@ -351,8 +349,8 @@ tprof_start(tprof_countermask_t runmask)
 	runmask &= tb->tb_softc.sc_ctr_configured_mask;
 	if (runmask == 0) {
 		/*
-		 * targets are already running.
-		 * unconfigured counters are ignored.
+		 * Targets are already running.
+		 * Unconfigured counters are ignored.
 		 */
 		error = 0;
 		goto done;
@@ -427,7 +425,7 @@ tprof_stop(tprof_countermask_t stopmask)
 	KASSERT(mutex_owned(&tprof_startstop_lock));
 	stopmask &= tb->tb_softc.sc_ctr_running_mask;
 	if (stopmask == 0) {
-		/* targets are not running */
+		/* Targets are not running */
 		goto done;
 	}
 
@@ -437,13 +435,13 @@ tprof_stop(tprof_countermask_t stopmask)
 	tb->tb_softc.sc_ctr_running_mask &= ~stopmask;
 	mutex_exit(&tprof_lock);
 
-	/* all counters have stopped? */
+	/* All counters have stopped? */
 	if (tb->tb_softc.sc_ctr_running_mask == 0) {
 		mutex_enter(&tprof_lock);
 		cv_broadcast(&tprof_reader_cv);
-		while (tprof_nworker > 0) {
+		while (tprof_nworker > 0)
 			cv_wait(&tprof_cv, &tprof_lock);
-		}
+
 		mutex_exit(&tprof_lock);
 
 		tprof_stop1();
@@ -516,7 +514,7 @@ tprof_configure_event(const tprof_param_
 	    tb->tb_ops->tbo_counter_bitwidth(param->p_counter);
 
 	sc_param = &sc->sc_count[c].ctr_param;
-	memcpy(sc_param, param, sizeof(*sc_param));	/* save copy of param */
+	memcpy(sc_param, param, sizeof(*sc_param)); /* save copy of param */
 
 	if (ISSET(param->p_flags, TPROF_PARAM_PROFILE)) {
 		uint64_t freq, inum, dnum;
@@ -618,9 +616,8 @@ tprof_getcounts_cpu(void *arg1, void *ar
 			counters[c] = counters_offset[c] +
 			    ((ctr - sc->sc_count[c].ctr_counter_reset_val) &
 			    __BITS(sc->sc_count[c].ctr_bitwidth - 1, 0));
-		} else {
+		} else
 			counters[c] = 0;
-		}
 	}
 	percpu_putref(sc->sc_ctr_offset_percpu);
 }
@@ -741,9 +738,8 @@ tprof_backend_register(const char *name,
 {
 	tprof_backend_t *tb;
 
-	if (vers != TPROF_BACKEND_VERSION) {
+	if (vers != TPROF_BACKEND_VERSION)
 		return EINVAL;
-	}
 
 	mutex_enter(&tprof_startstop_lock);
 	tb = tprof_backend_lookup(name);
@@ -768,7 +764,7 @@ tprof_backend_register(const char *name,
 #endif
 	mutex_exit(&tprof_startstop_lock);
 
-	/* init backend softc */
+	/* Init backend softc */
 	tb->tb_softc.sc_ncounters = tb->tb_ops->tbo_ncounters();
 	tb->tb_softc.sc_ctr_offset_percpu_size =
 	    sizeof(uint64_t) * tb->tb_softc.sc_ncounters;
@@ -800,9 +796,8 @@ tprof_backend_unregister(const char *nam
 		return EBUSY;
 	}
 #if 1 /* XXX for now */
-	if (tprof_backend == tb) {
+	if (tprof_backend == tb)
 		tprof_backend = NULL;
-	}
 #endif
 	LIST_REMOVE(tb, tb_list);
 	mutex_exit(&tprof_startstop_lock);
@@ -811,7 +806,7 @@ tprof_backend_unregister(const char *nam
 	percpu_free(tb->tb_softc.sc_ctr_offset_percpu,
 	    tb->tb_softc.sc_ctr_offset_percpu_size);
 
-	/* free backend */
+	/* Free backend */
 	kmem_free(tb, sizeof(*tb));
 
 	return 0;
@@ -823,9 +818,9 @@ static int
 tprof_open(dev_t dev, int flags, int type, struct lwp *l)
 {
 
-	if (minor(dev) != 0) {
+	if (minor(dev) != 0)
 		return EXDEV;
-	}
+
 	mutex_enter(&tprof_lock);
 	if (tprof_owner != NULL) {
 		mutex_exit(&tprof_lock);
@@ -953,7 +948,7 @@ tprof_read(dev_t dev, struct uio *uio, i
 	mutex_enter(&tprof_reader_lock);
 	while (uio->uio_resid > 0 && error == 0) {
 		/*
-		 * take the first buffer from the list.
+		 * Take the first buffer from the list.
 		 */
 		mutex_enter(&tprof_lock);
 		buf = STAILQ_FIRST(&tprof_list);
@@ -975,7 +970,7 @@ tprof_read(dev_t dev, struct uio *uio, i
 		mutex_exit(&tprof_lock);
 
 		/*
-		 * copy it out.
+		 * Copy it out.
 		 */
 		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
 		    tprof_reader_offset, uio->uio_resid);
@@ -986,7 +981,7 @@ tprof_read(dev_t dev, struct uio *uio, i
 		tprof_reader_offset += done;
 
 		/*
-		 * if we didn't consume the whole buffer,
+		 * If we didn't consume the whole buffer,
 		 * put it back to the list.
 		 */
 		if (tprof_reader_offset <
@@ -1080,7 +1075,7 @@ void
 tprofattach(int nunits)
 {
 
-	/* nothing */
+	/* Nothing */
 }
 
 MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

Index: src/sys/dev/tprof/tprof_armv7.c
diff -u src/sys/dev/tprof/tprof_armv7.c:1.12 src/sys/dev/tprof/tprof_armv7.c:1.13
--- src/sys/dev/tprof/tprof_armv7.c:1.12	Thu Dec 22 06:59:32 2022
+++ src/sys/dev/tprof/tprof_armv7.c	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: tprof_armv7.c,v 1.12 2022/12/22 06:59:32 ryo Exp $ */
+/* $NetBSD: tprof_armv7.c,v 1.13 2023/04/11 10:07:12 msaitoh Exp $ */
 
 /*-
  * Copyright (c) 2018 Jared McNeill <jmcne...@invisible.ca>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tprof_armv7.c,v 1.12 2022/12/22 06:59:32 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tprof_armv7.c,v 1.13 2023/04/11 10:07:12 msaitoh Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -117,7 +117,7 @@ armv7_pmu_get_pmevcntr(u_int counter)
 	return armreg_pmxevcntr_read();
 }
 
-/* read and write at once */
+/* Read and write at once */
 static inline uint64_t
 armv7_pmu_getset_pmevcntr(u_int counter, uint64_t val)
 {
@@ -190,7 +190,7 @@ armv7_pmu_configure_event(u_int counter,
 	/* Clear overflow flag */
 	armreg_pmovsr_write(__BIT(counter) & PMOVS_P);
 
-	/* reset the counter */
+	/* Reset the counter */
 	armv7_pmu_set_pmevcntr(counter, param->p_value);
 }
 
@@ -235,13 +235,13 @@ armv7_pmu_intr(void *priv)
 		CLR(mask, __BIT(bit));
 
 		if (ISSET(sc->sc_ctr_prof_mask, __BIT(bit))) {
-			/* account for the counter, and reset */
+			/* Account for the counter, and reset */
 			uint64_t ctr = armv7_pmu_getset_pmevcntr(bit,
 			    sc->sc_count[bit].ctr_counter_reset_val);
 			counters_offset[bit] +=
 			    sc->sc_count[bit].ctr_counter_val + ctr;
 
-			/* record a sample */
+			/* Record a sample */
 			tfi.tfi_pc = tf->tf_pc;
 			tfi.tfi_counter = bit;
 			tfi.tfi_inkernel =
@@ -249,7 +249,7 @@ armv7_pmu_intr(void *priv)
 			    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
 			tprof_sample(NULL, &tfi);
 		} else if (ISSET(sc->sc_ctr_ovf_mask, __BIT(bit))) {
-			/* counter has overflowed */
+			/* Counter has overflowed */
 			counters_offset[bit] += __BIT(32);
 		}
 	}

Index: src/sys/dev/tprof/tprof_armv8.c
diff -u src/sys/dev/tprof/tprof_armv8.c:1.19 src/sys/dev/tprof/tprof_armv8.c:1.20
--- src/sys/dev/tprof/tprof_armv8.c:1.19	Thu Dec 22 06:59:32 2022
+++ src/sys/dev/tprof/tprof_armv8.c	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: tprof_armv8.c,v 1.19 2022/12/22 06:59:32 ryo Exp $ */
+/* $NetBSD: tprof_armv8.c,v 1.20 2023/04/11 10:07:12 msaitoh Exp $ */
 
 /*-
  * Copyright (c) 2018 Jared McNeill <jmcne...@invisible.ca>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tprof_armv8.c,v 1.19 2022/12/22 06:59:32 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tprof_armv8.c,v 1.20 2023/04/11 10:07:12 msaitoh Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -94,7 +94,7 @@ armv8_pmu_get_pmevcntr(u_int counter)
 	return reg_pmxevcntr_el0_read();
 }
 
-/* read and write at once */
+/* Read and write at once */
 static inline uint64_t
 armv8_pmu_getset_pmevcntr(u_int counter, uint64_t val)
 {
@@ -162,7 +162,7 @@ armv8_pmu_configure_event(u_int counter,
 	/* Clear overflow flag */
 	reg_pmovsclr_el0_write(__BIT(counter) & PMOVS_P);
 
-	/* reset the counter */
+	/* Reset the counter */
 	armv8_pmu_set_pmevcntr(counter, param->p_value);
 }
 
@@ -207,13 +207,13 @@ armv8_pmu_intr(void *priv)
 		CLR(mask, __BIT(bit));
 
 		if (ISSET(sc->sc_ctr_prof_mask, __BIT(bit))) {
-			/* account for the counter, and reset */
+			/* Account for the counter, and reset */
 			uint64_t ctr = armv8_pmu_getset_pmevcntr(bit,
 			    sc->sc_count[bit].ctr_counter_reset_val);
 			counters_offset[bit] +=
 			    sc->sc_count[bit].ctr_counter_val + ctr;
 
-			/* record a sample */
+			/* Record a sample */
 			tfi.tfi_pc = tf->tf_pc;
 			tfi.tfi_counter = bit;
 			tfi.tfi_inkernel =
@@ -221,7 +221,7 @@ armv8_pmu_intr(void *priv)
 			    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
 			tprof_sample(NULL, &tfi);
 		} else if (ISSET(sc->sc_ctr_ovf_mask, __BIT(bit))) {
-			/* counter has overflowed */
+			/* Counter has overflowed */
 			counters_offset[bit] += __BIT(32);
 		}
 	}

Index: src/sys/dev/tprof/tprof_types.h
diff -u src/sys/dev/tprof/tprof_types.h:1.6 src/sys/dev/tprof/tprof_types.h:1.7
--- src/sys/dev/tprof/tprof_types.h:1.6	Thu Dec  1 00:32:52 2022
+++ src/sys/dev/tprof/tprof_types.h	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: tprof_types.h,v 1.6 2022/12/01 00:32:52 ryo Exp $	*/
+/*	$NetBSD: tprof_types.h,v 1.7 2023/04/11 10:07:12 msaitoh Exp $	*/
 
 /*-
  * Copyright (c)2010,2011 YAMAMOTO Takashi,
@@ -76,8 +76,8 @@ typedef struct tprof_param {
 	 *   speed ratio. if the counter is N times slower than the cycle
 	 *   counter, p_value2 is (0x1_0000_0000 / N). 0 is treated as 1.0.
 	 * TPROF_PARAM_VALUE2_TRIGGERCOUNT:
-	 *   When the event counter counts up p_value2, an interrupt for profile
-	 *   is generated. 0 is treated as 1.
+	 *   When the event counter counts up p_value2, an interrupt for
+	 *   profile is generated. 0 is treated as 1.
 	 */
 } tprof_param_t;
 

Index: src/sys/dev/tprof/tprof_x86_amd.c
diff -u src/sys/dev/tprof/tprof_x86_amd.c:1.7 src/sys/dev/tprof/tprof_x86_amd.c:1.8
--- src/sys/dev/tprof/tprof_x86_amd.c:1.7	Thu Dec  8 05:29:27 2022
+++ src/sys/dev/tprof/tprof_x86_amd.c	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: tprof_x86_amd.c,v 1.7 2022/12/08 05:29:27 msaitoh Exp $	*/
+/*	$NetBSD: tprof_x86_amd.c,v 1.8 2023/04/11 10:07:12 msaitoh Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tprof_x86_amd.c,v 1.7 2022/12/08 05:29:27 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tprof_x86_amd.c,v 1.8 2023/04/11 10:07:12 msaitoh Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -154,7 +154,7 @@ tprof_amd_configure_event(u_int counter,
 	    __SHIFTIN(param->p_unit, PESR_UNIT_MASK);
 	wrmsr(PERFEVTSEL(counter), pesr);
 
-	/* reset the counter */
+	/* Reset the counter */
 	tprof_amd_counter_write(counter, param->p_value);
 }
 
@@ -202,13 +202,13 @@ tprof_amd_nmi(const struct trapframe *tf
 			continue;	/* not overflowed */
 
 		if (ISSET(sc->sc_ctr_prof_mask, __BIT(bit))) {
-			/* account for the counter, and reset */
+			/* Account for the counter, and reset */
 			tprof_amd_counter_write(bit,
 			    sc->sc_count[bit].ctr_counter_reset_val);
 			counters_offset[bit] +=
 			    sc->sc_count[bit].ctr_counter_val + ctr;
 
-			/* record a sample */
+			/* Record a sample */
 #if defined(__x86_64__)
 			tfi.tfi_pc = tf->tf_rip;
 #else
@@ -218,7 +218,7 @@ tprof_amd_nmi(const struct trapframe *tf
 			tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS;
 			tprof_sample(NULL, &tfi);
 		} else {
-			/* not profiled, but require to consider overflow */
+			/* Not profiled, but require to consider overflow */
 			counters_offset[bit] += __BIT(COUNTER_BITWIDTH);
 		}
 	}
@@ -237,9 +237,8 @@ tprof_amd_ident(void)
 {
 	struct cpu_info *ci = curcpu();
 
-	if (cpu_vendor != CPUVENDOR_AMD) {
+	if (cpu_vendor != CPUVENDOR_AMD)
 		return TPROF_IDENT_NONE;
-	}
 
 	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
 	case 0x10:
@@ -274,9 +273,8 @@ tprof_amd_establish(tprof_backend_softc_
 {
 	uint64_t xc;
 
-	if (tprof_amd_ident() == TPROF_IDENT_NONE) {
+	if (tprof_amd_ident() == TPROF_IDENT_NONE)
 		return ENOTSUP;
-	}
 
 	KASSERT(amd_nmi_handle == NULL);
 	amd_nmi_handle = nmi_establish(tprof_amd_nmi, sc);
Index: src/sys/dev/tprof/tprof_x86_intel.c
diff -u src/sys/dev/tprof/tprof_x86_intel.c:1.7 src/sys/dev/tprof/tprof_x86_intel.c:1.8
--- src/sys/dev/tprof/tprof_x86_intel.c:1.7	Tue Apr 11 09:53:28 2023
+++ src/sys/dev/tprof/tprof_x86_intel.c	Tue Apr 11 10:07:12 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: tprof_x86_intel.c,v 1.7 2023/04/11 09:53:28 msaitoh Exp $	*/
+/*	$NetBSD: tprof_x86_intel.c,v 1.8 2023/04/11 10:07:12 msaitoh Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tprof_x86_intel.c,v 1.7 2023/04/11 09:53:28 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tprof_x86_intel.c,v 1.8 2023/04/11 10:07:12 msaitoh Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -114,18 +114,21 @@ tprof_intel_ncounters(void)
 static u_int
 tprof_intel_counter_bitwidth(u_int counter)
 {
+
 	return counter_bitwidth;
 }
 
 static inline void
 tprof_intel_counter_write(u_int counter, uint64_t val)
 {
+
 	wrmsr(PERFCTR(counter), val);
 }
 
 static inline uint64_t
 tprof_intel_counter_read(u_int counter)
 {
+
 	return rdmsr(PERFCTR(counter));
 }
 
@@ -142,7 +145,7 @@ tprof_intel_configure_event(u_int counte
 	    PERFEVTSEL_INT;
 	wrmsr(PERFEVTSEL(counter), evtval);
 
-	/* reset the counter */
+	/* Reset the counter */
 	tprof_intel_counter_write(counter, param->p_value);
 }
 
@@ -166,7 +169,8 @@ tprof_intel_stop(tprof_countermask_t sto
 	while ((bit = ffs(stopmask)) != 0) {
 		bit--;
 		CLR(stopmask, __BIT(bit));
-		wrmsr(PERFEVTSEL(bit), rdmsr(PERFEVTSEL(bit)) & ~PERFEVTSEL_EN);
+		wrmsr(PERFEVTSEL(bit), rdmsr(PERFEVTSEL(bit)) &
+		    ~PERFEVTSEL_EN);
 	}
 }
 
@@ -191,13 +195,13 @@ tprof_intel_nmi(const struct trapframe *
 			continue;	/* not overflowed */
 
 		if (ISSET(sc->sc_ctr_prof_mask, __BIT(bit))) {
-			/* account for the counter, and reset */
+			/* Account for the counter, and reset */
 			tprof_intel_counter_write(bit,
 			    sc->sc_count[bit].ctr_counter_reset_val);
 			counters_offset[bit] +=
 			    sc->sc_count[bit].ctr_counter_val + ctr;
 
-			/* record a sample */
+			/* Record a sample */
 #if defined(__x86_64__)
 			tfi.tfi_pc = tf->tf_rip;
 #else
@@ -207,12 +211,12 @@ tprof_intel_nmi(const struct trapframe *
 			tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS;
 			tprof_sample(NULL, &tfi);
 		} else {
-			/* not profiled, but require to consider overflow */
+			/* Not profiled, but require to consider overflow */
 			counters_offset[bit] += __BIT(counter_bitwidth);
 		}
 	}
 
-	/* unmask PMI */
+	/* Unmask PMI */
 	pcint = lapic_readreg(LAPIC_LVT_PCINT);
 	KASSERT((pcint & LAPIC_LVT_MASKED) != 0);
 	lapic_writereg(LAPIC_LVT_PCINT, pcint & ~LAPIC_LVT_MASKED);
@@ -223,6 +227,7 @@ tprof_intel_nmi(const struct trapframe *
 static uint64_t
 tprof_intel_counter_estimate_freq(u_int counter)
 {
+
 	return curcpu()->ci_data.cpu_cc_freq;
 }
 
@@ -231,20 +236,18 @@ tprof_intel_ident(void)
 {
 	uint32_t descs[4];
 
-	if (cpu_vendor != CPUVENDOR_INTEL) {
+	if (cpu_vendor != CPUVENDOR_INTEL)
 		return TPROF_IDENT_NONE;
-	}
 
-	if (cpuid_level < 0x0A) {
+	if (cpuid_level < 0x0a)
 		return TPROF_IDENT_NONE;
-	}
-	x86_cpuid(0x0A, descs);
-	if ((descs[0] & CPUID_PERF_VERSION) == 0) {
+
+	x86_cpuid(0x0a, descs);
+	if ((descs[0] & CPUID_PERF_VERSION) == 0)
 		return TPROF_IDENT_NONE;
-	}
-	if ((descs[0] & CPUID_PERF_NGPPC) == 0) {
+
+	if ((descs[0] & CPUID_PERF_NGPPC) == 0)
 		return TPROF_IDENT_NONE;
-	}
 
 	counter_bitwidth = __SHIFTOUT(descs[0], CPUID_PERF_NBWGPPC);
 
@@ -273,9 +276,8 @@ tprof_intel_establish(tprof_backend_soft
 {
 	uint64_t xc;
 
-	if (tprof_intel_ident() == TPROF_IDENT_NONE) {
+	if (tprof_intel_ident() == TPROF_IDENT_NONE)
 		return ENOTSUP;
-	}
 
 	KASSERT(intel_nmi_handle == NULL);
 	intel_nmi_handle = nmi_establish(tprof_intel_nmi, sc);
