Module Name: src Committed By: skrll Date: Wed Aug 12 13:28:46 UTC 2020
Modified Files: src/sys/arch/aarch64/conf: files.aarch64 src/sys/arch/aarch64/include: mutex.h src/sys/arch/evbarm/include: mutex.h Added Files: src/sys/arch/aarch64/aarch64: lock_stubs.S Log Message: Part III of ad's performance improvements for aarch64 - Assembly language stubs for mutex_enter() and mutex_exit(). To generate a diff of this commit: cvs rdiff -u -r0 -r1.1 src/sys/arch/aarch64/aarch64/lock_stubs.S cvs rdiff -u -r1.26 -r1.27 src/sys/arch/aarch64/conf/files.aarch64 cvs rdiff -u -r1.1 -r1.2 src/sys/arch/aarch64/include/mutex.h cvs rdiff -u -r1.2 -r1.3 src/sys/arch/evbarm/include/mutex.h Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/aarch64/conf/files.aarch64 diff -u src/sys/arch/aarch64/conf/files.aarch64:1.26 src/sys/arch/aarch64/conf/files.aarch64:1.27 --- src/sys/arch/aarch64/conf/files.aarch64:1.26 Sat Jul 25 22:51:57 2020 +++ src/sys/arch/aarch64/conf/files.aarch64 Wed Aug 12 13:28:46 2020 @@ -1,4 +1,4 @@ -# $NetBSD: files.aarch64,v 1.26 2020/07/25 22:51:57 riastradh Exp $ +# $NetBSD: files.aarch64,v 1.27 2020/08/12 13:28:46 skrll Exp $ defflag opt_cpuoptions.h AARCH64_ALIGNMENT_CHECK defflag opt_cpuoptions.h AARCH64_EL0_STACK_ALIGNMENT_CHECK @@ -100,6 +100,7 @@ file arch/aarch64/aarch64/exec_machdep.c file arch/aarch64/aarch64/fusu.S file arch/aarch64/aarch64/idle_machdep.S file arch/aarch64/aarch64/kobj_machdep.c modular +file arch/aarch64/aarch64/lock_stubs.S file arch/aarch64/aarch64/process_machdep.c file arch/aarch64/aarch64/procfs_machdep.c procfs file arch/aarch64/aarch64/sig_machdep.c Index: src/sys/arch/aarch64/include/mutex.h diff -u src/sys/arch/aarch64/include/mutex.h:1.1 src/sys/arch/aarch64/include/mutex.h:1.2 --- src/sys/arch/aarch64/include/mutex.h:1.1 Sun Aug 10 05:47:38 2014 +++ src/sys/arch/aarch64/include/mutex.h Wed Aug 12 13:28:46 2020 @@ -1,3 +1,5 @@ -/* $NetBSD: mutex.h,v 1.1 2014/08/10 05:47:38 matt Exp $ */ +/* $NetBSD: mutex.h,v 1.2 2020/08/12 13:28:46 skrll Exp $ */ #include <arm/mutex.h> + +#define __HAVE_MUTEX_STUBS 1 Index: src/sys/arch/evbarm/include/mutex.h diff -u src/sys/arch/evbarm/include/mutex.h:1.2 src/sys/arch/evbarm/include/mutex.h:1.3 --- src/sys/arch/evbarm/include/mutex.h:1.2 Fri Feb 9 21:55:03 2007 +++ src/sys/arch/evbarm/include/mutex.h Wed Aug 12 13:28:46 2020 @@ -1,3 +1,7 @@ -/* $NetBSD: mutex.h,v 1.2 2007/02/09 21:55:03 ad Exp $ */ +/* $NetBSD: mutex.h,v 1.3 2020/08/12 13:28:46 skrll Exp $ */ +#ifdef __aarch64__ +#include <aarch64/mutex.h> +#else #include <arm/mutex.h> +#endif Added files: Index: src/sys/arch/aarch64/aarch64/lock_stubs.S diff -u /dev/null src/sys/arch/aarch64/aarch64/lock_stubs.S:1.1 
--- /dev/null Wed Aug 12 13:28:46 2020 +++ src/sys/arch/aarch64/aarch64/lock_stubs.S Wed Aug 12 13:28:46 2020 @@ -0,0 +1,81 @@ +/* $NetBSD: lock_stubs.S,v 1.1 2020/08/12 13:28:46 skrll Exp $ */ + +/*- + * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Matt Thomas of 3am Software Foundry, and by Andrew Doran. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "opt_lockdebug.h" + +#include <aarch64/asm.h> + +#include "assym.h" + +RCSID("$NetBSD: lock_stubs.S,v 1.1 2020/08/12 13:28:46 skrll Exp $") + +#ifndef LOCKDEBUG +/* + * mutex_enter(): the compare-and-set must be atomic with respect to + * interrupts and with respect to other CPUs. + */ +ENTRY(mutex_enter) + mrs x1, tpidr_el1 /* x1 = curlwp */ +1: + ldxr x2, [x0] /* load old value */ + cbnz x2, 3f /* equals zero? */ + stxr w3, x1, [x0] /* store curlwp as new value */ + cbnz w3, 2f /* succeed? nope, try again. */ + dmb sy /* membar_enter() */ + ret +2: + b 1b +3: + b _C_LABEL(mutex_vector_enter) +END(mutex_enter) + +/* + * mutex_exit(): the compare-and-set need only be atomic with respect + * to interrupts. the cheapest way to achieve that may be to use a + * restartable sequence, but the code to do that would be quite involved, + * so just use ldxr+stxr to achieve the same. + */ +ENTRY(mutex_exit) + dmb sy /* membar_exit() */ + mrs x1, tpidr_el1 /* x1 = curlwp */ +1: + ldxr x2, [x0] /* load old value */ + cmp x1, x2 /* equals curlwp? */ + b.ne 3f /* slow path if different */ + stxr w3, xzr, [x0] /* store zero as new value */ + cbnz w3, 2f /* succeed? nope, try again. */ + ret +2: + b 1b +3: + b _C_LABEL(mutex_vector_exit) +END(mutex_exit) +#endif /* !LOCKDEBUG */