Module Name:    src
Committed By:   matt
Date:           Fri Jun 26 14:27:35 UTC 2015
Modified Files:
	src/sys/arch/riscv/include: lock.h
Added Files:
	src/sys/sys: common_lock.h

Log Message:
Move the riscv lock.h, which uses only compiler builtin atomic primitives,
to a common location which can be used by other ports, and make riscv's
lock.h use it.


To generate a diff of this commit:
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/riscv/include/lock.h
cvs rdiff -u -r0 -r1.1 src/sys/sys/common_lock.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
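For context: the new sys/common_lock.h (see the diff below) defines only the
inline lock operations; it expects __cpu_simple_lock_t, __SIMPLELOCK_LOCKED
and __SIMPLELOCK_UNLOCKED to be supplied by the port's machine-dependent
headers, as riscv already does. A minimal sketch of how another port could
adopt it, mirroring what riscv's lock.h becomes in this commit (the port path
below is hypothetical, not part of this commit):

	/* sys/arch/<port>/include/lock.h -- hypothetical adopting port */
	/*
	 * __cpu_simple_lock_t, __SIMPLELOCK_LOCKED and __SIMPLELOCK_UNLOCKED
	 * are assumed to be defined by this port's machine-dependent headers.
	 */
	#include <sys/common_lock.h>
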
Modified files:

Index: src/sys/arch/riscv/include/lock.h
diff -u src/sys/arch/riscv/include/lock.h:1.3 src/sys/arch/riscv/include/lock.h:1.4
--- src/sys/arch/riscv/include/lock.h:1.3	Fri Jun 26 14:20:11 2015
+++ src/sys/arch/riscv/include/lock.h	Fri Jun 26 14:27:35 2015
@@ -1,100 +1,3 @@
-/*	$NetBSD: lock.h,v 1.3 2015/06/26 14:20:11 matt Exp $	*/
+/*	$NetBSD: lock.h,v 1.4 2015/06/26 14:27:35 matt Exp $	*/
 
-/*-
- * Copyright (c) 2014 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas of 3am Software Foundry.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*
- * Machine-dependent spin lock operations.
- */
-
-#ifndef _RISCV_LOCK_H_
-#define _RISCV_LOCK_H_
-
-static __inline int
-__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
-{
-	return *__ptr != __SIMPLELOCK_UNLOCKED;
-}
-
-static __inline int
-__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
-{
-	return *__ptr == __SIMPLELOCK_UNLOCKED;
-}
-
-static __inline void
-__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
-{
-#if 1
-	*__ptr = __SIMPLELOCK_UNLOCKED;
-#else
-	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELAXED);
-#endif
-}
-
-static __inline void
-__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
-{
-#if 1
-	*__ptr = __SIMPLELOCK_LOCKED;
-#else
-	__atomic_store_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_RELAXED);
-#endif
-}
-
-static __inline void __unused
-__cpu_simple_lock_init(__cpu_simple_lock_t *__ptr)
-{
-#if 1
-	*__ptr = __SIMPLELOCK_UNLOCKED;
-#else
-	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELAXED);
-#endif
-}
-
-static __inline void __unused
-__cpu_simple_lock(__cpu_simple_lock_t *__ptr)
-{
-	while (__atomic_exchange_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_ACQUIRE) == __SIMPLELOCK_LOCKED) {
-		/* do nothing */
-	}
-}
-
-static __inline int __unused
-__cpu_simple_lock_try(__cpu_simple_lock_t *__ptr)
-{
-	return __atomic_exchange_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_ACQUIRE) == __SIMPLELOCK_UNLOCKED;
-}
-
-static __inline void __unused
-__cpu_simple_unlock(__cpu_simple_lock_t *__ptr)
-{
-	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELEASE);
-}
-
-#endif /* _RISCV_LOCK_H_ */
+#include <sys/common_lock.h>

Added files:

Index: src/sys/sys/common_lock.h
diff -u /dev/null src/sys/sys/common_lock.h:1.1
--- /dev/null	Fri Jun 26 14:27:35 2015
+++ src/sys/sys/common_lock.h	Fri Jun 26 14:27:35 2015
@@ -0,0 +1,102 @@
+/*	$NetBSD: common_lock.h,v 1.1 2015/06/26 14:27:35 matt Exp $	*/
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Machine-dependent spin lock operations using the builtin compiler atomic
+ * primitives.
+ */
+
+#ifndef _SYS_COMMON_LOCK_H_
+#define _SYS_COMMON_LOCK_H_
+
+static __inline int
+__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
+{
+	return *__ptr != __SIMPLELOCK_UNLOCKED;
+}
+
+static __inline int
+__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
+{
+	return *__ptr == __SIMPLELOCK_UNLOCKED;
+}
+
+static __inline void
+__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
+{
+#if 1
+	*__ptr = __SIMPLELOCK_UNLOCKED;
+#else
+	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELAXED);
+#endif
+}
+
+static __inline void
+__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
+{
+#if 1
+	*__ptr = __SIMPLELOCK_LOCKED;
+#else
+	__atomic_store_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_RELAXED);
+#endif
+}
+
+static __inline void __unused
+__cpu_simple_lock_init(__cpu_simple_lock_t *__ptr)
+{
+#if 1
+	*__ptr = __SIMPLELOCK_UNLOCKED;
+#else
+	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELAXED);
+#endif
+}
+
+static __inline void __unused
+__cpu_simple_lock(__cpu_simple_lock_t *__ptr)
+{
+	while (__atomic_exchange_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_ACQUIRE) == __SIMPLELOCK_LOCKED) {
+		/* do nothing */
+	}
+}
+
+static __inline int __unused
+__cpu_simple_lock_try(__cpu_simple_lock_t *__ptr)
+{
+	return __atomic_exchange_n(__ptr, __SIMPLELOCK_LOCKED, __ATOMIC_ACQUIRE) == __SIMPLELOCK_UNLOCKED;
+}
+
+static __inline void __unused
+__cpu_simple_unlock(__cpu_simple_lock_t *__ptr)
+{
+	__atomic_store_n(__ptr, __SIMPLELOCK_UNLOCKED, __ATOMIC_RELEASE);
+}
+
+#endif /* _SYS_COMMON_LOCK_H_ */
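For reference, a minimal illustrative use of the primitives added above (not
part of this commit; the lock variable and functions below are hypothetical):

	static __cpu_simple_lock_t example_lock;

	static void
	example_init(void)
	{
		/* Put the lock into the unlocked state before first use. */
		__cpu_simple_lock_init(&example_lock);
	}

	static void
	example_critical_section(void)
	{
		/*
		 * Spin until the atomic exchange observes the unlocked value
		 * (acquire ordering), then release it (release ordering).
		 */
		__cpu_simple_lock(&example_lock);
		/* ... code that must not run concurrently ... */
		__cpu_simple_unlock(&example_lock);
	}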