CVS commit: src/lib/libc_vfp
Module Name:src Committed By: joerg Date: Thu Jul 19 19:35:02 UTC 2018 Modified Files: src/lib/libc_vfp: vfpdf.S vfpsf.S Log Message: The fpu mode for VFP2 instructions is still just vfp. To generate a diff of this commit: cvs rdiff -u -r1.3 -r1.4 src/lib/libc_vfp/vfpdf.S src/lib/libc_vfp/vfpsf.S Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. Modified files: Index: src/lib/libc_vfp/vfpdf.S diff -u src/lib/libc_vfp/vfpdf.S:1.3 src/lib/libc_vfp/vfpdf.S:1.4 --- src/lib/libc_vfp/vfpdf.S:1.3 Tue Jul 17 15:03:48 2018 +++ src/lib/libc_vfp/vfpdf.S Thu Jul 19 19:35:02 2018 @@ -29,7 +29,7 @@ #include -RCSID("$NetBSD: vfpdf.S,v 1.3 2018/07/17 15:03:48 joerg Exp $") +RCSID("$NetBSD: vfpdf.S,v 1.4 2018/07/19 19:35:02 joerg Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -39,7 +39,7 @@ RCSID("$NetBSD: vfpdf.S,v 1.3 2018/07/17 * This file implements the double precision floating point routines. */ -.fpu vfp2 +.fpu vfp #ifdef __ARMEL__ #define vmov_arg0 vmov d0, r0, r1 Index: src/lib/libc_vfp/vfpsf.S diff -u src/lib/libc_vfp/vfpsf.S:1.3 src/lib/libc_vfp/vfpsf.S:1.4 --- src/lib/libc_vfp/vfpsf.S:1.3 Tue Jul 17 15:03:48 2018 +++ src/lib/libc_vfp/vfpsf.S Thu Jul 19 19:35:02 2018 @@ -30,7 +30,7 @@ #include #include -RCSID("$NetBSD: vfpsf.S,v 1.3 2018/07/17 15:03:48 joerg Exp $") +RCSID("$NetBSD: vfpsf.S,v 1.4 2018/07/19 19:35:02 joerg Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -40,7 +40,7 @@ RCSID("$NetBSD: vfpsf.S,v 1.3 2018/07/17 * This file implements the single precision floating point routines. */ -.fpu vfp2 +.fpu vfp #ifdef __ARM_EABI__ #define __addsf3 __aeabi_fadd
CVS commit: src/lib/libc_vfp
Module Name:src Committed By: joerg Date: Tue Jul 17 15:03:48 UTC 2018 Modified Files: src/lib/libc_vfp: Makefile vfpdf.S vfpsf.S Log Message: Push FPU choice into the assembler sources themselves. To generate a diff of this commit: cvs rdiff -u -r1.4 -r1.5 src/lib/libc_vfp/Makefile cvs rdiff -u -r1.2 -r1.3 src/lib/libc_vfp/vfpdf.S src/lib/libc_vfp/vfpsf.S Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. Modified files: Index: src/lib/libc_vfp/Makefile diff -u src/lib/libc_vfp/Makefile:1.4 src/lib/libc_vfp/Makefile:1.5 --- src/lib/libc_vfp/Makefile:1.4 Wed Jul 8 01:08:24 2015 +++ src/lib/libc_vfp/Makefile Tue Jul 17 15:03:48 2018 @@ -1,16 +1,10 @@ -# $NetBSD: Makefile,v 1.4 2015/07/08 01:08:24 matt Exp $ +# $NetBSD: Makefile,v 1.5 2018/07/17 15:03:48 joerg Exp $ # LIB= c_vfp USE_SHLIBDIR= yes -.include - -CPUFLAGS+= -mfpu=vfp -marm - SRCS= vfpsf.S vfpdf.S -AFLAGS.vfpsf.S+= -mfpu=vfp -AFLAGS.vfpdf.S+= -mfpu=vfp .include Index: src/lib/libc_vfp/vfpdf.S diff -u src/lib/libc_vfp/vfpdf.S:1.2 src/lib/libc_vfp/vfpdf.S:1.3 --- src/lib/libc_vfp/vfpdf.S:1.2 Sun Jun 23 06:19:55 2013 +++ src/lib/libc_vfp/vfpdf.S Tue Jul 17 15:03:48 2018 @@ -29,7 +29,7 @@ #include -RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $") +RCSID("$NetBSD: vfpdf.S,v 1.3 2018/07/17 15:03:48 joerg Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -39,6 +39,8 @@ RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 * This file implements the double precision floating point routines. 
*/ +.fpu vfp2 + #ifdef __ARMEL__ #define vmov_arg0 vmov d0, r0, r1 #define vmov_arg1 vmov d1, r2, r3 Index: src/lib/libc_vfp/vfpsf.S diff -u src/lib/libc_vfp/vfpsf.S:1.2 src/lib/libc_vfp/vfpsf.S:1.3 --- src/lib/libc_vfp/vfpsf.S:1.2 Sun Jun 23 06:19:55 2013 +++ src/lib/libc_vfp/vfpsf.S Tue Jul 17 15:03:48 2018 @@ -30,7 +30,7 @@ #include #include -RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 06:19:55 matt Exp $") +RCSID("$NetBSD: vfpsf.S,v 1.3 2018/07/17 15:03:48 joerg Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -40,6 +40,8 @@ RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 * This file implements the single precision floating point routines. */ +.fpu vfp2 + #ifdef __ARM_EABI__ #define __addsf3 __aeabi_fadd #define __divsf3 __aeabi_fdiv
CVS commit: src/lib/libc_vfp
Module Name:src Committed By: matt Date: Mon Aug 19 22:22:23 UTC 2013 Modified Files: src/lib/libc_vfp: Makefile Log Message: Always compile as arm code. To generate a diff of this commit: cvs rdiff -u -r1.2 -r1.3 src/lib/libc_vfp/Makefile Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. Modified files: Index: src/lib/libc_vfp/Makefile diff -u src/lib/libc_vfp/Makefile:1.2 src/lib/libc_vfp/Makefile:1.3 --- src/lib/libc_vfp/Makefile:1.2 Mon Jan 28 18:57:10 2013 +++ src/lib/libc_vfp/Makefile Mon Aug 19 22:22:23 2013 @@ -1,4 +1,4 @@ -# $NetBSD: Makefile,v 1.2 2013/01/28 18:57:10 matt Exp $ +# $NetBSD: Makefile,v 1.3 2013/08/19 22:22:23 matt Exp $ # LIB= c_vfp @@ -7,7 +7,7 @@ USE_SHLIBDIR= yes .include -CPUFLAGS+= -mfpu=vfp +CPUFLAGS+= -mfpu=vfp -marm SRCS= vfpsf.S vfpdf.S
CVS commit: src/lib/libc_vfp
Module Name:src Committed By: matt Date: Sun Jun 23 06:19:55 UTC 2013 Modified Files: src/lib/libc_vfp: vfpdf.S vfpsf.S Log Message: Add EABI (aeabi) support To generate a diff of this commit: cvs rdiff -u -r1.1 -r1.2 src/lib/libc_vfp/vfpdf.S src/lib/libc_vfp/vfpsf.S Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. Modified files: Index: src/lib/libc_vfp/vfpdf.S diff -u src/lib/libc_vfp/vfpdf.S:1.1 src/lib/libc_vfp/vfpdf.S:1.2 --- src/lib/libc_vfp/vfpdf.S:1.1 Mon Jan 28 17:04:40 2013 +++ src/lib/libc_vfp/vfpdf.S Sun Jun 23 06:19:55 2013 @@ -29,7 +29,7 @@ #include -RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $") +RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -50,6 +50,19 @@ RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 #endif #define vmov_args vmov_arg0; vmov_arg1 +#ifdef __ARM_EABI__ +#define __adddf3 __aeabi_dadd +#define __divdf3 __aeabi_ddiv +#define __muldf3 __aeabi_dmul +#define __subdf3 __aeabi_dsub +#define __negdf2 __aeabi_dneg +#define __extendsfdf2 __aeabi_f2d +#define __fixdfsi __aeabi_d2iz +#define __fixunsdfsi __aeabi_d2uiz +#define __floatsidf __aeabi_i2d +#define __floatunsidf __aeabi_ui2d +#endif + ENTRY(__adddf3) vmov_args vadd.f64 d0, d0, d1 @@ -64,6 +77,15 @@ ENTRY(__subdf3) RET END(__subdf3) +#ifdef __ARM_EABI__ +ENTRY(__aeabi_drsub) + vmov_args + vsub.f64 d0, d1, d0 + vmov_ret + RET +END(__aeabi_drsub) +#endif + ENTRY(__muldf3) vmov_args vmul.f64 d0, d0, d1 @@ -120,6 +142,91 @@ ENTRY(__floatunsidf) RET END(__floatunsidf) +/* + * Effect of a floating point comparision on the condition flags. 
+ * N Z C V + * EQ = 0 1 1 0 + * LT = 1 0 0 0 + * GT = 0 0 1 0 + * UN = 0 0 1 1 + */ +#ifdef __ARM_EABI__ +ENTRY(__aeabi_cdcmpeq) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + RET +END(__aeabi_cdcmpeq) + +ENTRY(__aeabi_cdcmple) + vmov_args + vcmpe.f64 d0, d1 + vmrs APSR_nzcv, fpscr + RET +END(__aeabi_cdcmple) + +ENTRY(__aeabi_cdrcmple) + vmov_args + vcmpe.f64 d1, d0 + vmrs APSR_nzcv, fpscr + RET +END(__aeabi_cdrcmple) + +ENTRY(__aeabi_dcmpeq) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + moveq r0, #1 /* (a == b) */ + movne r0, #0 /* (a != b) or unordered */ + RET +END(__aeabi_dcmpeq) + +ENTRY(__aeabi_dcmplt) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + movlt r0, #1 /* (a < b) */ + movcs r0, #0 /* (a >= b) or unordered */ + RET +END(__aeabi_dcmplt) + +ENTRY(__aeabi_dcmple) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + movls r0, #1 /* (a <= b) */ + movhi r0, #0 /* (a > b) or unordered */ + RET +END(__aeabi_dcmple) + +ENTRY(__aeabi_dcmpge) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + movge r0, #1 /* (a >= b) */ + movlt r0, #0 /* (a < b) or unordered */ + RET +END(__aeabi_dcmpge) + +ENTRY(__aeabi_dcmpgt) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + movgt r0, #1 /* (a > b) */ + movle r0, #0 /* (a <= b) or unordered */ + RET +END(__aeabi_dcmpgt) + +ENTRY(__aeabi_dcmpun) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + movvs r0, #1 /* (isnan(a) || isnan(b)) */ + movvc r0, #0 /* !isnan(a) && !isnan(b) */ + RET +END(__aeabi_dcmpun) + +#else /* N set if compare <= result */ /* Z set if compare = result */ /* C set if compare (=,>=,UNORD) result */ @@ -163,3 +270,4 @@ ENTRY(__unorddf2) movvc r0, #0 /* isnan(a) || isnan(b) */ RET END(__unorddf2) +#endif /* !__ARM_EABI__ */ Index: src/lib/libc_vfp/vfpsf.S diff -u src/lib/libc_vfp/vfpsf.S:1.1 src/lib/libc_vfp/vfpsf.S:1.2 --- src/lib/libc_vfp/vfpsf.S:1.1 Mon Jan 28 17:04:40 2013 +++ src/lib/libc_vfp/vfpsf.S Sun Jun 23 06:19:55 2013 @@ -30,7 +30,7 @@ #include 
#include -RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $") +RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 06:19:55 matt Exp $") /* * This file provides softfloat compatible routines which use VFP instructions @@ -40,6 +40,19 @@ RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 * This file implements the single precision floating point routines. */ +#ifdef __ARM_EABI__ +#define __addsf3 __aeabi_fadd +#define __divsf3 __aeabi_fdiv +#define __mulsf3 __aeabi_fmul +#define __subsf3 __aeabi_fsub +#define __negsf2 __aeabi_fneg +#define __truncdfsf2 __aeabi_d2f +#define __fixsfsi __aeabi_f2iz +#define __fixunssfsi __aeabi_f2uiz +#define __floatsisf __aeabi_i2f +#define __floatunsisf __aeabi_ui2f +#endif + ENTRY(__addsf3) vmov s0, s1, r0, r1 vadd.f32 s0, s0, s1 @@ -54,6 +67,15 @@ ENTRY(__subsf3) RET END(__subsf3) +#ifdef __ARM_EABI__ +ENTRY(__aeabi_frsub) + vmov s0, s1, r0, r1 + vsub.f32 s0, s1, s0 + vmov r0, s0 + RET +END(__aeabi_frsub) +#endif + ENTRY(__mulsf3) vmov s0, s1, r0, r1 vmul.f32 s0, s0, s1 @@ -114,6 +136,91 @@ ENTRY(__floatunsisf) RET END(__floatunsisf) +/* + * Effect of a floating point comparision on the condition
CVS commit: src/lib/libc_vfp
Module Name:src Committed By: matt Date: Mon Jan 28 17:04:40 UTC 2013 Added Files: src/lib/libc_vfp: Makefile shlib_version vfpdf.S vfpsf.S Log Message: Add a library for ARM systems with VFP which implements the soft-float ABI but use VFP instructions to do the actual work. This should give near hard-float performance without requiring compiler changes. To generate a diff of this commit: cvs rdiff -u -r0 -r1.1 src/lib/libc_vfp/Makefile \ src/lib/libc_vfp/shlib_version src/lib/libc_vfp/vfpdf.S \ src/lib/libc_vfp/vfpsf.S Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. Added files: Index: src/lib/libc_vfp/Makefile diff -u /dev/null src/lib/libc_vfp/Makefile:1.1 --- /dev/null Mon Jan 28 17:04:40 2013 +++ src/lib/libc_vfp/Makefile Mon Jan 28 17:04:40 2013 @@ -0,0 +1,12 @@ +# $NetBSD: Makefile,v 1.1 2013/01/28 17:04:40 matt Exp $ +# + +LIB= c_vfp + +.include + +CPUFLAGS+= -mfpu=vfp + +SRCS= vfpsf.S vfpdf.S + +.include Index: src/lib/libc_vfp/shlib_version diff -u /dev/null src/lib/libc_vfp/shlib_version:1.1 --- /dev/null Mon Jan 28 17:04:40 2013 +++ src/lib/libc_vfp/shlib_version Mon Jan 28 17:04:40 2013 @@ -0,0 +1,5 @@ +# $NetBSD: shlib_version,v 1.1 2013/01/28 17:04:40 matt Exp $ +# Remember to update distrib/sets/lists/base/shl.* when changing +# +major=0 +minor=0 Index: src/lib/libc_vfp/vfpdf.S diff -u /dev/null src/lib/libc_vfp/vfpdf.S:1.1 --- /dev/null Mon Jan 28 17:04:40 2013 +++ src/lib/libc_vfp/vfpdf.S Mon Jan 28 17:04:40 2013 @@ -0,0 +1,165 @@ +/*- + * Copyright (c) 2013 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Matt Thomas of 3am Software Foundry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + *notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + *notice, this list of conditions and the following disclaimer in the + *documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $") + +/* + * This file provides softfloat compatible routines which use VFP instructions + * to do the actual work. This should give near hard-float performance while + * being compatible with soft-float code. + * + * This file implements the double precision floating point routines. 
+ */ + +#ifdef __ARMEL__ +#define vmov_arg0 vmov d0, r0, r1 +#define vmov_arg1 vmov d1, r2, r3 +#define vmov_ret vmov r0, r1, d0 +#else +#define vmov_arg0 vmov d0, r1, r0 +#define vmov_arg1 vmov d1, r3, r2 +#define vmov_ret vmov r1, r0, d0 +#endif +#define vmov_args vmov_arg0; vmov_arg1 + +ENTRY(__adddf3) + vmov_args + vadd.f64 d0, d0, d1 + vmov_ret + RET +END(__adddf3) + +ENTRY(__subdf3) + vmov_args + vsub.f64 d0, d0, d1 + vmov_ret + RET +END(__subdf3) + +ENTRY(__muldf3) + vmov_args + vmul.f64 d0, d0, d1 + vmov_ret + RET +END(__muldf3) + +ENTRY(__divdf3) + vmov_args + vdiv.f64 d0, d0, d1 + vmov_ret + RET +END(__divdf3) + +ENTRY(__negdf2) + vmov_arg0 + vneg.f64 d0, d0 + vmov_ret + RET +END(__negdf2) + +ENTRY(__extendsfdf2) + vmov s0, r0 + vcvt.f64.f32 d0, s0 + vmov_ret + RET +END(__extendsfdf2) + +ENTRY(__fixdfsi) + vmov_arg0 + vcvt.s32.f64 s0, d0 + vmov r0, s0 + RET +END(__fixdfsi) + +ENTRY(__fixunsdfsi) + vmov_arg0 + vcvt.u32.f64 s0, d0 + vmov r0, s0 + RET +END(__fixunsdfsi) + +ENTRY(__floatsidf) + vmov s0, r0 + vcvt.f64.s32 d0, s0 + vmov_ret + RET +END(__floatsidf) + +ENTRY(__floatunsidf) + vmov s0, r0 + vcvt.f64.u32 d0, s0 + vmov_ret + RET +END(__floatunsidf) + +/* N set if compare <= result */ +/* Z set if compare = result */ +/* C set if compare (=,>=,UNORD) result */ +/* V set if compare UNORD result */ + +STRONG_ALIAS(__eqdf2, __nedf2) +ENTRY(__nedf2) + vmov_args + vcmp.f64 d0, d1 + vmrs APSR_nzcv, fpscr + moveq r0, #0 /* !(a == b) */ + movne r0, #1 /* !(a == b) */ + RET +END(_