> Anyway. As nobody seems to be objecting, it sounds like we are going for
> combination of both alternatives? I.e. those who specify specific -march
> lower than armv7 would be excused from capability detection and run-time
> switch, and those who additionally specify "better" -Wa and
> corresponding -D, would be able to build universal binaries of their
> liking. I'll give it some extra time for others to ponder and make a
> suggestion. It would be easier to discuss details then.
Attached is a suggestion for how to implement this. [I thought it would take more tweaking.] I scrapped -Wa, as -D__ARM_MAX_ARCH__=N turned out to be sufficient. Most of what needs to be said is said in the commentary in ./Configure (see the beginning of the patch). But that is only "most"; there are a couple of controversial points that are likely to need clarification.

The reason I didn't add '.arch armv7-a' in all #if __ARM_MAX_ARCH__>=7 sections is that '.fpu neon' appears to be sufficient to compile the code, while *not* having '.arch armv7-a' (where possible) lets us catch attempts to use new instructions in places where they are inappropriate when building universal binaries.

The second controversial point is that ARMv8 crypto is compiled even with __ARM_MAX_ARCH__>=7, i.e. without requiring __ARM_MAX_ARCH__>=8. This is done, so to say, to popularize ARMv8 crypto among those who won't read the commentary section in Configure, as well as among Android [and, in the future, iOS] people. [Well, -D__ARM_MAX_ARCH__=8 would work there too, but they are even more likely to miss the memo.]
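Just to illustrate the pattern the patch applies throughout, here is a minimal C sketch [not part of the patch; the sha256_block* names are made-up stand-ins for the actual perlasm entry points]. Code above the minimum architecture is *compiled* whenever the __ARM_MAX_ARCH__ ceiling permits, but *executed* only if run-time probing has set the corresponding OPENSSL_armcap_P bit:

#include <stddef.h>
#include "arm_arch.h"	/* __ARM_ARCH__, __ARM_MAX_ARCH__, ARMV7_NEON */

extern unsigned int OPENSSL_armcap_P;	/* set once by OPENSSL_cpuid_setup() */

/* hypothetical stand-ins for the real perlasm routines */
extern void sha256_block_generic(void *ctx, const void *inp, size_t num);
#if __ARM_MAX_ARCH__>=7
extern void sha256_block_neon(void *ctx, const void *inp, size_t num);
#endif

void sha256_block(void *ctx, const void *inp, size_t num)
{
#if __ARM_MAX_ARCH__>=7
	/* compiled in whenever the declared *maximum* architecture allows,
	 * taken only if the run-time probe found NEON on this processor */
	if (OPENSSL_armcap_P & ARMV7_NEON) {
		sha256_block_neon(ctx, inp, num);
		return;
	}
#endif
	sha256_block_generic(ctx, inp, num);	/* minimum-architecture path */
}

With -march=armv6 -D__ARM_MAX_ARCH__=8 both paths end up in the binary, while plain -march=armv6 [where __ARM_MAX_ARCH__ defaults to __ARM_ARCH__] leaves only the generic one.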
diff --git a/Configure b/Configure
index 2eda5e6..a080616 100755
--- a/Configure
+++ b/Configure
@@ -352,8 +352,34 @@ my %table=(
 # throw in -D[BL]_ENDIAN, whichever appropriate...
 "linux-generic32","gcc:-DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
 "linux-ppc",	"gcc:-DB_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL:${ppc32_asm}:linux32:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
-# It's believed that majority of ARM toolchains predefine appropriate -march.
-# If you compiler does not, do complement config command line with one!
+
+#######################################################################
+# Note that -march is not among compiler options in below linux-armv4
+# target line. Not specifying one is intentional to give you choice to:
+#
+# a) rely on your compiler default by not specifying one;
+# b) specify your target platform explicitly for optimal performance,
+#    e.g. -march=armv6 or -march=armv7-a;
+# c) build "universal" binary that targets *range* of platforms by
+#    specifying minimum and maximum supported architecture;
+#
+# As for c) option. It actually makes no sense to specify maximum to be
+# less than ARMv7, because it's the least requirement for run-time
+# switch between platform-specific code paths. And without run-time
+# switch performance would be equivalent to one for minimum. Secondly,
+# there are some natural limitations that you'd have to accept and
+# respect. Most notably you can *not* build "universal" binary for
+# big-endian platform. This is because ARMv7 processor always picks
+# instructions in little-endian order. Another similar limitation is
+# that -mthumb can't "cross" -march=armv6t2 boundary, because that's
+# where it became Thumb-2. Well, this limitation is a bit artificial,
+# because it's not really impossible, but it's deemed too tricky to
+# support. And of course you have to be sure that your binutils are
+# actually up to the task handling maximum target platform. With all
+# this in mind here is an example of how to configure "universal" build:
+#
+#	./Configure linux-armv4 -march=armv6 -D__ARM_MAX_ARCH__=8
+#
 "linux-armv4",	"gcc:-DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${armv4_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
 "linux-aarch64","gcc:-DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${aarch64_asm}:linux64:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
 # Configure script adds minimally required -march for assembly support,
diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl
index 923c7f6..12ab307 100755
--- a/crypto/aes/asm/aesv8-armx.pl
+++ b/crypto/aes/asm/aesv8-armx.pl
@@ -35,7 +35,7 @@ $prefix="aes_v8";
 $code=<<___;
 #include "arm_arch.h"
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .text
 ___
 $code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
diff --git a/crypto/aes/asm/bsaes-armv7.pl b/crypto/aes/asm/bsaes-armv7.pl
index f3d96d9..6671e86 100644
--- a/crypto/aes/asm/bsaes-armv7.pl
+++ b/crypto/aes/asm/bsaes-armv7.pl
@@ -702,13 +702,14 @@ $code.=<<___;
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
 # define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
 #endif
 
 #ifdef __thumb__
 # define adrl adr
 #endif
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .text
 .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
 #ifdef __thumb2__
@@ -717,6 +718,7 @@ $code.=<<___;
 .code	32
 #endif
 
+.arch	armv7-a
 .fpu	neon
 
 .type	_bsaes_decrypt8,%function
diff --git a/crypto/arm_arch.h b/crypto/arm_arch.h
index 6fa8724..373b3d7 100644
--- a/crypto/arm_arch.h
+++ b/crypto/arm_arch.h
@@ -48,6 +48,18 @@
 # endif
 #endif
 
+#if !defined(__ARM_MAX_ARCH__)
+# define __ARM_MAX_ARCH__ __ARM_ARCH__
+#endif
+
+#if __ARM_MAX_ARCH__<__ARM_ARCH__
+# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
+#elif __ARM_MAX_ARCH__!=__ARM_ARCH__
+# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
+#  error "can't build universal big-endian binary"
+# endif
+#endif
+
 #ifdef OPENSSL_FIPSCANISTER
 #include <openssl/fipssyms.h>
 #endif
diff --git a/crypto/armcap.c b/crypto/armcap.c
index 7e46d07..24f7a08 100644
--- a/crypto/armcap.c
+++ b/crypto/armcap.c
@@ -7,8 +7,12 @@
 
 #include "arm_arch.h"
 
-unsigned int OPENSSL_armcap_P;
+unsigned int OPENSSL_armcap_P=0;
 
+#if __ARM_MAX_ARCH__<7
+void OPENSSL_cpuid_setup(void) {}
+unsigned long OPENSSL_rdtsc(void) { return 0; }
+#else
 static sigset_t all_masked;
 
 static sigjmp_buf ill_jmp;
@@ -155,3 +159,4 @@ void OPENSSL_cpuid_setup(void)
 	sigaction (SIGILL,&ill_oact,NULL);
 	sigprocmask(SIG_SETMASK,&oset,NULL);
 	}
+#endif
diff --git a/crypto/armv4cpuid.S b/crypto/armv4cpuid.S
index 0059311..7757441 100644
--- a/crypto/armv4cpuid.S
+++ b/crypto/armv4cpuid.S
@@ -3,68 +3,52 @@
 .text
 .code	32
 
-@ Special note about using .byte directives to encode instructions.
-@ Initial reason for hand-coding instructions was to allow module to
-@ be compilable by legacy tool-chains. At later point it was pointed
-@ out that since ARMv7, instructions are always encoded in little-endian
-@ order, therefore one has to opt for endian-neutral presentation.
-@ Contemporary tool-chains offer .inst directive for this purpose,
-@ but not legacy ones. Therefore .byte. But there is an exception,
-@ namely ARMv7-R profile still allows for big-endian encoding even for
-@ instructions. This raises the question what if probe instructions
-@ appear executable to such processor operating in big-endian order?
-@ They have to be chosen in a way that avoids this problem. As failed
-@ NEON probe disables a number of other probes we have to ensure that
-@ only NEON probe instruction doesn't appear executable in big-endian
-@ order, therefore 'vorr q8,q8,q8', and not some other register. The
-@ only probe that is not bypassed on failed NEON probe is _armv7_tick,
-@ where you'll spot 'mov r0,r6' that serves this purpose. Basic idea is
-@ that if fetched in alternative byte oder instruction should crash to
-@ denote lack of probed capability...
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
 
 .align	5
 .global	_armv7_neon_probe
 .type	_armv7_neon_probe,%function
 _armv7_neon_probe:
-	.byte	0xf0,0x01,0x60,0xf2	@ vorr	q8,q8,q8
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
+	vorr	q0,q0,q0
+	bx	lr
 .size	_armv7_neon_probe,.-_armv7_neon_probe
 
 .global	_armv7_tick
 .type	_armv7_tick,%function
 _armv7_tick:
-	.byte	0x06,0x00,0xa0,0xe1	@ mov	r0,r6
-	.byte	0x1e,0x0f,0x51,0xec	@ mrrc	p15,1,r0,r1,c14	@ CNTVCT
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
-	nop
+	mrrc	p15,1,r0,r1,c14		@ CNTVCT
+	bx	lr
 .size	_armv7_tick,.-_armv7_tick
 
 .global	_armv8_aes_probe
 .type	_armv8_aes_probe,%function
 _armv8_aes_probe:
 	.byte	0x00,0x03,0xb0,0xf3	@ aese.8	q0,q0
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
+	bx	lr
 .size	_armv8_aes_probe,.-_armv8_aes_probe
 
 .global	_armv8_sha1_probe
 .type	_armv8_sha1_probe,%function
 _armv8_sha1_probe:
 	.byte	0x40,0x0c,0x00,0xf2	@ sha1c.32	q0,q0,q0
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
+	bx	lr
 .size	_armv8_sha1_probe,.-_armv8_sha1_probe
 
 .global	_armv8_sha256_probe
 .type	_armv8_sha256_probe,%function
 _armv8_sha256_probe:
 	.byte	0x40,0x0c,0x00,0xf3	@ sha256h.32	q0,q0,q0
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
+	bx	lr
 .size	_armv8_sha256_probe,.-_armv8_sha256_probe
 
 .global	_armv8_pmull_probe
 .type	_armv8_pmull_probe,%function
 _armv8_pmull_probe:
 	.byte	0x00,0x0e,0xa0,0xf2	@ vmull.p64	q0,d0,d0
-	.byte	0x1e,0xff,0x2f,0xe1	@ bx	lr
+	bx	lr
 .size	_armv8_pmull_probe,.-_armv8_pmull_probe
+#endif
 
 .align	5
 .global	OPENSSL_atomic_add
@@ -142,27 +126,31 @@ OPENSSL_cleanse:
 .global	OPENSSL_wipe_cpu
 .type	OPENSSL_wipe_cpu,%function
 OPENSSL_wipe_cpu:
+#if __ARM_MAX_ARCH__>=7
 	ldr	r0,.LOPENSSL_armcap
 	adr	r1,.LOPENSSL_armcap
 	ldr	r0,[r1,r0]
+#endif
 	eor	r2,r2,r2
 	eor	r3,r3,r3
 	eor	ip,ip,ip
+#if __ARM_MAX_ARCH__>=7
 	tst	r0,#1
 	beq	.Lwipe_done
-	.byte	0x50,0x01,0x00,0xf3	@ veor	q0, q0, q0
-	.byte	0x52,0x21,0x02,0xf3	@ veor	q1, q1, q1
-	.byte	0x54,0x41,0x04,0xf3	@ veor	q2, q2, q2
-	.byte	0x56,0x61,0x06,0xf3	@ veor	q3, q3, q3
-	.byte	0xf0,0x01,0x40,0xf3	@ veor	q8, q8, q8
-	.byte	0xf2,0x21,0x42,0xf3	@ veor	q9, q9, q9
-	.byte	0xf4,0x41,0x44,0xf3	@ veor	q10, q10, q10
-	.byte	0xf6,0x61,0x46,0xf3	@ veor	q11, q11, q11
-	.byte	0xf8,0x81,0x48,0xf3	@ veor	q12, q12, q12
-	.byte	0xfa,0xa1,0x4a,0xf3	@ veor	q13, q13, q13
-	.byte	0xfc,0xc1,0x4c,0xf3	@ veor	q14, q14, q14
-	.byte	0xfe,0xe1,0x4e,0xf3	@ veor	q14, q14, q14
+	veor	q0, q0, q0
+	veor	q1, q1, q1
+	veor	q2, q2, q2
+	veor	q3, q3, q3
+	veor	q8, q8, q8
+	veor	q9, q9, q9
+	veor	q10, q10, q10
+	veor	q11, q11, q11
+	veor	q12, q12, q12
+	veor	q13, q13, q13
+	veor	q14, q14, q14
+	veor	q15, q15, q15
 .Lwipe_done:
+#endif
 	mov	r0,sp
 #if __ARM_ARCH__>=5
 	bx	lr
diff --git a/crypto/bn/asm/armv4-gf2m.pl b/crypto/bn/asm/armv4-gf2m.pl
index b781afb..dc3aa57 100644
--- a/crypto/bn/asm/armv4-gf2m.pl
+++ b/crypto/bn/asm/armv4-gf2m.pl
@@ -41,7 +41,7 @@ $code=<<___;
 .text
 .code	32
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .fpu	neon
 #endif
 ___
@@ -150,7 +150,7 @@ $code.=<<___;
 .type	bn_GF2m_mul_2x2,%function
 .align	5
 bn_GF2m_mul_2x2:
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	ldr	r12,.LOPENSSL_armcap
 .Lpic:	ldr	r12,[pc,r12]
 	tst	r12,#1
diff --git a/crypto/bn/asm/armv4-mont.pl b/crypto/bn/asm/armv4-mont.pl
index 72bad8e..64cea07 100644
--- a/crypto/bn/asm/armv4-mont.pl
+++ b/crypto/bn/asm/armv4-mont.pl
@@ -72,7 +72,7 @@ $code=<<___;
 .text
 .code	32
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .align	5
 .LOPENSSL_armcap:
 .word	OPENSSL_armcap_P-bn_mul_mont
@@ -85,7 +85,7 @@ $code=<<___;
 bn_mul_mont:
 	ldr	ip,[sp,#4]		@ load num
 	stmdb	sp!,{r0,r2}		@ sp points at argument block
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	tst	ip,#7
 	bne	.Lialu
 	adr	r0,bn_mul_mont
@@ -256,7 +256,7 @@ my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
 my ($tinptr,$toutptr,$inner,$outer)=map("r$_",(6..9));
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .fpu	neon
 
 .type	bn_mul8x_mont_neon,%function
@@ -663,7 +663,7 @@ ___
 $code.=<<___;
 .asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .comm	OPENSSL_armcap_P,4,4
 #endif
 ___
diff --git a/crypto/evp/e_aes.c b/crypto/evp/e_aes.c
index 7762623..c9f7a39 100644
--- a/crypto/evp/e_aes.c
+++ b/crypto/evp/e_aes.c
@@ -911,7 +911,7 @@ const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
 #if defined(OPENSSL_CPUID_OBJ) && (defined(__arm__) || defined(__arm) || defined(__aarch64__))
 #include "arm_arch.h"
 
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 # if defined(BSAES_ASM)
 #  define BSAES_CAPABLE	(OPENSSL_armcap_P & ARMV7_NEON)
 # endif
diff --git a/crypto/modes/asm/ghash-armv4.pl b/crypto/modes/asm/ghash-armv4.pl
index 0023bf9..8e3a7dd 100644
--- a/crypto/modes/asm/ghash-armv4.pl
+++ b/crypto/modes/asm/ghash-armv4.pl
@@ -365,7 +365,7 @@ ___
 }
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .fpu	neon
 
 .global	gcm_init_neon
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index 484142a..f54a5af 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -678,7 +678,7 @@ void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len
 #  endif
 # elif defined(__arm__) || defined(__arm) || defined(__aarch64__)
 #  include "arm_arch.h"
-#  if __ARM_ARCH__>=7
+#  if __ARM_MAX_ARCH__>=7
 #   define GHASH_ASM_ARM
 #   define GCM_FUNCREF_4BIT
 #   define PMULL_CAPABLE	(OPENSSL_armcap_P & ARMV8_PMULL)
diff --git a/crypto/sha/asm/sha1-armv4-large.pl b/crypto/sha/asm/sha1-armv4-large.pl
index 50bd07b..192340a 100644
--- a/crypto/sha/asm/sha1-armv4-large.pl
+++ b/crypto/sha/asm/sha1-armv4-large.pl
@@ -174,7 +174,7 @@ $code=<<___;
 
 .align	5
 sha1_block_data_order:
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	sub	r3,pc,#8		@ sha1_block_data_order
 	ldr	r12,.LOPENSSL_armcap
 	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
@@ -476,7 +476,7 @@ sub Xloop()
 }
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .fpu	neon
 
 .type	sha1_block_data_order_neon,%function
@@ -563,7 +563,7 @@ my @Kxx=map("q$_",(8..11));
 my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .type	sha1_block_data_order_armv8,%function
 .align	5
 sha1_block_data_order_armv8:
diff --git a/crypto/sha/asm/sha256-armv4.pl b/crypto/sha/asm/sha256-armv4.pl
index 505ca8f..2a9b818 100644
--- a/crypto/sha/asm/sha256-armv4.pl
+++ b/crypto/sha/asm/sha256-armv4.pl
@@ -186,7 +186,7 @@ K256:
 sha256_block_data_order:
 	sub	r3,pc,#8		@ sha256_block_data_order
 	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	ldr	r12,.LOPENSSL_armcap
 	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
 	tst	r12,#ARMV8_SHA256
@@ -423,7 +423,7 @@ sub body_00_15 () {
 }
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .fpu	neon
 
 .type	sha256_block_data_order_neon,%function
@@ -545,7 +545,7 @@ my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
 my $Ktbl="r3";
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 .type	sha256_block_data_order_armv8,%function
 .align	5
 sha256_block_data_order_armv8:
diff --git a/crypto/sha/asm/sha512-armv4.pl b/crypto/sha/asm/sha512-armv4.pl
index 1d5275b..25fe88b 100644
--- a/crypto/sha/asm/sha512-armv4.pl
+++ b/crypto/sha/asm/sha512-armv4.pl
@@ -246,7 +246,7 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 sha512_block_data_order:
 	sub	r3,pc,#8		@ sha512_block_data_order
 	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
 	ldr	r12,.LOPENSSL_armcap
 	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
 	tst	r12,#1
@@ -551,7 +551,8 @@ ___
 }
 
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
 .fpu	neon
 
 .align	4
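P.S. For completeness, the probe routines above are only meaningful under a SIGILL-based run-time test like the one armcap.c already performs. A simplified sketch of the idea [neon_present is a made-up wrapper; the real OPENSSL_cpuid_setup additionally masks signals and probes the ARMv8 extensions]:

#include <string.h>
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf ill_jmp;

static void ill_handler(int sig)
{
	/* an undefined-instruction trap means the capability is absent */
	siglongjmp(ill_jmp, sig);
}

extern void _armv7_neon_probe(void);	/* executes 'vorr q0,q0,q0', see above */

static int neon_present(void)
{
	struct sigaction ill_act, ill_oact;
	int ret = 0;

	memset(&ill_act, 0, sizeof(ill_act));
	ill_act.sa_handler = ill_handler;
	sigaction(SIGILL, &ill_act, &ill_oact);

	if (sigsetjmp(ill_jmp, 1) == 0) {
		_armv7_neon_probe();	/* raises SIGILL if NEON is absent */
		ret = 1;
	}

	sigaction(SIGILL, &ill_oact, NULL);
	return ret;
}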