Module Name:    src
Committed By:   martin
Date:           Tue Feb 20 12:48:46 UTC 2018

Modified Files:
        src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc: aes-sparcv9.S
            aest4-sparcv9.S bn-sparcv8.S cmllt4-sparcv9.S des_enc-sparc.S
            dest4-sparcv9.S ghash-sparcv9.S sparcv9-mont.S sparcv9a-mont.S
            vis3-mont.S
Removed Files:
        src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc:
            aesfx-sparcv9.S ec.inc ecp_nistz256-sparcv9.S poly1305-sparcv9.S
            poly1305.inc sparcv9_modes.S

Log Message:
Backout previous "regen" - the regen target in this directory is bogus,
there is no "sparcv9" nor any "vis" in the default CPU targets for
NetBSD/sparc.


To generate a diff of this commit:
cvs rdiff -u -r1.2 -r1.3 \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S
cvs rdiff -u -r1.1 -r0 \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aesfx-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ec.inc \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ecp_nistz256-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/poly1305-sparcv9.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/poly1305.inc \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9_modes.S
cvs rdiff -u -r1.3 -r1.4 \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S \
    src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aes-sparcv9.S	Tue Feb 20 12:48:46 2018
@@ -1,9 +1,3 @@
-#include "sparc_arch.h"
-
-#ifdef  __arch64__
-.register	%g2,#scratch
-.register	%g3,#scratch
-#endif
 .section	".text",#alloc,#execinstr
 
 .align	256
@@ -302,8 +296,8 @@ AES_Te:
 .align	64
 .skip	16
 _sparcv9_AES_encrypt:
-	save	%sp,-STACK_FRAME-16,%sp
-	stx	%i7,[%sp+STACK_BIAS+STACK_FRAME+0]	! off-load return address
+	save	%sp,-112-16,%sp
+	stx	%i7,[%sp+0+112+0]	! off-load return address
 	ld	[%i5+240],%i7
 	ld	[%i5+0],%l4
 	ld	[%i5+4],%l5			!
@@ -591,7 +585,7 @@ _sparcv9_AES_encrypt:
 	ldub	[%i7+%g5],%g5
 		sll	%o0,16,%o0
 		xor	%l0,%i0,%i0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%i7	! restore return address
+	ldx	[%sp+0+112+0],%i7	! restore return address
 	
 		sll	%o1,8,%o1		!
 		xor	%o0,%i0,%i0
@@ -629,7 +623,7 @@ AES_encrypt:
 	or	%o0,%o1,%g1
 	andcc	%g1,3,%g0
 	bnz,pn	%xcc,.Lunaligned_enc
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 
 	ld	[%i0+0],%o0
 	ld	[%i0+4],%o1
@@ -1034,8 +1028,8 @@ AES_Td:
 .align	64
 .skip	16
 _sparcv9_AES_decrypt:
-	save	%sp,-STACK_FRAME-16,%sp
-	stx	%i7,[%sp+STACK_BIAS+STACK_FRAME+0]	! off-load return address
+	save	%sp,-112-16,%sp
+	stx	%i7,[%sp+0+112+0]	! off-load return address
 	ld	[%i5+240],%i7
 	ld	[%i5+0],%l4
 	ld	[%i5+4],%l5			!
@@ -1323,7 +1317,7 @@ _sparcv9_AES_decrypt:
 	ldub	[%i7+%g5],%g5
 		sll	%o0,16,%o0
 		xor	%l0,%i0,%i0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%i7	! restore return address
+	ldx	[%sp+0+112+0],%i7	! restore return address
 	
 		sll	%o1,8,%o1		!
 		xor	%o0,%i0,%i0
@@ -1361,7 +1355,7 @@ AES_decrypt:
 	or	%o0,%o1,%g1
 	andcc	%g1,3,%g0
 	bnz,pn	%xcc,.Lunaligned_dec
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 
 	ld	[%i0+0],%o0
 	ld	[%i0+4],%o1
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/aest4-sparcv9.S	Tue Feb 20 12:48:46 2018
@@ -1,10 +1,3 @@
-#include "sparc_arch.h"
-
-#ifdef	__arch64__
-.register	%g2,#scratch
-.register	%g3,#scratch
-#endif
-
 .text
 
 .globl	aes_t4_encrypt
@@ -515,9 +508,9 @@ _aes128_load_deckey=_aes128_loadkey
 .globl	aes128_t4_cbc_encrypt
 .align	32
 aes128_t4_cbc_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L128_cbc_enc_abort
+	be,pn		%icc, .L128_cbc_enc_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f0
@@ -536,7 +529,7 @@ aes128_t4_cbc_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 127
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<128 ||
+	movleu		%icc, 0, %l5	!	%i2<128 ||
 	brnz,pn		%l5, .L128cbc_enc_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -662,7 +655,7 @@ aes128_t4_cbc_encrypt:
 .globl	aes128_t4_ctr32_encrypt
 .align	32
 aes128_t4_ctr32_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	prefetch	[%i0], 20
@@ -692,7 +685,7 @@ aes128_t4_ctr32_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128_ctr32_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -911,7 +904,7 @@ aes128_t4_ctr32_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_ctr32_blk_loop2x
+	bgu,pt		%icc, .L128_ctr32_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -929,17 +922,17 @@ aes128_t4_ctr32_encrypt:
 .globl	aes128_t4_xts_encrypt
 .align	32
 aes128_t4_xts_encrypt:
-	save		%sp, -STACK_FRAME-16, %sp
+	save		%sp, -112-16, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	mov		%i5, %o0
-	add		%fp, STACK_BIAS-16, %o1
+	add		%fp, 0-16, %o1
 	call		aes_t4_encrypt
 	mov		%i4, %o2
 
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	ldxa		[%l7]0x88, %g2
-	add		%fp, STACK_BIAS-8, %l7
+	add		%fp, 0-8, %l7
 	ldxa		[%l7]0x88, %g3		! %g3:%g2 is tweak
 
 	sethi		%hi(0x76543210), %l7
@@ -962,7 +955,7 @@ aes128_t4_xts_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128_xts_enblk !	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1260,7 +1253,7 @@ aes128_t4_xts_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_xts_enblk2x
+	bgu,pt		%icc, .L128_xts_enblk2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -1281,11 +1274,11 @@ aes128_t4_xts_encrypt:
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 .align	32
 .L128_xts_ensteal:
-	std		%f0, [%fp + STACK_BIAS-16]	! copy of output
-	std		%f2, [%fp + STACK_BIAS-8]
+	std		%f0, [%fp + 0-16]	! copy of output
+	std		%f2, [%fp + 0-8]
 
 	srl		%l0, 3, %l0
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	add		%i0, %l0, %i0	! original %i0+%i2&-15
 	add		%i1, %l2, %i1	! original %i1+%i2&-15
 	mov		0, %l0
@@ -1313,17 +1306,17 @@ aes128_t4_xts_encrypt:
 .globl	aes128_t4_xts_decrypt
 .align	32
 aes128_t4_xts_decrypt:
-	save		%sp, -STACK_FRAME-16, %sp
+	save		%sp, -112-16, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	mov		%i5, %o0
-	add		%fp, STACK_BIAS-16, %o1
+	add		%fp, 0-16, %o1
 	call		aes_t4_encrypt
 	mov		%i4, %o2
 
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	ldxa		[%l7]0x88, %g2
-	add		%fp, STACK_BIAS-8, %l7
+	add		%fp, 0-8, %l7
 	ldxa		[%l7]0x88, %g3		! %g3:%g2 is tweak
 
 	sethi		%hi(0x76543210), %l7
@@ -1349,7 +1342,7 @@ aes128_t4_xts_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128_xts_deblk !	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1648,7 +1641,7 @@ aes128_t4_xts_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_xts_deblk2x
+	bgu,pt		%icc, .L128_xts_deblk2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -1706,11 +1699,11 @@ aes128_t4_xts_decrypt:
 	.word	0x81b30d80 !fxor	%f12,%f0,%f0		! ^= tweak[0]
 	.word	0x85b38d82 !fxor	%f14,%f2,%f2
 
-	std		%f0, [%fp + STACK_BIAS-16]
-	std		%f2, [%fp + STACK_BIAS-8]
+	std		%f0, [%fp + 0-16]
+	std		%f2, [%fp + 0-8]
 
 	srl		%l0, 3, %l0
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	add		%i0, %l0, %i0	! original %i0+%i2&-15
 	add		%i1, %l2, %i1	! original %i1+%i2&-15
 	mov		0, %l0
@@ -1739,9 +1732,9 @@ aes128_t4_xts_decrypt:
 .globl	aes128_t4_cbc_decrypt
 .align	32
 aes128_t4_cbc_decrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L128_cbc_dec_abort
+	be,pn		%icc, .L128_cbc_dec_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f12	! load ivec
@@ -1760,7 +1753,7 @@ aes128_t4_cbc_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128cbc_dec_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1986,7 +1979,7 @@ aes128_t4_cbc_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_cbc_dec_blk_loop2x
+	bgu,pt		%icc, .L128_cbc_dec_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -2311,9 +2304,9 @@ _aes256_load_deckey=_aes192_loadkey
 .globl	aes256_t4_cbc_encrypt
 .align	32
 aes256_t4_cbc_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L256_cbc_enc_abort
+	be,pn		%icc, .L256_cbc_enc_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f0
@@ -2332,7 +2325,7 @@ aes256_t4_cbc_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 127
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<128 ||
+	movleu		%icc, 0, %l5	!	%i2<128 ||
 	brnz,pn		%l5, .L256cbc_enc_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -2458,9 +2451,9 @@ aes256_t4_cbc_encrypt:
 .globl	aes192_t4_cbc_encrypt
 .align	32
 aes192_t4_cbc_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L192_cbc_enc_abort
+	be,pn		%icc, .L192_cbc_enc_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f0
@@ -2479,7 +2472,7 @@ aes192_t4_cbc_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 127
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<128 ||
+	movleu		%icc, 0, %l5	!	%i2<128 ||
 	brnz,pn		%l5, .L192cbc_enc_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -2605,7 +2598,7 @@ aes192_t4_cbc_encrypt:
 .globl	aes256_t4_ctr32_encrypt
 .align	32
 aes256_t4_ctr32_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	prefetch	[%i0], 20
@@ -2635,7 +2628,7 @@ aes256_t4_ctr32_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256_ctr32_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -2854,7 +2847,7 @@ aes256_t4_ctr32_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_ctr32_blk_loop2x
+	bgu,pt		%icc, .L256_ctr32_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -2872,17 +2865,17 @@ aes256_t4_ctr32_encrypt:
 .globl	aes256_t4_xts_encrypt
 .align	32
 aes256_t4_xts_encrypt:
-	save		%sp, -STACK_FRAME-16, %sp
+	save		%sp, -112-16, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	mov		%i5, %o0
-	add		%fp, STACK_BIAS-16, %o1
+	add		%fp, 0-16, %o1
 	call		aes_t4_encrypt
 	mov		%i4, %o2
 
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	ldxa		[%l7]0x88, %g2
-	add		%fp, STACK_BIAS-8, %l7
+	add		%fp, 0-8, %l7
 	ldxa		[%l7]0x88, %g3		! %g3:%g2 is tweak
 
 	sethi		%hi(0x76543210), %l7
@@ -2905,7 +2898,7 @@ aes256_t4_xts_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256_xts_enblk !	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -3203,7 +3196,7 @@ aes256_t4_xts_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_xts_enblk2x
+	bgu,pt		%icc, .L256_xts_enblk2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -3224,11 +3217,11 @@ aes256_t4_xts_encrypt:
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 .align	32
 .L256_xts_ensteal:
-	std		%f0, [%fp + STACK_BIAS-16]	! copy of output
-	std		%f2, [%fp + STACK_BIAS-8]
+	std		%f0, [%fp + 0-16]	! copy of output
+	std		%f2, [%fp + 0-8]
 
 	srl		%l0, 3, %l0
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	add		%i0, %l0, %i0	! original %i0+%i2&-15
 	add		%i1, %l2, %i1	! original %i1+%i2&-15
 	mov		0, %l0
@@ -3256,17 +3249,17 @@ aes256_t4_xts_encrypt:
 .globl	aes256_t4_xts_decrypt
 .align	32
 aes256_t4_xts_decrypt:
-	save		%sp, -STACK_FRAME-16, %sp
+	save		%sp, -112-16, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	mov		%i5, %o0
-	add		%fp, STACK_BIAS-16, %o1
+	add		%fp, 0-16, %o1
 	call		aes_t4_encrypt
 	mov		%i4, %o2
 
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	ldxa		[%l7]0x88, %g2
-	add		%fp, STACK_BIAS-8, %l7
+	add		%fp, 0-8, %l7
 	ldxa		[%l7]0x88, %g3		! %g3:%g2 is tweak
 
 	sethi		%hi(0x76543210), %l7
@@ -3292,7 +3285,7 @@ aes256_t4_xts_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256_xts_deblk !	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -3591,7 +3584,7 @@ aes256_t4_xts_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_xts_deblk2x
+	bgu,pt		%icc, .L256_xts_deblk2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -3649,11 +3642,11 @@ aes256_t4_xts_decrypt:
 	.word	0x81b30d80 !fxor	%f12,%f0,%f0		! ^= tweak[0]
 	.word	0x85b38d82 !fxor	%f14,%f2,%f2
 
-	std		%f0, [%fp + STACK_BIAS-16]
-	std		%f2, [%fp + STACK_BIAS-8]
+	std		%f0, [%fp + 0-16]
+	std		%f2, [%fp + 0-8]
 
 	srl		%l0, 3, %l0
-	add		%fp, STACK_BIAS-16, %l7
+	add		%fp, 0-16, %l7
 	add		%i0, %l0, %i0	! original %i0+%i2&-15
 	add		%i1, %l2, %i1	! original %i1+%i2&-15
 	mov		0, %l0
@@ -3682,7 +3675,7 @@ aes256_t4_xts_decrypt:
 .globl	aes192_t4_ctr32_encrypt
 .align	32
 aes192_t4_ctr32_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	prefetch	[%i0], 20
@@ -3712,7 +3705,7 @@ aes192_t4_ctr32_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L192_ctr32_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -3931,7 +3924,7 @@ aes192_t4_ctr32_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L192_ctr32_blk_loop2x
+	bgu,pt		%icc, .L192_ctr32_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -3949,9 +3942,9 @@ aes192_t4_ctr32_encrypt:
 .globl	aes192_t4_cbc_decrypt
 .align	32
 aes192_t4_cbc_decrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L192_cbc_dec_abort
+	be,pn		%icc, .L192_cbc_dec_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f12	! load ivec
@@ -3970,7 +3963,7 @@ aes192_t4_cbc_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L192cbc_dec_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -4196,7 +4189,7 @@ aes192_t4_cbc_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L192_cbc_dec_blk_loop2x
+	bgu,pt		%icc, .L192_cbc_dec_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -4217,9 +4210,9 @@ aes192_t4_cbc_decrypt:
 .globl	aes256_t4_cbc_decrypt
 .align	32
 aes256_t4_cbc_decrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L256_cbc_dec_abort
+	be,pn		%icc, .L256_cbc_dec_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f12	! load ivec
@@ -4238,7 +4231,7 @@ aes256_t4_cbc_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256cbc_dec_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -4464,7 +4457,7 @@ aes256_t4_cbc_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_cbc_dec_blk_loop2x
+	bgu,pt		%icc, .L256_cbc_dec_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/bn-sparcv8.S	Tue Feb 20 12:48:46 2018
@@ -3,12 +3,12 @@
 
 /*
  * ====================================================================
- * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
+ * Written by Andy Polyakov <ap...@fy.chalmers.se> for the OpenSSL
+ * project.
  *
- * Licensed under the OpenSSL license (the "License").  You may not use
- * this file except in compliance with the License.  You can obtain a copy
- * in the file LICENSE in the source distribution or at
- * https://www.openssl.org/source/license.html
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
  * ====================================================================
  */
 
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/cmllt4-sparcv9.S	Tue Feb 20 12:48:46 2018
@@ -1,5 +1,3 @@
-#include "sparc_arch.h"
-
 .text
 
 .globl	cmll_t4_encrypt
@@ -976,9 +974,9 @@ _cmll256_decrypt_2x:
 .globl	cmll128_t4_cbc_encrypt
 .align	32
 cmll128_t4_cbc_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L128_cbc_enc_abort
+	be,pn		%icc, .L128_cbc_enc_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f0
@@ -997,7 +995,7 @@ cmll128_t4_cbc_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 127
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<128 ||
+	movleu		%icc, 0, %l5	!	%i2<128 ||
 	brnz,pn		%l5, .L128cbc_enc_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1123,9 +1121,9 @@ cmll128_t4_cbc_encrypt:
 .globl	cmll256_t4_cbc_encrypt
 .align	32
 cmll256_t4_cbc_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L256_cbc_enc_abort
+	be,pn		%icc, .L256_cbc_enc_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f0
@@ -1144,7 +1142,7 @@ cmll256_t4_cbc_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 127
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<128 ||
+	movleu		%icc, 0, %l5	!	%i2<128 ||
 	brnz,pn		%l5, .L256cbc_enc_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1270,9 +1268,9 @@ cmll256_t4_cbc_encrypt:
 .globl	cmll128_t4_cbc_decrypt
 .align	32
 cmll128_t4_cbc_decrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L128_cbc_dec_abort
+	be,pn		%icc, .L128_cbc_dec_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f12	! load ivec
@@ -1291,7 +1289,7 @@ cmll128_t4_cbc_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128cbc_dec_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1517,7 +1515,7 @@ cmll128_t4_cbc_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_cbc_dec_blk_loop2x
+	bgu,pt		%icc, .L128_cbc_dec_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -1538,9 +1536,9 @@ cmll128_t4_cbc_decrypt:
 .globl	cmll256_t4_cbc_decrypt
 .align	32
 cmll256_t4_cbc_decrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	cmp		%i2, 0
-	be,pn		SIZE_T_CC, .L256_cbc_dec_abort
+	be,pn		%icc, .L256_cbc_dec_abort
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 	sub		%i0, %i1, %l5	! %i0!=%i1
 	ld		[%i4 + 0], %f12	! load ivec
@@ -1559,7 +1557,7 @@ cmll256_t4_cbc_decrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256cbc_dec_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -1785,7 +1783,7 @@ cmll256_t4_cbc_decrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_cbc_dec_blk_loop2x
+	bgu,pt		%icc, .L256_cbc_dec_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -1806,7 +1804,7 @@ cmll256_t4_cbc_decrypt:
 .globl	cmll128_t4_ctr32_encrypt
 .align	32
 cmll128_t4_ctr32_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	prefetch	[%i0], 20
@@ -1836,7 +1834,7 @@ cmll128_t4_ctr32_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L128_ctr32_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -2055,7 +2053,7 @@ cmll128_t4_ctr32_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L128_ctr32_blk_loop2x
+	bgu,pt		%icc, .L128_ctr32_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
@@ -2073,7 +2071,7 @@ cmll128_t4_ctr32_encrypt:
 .globl	cmll256_t4_ctr32_encrypt
 .align	32
 cmll256_t4_ctr32_encrypt:
-	save		%sp, -STACK_FRAME, %sp
+	save		%sp, -112, %sp
 	srln		%i2, 0, %i2		! needed on v8+, "nop" on v9
 
 	prefetch	[%i0], 20
@@ -2103,7 +2101,7 @@ cmll256_t4_ctr32_encrypt:
 	and		%i1, 7, %l2
 	cmp		%i2, 255
 	movrnz		%l2, 0, %l5		! if (	%i1&7 ||
-	movleu		SIZE_T_CC, 0, %l5	!	%i2<256 ||
+	movleu		%icc, 0, %l5	!	%i2<256 ||
 	brnz,pn		%l5, .L256_ctr32_blk	!	%i0==%i1)
 	srl		%l3, %l2, %l3
 
@@ -2322,7 +2320,7 @@ cmll256_t4_ctr32_encrypt:
 	stda		%f4, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
 	add		%i1, 8, %i1
 	stda		%f6, [%i1]0xe2		! ASI_BLK_INIT, T4-specific
-	bgu,pt		SIZE_T_CC, .L256_ctr32_blk_loop2x
+	bgu,pt		%icc, .L256_ctr32_blk_loop2x
 	add		%i1, 8, %i1
 
 	add		%l5, %i2, %i2
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/dest4-sparcv9.S	Tue Feb 20 12:48:46 2018
@@ -1,10 +1,3 @@
-#include "sparc_arch.h"
-
-#ifdef	__arch64__
-.register       %g2,#scratch
-.register       %g3,#scratch
-#endif
-
 .text
 .align	32
 .globl	des_t4_key_expand
@@ -54,7 +47,7 @@ des_t4_key_expand:
 .align	32
 des_t4_cbc_encrypt:
 	cmp		%o2, 0
-	be,pn		SIZE_T_CC, .Lcbc_abort
+	be,pn		%icc, .Lcbc_abort
 	srln		%o2, 0, %o2		! needed on v8+, "nop" on v9
 	ld		[%o4 + 0], %f0	! load ivec
 	ld		[%o4 + 4], %f1
@@ -155,7 +148,7 @@ des_t4_cbc_encrypt:
 .align	32
 des_t4_cbc_decrypt:
 	cmp		%o2, 0
-	be,pn		SIZE_T_CC, .Lcbc_abort
+	be,pn		%icc, .Lcbc_abort
 	srln		%o2, 0, %o2		! needed on v8+, "nop" on v9
 	ld		[%o4 + 0], %f2	! load ivec
 	ld		[%o4 + 4], %f3
@@ -254,7 +247,7 @@ des_t4_cbc_decrypt:
 .align	32
 des_t4_ede3_cbc_encrypt:
 	cmp		%o2, 0
-	be,pn		SIZE_T_CC, .Lcbc_abort
+	be,pn		%icc, .Lcbc_abort
 	srln		%o2, 0, %o2		! needed on v8+, "nop" on v9
 	ld		[%o4 + 0], %f0	! load ivec
 	ld		[%o4 + 4], %f1
@@ -406,7 +399,7 @@ des_t4_ede3_cbc_encrypt:
 .align	32
 des_t4_ede3_cbc_decrypt:
 	cmp		%o2, 0
-	be,pn		SIZE_T_CC, .Lcbc_abort
+	be,pn		%icc, .Lcbc_abort
 	srln		%o2, 0, %o2		! needed on v8+, "nop" on v9
 	ld		[%o4 + 0], %f2	! load ivec
 	ld		[%o4 + 4], %f3
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9-mont.S	Tue Feb 20 12:48:46 2018
@@ -1,5 +1,3 @@
-#include "sparc_arch.h"
-
 .section	".text",#alloc,#execinstr
 
 .global	bn_mul_mont_int
@@ -12,7 +10,7 @@ bn_mul_mont_int:
 	clr	%o0
 .align	32
 .Lenter:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-128,%sp
 	sll	%i5,2,%i5		! num*=4
 	or	%g1,%lo(0xffffffff),%g1
 	ld	[%i4],%i4
@@ -21,21 +19,21 @@ bn_mul_mont_int:
 	ld	[%i2],%l2		! bp[0]
 	nop
 
-	add	%sp,STACK_BIAS,%o7		! real top of stack
+	add	%sp,0,%o7		! real top of stack
 	ld	[%i1],%o0		! ap[0] ! redundant in squaring context
 	sub	%o7,%i5,%o7
 	ld	[%i1+4],%l5		! ap[1]
 	and	%o7,-1024,%o7
 	ld	[%i3],%o1		! np[0]
-	sub	%o7,STACK_BIAS,%sp		! alloca
+	sub	%o7,0,%sp		! alloca
 	ld	[%i3+4],%l6		! np[1]
-	be,pt	SIZE_T_CC,.Lbn_sqr_mont
+	be,pt	%icc,.Lbn_sqr_mont
 	mov	12,%l1
 
 	mulx	%o0,%l2,%o0	! ap[0]*bp[0]
 	mulx	%l5,%l2,%g4	!prologue! ap[1]*bp[0]
 	and	%o0,%g1,%o3
-	add	%sp,STACK_BIAS+STACK_FRAME,%l4
+	add	%sp,0+128,%l4
 	ld	[%i1+8],%l5		!prologue!
 
 	mulx	%i4,%o3,%l3		! "t[0]"*n0
@@ -94,7 +92,7 @@ bn_mul_mont_int:
 	mov	4,%l0			! i++
 	ld	[%i2+4],%l2		! bp[1]
 .Louter:
-	add	%sp,STACK_BIAS+STACK_FRAME,%l4
+	add	%sp,0+128,%l4
 	ld	[%i1],%o0		! ap[0]
 	ld	[%i1+4],%l5		! ap[1]
 	ld	[%i3],%o1		! np[0]
@@ -213,7 +211,7 @@ bn_mul_mont_int:
 	mulx	%l2,%l2,%o0		! ap[0]*ap[0]
 	mulx	%l5,%l2,%g4		!prologue!
 	and	%o0,%g1,%o3
-	add	%sp,STACK_BIAS+STACK_FRAME,%l4
+	add	%sp,0+128,%l4
 	ld	[%i1+8],%l5			!prologue!
 
 	mulx	%i4,%o3,%l3			! "t[0]"*n0
@@ -222,7 +220,7 @@ bn_mul_mont_int:
 
 	mulx	%o1,%l3,%o1		! np[0]*"t[0]"*n0
 	mulx	%l6,%l3,%o4		!prologue!
-	and	%o0,1,%o5
+	and	%o0,1,%i2
 	ld	[%i3+8],%l6			!prologue!
 	srlx	%o0,1,%o0
 	add	%o3,%o1,%o1
@@ -239,9 +237,9 @@ bn_mul_mont_int:
 	ld	[%i3+%l1],%l6			! np[j]
 	srlx	%o0,32,%o0
 	add	%o3,%o3,%o3
-	or	%o5,%o3,%o3
+	or	%i2,%o3,%o3
 	mov	%g5,%o4
-	srlx	%o3,32,%o5
+	srlx	%o3,32,%i2
 	add	%l1,4,%l1				! j++
 	and	%o3,%g1,%o3
 	cmp	%l1,%i5
@@ -260,8 +258,8 @@ bn_mul_mont_int:
 	and	%o0,%g1,%o3
 	srlx	%o0,32,%o0
 	add	%o3,%o3,%o3
-	or	%o5,%o3,%o3
-	srlx	%o3,32,%o5
+	or	%i2,%o3,%o3
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	add	%o3,%o1,%o1
 	st	%o1,[%l4]
@@ -272,22 +270,22 @@ bn_mul_mont_int:
 	and	%o0,%g1,%o3
 	srlx	%o0,32,%o0
 	add	%o3,%o3,%o3
-	or	%o5,%o3,%o3
-	srlx	%o3,32,%o5
+	or	%i2,%o3,%o3
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	add	%o3,%o1,%o1
 	st	%o1,[%l4+4]
 	srlx	%o1,32,%o1
 
 	add	%o0,%o0,%o0
-	or	%o5,%o0,%o0
+	or	%i2,%o0,%o0
 	add	%o0,%o1,%o1
 	st	%o1,[%l4+8]
 	srlx	%o1,32,%o2
 
-	ld	[%sp+STACK_BIAS+STACK_FRAME],%g4	! tp[0]
-	ld	[%sp+STACK_BIAS+STACK_FRAME+4],%g5	! tp[1]
-	ld	[%sp+STACK_BIAS+STACK_FRAME+8],%l7	! tp[2]
+	ld	[%sp+0+128],%g4	! tp[0]
+	ld	[%sp+0+128+4],%g5	! tp[1]
+	ld	[%sp+0+128+8],%l7	! tp[2]
 	ld	[%i1+4],%l2			! ap[1]
 	ld	[%i1+8],%l5			! ap[2]
 	ld	[%i3],%o1			! np[0]
@@ -306,19 +304,19 @@ bn_mul_mont_int:
 	add	%g5,%o1,%o1
 	srlx	%o0,32,%o0
 	add	%o3,%o1,%o1
-	and	%o0,1,%o5
+	and	%o0,1,%i2
 	add	%o4,%o1,%o1
 	srlx	%o0,1,%o0
 	mov	12,%l1
-	st	%o1,[%sp+STACK_BIAS+STACK_FRAME]	! tp[0]=
+	st	%o1,[%sp+0+128]	! tp[0]=
 	srlx	%o1,32,%o1
-	add	%sp,STACK_BIAS+STACK_FRAME+4,%l4
+	add	%sp,0+128+4,%l4
 
 .Lsqr_2nd:
 	mulx	%l5,%l2,%o3
 	mulx	%l6,%l3,%o4
 	add	%o3,%o0,%o0
-	add	%l7,%o5,%o5
+	add	%l7,%o1,%o1
 	ld	[%i1+%l1],%l5			! ap[j]
 	and	%o0,%g1,%o3
 	ld	[%i3+%l1],%l6			! np[j]
@@ -327,8 +325,8 @@ bn_mul_mont_int:
 	ld	[%l4+8],%l7			! tp[j]
 	add	%o3,%o3,%o3
 	add	%l1,4,%l1				! j++
-	add	%o5,%o3,%o3
-	srlx	%o3,32,%o5
+	or	%i2,%o3,%o3
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	cmp	%l1,%i5
 	add	%o3,%o1,%o1
@@ -341,27 +339,27 @@ bn_mul_mont_int:
 	mulx	%l5,%l2,%o3
 	mulx	%l6,%l3,%o4
 	add	%o3,%o0,%o0
-	add	%l7,%o5,%o5
+	add	%l7,%o1,%o1
 	and	%o0,%g1,%o3
 	srlx	%o0,32,%o0
 	add	%o4,%o1,%o1
 	add	%o3,%o3,%o3
-	add	%o5,%o3,%o3
-	srlx	%o3,32,%o5
+	or	%i2,%o3,%o3
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	add	%o3,%o1,%o1
 	st	%o1,[%l4]			! tp[j-1]
 	srlx	%o1,32,%o1
 
 	add	%o0,%o0,%o0
-	add	%o5,%o0,%o0
+	or	%i2,%o0,%o0
 	add	%o0,%o1,%o1
 	add	%o2,%o1,%o1
 	st	%o1,[%l4+4]
 	srlx	%o1,32,%o2
 
-	ld	[%sp+STACK_BIAS+STACK_FRAME],%g5	! tp[0]
-	ld	[%sp+STACK_BIAS+STACK_FRAME+4],%l7	! tp[1]
+	ld	[%sp+0+128],%g5	! tp[0]
+	ld	[%sp+0+128+4],%l7	! tp[1]
 	ld	[%i1+8],%l2			! ap[2]
 	ld	[%i3],%o1			! np[0]
 	ld	[%i3+4],%l6			! np[1]
@@ -374,9 +372,9 @@ bn_mul_mont_int:
 	and	%o0,%g1,%o3
 	add	%g5,%o1,%o1
 	srlx	%o0,32,%o0
-	add	%sp,STACK_BIAS+STACK_FRAME,%l4
+	add	%sp,0+128,%l4
 	srlx	%o1,32,%o1
-	and	%o0,1,%o5
+	and	%o0,1,%i2
 	srlx	%o0,1,%o0
 	mov	4,%l1
 
@@ -414,7 +412,7 @@ bn_mul_mont_int:
 .Lsqr_inner2:
 	mulx	%l5,%l2,%o3
 	mulx	%l6,%l3,%o4
-	add	%l7,%o5,%o5
+	add	%l7,%o1,%o1
 	add	%o3,%o0,%o0
 	ld	[%i1+%l1],%l5			! ap[j]
 	and	%o0,%g1,%o3
@@ -422,9 +420,9 @@ bn_mul_mont_int:
 	srlx	%o0,32,%o0
 	add	%o3,%o3,%o3
 	ld	[%l4+8],%l7			! tp[j]
-	add	%o5,%o3,%o3
+	or	%i2,%o3,%o3
 	add	%l1,4,%l1				! j++
-	srlx	%o3,32,%o5
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	cmp	%l1,%i5
 	add	%o3,%o1,%o1
@@ -437,13 +435,13 @@ bn_mul_mont_int:
 .Lsqr_no_inner2:
 	mulx	%l5,%l2,%o3
 	mulx	%l6,%l3,%o4
-	add	%l7,%o5,%o5
+	add	%l7,%o1,%o1
 	add	%o3,%o0,%o0
 	and	%o0,%g1,%o3
 	srlx	%o0,32,%o0
 	add	%o3,%o3,%o3
-	add	%o5,%o3,%o3
-	srlx	%o3,32,%o5
+	or	%i2,%o3,%o3
+	srlx	%o3,32,%i2
 	and	%o3,%g1,%o3
 	add	%o3,%o1,%o1
 	add	%o4,%o1,%o1
@@ -451,15 +449,15 @@ bn_mul_mont_int:
 	srlx	%o1,32,%o1
 
 	add	%o0,%o0,%o0
-	add	%o5,%o0,%o0
+	or	%i2,%o0,%o0
 	add	%o0,%o1,%o1
 	add	%o2,%o1,%o1
 	st	%o1,[%l4+4]
 	srlx	%o1,32,%o2
 
 	add	%l0,4,%l0				! i++
-	ld	[%sp+STACK_BIAS+STACK_FRAME],%g5	! tp[0]
-	ld	[%sp+STACK_BIAS+STACK_FRAME+4],%l7	! tp[1]
+	ld	[%sp+0+128],%g5	! tp[0]
+	ld	[%sp+0+128+4],%l7	! tp[1]
 	ld	[%i1+%l0],%l2			! ap[j]
 	ld	[%i3],%o1			! np[0]
 	ld	[%i3+4],%l6			! np[1]
@@ -472,9 +470,9 @@ bn_mul_mont_int:
 	and	%o0,%g1,%o3
 	add	%g5,%o1,%o1
 	srlx	%o0,32,%o0
-	add	%sp,STACK_BIAS+STACK_FRAME,%l4
+	add	%sp,0+128,%l4
 	srlx	%o1,32,%o1
-	and	%o0,1,%o5
+	and	%o0,1,%i2
 	srlx	%o0,1,%o0
 
 	cmp	%g4,%i5			! i<num-1
@@ -496,17 +494,14 @@ bn_mul_mont_int:
 !.Lsqr_last
 
 	mulx	%l6,%l3,%o4
-	add	%l7,%o3,%o3
-	srlx	%o3,32,%g4
-	and	%o3,%g1,%o3
-	add	%g4,%o5,%o5
+	add	%l7,%o1,%o1
 	add	%o3,%o1,%o1
 	add	%o4,%o1,%o1
 	st	%o1,[%l4]
 	srlx	%o1,32,%o1
 
 	add	%o0,%o0,%o0		! recover %o0
-	add	%o5,%o0,%o0
+	or	%i2,%o0,%o0
 	add	%o0,%o1,%o1
 	add	%o2,%o1,%o1
 	st	%o1,[%l4+4]
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/sparcv9a-mont.S	Tue Feb 20 12:48:46 2018
@@ -1,11 +1,9 @@
-#include "sparc_arch.h"
-
 .section	".text",#alloc,#execinstr
 
 .global bn_mul_mont_fpu
 .align  32
 bn_mul_mont_fpu:
-	save	%sp,-STACK_FRAME-64,%sp
+	save	%sp,-128-64,%sp
 
 	cmp	%i5,4
 	bl,a,pn %icc,.Lret
@@ -24,15 +22,15 @@ bn_mul_mont_fpu:
 
 	sll	%i5,3,%i5		! num*=8
 
-	add	%sp,STACK_BIAS,%o0		! real top of stack
+	add	%sp,0,%o0		! real top of stack
 	sll	%i5,2,%o1
 	add	%o1,%i5,%o1		! %o1=num*5
 	sub	%o0,%o1,%o0
 	and	%o0,-2048,%o0		! optimize TLB utilization
-	sub	%o0,STACK_BIAS,%sp		! alloca(5*num*8)
+	sub	%o0,0,%sp		! alloca(5*num*8)
 
 	rd	%asi,%o7		! save %asi
-	add	%sp,STACK_BIAS+STACK_FRAME+64,%l0
+	add	%sp,0+128+64,%l0
 	add	%l0,%i5,%l1
 	add	%l1,%i5,%l1	! [an]p_[lh] point at the vectors' ends !
 	add	%l1,%i5,%l2
@@ -46,7 +44,7 @@ bn_mul_mont_fpu:
 	add	%i2,%i5,%i2
 	add	%i3,%i5,%i3
 
-	stx	%o7,[%sp+STACK_BIAS+STACK_FRAME+48]	! save %asi
+	stx	%o7,[%sp+0+128+48]	! save %asi
 
 	sub	%g0,%i5,%l5		! i=-num
 	sub	%g0,%i5,%l6		! j=-num
@@ -67,7 +65,7 @@ bn_mul_mont_fpu:
 
 	mulx	%o1,%o0,%o0		! ap[0]*bp[0]
 	mulx	%g4,%o0,%o0		! ap[0]*bp[0]*n0
-	stx	%o0,[%sp+STACK_BIAS+STACK_FRAME+0]
+	stx	%o0,[%sp+0+128+0]
 
 	ld	[%o3+0],%f17	! load a[j] as pair of 32-bit words
 	.word	0xa1b00c20	! fzeros %f16
@@ -89,13 +87,13 @@ bn_mul_mont_fpu:
 	fxtod	%f22,%f22
 
 	! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+6]%asi,%f8
+	ldda	[%sp+0+128+6]%asi,%f8
 	fxtod	%f0,%f0
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+4]%asi,%f10
+	ldda	[%sp+0+128+4]%asi,%f10
 	fxtod	%f2,%f2
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+2]%asi,%f12
+	ldda	[%sp+0+128+2]%asi,%f12
 	fxtod	%f4,%f4
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+0]%asi,%f14
+	ldda	[%sp+0+128+0]%asi,%f14
 	fxtod	%f6,%f6
 
 	std	%f16,[%l1+%l6]		! save smashed ap[j] in double format
@@ -141,13 +139,13 @@ bn_mul_mont_fpu:
 	fdtox	%f52,%f52
 	fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
+	std	%f48,[%sp+0+128+0]
 	add	%l6,8,%l6
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
+	std	%f50,[%sp+0+128+8]
 	add	%i1,%l6,%o4
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
+	std	%f52,[%sp+0+128+16]
 	add	%i3,%l6,%o5
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f54,[%sp+0+128+24]
 
 	ld	[%o4+0],%f17	! load a[j] as pair of 32-bit words
 	.word	0xa1b00c20	! fzeros %f16
@@ -163,13 +161,13 @@ bn_mul_mont_fpu:
 	fxtod	%f20,%f20
 	fxtod	%f22,%f22
 
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
+	ldx	[%sp+0+128+0],%o0
 		fmuld	%f16,%f0,%f32
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
+	ldx	[%sp+0+128+8],%o1
 		fmuld	%f20,%f8,%f48
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
+	ldx	[%sp+0+128+16],%o2
 		fmuld	%f16,%f2,%f34
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+24],%o3
 		fmuld	%f20,%f10,%f50
 
 	srlx	%o0,16,%o7
@@ -225,12 +223,12 @@ bn_mul_mont_fpu:
 	fdtox	%f52,%f52
 	fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
+	std	%f48,[%sp+0+128+0]
+	std	%f50,[%sp+0+128+8]
 	addcc	%l6,8,%l6
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
+	std	%f52,[%sp+0+128+16]
 	bz,pn	%icc,.L1stskip
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f54,[%sp+0+128+24]
 
 .align	32			! incidentally already aligned !
 .L1st:
@@ -250,13 +248,13 @@ bn_mul_mont_fpu:
 	fxtod	%f20,%f20
 	fxtod	%f22,%f22
 
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
+	ldx	[%sp+0+128+0],%o0
 		fmuld	%f16,%f0,%f32
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
+	ldx	[%sp+0+128+8],%o1
 		fmuld	%f20,%f8,%f48
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
+	ldx	[%sp+0+128+16],%o2
 		fmuld	%f16,%f2,%f34
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+24],%o3
 		fmuld	%f20,%f10,%f50
 
 	srlx	%o0,16,%o7
@@ -316,10 +314,10 @@ bn_mul_mont_fpu:
 	fdtox	%f52,%f52
 	fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f48,[%sp+0+128+0]
+	std	%f50,[%sp+0+128+8]
+	std	%f52,[%sp+0+128+16]
+	std	%f54,[%sp+0+128+24]
 
 	addcc	%l6,8,%l6
 	bnz,pt	%icc,.L1st
@@ -329,15 +327,15 @@ bn_mul_mont_fpu:
 	fdtox	%f24,%f24
 	fdtox	%f26,%f26
 
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+0],%o0
+	ldx	[%sp+0+128+8],%o1
+	ldx	[%sp+0+128+16],%o2
+	ldx	[%sp+0+128+24],%o3
 
 	srlx	%o0,16,%o7
-	std	%f24,[%sp+STACK_BIAS+STACK_FRAME+32]
+	std	%f24,[%sp+0+128+32]
 	add	%o7,%o1,%o1
-	std	%f26,[%sp+STACK_BIAS+STACK_FRAME+40]
+	std	%f26,[%sp+0+128+40]
 	srlx	%o1,16,%o7
 	add	%o7,%o2,%o2
 	srlx	%o2,16,%o7
@@ -351,9 +349,9 @@ bn_mul_mont_fpu:
 	or	%o1,%o0,%o0
 	or	%o2,%o0,%o0
 	or	%o7,%o0,%o0		! 64-bit result
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+32],%o4
+	ldx	[%sp+0+128+32],%o4
 	addcc	%g1,%o0,%o0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+40],%o5
+	ldx	[%sp+0+128+40],%o5
 	srlx	%o3,16,%g1		! 34-bit carry
 	bcs,a	%xcc,.+8
 	add	%g1,1,%g1
@@ -379,7 +377,7 @@ bn_mul_mont_fpu:
 .align	32
 .Louter:
 	sub	%g0,%i5,%l6		! j=-num
-	add	%sp,STACK_BIAS+STACK_FRAME+64,%l0
+	add	%sp,0+128+64,%l0
 
 	add	%i1,%l6,%o3
 	add	%i2,%l5,%o4
@@ -397,7 +395,7 @@ bn_mul_mont_fpu:
 	mulx	%o1,%o0,%o0
 	addcc	%o2,%o0,%o0
 	mulx	%g4,%o0,%o0		! (ap[0]*bp[i]+t[0])*n0
-	stx	%o0,[%sp+STACK_BIAS+STACK_FRAME+0]
+	stx	%o0,[%sp+0+128+0]
 
 	! transfer b[i] to FPU as 4x16-bit values
 	ldda	[%o4+2]%asi,%f0
@@ -406,13 +404,13 @@ bn_mul_mont_fpu:
 	ldda	[%o4+4]%asi,%f6
 
 	! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+6]%asi,%f8
+	ldda	[%sp+0+128+6]%asi,%f8
 	fxtod	%f0,%f0
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+4]%asi,%f10
+	ldda	[%sp+0+128+4]%asi,%f10
 	fxtod	%f2,%f2
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+2]%asi,%f12
+	ldda	[%sp+0+128+2]%asi,%f12
 	fxtod	%f4,%f4
-	ldda	[%sp+STACK_BIAS+STACK_FRAME+0]%asi,%f14
+	ldda	[%sp+0+128+0]%asi,%f14
 	fxtod	%f6,%f6
 	ldd	[%l1+%l6],%f16		! load a[j] in double format
 	fxtod	%f8,%f8
@@ -457,11 +455,11 @@ bn_mul_mont_fpu:
 	fdtox	%f52,%f52
 	fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
+	std	%f48,[%sp+0+128+0]
+	std	%f50,[%sp+0+128+8]
+	std	%f52,[%sp+0+128+16]
 	add	%l6,8,%l6
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f54,[%sp+0+128+24]
 
 	ldd	[%l1+%l6],%f16		! load a[j] in double format
 	ldd	[%l2+%l6],%f18
@@ -473,15 +471,15 @@ bn_mul_mont_fpu:
 		fmuld	%f16,%f2,%f34
 		fmuld	%f20,%f10,%f50
 		fmuld	%f16,%f4,%f36
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
+	ldx	[%sp+0+128+0],%o0
 		faddd	%f32,%f48,%f48
 		fmuld	%f20,%f12,%f52
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
+	ldx	[%sp+0+128+8],%o1
 		fmuld	%f16,%f6,%f38
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
+	ldx	[%sp+0+128+16],%o2
 		faddd	%f34,%f50,%f50
 		fmuld	%f20,%f14,%f54
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+24],%o3
 		fmuld	%f18,%f0,%f40
 
 	srlx	%o0,16,%o7
@@ -529,12 +527,12 @@ bn_mul_mont_fpu:
 	fdtox	%f52,%f52
 	fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
+	std	%f48,[%sp+0+128+0]
+	std	%f50,[%sp+0+128+8]
 	addcc	%l6,8,%l6
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
+	std	%f52,[%sp+0+128+16]
 	bz,pn	%icc,.Linnerskip
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f54,[%sp+0+128+24]
 
 	ba	.Linner
 	nop
@@ -550,15 +548,15 @@ bn_mul_mont_fpu:
 		fmuld	%f16,%f2,%f34
 		fmuld	%f20,%f10,%f50
 		fmuld	%f16,%f4,%f36
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
+	ldx	[%sp+0+128+0],%o0
 		faddd	%f32,%f48,%f48
 		fmuld	%f20,%f12,%f52
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
+	ldx	[%sp+0+128+8],%o1
 		fmuld	%f16,%f6,%f38
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
+	ldx	[%sp+0+128+16],%o2
 		faddd	%f34,%f50,%f50
 		fmuld	%f20,%f14,%f54
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+24],%o3
 		fmuld	%f18,%f0,%f40
 
 	srlx	%o0,16,%o7
@@ -608,11 +606,11 @@ bn_mul_mont_fpu:
 	stx	%o0,[%l0]		! tp[j-1]
 		fdtox	%f54,%f54
 
-	std	%f48,[%sp+STACK_BIAS+STACK_FRAME+0]
-	std	%f50,[%sp+STACK_BIAS+STACK_FRAME+8]
-	std	%f52,[%sp+STACK_BIAS+STACK_FRAME+16]
+	std	%f48,[%sp+0+128+0]
+	std	%f50,[%sp+0+128+8]
+	std	%f52,[%sp+0+128+16]
 	addcc	%l6,8,%l6
-	std	%f54,[%sp+STACK_BIAS+STACK_FRAME+24]
+	std	%f54,[%sp+0+128+24]
 	bnz,pt	%icc,.Linner
 	add	%l0,8,%l0
 
@@ -620,15 +618,15 @@ bn_mul_mont_fpu:
 	fdtox	%f24,%f24
 	fdtox	%f26,%f26
 
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+0],%o0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+8],%o1
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+16],%o2
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+24],%o3
+	ldx	[%sp+0+128+0],%o0
+	ldx	[%sp+0+128+8],%o1
+	ldx	[%sp+0+128+16],%o2
+	ldx	[%sp+0+128+24],%o3
 
 	srlx	%o0,16,%o7
-	std	%f24,[%sp+STACK_BIAS+STACK_FRAME+32]
+	std	%f24,[%sp+0+128+32]
 	add	%o7,%o1,%o1
-	std	%f26,[%sp+STACK_BIAS+STACK_FRAME+40]
+	std	%f26,[%sp+0+128+40]
 	srlx	%o1,16,%o7
 	add	%o7,%o2,%o2
 	srlx	%o2,16,%o7
@@ -641,9 +639,9 @@ bn_mul_mont_fpu:
 	sllx	%o3,48,%o7
 	or	%o1,%o0,%o0
 	or	%o2,%o0,%o0
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+32],%o4
+	ldx	[%sp+0+128+32],%o4
 	or	%o7,%o0,%o0		! 64-bit result
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+40],%o5
+	ldx	[%sp+0+128+40],%o5
 	addcc	%g1,%o0,%o0
 	ldx	[%l0+8],%o7		! tp[j]
 	srlx	%o3,16,%g1		! 34-bit carry
@@ -730,7 +728,7 @@ bn_mul_mont_fpu:
 	brnz,pt	%o7,.Lzap
 	nop
 
-	ldx	[%sp+STACK_BIAS+STACK_FRAME+48],%o7
+	ldx	[%sp+0+128+48],%o7
 	wr	%g0,%o7,%asi		! restore %asi
 
 	mov	1,%i0
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S:1.2 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S:1.3
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S:1.2	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/vis3-mont.S	Tue Feb 20 12:48:46 2018
@@ -1,15 +1,8 @@
-#include "sparc_arch.h"
-
-#ifdef	__arch64__
-.register	%g2,#scratch
-.register	%g3,#scratch
-#endif
-
 .section	".text",#alloc,#execinstr
 .globl	bn_mul_mont_vis3
 .align	32
 bn_mul_mont_vis3:
-	add	%sp,	STACK_BIAS,	%g4	! real top of stack
+	add	%sp,	0,	%g4	! real top of stack
 	sll	%o5,	2,	%o5	! size in bytes
 	add	%o5,	63,	%g5
 	andn	%g5,	63,	%g5	! buffer size rounded up to 64 bytes
@@ -17,12 +10,12 @@ bn_mul_mont_vis3:
 	add	%g5,	%g1,	%g1	! 3*buffer size
 	sub	%g4,	%g1,	%g1
 	andn	%g1,	63,	%g1	! align at 64 byte
-	sub	%g1,	STACK_FRAME,	%g1	! new top of stack
+	sub	%g1,	112,	%g1	! new top of stack
 	sub	%g1,	%g4,	%g1
 
 	save	%sp,	%g1,	%sp
 	ld	[%i4+0],	%l0	! pull n0[0..1] value
-	add	%sp, STACK_BIAS+STACK_FRAME, %l5
+	add	%sp, 0+112, %l5
 	ld	[%i4+4],	%l1
 	add	%l5,	%g5,	%l7
 	ld	[%i2+0],	%l2	! m0=bp[0]

Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S:1.3 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S:1.4
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S:1.3	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/des_enc-sparc.S	Tue Feb 20 12:48:46 2018
@@ -1,9 +1,26 @@
-! Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
+!  des_enc.m4
+!  des_enc.S  (generated from des_enc.m4)
 !
-! Licensed under the OpenSSL license (the "License").  You may not use
-! this file except in compliance with the License.  You can obtain a copy
-! in the file LICENSE in the source distribution or at
-! https://www.openssl.org/source/license.html
+!  UltraSPARC assembler version of the LibDES/SSLeay/OpenSSL des_enc.c file.
+!
+!  Version 1.0. 32-bit version.
+!
+!  June 8, 2000.
+!
+!  Version 2.0. 32/64-bit, PIC-ification, blended CPU adaptation
+!		by Andy Polyakov.
+!
+!  January 1, 2003.
+!
+!  Assembler version: Copyright Svend Olaf Mikkelsen.
+!
+!  Original C code: Copyright Eric A. Young.
+!
+!  This code can be freely used by LibDES/SSLeay/OpenSSL users.
+!
+!  The LibDES/SSLeay/OpenSSL copyright notices must be respected.
+!
+!  This version can be redistributed.
 !
 !  To expand the m4 macros: m4 -B 8192 des_enc.m4 > des_enc.S
 !
@@ -31,10 +48,6 @@
 
 #include <openssl/opensslconf.h>
 
-#ifdef OPENSSL_FIPSCANISTER
-#include <openssl/fipssyms.h>
-#endif
-
 #if defined(__SUNPRO_C) && defined(__sparcv9)
 # define ABI64  /* They've said -xarch=v9 at command line */
 #elif defined(__GNUC__) && defined(__arch64__)
@@ -50,6 +63,9 @@
 # define	STPTR	stx
 # define	ARG0	128
 # define	ARGSZ	8
+# ifndef __sparc_v9__
+# define __sparc_v9__
+# endif
 #else
 # define	FRAME	-96
 # define	BIAS	0
@@ -143,7 +159,7 @@
 ! other half (use).
 !
 ! In this version we do two rounds in a loop repeated 7 times
-! and two rounds separately.
+! and two rounds seperately.
 !
 ! One half has the bits for the sboxes in the following positions:
 !
@@ -414,7 +430,11 @@
 	xor	out5, local1, out5            ! 1 finished
 
 	xor	out5, local2, out5            ! 3 finished
+#ifdef __sparc_v9__
+	bne,pt	%icc, .des_enc.1
+#else
 	bne	.des_enc.1
+#endif
 	and	local4, 252, local1       ! sbox 1 next round
 
 ! two rounds more:
@@ -668,7 +688,11 @@
 	xor	in5, local1, in5            ! 1 finished
 
 	xor	in5, local2, in5            ! 3 finished
+#ifdef __sparc_v9__
+	bne,pt	%icc, .des_dec.1
+#else
 	bne	.des_dec.1
+#endif
 	and	local4, 252, local1       ! sbox 1 next round
 
 ! two rounds more:
@@ -805,7 +829,11 @@ DES_encrypt1:
 	ld	[in0], in5                ! left
 	cmp	in2, 0                    ! enc
 
+#ifdef __sparc_v9__
+	be,pn	%icc, .encrypt.dec        ! enc/dec
+#else
 	be	.encrypt.dec
+#endif
 	ld	[in0+4], out5             ! right
 
 	! parameter 6  1/2 for include encryption/decryption
@@ -1033,7 +1061,11 @@ DES_encrypt1:
 	xor	out5, local1, out5            ! 1 finished
 
 	xor	out5, local2, out5            ! 3 finished
+#ifdef __sparc_v9__
+	bne,pt	%icc, .des_encrypt1.1
+#else
 	bne	.des_encrypt1.1
+#endif
 	and	local4, 252, local1       ! sbox 1 next round
 
 ! two rounds more:
@@ -1473,7 +1505,11 @@ DES_encrypt2:
 
 	! we use our own stackframe
 
+#ifdef __sparc_v9__
+	be,pn	%icc, .encrypt2.dec       ! decryption
+#else
 	be	.encrypt2.dec
+#endif
 	STPTR	in0, [%sp+BIAS+ARG0+0*ARGSZ]
 
 	ld	[in3], out0               ! key 7531 first round
@@ -1997,7 +2033,11 @@ DES_ncbc_encrypt:
 
 	cmp	in5, 0                    ! enc   
 
+#ifdef __sparc_v9__
+	be,pn	%icc, .ncbc.dec
+#else
 	be	.ncbc.dec
+#endif
 	STPTR	in4,  [%sp+BIAS+ARG0+4*ARGSZ] 
 
 	! addr  left  right  temp  label
@@ -2008,6 +2048,18 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	in4, 3, global0
+	bne,pn	%icc, .LLE1
+	nop
+
+	lda	[in4] 0x88, in5
+	add	in4, 4, local3
+
+	ba,pt	%icc, .LLE1a
+	lda	[local3] 0x88, out5
+#endif
+
 .LLE1:
 	ldub	[in4+3], in5
 
@@ -2043,7 +2095,11 @@ DES_ncbc_encrypt:
 
 	addcc	in2, -8, in2              ! bytes missing when first block done
 
+#ifdef __sparc_v9__
+	bl,pn	%icc, .ncbc.enc.seven.or.less
+#else
 	bl	.ncbc.enc.seven.or.less
+#endif
 	mov	in3, in4                  ! schedule
 
 .ncbc.enc.next.block:
@@ -2055,6 +2111,18 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	in0, 3, global0
+	bne,pn	%icc, .LLE2
+	nop
+
+	lda	[in0] 0x88, out4
+	add	in0, 4, local3
+
+	ba,pt	%icc, .LLE2a
+	lda	[local3] 0x88, global4
+#endif
+
 .LLE2:
 	ldub	[in0+3], out4
 
@@ -2322,7 +2390,11 @@ DES_ncbc_encrypt:
 	xor	out5, local1, out5            ! 1 finished
 
 	xor	out5, local2, out5            ! 3 finished
+#ifdef __sparc_v9__
+	bne,pt	%icc, .ncbc.enc.1
+#else
 	bne	.ncbc.enc.1
+#endif
 	and	local4, 252, local1       ! sbox 1 next round
 
 ! two rounds more:
@@ -2437,7 +2509,11 @@ DES_ncbc_encrypt:
 	xor	out5, local2, out5
  ! include encryption  ks in3
 
+#ifdef __sparc_v9__
+	bl,pn	%icc, .ncbc.enc.next.block_fp
+#else
 	bl	.ncbc.enc.next.block_fp
+#endif
 	add	in0, 8, in0               ! input address
 
 	! If 8 or more bytes are to be encrypted after this block,
@@ -2451,6 +2527,18 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	in0, 3, global0
+	bne,pn	%icc, .LLE12
+	nop
+
+	lda	[in0] 0x88, global3
+	add	in0, 4, local5
+
+	ba,pt	%icc, .LLE12a
+	lda	[local5] 0x88, global4
+#endif
+
 .LLE12:
 	ldub	[in0+3], global3
 
@@ -2624,6 +2712,18 @@ DES_ncbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	in1, 3, global0
+	bne,pn	%icc, .SLE10
+	nop
+
+	sta	out0, [in1] 0x88
+	add	in1, 4, local3
+
+	ba,pt	%icc, .SLE10a
+	sta	out1, [local3] 0x88
+#endif
+
 .SLE10:
 	and	out0, 255, local3
 	stub	local3, [in1+0]
@@ -2667,7 +2767,7 @@ DES_ncbc_encrypt:
 	xor	global4, local1, out5     ! iv xor next block
 
 	ba	.ncbc.enc.next.block_2
-	add	in1, 8, in1               ! output address
+	add	in1, 8, in1               ! output adress
 
 .ncbc.enc.next.block_fp:
 
@@ -2754,6 +2854,18 @@ DES_ncbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	in1, 3, global0
+	bne,pn	%icc, .SLE1
+	nop
+
+	sta	in5, [in1] 0x88
+	add	in1, 4, local3
+
+	ba,pt	%icc, .SLE1a
+	sta	out5, [local3] 0x88
+#endif
+
 .SLE1:
 	and	in5, 255, local3
 	stub	local3, [in1+0]
@@ -2790,14 +2902,22 @@ DES_ncbc_encrypt:
 
 	addcc   in2, -8, in2              ! bytes missing when next block done
 
+#ifdef __sparc_v9__
+	bpos,pt	%icc, .ncbc.enc.next.block  ! also jumps if 0
+#else
 	bpos	.ncbc.enc.next.block
+#endif
 	add	in1, 8, in1
 
 .ncbc.enc.seven.or.less:
 
 	cmp	in2, -8
 
+#ifdef __sparc_v9__
+	ble,pt	%icc, .ncbc.enc.finish
+#else
 	ble	.ncbc.enc.finish
+#endif
 	nop
 
 	add	in2, 8, local1            ! bytes to load
@@ -2875,6 +2995,18 @@ DES_ncbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local4, 3, global0
+	bne,pn	%icc, .SLE2
+	nop
+
+	sta	in5, [local4] 0x88
+	add	local4, 4, local5
+
+	ba,pt	%icc, .SLE2a
+	sta	out5, [local5] 0x88
+#endif
+
 .SLE2:
 	and	in5, 255, local5
 	stub	local5, [local4+0]
@@ -2920,7 +3052,11 @@ DES_ncbc_encrypt:
 	add	in3, 120, in3
 
 	LDPTR	 [%sp+BIAS+ARG0+4*ARGSZ] , local7              ! ivec
+#ifdef __sparc_v9__
+	ble,pn	%icc, .ncbc.dec.finish
+#else
 	ble	.ncbc.dec.finish
+#endif
 	mov	in3, in4                  ! schedule
 
 	STPTR	in1,  [%sp+BIAS+ARG0+1*ARGSZ] 
@@ -2933,6 +3069,18 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local7, 3, global0
+	bne,pn	%icc, .LLE3
+	nop
+
+	lda	[local7] 0x88, in0
+	add	local7, 4, local3
+
+	ba,pt	%icc, .LLE3a
+	lda	[local3] 0x88, in1
+#endif
+
 .LLE3:
 	ldub	[local7+3], in0
 
@@ -2975,6 +3123,18 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE4
+	nop
+
+	lda	[local5] 0x88, in5
+	add	local5, 4, local3
+
+	ba,pt	%icc, .LLE4a
+	lda	[local3] 0x88, out5
+#endif
+
 .LLE4:
 	ldub	[local5+3], in5
 
@@ -3194,7 +3354,11 @@ DES_ncbc_encrypt:
 	! in2 is compared to 8 in the rounds
 
 	xor	out5, in0, out4           ! iv xor
+#ifdef __sparc_v9__
+	bl,pn	%icc, .ncbc.dec.seven.or.less
+#else
 	bl	.ncbc.dec.seven.or.less
+#endif
 	xor	in5, in1, global4         ! iv xor
 
 	! Load ivec next block now, since input and output address might be the same.
@@ -3206,6 +3370,19 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE5
+	nop
+
+	lda	[local5] 0x88, in0
+	add	local5, 4, local5
+
+	lda	[local5] 0x88, in1
+	ba,pt	%icc, .LLE5a
+	add	local5, 4, local5
+#endif
+
 .LLE5:
 	ldub	[local5+3], in0
 
@@ -3246,6 +3423,18 @@ DES_ncbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local7, 3, global0
+	bne,pn	%icc, .SLE3
+	nop
+
+	sta	out4, [local7] 0x88
+	add	local7, 4, local3
+
+	ba,pt	%icc, .SLE3a
+	sta	global4, [local3] 0x88
+#endif
+
 .SLE3:
 	and	out4, 255, local3
 	stub	local3, [local7+0]
@@ -3284,7 +3473,11 @@ DES_ncbc_encrypt:
 	add	local7, 8, local7
 	addcc   in2, -8, in2
 
+#ifdef __sparc_v9__
+	bg,pt	%icc, .ncbc.dec.next.block
+#else
 	bg	.ncbc.dec.next.block
+#endif
 	STPTR	local7,  [%sp+BIAS+ARG0+1*ARGSZ] 
 
 
@@ -3298,6 +3491,18 @@ DES_ncbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local4, 3, global0
+	bne,pn	%icc, .SLE4
+	nop
+
+	sta	in0, [local4] 0x88
+	add	local4, 4, local5
+
+	ba,pt	%icc, .SLE4a
+	sta	in1, [local5] 0x88
+#endif
+
 .SLE4:
 	and	in0, 255, local5
 	stub	local5, [local4+0]
@@ -3346,6 +3551,19 @@ DES_ncbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE13
+	nop
+
+	lda	[local5] 0x88, in0
+	add	local5, 4, local5
+
+	lda	[local5] 0x88, in1
+	ba,pt	%icc, .LLE13a
+	add	local5, 4, local5
+#endif
+
 .LLE13:
 	ldub	[local5+3], in0
 
@@ -3470,7 +3688,11 @@ DES_ede3_cbc_encrypt:
 	LDPTR	[%fp+BIAS+ARG0+6*ARGSZ], local4          ! ivec
 	cmp	local3, 0                 ! enc
 
+#ifdef __sparc_v9__
+	be,pn	%icc, .ede3.dec
+#else
 	be	.ede3.dec
+#endif
 	STPTR	in4,  [%sp+BIAS+ARG0+4*ARGSZ] 
 
 	STPTR	in5,  [%sp+BIAS+ARG0+5*ARGSZ] 
@@ -3482,6 +3704,18 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local4, 3, global0
+	bne,pn	%icc, .LLE6
+	nop
+
+	lda	[local4] 0x88, in5
+	add	local4, 4, local3
+
+	ba,pt	%icc, .LLE6a
+	lda	[local3] 0x88, out5
+#endif
+
 .LLE6:
 	ldub	[local4+3], in5
 
@@ -3517,7 +3751,11 @@ DES_ede3_cbc_encrypt:
 
 	addcc	in2, -8, in2              ! bytes missing after next block
 
+#ifdef __sparc_v9__
+	bl,pn	%icc,  .ede3.enc.seven.or.less
+#else
 	bl	.ede3.enc.seven.or.less
+#endif
 	STPTR	in3,  [%sp+BIAS+ARG0+3*ARGSZ] 
 
 .ede3.enc.next.block:
@@ -3529,6 +3767,18 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	in0, 3, global0
+	bne,pn	%icc, .LLE7
+	nop
+
+	lda	[in0] 0x88, out4
+	add	in0, 4, local3
+
+	ba,pt	%icc, .LLE7a
+	lda	[local3] 0x88, global4
+#endif
+
 .LLE7:
 	ldub	[in0+3], out4
 
@@ -3676,7 +3926,11 @@ DES_ede3_cbc_encrypt:
 	call .des_enc                     ! ks3 in3  compares in2 to 8
 	nop
 
+#ifdef __sparc_v9__
+	bl,pn	%icc, .ede3.enc.next.block_fp
+#else
 	bl	.ede3.enc.next.block_fp
+#endif
 	add	in0, 8, in0
 
 	! If 8 or more bytes are to be encrypted after this block,
@@ -3690,6 +3944,18 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	in0, 3, global0
+	bne,pn	%icc, .LLE11
+	nop
+
+	lda	[in0] 0x88, global3
+	add	in0, 4, local5
+
+	ba,pt	%icc, .LLE11a
+	lda	[local5] 0x88, global4
+#endif
+
 .LLE11:
 	ldub	[in0+3], global3
 
@@ -3863,6 +4129,18 @@ DES_ede3_cbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	in1, 3, global0
+	bne,pn	%icc, .SLE9
+	nop
+
+	sta	out0, [in1] 0x88
+	add	in1, 4, local3
+
+	ba,pt	%icc, .SLE9a
+	sta	out1, [local3] 0x88
+#endif
+
 .SLE9:
 	and	out0, 255, local3
 	stub	local3, [in1+0]
@@ -3994,6 +4272,18 @@ DES_ede3_cbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	in1, 3, global0
+	bne,pn	%icc, .SLE5
+	nop
+
+	sta	in5, [in1] 0x88
+	add	in1, 4, local3
+
+	ba,pt	%icc, .SLE5a
+	sta	out5, [local3] 0x88
+#endif
+
 .SLE5:
 	and	in5, 255, local3
 	stub	local3, [in1+0]
@@ -4030,14 +4320,22 @@ DES_ede3_cbc_encrypt:
 
 	addcc   in2, -8, in2              ! bytes missing when next block done
 
+#ifdef __sparc_v9__
+	bpos,pt	%icc, .ede3.enc.next.block
+#else
 	bpos	.ede3.enc.next.block
+#endif
 	add	in1, 8, in1
 
 .ede3.enc.seven.or.less:
 
 	cmp	in2, -8
 
+#ifdef __sparc_v9__
+	ble,pt	%icc, .ede3.enc.finish
+#else
 	ble	.ede3.enc.finish
+#endif
 	nop
 
 	add	in2, 8, local1            ! bytes to load
@@ -4112,6 +4410,18 @@ DES_ede3_cbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local4, 3, global0
+	bne,pn	%icc, .SLE6
+	nop
+
+	sta	in5, [local4] 0x88
+	add	local4, 4, local5
+
+	ba,pt	%icc, .SLE6a
+	sta	out5, [local5] 0x88
+#endif
+
 .SLE6:
 	and	in5, 255, local5
 	stub	local5, [local4+0]
@@ -4161,7 +4471,11 @@ DES_ede3_cbc_encrypt:
 	STPTR	in3,  [%sp+BIAS+ARG0+3*ARGSZ] 
 	cmp	in2, 0
 
+#ifdef __sparc_v9__
+	ble	%icc, .ede3.dec.finish
+#else
 	ble	.ede3.dec.finish
+#endif
 	STPTR	in5,  [%sp+BIAS+ARG0+5*ARGSZ] 
 
 	LDPTR	[%fp+BIAS+ARG0+6*ARGSZ], local7          ! iv
@@ -4172,6 +4486,18 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local7, 3, global0
+	bne,pn	%icc, .LLE8
+	nop
+
+	lda	[local7] 0x88, in0
+	add	local7, 4, local3
+
+	ba,pt	%icc, .LLE8a
+	lda	[local3] 0x88, in1
+#endif
+
 .LLE8:
 	ldub	[local7+3], in0
 
@@ -4214,6 +4540,18 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE9
+	nop
+
+	lda	[local5] 0x88, in5
+	add	local5, 4, local3
+
+	ba,pt	%icc, .LLE9a
+	lda	[local3] 0x88, out5
+#endif
+
 .LLE9:
 	ldub	[local5+3], in5
 
@@ -4440,7 +4778,11 @@ DES_ede3_cbc_encrypt:
 	! in2 is compared to 8 in the rounds
 
 	xor	out5, in0, out4
+#ifdef __sparc_v9__
+	bl,pn	%icc, .ede3.dec.seven.or.less
+#else
 	bl	.ede3.dec.seven.or.less
+#endif
 	xor	in5, in1, global4
 
 	
@@ -4450,6 +4792,19 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE10
+	nop
+
+	lda	[local5] 0x88, in0
+	add	local5, 4, local5
+
+	lda	[local5] 0x88, in1
+	ba,pt	%icc, .LLE10a
+	add	local5, 4, local5
+#endif
+
 .LLE10:
 	ldub	[local5+3], in0
 
@@ -4490,6 +4845,18 @@ DES_ede3_cbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local7, 3, global0
+	bne,pn	%icc, .SLE7
+	nop
+
+	sta	out4, [local7] 0x88
+	add	local7, 4, local3
+
+	ba,pt	%icc, .SLE7a
+	sta	global4, [local3] 0x88
+#endif
+
 .SLE7:
 	and	out4, 255, local3
 	stub	local3, [local7+0]
@@ -4528,7 +4895,11 @@ DES_ede3_cbc_encrypt:
 	addcc   in2, -8, in2
 	add	local7, 8, local7
 
+#ifdef __sparc_v9__
+	bg,pt	%icc, .ede3.dec.next.block
+#else
 	bg	.ede3.dec.next.block
+#endif
 	STPTR	local7,  [%sp+BIAS+ARG0+1*ARGSZ] 
 
 .ede3.dec.store.iv:
@@ -4541,6 +4912,18 @@ DES_ede3_cbc_encrypt:
 
 	! rightmost in register to first in memory
 
+#ifdef __sparc_v9__
+	andcc	local4, 3, global0
+	bne,pn	%icc, .SLE8
+	nop
+
+	sta	in0, [local4] 0x88
+	add	local4, 4, local5
+
+	ba,pt	%icc, .SLE8a
+	sta	in1, [local5] 0x88
+#endif
+
 .SLE8:
 	and	in0, 255, local5
 	stub	local5, [local4+0]
@@ -4589,6 +4972,19 @@ DES_ede3_cbc_encrypt:
 
 	! first in memory to rightmost in register
 
+#ifdef __sparc_v9__
+	andcc	local5, 3, global0
+	bne,pn	%icc, .LLE14
+	nop
+
+	lda	[local5] 0x88, in0
+	add	local5, 4, local5
+
+	lda	[local5] 0x88, in1
+	ba,pt	%icc, .LLE14a
+	add	local5, 4, local5
+#endif
+
 .LLE14:
 	ldub	[local5+3], in0
 
Index: src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S
diff -u src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S:1.3 src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S:1.4
--- src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S:1.3	Sun Feb 18 23:38:47 2018
+++ src/crypto/external/bsd/openssl/lib/libcrypto/arch/sparc/ghash-sparcv9.S	Tue Feb 20 12:48:46 2018
@@ -1,10 +1,3 @@
-#include "sparc_arch.h"
-
-#ifdef  __arch64__
-.register	%g2,#scratch
-.register	%g3,#scratch
-#endif
-
 .section	".text",#alloc,#execinstr
 
 .align	64
@@ -19,7 +12,7 @@ rem_4bit:
 .globl	gcm_ghash_4bit
 .align	32
 gcm_ghash_4bit:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 	ldub	[%i2+15],%l1
 	ldub	[%i0+15],%l2
 	ldub	[%i0+14],%l3
@@ -108,7 +101,7 @@ gcm_ghash_4bit:
 
 	add	%i2,16,%i2
 	cmp	%i2,%i3
-	be,pn	SIZE_T_CC,.Ldone
+	be,pn	%icc,.Ldone
 	and	%o1,0xf,%l5
 
 	ldx	[%l6+%l0],%o3
@@ -154,7 +147,7 @@ gcm_ghash_4bit:
 .globl	gcm_gmult_4bit
 .align	32
 gcm_gmult_4bit:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 	ldub	[%i0+15],%l1
 	add	%i1,8,%l6
 
@@ -256,7 +249,7 @@ gcm_gmult_4bit:
 .globl	gcm_init_vis3
 .align	32
 gcm_init_vis3:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 
 	ldx	[%i1+0],%o2
 	ldx	[%i1+8],%o1
@@ -289,7 +282,7 @@ gcm_init_vis3:
 .globl	gcm_gmult_vis3
 .align	32
 gcm_gmult_vis3:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 
 	ldx	[%i0+8],%o3		! load Xi
 	ldx	[%i0+0],%o4
@@ -343,7 +336,7 @@ gcm_gmult_vis3:
 .globl	gcm_ghash_vis3
 .align	32
 gcm_ghash_vis3:
-	save	%sp,-STACK_FRAME,%sp
+	save	%sp,-112,%sp
 	nop
 	srln	%i3,0,%i3		! needed on v8+, "nop" on v9
 

Reply via email to