Juergen Menden <[EMAIL PROTECTED]> writes:

|> Hi!
|> dchanges 3.0 forces us to use pgp to sign the .changes files (well
|> at least if you don't explicitly turn it off). i've looked at the
|> source package but there i found really much assembler code. as i'm
|> only familiar with the 6502 assembler i wouldn't try to port it... ;-)))

I have already sent a patch to the pgp maintainer that adds the
necessary assembler code.  It will be included in the next version.
If you want to try out, here is the patch:

-----BEGIN PGP SIGNED MESSAGE-----

The linux-68k version of pgp-2.6.3i currently does not use assembler
versions of the mp functions due to problems with the assembler
syntax.  Therefore I have created a version that works with linux-68k,
based on mc68020.s, but using SMITH instead of UPTON.

diff -urPX exclude pgp263is.orig/src/makefile pgp263is/src/makefile
- --- pgp263is.orig/src/makefile        Thu Jan 18 11:42:36 1996
+++ pgp263is/src/makefile       Fri Feb  2 23:44:11 1996
@@ -135,6 +135,9 @@
 mc68020.o:
        $(ASM) -o mc68020.o mc68020.s
 
+mc68020l.o: mc68020l.S
+       $(CC) -c mc68020l.S -o mc68020l.o
+
 ZIPOBJS= zbits.o zdeflate.o zfile_io.o zglobals.o \
        zinflate.o zip.o zipup.o ztrees.o zunzip.o 
 
@@ -168,8 +171,8 @@
        CFLAGS="$(RSAINCDIR) -O6 -g3 -DUNIX -DLINUX -DIDEA32 -DASM"
 
 linux-68k:
- -     $(MAKE) all CC=gcc LD=gcc \
- -     CFLAGS="$(RSAINCDIR) -O2 -DHIGHFIRST -DUNIX -DPORTABLE -DIDEA32"
+       $(MAKE) all CC=gcc LD=gcc OBJS_EXT="mc68020l.o" \
+       CFLAGS="$(RSAINCDIR) -O2 -DHIGHFIRST -DUNIX -DIDEA32"
 
 freebsd:
        $(MAKE) all LD="$(CC) -s" OBJS_EXT="_80386.o _zmatch.o" \
diff -urPX exclude pgp263is.orig/src/mc68020l.S pgp263is/src/mc68020l.S
- --- pgp263is.orig/src/mc68020l.S      Thu Jan  1 01:00:00 1970
+++ pgp263is/src/mc68020l.S     Sat Feb 10 17:25:41 1996
@@ -0,0 +1,399 @@
+|      Fast assembly routines for MC68020 (Linux/68k)
+|      Adapted from mc68020.s by Andreas Schwab,
+|       <[EMAIL PROTECTED]>
+|
+|      Assumptions:
+|              Arguments start at %sp@(0x4)
+|              Return value is in %d0
+|              %d0/%d1/%a0/%a1 are scratch
+|              P_SMULA needs MULTUNIT set to "unsigned long" in mpilib.c
+|              P_DMUL replaces mp_smul and mp_dmul in mpilib.c
+|
+|      92.9.21 - Tsutomu Shimomura, [EMAIL PROTECTED]
+|       93.5.14 - Bug in P_DMUL fixed -- now works with small bignums
+
+#ifdef __ELF__
+#define ENTRY(name)    .globl name; name##:
+#define SYM(name)      name
+#define EPILOG(name)   .type name, @function; .size name, . - name
+#else
+#define ENTRY(name)    .globl _##name; _##name##:
+#define SYM(name)      _##name
+#define EPILOG(name)
+#endif
+
+       .text
+
+|      P_SETP(p) sets the current precision to be p bits.  No-op.
+ENTRY(P_SETP)
+       rts
+EPILOG(P_SETP)
+
+|      P_ADDC(*a, *b, c) performs a += b + c (carry).  Carry is returned.
+ENTRY(P_ADDC)
+       movel   %sp@(0x4), %a0  | claim arguments
+       movel   %sp@(0x8), %a1
+       movel   %sp@(0xc), %d0
+       movel   %d2, %sp@-      | preserve %d2
+
+       movew   SYM(global_precision), %d1      | longword count
+       movew   %d1, %d2        | save a copy
+
+       lslw    #2, %d1
+       addw    %d1, %a0        | adjust array pointers
+       addw    %d1, %a1
+
+       lsrw    #1, %d1         | compute initial branch offset
+       andw    #0xe, %d1
+       negw    %d1             | branch offset in %d1
+
+       lsrw    #3, %d2         | 8 longwords/loop; count in %d2
+
+       asrl    #1, %d0         | set X if necessary
+
+       jmp     %pc@(2f, %d1:w)
+1:
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+       addxl   %a1@-, %a0@-
+2:     dbf     %d2, 1b
+
+       addxl   %d0, %d0
+
+       movel   %sp@+, %d2
+       rts
+EPILOG(P_ADDC)
+
+|      P_SUBB(*a, *b, c) performs a -= b + c (borrow).  Borrow is returned.
+ENTRY(P_SUBB)
+       movel   %sp@(0x4), %a0  | claim arguments
+       movel   %sp@(0x8), %a1
+       movel   %sp@(0xc), %d0
+       movel   %d2, %sp@-      | preserve %d2
+
+       movew   SYM(global_precision), %d1      | longword count
+       movew   %d1, %d2        | save a copy
+
+       lslw    #2, %d1
+       addw    %d1, %a0        | adjust array pointers
+       addw    %d1, %a1
+
+       lsrw    #1, %d1         | compute initial branch offset
+       andw    #0xe, %d1
+       negw    %d1             | branch offset in %d1
+
+       lsrw    #3, %d2         | 8 longwords/loop; count in %d2
+
+       asrl    #1, %d0         | set X if necessary
+
+       jmp     %pc@(2f, %d1:w)
+1:
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+       subxl   %a1@-, %a0@-
+2:     dbf     %d2, 1b
+
+       addxl   %d0, %d0
+
+       movel   %sp@+, %d2
+       rts
+EPILOG(P_SUBB)
+
+|      P_ROTL(*a, c) performs a = (a<<1) | c (lo-bit).  Hi-bit is returned.
+ENTRY(P_ROTL)
+       movel   %sp@(0x4), %a0  | claim arguments
+       movel   %sp@(0x8), %d0
+       movel   %d2, %a1        | preserve %d2
+
+       movew   SYM(global_precision), %d1      | longword count
+       movew   %d1, %d2        | save a copy
+
+       lslw    #2, %d1
+       addw    %d1, %a0        | adjust array pointer
+
+       andw    #0x1c, %d1
+       negw    %d1             | branch offset in %d1
+
+       lsrw    #3, %d2         | 8 longwords/loop; count in %d2
+
+       lsrl    #1, %d0         | set X if necessary
+
+       jmp     %pc@(2f, %d1:w)
+1:
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+       roxlw   %a0@-
+2:     dbf     %d2, 1b
+
+       addxl   %d0, %d0
+
+       movel   %a1, %d2
+       rts
+EPILOG(P_ROTL)
+
+|      P_SMULA(*a, *b, x) performs a += b * x.  Pointers are to the LSB.
+ENTRY(P_SMULA)
+       movel   %sp@(0x4), %a0  | claim arguments
+       movel   %sp@(0x8), %a1
+       movel   %sp@(0xc), %d1
+       jeq     3f              | horrible kludge to speed multiply by 0
+       moveml  %d2/%d3/%d4/%d5, %sp@-
+       movew   SYM(global_precision), %d5      | longword count; 0 will fail
+
+       subqw   #2, %d5         | first longword not handled in loop
+       clrl    %d4
+
+       movel   %a1@, %d2
+       mulul   %d1, %d3:%d2    | %d3 is carry
+       addl    %d2, %a0@       | accumulate
+
+|      tstw    %d5             | This code needed if global_precision < 2
+|      jlt     2f              | only one longword?
+1:
+       movel   %a0@-, %d0
+       addxl   %d3, %d0        | accumulate carry and X-bit
+       movel   %a1@-, %d2
+       mulul   %d1, %d3:%d2    | %d3 is carry
+       addxl   %d4, %d3        | add X-bit to carry
+       addl    %d2, %d0        | accumulate
+       movel   %d0, %a0@
+       dbf     %d5, 1b
+2:
+       addxl   %d4, %d3        | add X-bit to carry
+       addl    %d3, %a0@-      | accumulate
+
+       moveml  %sp@+, %d2/%d3/%d4/%d5
+3:
+       rts
+EPILOG(P_SMULA)
+
+|      P_DMUL(*a, *b, *c) performs a = b * c.
+ENTRY(P_DMUL)
+       moveml  %d2-%d7/%a2-%a4, %sp@-
+       movel   %sp@(0x28), %a0 | claim arguments
+       movel   %sp@(0x2c), %a1
+       movel   %sp@(0x30), %a2
+       clrl    %d0
+       movew   SYM(global_precision), %d0
+       subql   #2, %d0 | global_precision - 2
+
+       movel   %a0, %a4        | product
+
+       movel   %d0, %d7        | count for multiplicand
+       movel   %a1, %a3        | multiplicand
+       tstl    %a3@+
+       jne     2f
+       subql   #1, %d7
+1:
+       clrl    %a4@+
+       tstl    %a3@+
+       dbne    %d7, 1b
+       addqw   #1, %d7 | %d7 contains effective size of the multiplicand-2
+2:
+       movel   %d0, %d6        | count for multiplier
+       movel   %a2, %a3        | multiplier
+       tstl    %a3@+
+       jne     2f
+       subql   #1, %d6
+1:
+       clrl    %a4@+
+       tstl    %a3@+
+       dbne    %d6, 1b
+       addqw   #1, %d6 | %d6 contains effective size of the multiplier-2
+2:
+
+       addql   #1, %d0 | global_precision - 1
+       lsll    #2, %d0
+       addl    %d0, %a1        | pointer to LSB of the multiplicand
+       addl    %d0, %a2        | pointer to LSB of the multiplier
+       addl    %d0, %a0
+       addl    %d0, %a0
+       addql   #4, %a0 | pointer to LSB of product - KLUDGE!
+
+|      First partial product not handled in loop
+|      Assumes that the X-bit is clear from the above contortions.
+       clrl    %d4
+
+       movel   %a0, %a3        | product
+       movel   %a1, %a4        | multiplicand
+       movel   %a2@, %d1       | one longword of the multiplier
+       movel   %d7, %d5        | loop count
+
+       movel   %a4@, %d2
+       mulul   %d1, %d3:%d2    | %d3 is carry
+       movel   %d2, %a3@       | store product
+1:
+       movel   %a4@-, %d2
+       mulul   %d1, %d0:%d2
+       addxl   %d3, %d2
+       movel   %d0, %d3
+       movel   %d2, %a3@-
+       dbf     %d5, 1b
+
+       addxl   %d4, %d3
+       movel   %d3, %a3@-
+
+| The other partial products
+
+2:
+       movel   %a1, %a4        | multiplicand
+       movel   %a2@-, %d1      | another longword of the multiplier
+       movel   %d7, %d5        | loop count
+
+       movel   %a4@, %d2
+       mulul   %d1, %d3:%d2    | %d3 is carry
+       addl    %d2, %a0@-      | accumulate
+
+       movel   %a0, %a3        | product
+
+1:
+       movel   %a3@-, %d0
+       addxl   %d3, %d0        | accumulate carry and X-bit
+       movel   %a4@-, %d2
+       mulul   %d1, %d3:%d2    | %d3 is carry
+       addxl   %d4, %d3        | add X-bit to carry
+       addl    %d2, %d0        | accumulate
+       movel   %d0, %a3@
+       dbf     %d5, 1b
+
+       addxl   %d4, %d3        | add X-bit to carry
+       movel   %d3, %a3@-
+
+       dbf     %d6, 2b
+
+       moveml  %sp@+, %d2-%d7/%a2-%a4
+       rts
+EPILOG(P_DMUL)
+
+.lcomm _reciph, 4
+.lcomm _recipl, 4
+.lcomm _mshift, 4
+
+ENTRY(p_setrecip)
+       movel   %sp@(4), _reciph
+       movel   %sp@(8), _recipl
+       movel   %sp@(12), _mshift
+       rts
+EPILOG(p_setrecip)
+
+ENTRY(p_quo_digit)
+       moveml  %d2-%d7, %sp@-
+       movel   %sp@(28), %a0
+
+       movel   %a0@(8), %d0            | dividend[2]
+       notl    %d0
+       movel   _reciph, %d7
+       mulul   %d7, %d5:%d0
+       addl    %d7, %d0
+       clrl    %d6
+       addxl   %d6, %d5                | %d5:%d0 = q1
+
+       movel   %a0@(4), %d2            | dividend[1]
+       notl    %d2
+       mulul   _recipl, %d3:%d2
+       addql   #1, %d3                 | %d3:%d2 = q2
+
+       movel   %d5, %d4
+       andl    %d3, %d4
+       moveq   #1, %d6
+       andl    %d6, %d4                | %d4 = lsb_factor
+
+       addl    %d2, %d0
+       addxl   %d3, %d5
+       roxrl   #1, %d5                 | %d5 = MS word of q0
+
+       movel   %a0@(4), %d0            | dividend[1]
+       notl    %d0
+       mulul   %d7, %d1:%d0            | %d1:%d0 = q1
+
+       movel   %a0@, %d2               | dividend[0]
+       notl    %d2
+       mulul   _recipl, %d3:%d2        | %d3:%d2 = q2
+       eorl    %d0, %d2
+       andl    %d2, %d4                | lsb correction
+       eorl    %d0, %d2                | restore %d2
+
+       addl    %d0, %d2
+       addxl   %d1, %d3
+       roxrl   #1, %d3
+       roxrl   #1, %d2                 | %d3:%d2 = q
+
+       addl    %d5, %d2                | + scaled q0
+       clrl    %d6
+       addxl   %d6, %d3
+       addl    %d4, %d2                | + lsb correction
+       addxl   %d6, %d3                | q
+
+       lsll    #1, %d2
+       addxl   %d3, %d3
+       addxl   %d2, %d2
+       addxl   %d3, %d3
+       addxl   %d2, %d2
+       moveq   #3, %d6
+       andl    %d6, %d2                | %d2:%d3 = q >> 30
+
+       movel   %a0@, %d0
+       notl    %d0
+       mulul   %d7, %d1:%d0
+       lsll    #1, %d0
+       addxl   %d1, %d1
+       addl    %d3, %d0
+       addxl   %d2, %d1                | q
+
+       movel   _mshift, %d2
+       jeq     4f
+       moveq   #32, %d3
+       cmpl    %d3, %d2
+       jeq     1f
+
+       moveq   #16, %d3
+       cmpl    %d3, %d2
+       jlt     2f
+
+       movew   %d1, %d0                | shift right by 16
+       swap    %d0
+       clrw    %d1
+       swap    %d1
+       subl    %d3, %d2
+       jeq     4f
+
+2:
+       subql   #1, %d2                 | adjust loop count
+3:
+       lsrl    #1, %d1
+       roxrl   #1, %d0
+       dbra    %d2, 3b
+
+4:     tstl    %d1
+       jeq     2f
+       moveq   #-1, %d0
+       jra     2f
+1:
+       movel   %d1, %d0
+2:
+       moveml  %sp@+, %d2-%d7
+       rts
+EPILOG(p_quo_digit)
diff -urPX exclude pgp263is.orig/src/platform.h pgp263is/src/platform.h
- --- pgp263is.orig/src/platform.h      Sat Jan  6 21:01:34 1996
+++ pgp263is/src/platform.h     Fri Feb  2 23:36:45 1996
@@ -203,6 +203,14 @@
 #define mp_subb                P_SUBB
 #define mp_rotate_left P_ROTL
 #define unitfill0(r,ct) memset((void*)r, 0, (ct)*sizeof(unit))
+#if defined (linux) || defined (atarist)
+#define SMITH
+#define MUNIT32
+#define mp_smula       P_SMULA
+#define mp_dmul                P_DMUL
+#define mp_quo_digit   p_quo_digit
+#define mp_set_recip   p_setrecip
+#else
 #if defined(sun3) || defined(mc68020)
 # define UPTON
 # define MUNIT32
@@ -211,6 +219,7 @@
 #else
 # define SMITH
 # define MUNIT16
+#endif
 #endif
 #define PLATFORM_SPECIFIED
 #endif /* mc68000 */

-----BEGIN PGP SIGNATURE-----
Version: 2.6.3i
Charset: latin1

iQCVAwUBMSBhT6OO9lA417lBAQFqegP/d2hCyZzSs0OqRvm6v6AXGigHkKUI5cbm
66bDtulVXU8Q2wAO5EZHpgEFqqWs2huLl6EqRoFUSMNW8t1+BrGHINSVFgZVW6+M
k1r0DA7ThsZs+eRdcpJErF9+RITTlg/Iio4EcwOextm+EfJOZo8XAxz2vjPDhvJD
SIhgLIvzpmU=
=O2Ip
-----END PGP SIGNATURE-----

-- 
Andreas Schwab                                      "And now for something
[EMAIL PROTECTED]              completely different"

  • pgp Juergen Menden
    • Re: pgp Geert Uytterhoeven
    • Andreas Schwab

Reply via email to