arch/i386/kernel/semaphore.c

2001-07-05 Thread Erik Meusel

Hi,

I patched semaphore.c so that it compiles cleanly and without any warnings
when using GCC 3.0.

Regards,
Erik


/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <[EMAIL PROTECTED]>
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
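
/*
 * Editorial sketch, not part of the posted file: the fast path that the
 * comments above refer to is hand-written asm in <asm/semaphore.h>.
 * Expressed in plain C (atomic_dec_return()/atomic_inc_return() and the
 * _sketch names are used here purely for illustration), it behaves
 * roughly like this:
 */
static inline void down_sketch(struct semaphore *sem)
{
	/* Decrement; enter the slow path only if the count went negative,
	 * i.e. only when the semaphore is contended. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline void up_sketch(struct semaphore *sem)
{
	/* Increment; wake a sleeper only if the count was negative before,
	 * i.e. only when somebody is waiting. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}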

void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}

int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
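
A side note on the comment above __down_trylock(): the single-cmpxchg variant
it alludes to can be sketched in user space with C11 atomics. This is an
editorial illustration only (the helper name is made up, and it is not how the
2.4 i386 kernel implements trylock); the point is that a compare-and-swap
never drives the count negative, so no fixup path is needed:

#include <stdatomic.h>

static int trylock_cas_sketch(atomic_int *count)
{
	int old = atomic_load(count);

	while (old > 0) {
		/* Try to take one unit; on failure, old is reloaded for us. */
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return 0;		/* got the semaphore */
	}
	return 1;				/* contended: caller would sleep */
}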


/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %ecx contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax when used as a return
 * value..
 */
asm(
".text\n"
".align 4\n"
".globl __down_failed\n"
"__down_failed:\n\t"
	

Re: include/asm-i386/checksum.h

2001-07-03 Thread Erik Meusel

Ok.

Sending dozens of patches today, I was asked to mail the whole files,
not just the patches, and to send them Cc'ed to you, Linus.

Here they are:
linux/include/asm-i386/checksum.h and
linux/include/asm-i386/floppy.h
both based on stable linux-2.4.5.

Well, have a lot of fun and thanks in advance!

mfg, Erik


#ifndef _I386_CHECKSUM_H
#define _I386_CHECKSUM_H


/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
		   int *src_err_ptr, int *dst_err_ptr);

/*
 *	Note: when you get a NULL pointer exception here this means someone
 *	passed in an incorrect kernel address to one of these functions. 
 *	
 *	If you use these functions directly please don't forget the 
 *	verify_area().
 */
extern __inline__
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
	int len, int sum)
{
	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
}

extern __inline__
unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
		int len, int sum, int *err_ptr)
{
	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
}

/*
 * These are the old (and unsafe) way of doing checksums, a warning message will be
 * printed if they are used and an exception occurs.
 *
 * these functions should go away after some time.
 */

#define csum_partial_copy_fromuser csum_partial_copy
unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *
 *	By Jorge Cwik <[EMAIL PROTECTED]>, adapted for linux by
 *	Arnt Gulbrandsen.
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
	  unsigned int ihl) {
	unsigned int sum;

	__asm__ __volatile__("\
	movl (%1), %0 \
	subl $4, %2 \
	jbe 2f \
	addl 4(%1), %0 \
	adcl 8(%1), %0 \
	adcl 12(%1), %0 \
1:	adcl 16(%1), %0 \
	lea 4(%1), %1 \
	decl %2 \
	jne	1b \
	adcl $0, %0 \
	movl %0, %2 \
	shrl $16, %0 \
	addw %w2, %w0 \
	adcl $0, %0 \
	notl %0 \
2: \
	"
	/* Since the input registers which are loaded with iph and ipl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl));
	return(sum);
}
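
For readers who don't want to decode the asm, here is an editorial, portable-C
sketch of what ip_fast_csum() computes (the 16-bit one's-complement checksum of
an ihl-word IP header); the function name is made up and this is not the posted
code:

static unsigned short ip_fast_csum_sketch(const unsigned char *iph,
					  unsigned int ihl)
{
	const unsigned short *p = (const unsigned short *)iph;
	unsigned long sum = 0;
	unsigned int i;

	/* ihl counts 32-bit words, so sum 2*ihl 16-bit words. */
	for (i = 0; i < ihl * 2; i++)
		sum += p[i];

	/* Fold the carries back in, then complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}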

/*
 *	Fold a partial checksum
 */

static inline unsigned int csum_fold(unsigned int sum)
{
	__asm__("\
		addl %1, %0 \
		adcl $0xffff, %0 \
		"
		: "=r" (sum)
		: "r" (sum << 16), "0" (sum & 0x)
	);
	return (~sum) >> 16;
}
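
The asm above folds the 32-bit accumulator into 16 bits and complements it. An
editorial plain-C equivalent (illustration only, not the posted code) is:

static unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
	sum += sum >> 16;			/* fold any carry back in */
	return (unsigned short)~sum;		/* one's complement */
}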
 
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
		   unsigned long daddr,
		   unsigned short len,
		   unsigned short proto,
		   unsigned int sum) 
{
__asm__("\
	addl %1, %0 \
	adcl %2, %0 \
	adcl %3, %0 \
	adcl $0, %0 \
	"
	: "=r" (sum)
	: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
		   unsigned long daddr,
		   unsigned short len,
		   unsigned short proto,
		   unsigned int sum) 
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
return csum_fold (csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
		 struct in6_addr *daddr,
		 __u32 len,
		 unsigned short proto,
		 unsigned int sum) 
{
	__asm__("\
		addl 0(%1), %0 \
		adcl 4(%1), %0 \
		adcl 8(%1), %0 \
		adcl 12(%1), %0 \
		adcl 0(%2), %0 \
		adcl 4(%2), %0 \
		adcl 8(%2), %0 \
		adcl 12(%2), %0 \
		adcl %3, %0 \
		adcl %4, %0 \
		adcl $0, %0 \
		"
		: "=" (sum)
		: "r" (saddr), "r" (daddr), 
		  "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));


Re: include/asm-i386/checksum.h

2001-07-03 Thread Erik Meusel

On Tue, 3 Jul 2001, J . A . Magallon wrote:

> make a couple symlinks and you will not have to touch kernel makefiles:
> ln -s /usr/local/include/ncurses /usr/include
That's the thing I wanted to work around, but ok. It was just a
suggestion.

mfg, Erik




include/asm-i386/checksum.h

2001-07-03 Thread Erik Meusel

Hi,

compiling the new 2.4.5 kernel with GCC 3.0 produced several errors and
warnings. One of the ugliest warnings was:

include/asm/checksum.h: warning: multi-line string literals are deprecated

The diff to version 2.4.5 of it is attached.

Regards,
Erik Meusel

P.S.: would it be possible to patch menuconfig so that it looks in the
whole include path for the <ncurses.h> and related files? They aren't in
/usr/include/ on my system and I'm tired of patching
linux/scripts/lxdialog/Makefile all the time. :)
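
For context, here is what the deprecated construct and one common replacement
look like; this fragment is an editorial illustration (loosely modelled on the
csum_fold() body elsewhere in this thread), not a verbatim excerpt, and
"addend" is a placeholder operand:

/* Deprecated: a string literal that spans several source lines;
 * the embedded newlines separate the assembler statements. */
__asm__("addl %1, %0
	adcl $0xffff, %0"
	: "=r" (sum)
	: "r" (addend), "0" (sum));

/* One common replacement: adjacent string literals concatenated by the
 * compiler, with the statement separators written out as \n\t. */
__asm__("addl %1, %0\n\t"
	"adcl $0xffff, %0"
	: "=r" (sum)
	: "r" (addend), "0" (sum));

The patch below takes a different route and continues each line of the literal
with a trailing backslash instead.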


--- include/asm-i386/checksum.h Tue Feb  1 08:41:14 2000
+++ /scratch/backup/src/linux/include/asm/checksum.hTue Jul  3 08:35:27 2001
@@ -72,18 +72,18 @@
-   __asm__ __volatile__("
-   movl (%1), %0
-   subl $4, %2
-   jbe 2f
-   addl 4(%1), %0
-   adcl 8(%1), %0
-   adcl 12(%1), %0
-1: adcl 16(%1), %0
-   lea 4(%1), %1
-   decl %2
-   jne 1b
-   adcl $0, %0
-   movl %0, %2
-   shrl $16, %0
-   addw %w2, %w0
-   adcl $0, %0
-   notl %0
-2:
+   __asm__ __volatile__("\
+   movl (%1), %0 \
+   subl $4, %2 \
+   jbe 2f \
+   addl 4(%1), %0 \
+   adcl 8(%1), %0 \
+   adcl 12(%1), %0 \
+1: adcl 16(%1), %0 \
+   lea 4(%1), %1 \
+   decl %2 \
+   jne 1b \
+   adcl $0, %0 \
+   movl %0, %2 \
+   shrl $16, %0 \
+   addw %w2, %w0 \
+   adcl $0, %0 \
+   notl %0 \
+2: \
@@ -105,3 +105,3 @@
-   __asm__("
-   addl %1, %0
-   adcl $0xffff, %0
+   __asm__("\
+   addl %1, %0 \
+   adcl $0xffff, %0 \
@@ -121,5 +121,5 @@
-__asm__("
-   addl %1, %0
-   adcl %2, %0
-   adcl %3, %0
-   adcl $0, %0
+__asm__("\
+   addl %1, %0 \
+   adcl %2, %0 \
+   adcl %3, %0 \
+   adcl $0, %0 \
@@ -161,12 +161,12 @@
-   __asm__("
-   addl 0(%1), %0
-   adcl 4(%1), %0
-   adcl 8(%1), %0
-   adcl 12(%1), %0
-   adcl 0(%2), %0
-   adcl 4(%2), %0
-   adcl 8(%2), %0
-   adcl 12(%2), %0
-   adcl %3, %0
-   adcl %4, %0
-   adcl $0, %0
+   __asm__("\
+   addl 0(%1), %0 \
+   adcl 4(%1), %0 \
+   adcl 8(%1), %0 \
+   adcl 12(%1), %0 \
+   adcl 0(%2), %0 \
+   adcl 4(%2), %0 \
+   adcl 8(%2), %0 \
+   adcl 12(%2), %0 \
+   adcl %3, %0 \
+   adcl %4, %0 \
+   adcl $0, %0 \


