diff -c e1000.h linux-2.6.24.7/drivers/net/e1000-7.3.20-fixed-piers/e1000.h
*** e1000.h	2008-07-22 14:08:25.000000000 +0100
--- linux-2.6.24.7/drivers/net/e1000-7.3.20-fixed-piers/e1000.h	2008-10-09 16:54:19.000000000 +0100
***************
*** 321,326 ****
--- 321,327 ----
  				int cleaned_count);
  	struct e1000_rx_ring *rx_ring;      /* One per active queue */
  #ifdef CONFIG_E1000_NAPI
+         struct napi_struct napi;
  	struct net_device *polling_netdev;  /* One per active queue */
  #endif
  	int num_tx_queues;
diff -c e1000_main.c linux-2.6.24.7/drivers/net/e1000-7.3.20-fixed-piers/e1000_main.c
*** e1000_main.c	2008-07-22 14:08:25.000000000 +0100
--- linux-2.6.24.7/drivers/net/e1000-7.3.20-fixed-piers/e1000_main.c	2008-11-07 10:01:27.000000000 +0000
***************
*** 182,188 ****
  static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                      struct e1000_tx_ring *tx_ring);
  #ifdef CONFIG_E1000_NAPI
! static int e1000_clean(struct net_device *poll_dev, int *budget);
  static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                      struct e1000_rx_ring *rx_ring,
                                      int *work_done, int work_to_do);
--- 182,189 ----
  static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                      struct e1000_tx_ring *tx_ring);
  #ifdef CONFIG_E1000_NAPI
! //static int e1000_clean(struct net_device *poll_dev, int *budget);
! static int e1000_clean(struct napi_struct *napi, int budget);
  static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                      struct e1000_rx_ring *rx_ring,
                                      int *work_done, int work_to_do);
***************
*** 213,219 ****
  static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
  static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
  static void e1000_tx_timeout(struct net_device *dev);
! static void e1000_reset_task(struct net_device *dev);
  static void e1000_smartspeed(struct e1000_adapter *adapter);
  static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                         struct sk_buff *skb);
--- 214,221 ----
  static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
  static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
  static void e1000_tx_timeout(struct net_device *dev);
! //static void e1000_reset_task(struct net_device *dev);
! static void e1000_reset_task(struct work_struct *work);
  static void e1000_smartspeed(struct e1000_adapter *adapter);
  static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                         struct sk_buff *skb);
***************
*** 447,453 ****
  	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
  	uint16_t old_vid = adapter->mng_vlan_id;
  	if (adapter->vlgrp) {
! 		if (!adapter->vlgrp->vlan_devices[vid]) {
  			if (adapter->hw.mng_cookie.status &
  				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  				e1000_vlan_rx_add_vid(netdev, vid);
--- 449,456 ----
  	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
  	uint16_t old_vid = adapter->mng_vlan_id;
  	if (adapter->vlgrp) {
! /*		if (!adapter->vlgrp->vlan_devices[vid]) {*/
! if (!vlan_group_get_device(adapter->vlgrp, vid)) {
  			if (adapter->hw.mng_cookie.status &
  				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
  				e1000_vlan_rx_add_vid(netdev, vid);
***************
*** 457,463 ****
  
  			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
  					(vid != old_vid) &&
! 					!adapter->vlgrp->vlan_devices[old_vid])
  				e1000_vlan_rx_kill_vid(netdev, old_vid);
  		} else
  			adapter->mng_vlan_id = vid;
--- 460,467 ----
  
  			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
  					(vid != old_vid) &&
! 					!vlan_group_get_device(adapter->vlgrp, old_vid))
! /*					!adapter->vlgrp->vlan_devices[old_vid])*/
  				e1000_vlan_rx_kill_vid(netdev, old_vid);
  		} else
  			adapter->mng_vlan_id = vid;
***************
*** 626,632 ****
  	adapter->tx_queue_len = netdev->tx_queue_len;
  
  #ifdef CONFIG_E1000_NAPI
! 	netif_poll_enable(netdev);
  #endif
  	e1000_irq_enable(adapter);
  
--- 630,638 ----
  	adapter->tx_queue_len = netdev->tx_queue_len;
  
  #ifdef CONFIG_E1000_NAPI
! 	//	netif_poll_enable(netdev);
!         napi_enable(&adapter->napi);
!         atomic_set(&adapter->irq_sem, 1);
  #endif
  	e1000_irq_enable(adapter);
  
***************
*** 723,729 ****
  	del_timer_sync(&adapter->phy_info_timer);
  
  #ifdef CONFIG_E1000_NAPI
! 	netif_poll_disable(netdev);
  #endif
  	netdev->tx_queue_len = adapter->tx_queue_len;
  	adapter->link_speed = 0;
--- 729,737 ----
  	del_timer_sync(&adapter->phy_info_timer);
  
  #ifdef CONFIG_E1000_NAPI
! 	//	netif_poll_disable(netdev);
!         napi_disable(&adapter->napi);
!         atomic_set(&adapter->irq_sem, 0);
  #endif
  	netdev->tx_queue_len = adapter->tx_queue_len;
  	adapter->link_speed = 0;
***************
*** 984,990 ****
  	if (!netdev)
  		goto err_alloc_etherdev;
  
! 	SET_MODULE_OWNER(netdev);
  	SET_NETDEV_DEV(netdev, &pdev->dev);
  
  	pci_set_drvdata(pdev, netdev);
--- 992,998 ----
  	if (!netdev)
  		goto err_alloc_etherdev;
  
! 	//SET_MODULE_OWNER(netdev);
  	SET_NETDEV_DEV(netdev, &pdev->dev);
  
  	pci_set_drvdata(pdev, netdev);
***************
*** 1026,1033 ****
  	netdev->watchdog_timeo = 5 * HZ;
  #endif
  #ifdef CONFIG_E1000_NAPI
! 	netdev->poll = &e1000_clean;
! 	netdev->weight = 64;
  #endif
  #ifdef NETIF_F_HW_VLAN_TX
  	netdev->vlan_rx_register = e1000_vlan_rx_register;
--- 1034,1042 ----
  	netdev->watchdog_timeo = 5 * HZ;
  #endif
  #ifdef CONFIG_E1000_NAPI
! 	//	netdev->poll = &e1000_clean;
! 	//	netdev->weight = 64;
! 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
  #endif
  #ifdef NETIF_F_HW_VLAN_TX
  	netdev->vlan_rx_register = e1000_vlan_rx_register;
***************
*** 1169,1177 ****
  	adapter->phy_info_timer.function = &e1000_update_phy_info;
  	adapter->phy_info_timer.data = (unsigned long) adapter;
  
! 	INIT_WORK(&adapter->reset_task,
! 		(void (*)(void *))e1000_reset_task, netdev);
! 
  	e1000_check_options(adapter);
  
  	/* Initial Wake on LAN setting
--- 1178,1187 ----
  	adapter->phy_info_timer.function = &e1000_update_phy_info;
  	adapter->phy_info_timer.data = (unsigned long) adapter;
  
! 	//INIT_WORK(&adapter->reset_task,
! 	//	  (void (*)(void *))e1000_reset_task);
! 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
!        
  	e1000_check_options(adapter);
  
  	/* Initial Wake on LAN setting
***************
*** 1450,1457 ****
  #ifdef CONFIG_E1000_NAPI
  	for (i = 0; i < adapter->num_rx_queues; i++) {
  		adapter->polling_netdev[i].priv = adapter;
! 		adapter->polling_netdev[i].poll = &e1000_clean;
! 		adapter->polling_netdev[i].weight = 64;
  		dev_hold(&adapter->polling_netdev[i]);
  		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
  	}
--- 1460,1467 ----
  #ifdef CONFIG_E1000_NAPI
  	for (i = 0; i < adapter->num_rx_queues; i++) {
  		adapter->polling_netdev[i].priv = adapter;
! 		//adapter->polling_netdev[i].poll = &e1000_clean;
! 		//adapter->polling_netdev[i].weight = 64;
  		dev_hold(&adapter->polling_netdev[i]);
  		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
  	}
***************
*** 1606,1612 ****
  	if ((adapter->hw.mng_cookie.status &
  			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  	     !(adapter->vlgrp &&
! 			  adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
  		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  	}
  #endif
--- 1616,1623 ----
  	if ((adapter->hw.mng_cookie.status &
  			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
  	     !(adapter->vlgrp &&
! 		vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
! /*			  adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {*/
  		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
  	}
  #endif
***************
*** 3043,3078 ****
  				return err;
  		}
  
! 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  		mss = skb_shinfo(skb)->gso_size;
  		if (skb->protocol == htons(ETH_P_IP)) {
! 			skb->nh.iph->tot_len = 0;
! 			skb->nh.iph->check = 0;
! 			skb->h.th->check =
! 				~csum_tcpudp_magic(skb->nh.iph->saddr,
! 						   skb->nh.iph->daddr,
! 						   0,
! 						   IPPROTO_TCP,
! 						   0);
  			cmd_length = E1000_TXD_CMD_IP;
! 			ipcse = skb->h.raw - skb->data - 1;
  #ifdef NETIF_F_TSO6
  		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
! 			skb->nh.ipv6h->payload_len = 0;
! 			skb->h.th->check =
! 				~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
! 						 &skb->nh.ipv6h->daddr,
! 						 0,
! 						 IPPROTO_TCP,
! 						 0);
! 			ipcse = 0;
  #endif
  		}
! 		ipcss = skb->nh.raw - skb->data;
! 		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
! 		tucss = skb->h.raw - skb->data;
! 		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
! 		tucse = 0;
  
  		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
  			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
--- 3054,3086 ----
  				return err;
  		}
  
! 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  		mss = skb_shinfo(skb)->gso_size;
  		if (skb->protocol == htons(ETH_P_IP)) {
! 			struct iphdr *iph = ip_hdr(skb);
!                         iph->tot_len = 0;
!                         iph->check = 0;
!                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
!                                                                  iph->daddr, 0,
!                                                                  IPPROTO_TCP,
!                                                                  0);
  			cmd_length = E1000_TXD_CMD_IP;
!  			ipcse = skb_transport_offset(skb) - 1;
  #ifdef NETIF_F_TSO6
  		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
! 			ipv6_hdr(skb)->payload_len = 0;
!                         tcp_hdr(skb)->check =
!                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
!                                                  &ipv6_hdr(skb)->daddr,
!                                                  0, IPPROTO_TCP, 0);
!                         ipcse = 0;
  #endif
  		}
!                 ipcss = skb_network_offset(skb);
!                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
!                 tucss = skb_transport_offset(skb);
!                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
!                 tucse = 0;
  
  		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
  			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
***************
*** 3114,3120 ****
  	uint8_t css;
  
  	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
! 		css = skb->h.raw - skb->data;
  
  		i = tx_ring->next_to_use;
  		buffer_info = &tx_ring->buffer_info[i];
--- 3122,3128 ----
  	uint8_t css;
  
  	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
! 		css = skb_transport_offset(skb);
  
  		i = tx_ring->next_to_use;
  		buffer_info = &tx_ring->buffer_info[i];
***************
*** 3122,3128 ****
  
  		context_desc->lower_setup.ip_config = 0;
  		context_desc->upper_setup.tcp_fields.tucss = css;
! 		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
  		context_desc->upper_setup.tcp_fields.tucse = 0;
  		context_desc->tcp_seg_setup.data = 0;
  		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
--- 3130,3136 ----
  
  		context_desc->lower_setup.ip_config = 0;
  		context_desc->upper_setup.tcp_fields.tucss = css;
! 		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
  		context_desc->upper_setup.tcp_fields.tucse = 0;
  		context_desc->tcp_seg_setup.data = 0;
  		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
***************
*** 3469,3475 ****
  		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
  		* points to just header, pull a few bytes of payload from
  		* frags into skb->data */
! 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
  		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
  			switch (adapter->hw.mac_type) {
  				unsigned int pull_size;
--- 3477,3483 ----
  		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
  		* points to just header, pull a few bytes of payload from
  		* frags into skb->data */
! 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
  			switch (adapter->hw.mac_type) {
  				unsigned int pull_size;
***************
*** 3626,3636 ****
  }
  
  static void
! e1000_reset_task(struct net_device *netdev)
  {
! 	struct e1000_adapter *adapter = netdev_priv(netdev);
! 
! 	e1000_reinit_locked(adapter);
  }
  
  /**
--- 3634,3646 ----
  }
  
  static void
! e1000_reset_task(struct work_struct *work)
! //e1000_reset_task(struct net_device *netdev)
  {
!   //struct e1000_adapter *adapter = netdev_priv(netdev);
!   struct e1000_adapter *adapter =
!     container_of(work, struct e1000_adapter, reset_task);
!   e1000_reinit_locked(adapter);
  }
  
  /**
***************
*** 4055,4066 ****
  		E1000_WRITE_REG(hw, IMC, ~0);
  		E1000_WRITE_FLUSH(hw);
  	}
! 	if (likely(netif_rx_schedule_prep(netdev))) {
  		adapter->total_tx_bytes = 0;
  		adapter->total_tx_packets = 0;
  		adapter->total_rx_bytes = 0;
  		adapter->total_rx_packets = 0;
! 		__netif_rx_schedule(netdev);
  	} else
  		/* this really should not happen! if it does it is basically a
  		 * bug, but not a hard error, so enable ints and continue */
--- 4065,4077 ----
  		E1000_WRITE_REG(hw, IMC, ~0);
  		E1000_WRITE_FLUSH(hw);
  	}
! 	//if (likely(netif_rx_schedule_prep(netdev))) {
! 	if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
  		adapter->total_tx_bytes = 0;
  		adapter->total_tx_packets = 0;
  		adapter->total_rx_bytes = 0;
  		adapter->total_rx_packets = 0;
! 		__netif_rx_schedule(netdev, &adapter->napi);
  	} else
  		/* this really should not happen! if it does it is basically a
  		 * bug, but not a hard error, so enable ints and continue */
***************
*** 4130,4176 ****
   **/
  
  static int
! e1000_clean(struct net_device *poll_dev, int *budget)
  {
! 	struct e1000_adapter *adapter;
! 	int work_to_do = min(*budget, poll_dev->quota);
! 	int tx_cleaned = 0, work_done = 0;
  
! 	/* Must NOT use netdev_priv macro here. */
! 	adapter = poll_dev->priv;
  
! 	/* Keep link state information with original netdev */
! 	if (!netif_carrier_ok(poll_dev))
! 		goto quit_polling;
! 
! 	/* e1000_clean is called per-cpu.  This lock protects
! 	 * tx_ring[0] from being cleaned by multiple cpus
! 	 * simultaneously.  A failure obtaining the lock means
! 	 * tx_ring[0] is currently being cleaned anyway. */
! 	if (spin_trylock(&adapter->tx_queue_lock)) {
! 		tx_cleaned = e1000_clean_tx_irq(adapter,
! 		                                &adapter->tx_ring[0]);
! 		spin_unlock(&adapter->tx_queue_lock);
! 	}
! 
! 	adapter->clean_rx(adapter, &adapter->rx_ring[0],
! 	                  &work_done, work_to_do);
! 
! 	*budget -= work_done;
! 	poll_dev->quota -= work_done;
! 
! 	/* If no Tx and not enough Rx work done, exit the polling mode */
! 	if ((tx_cleaned && (work_done < work_to_do)) ||
! 	   !netif_running(poll_dev)) {
! quit_polling:
! 		if (likely(adapter->itr_setting & 3))
! 			e1000_set_itr(adapter);
! 		netif_rx_complete(poll_dev);
! 		e1000_irq_enable(adapter);
! 		return 0;
! 	}
  
- 	return 1;
  }
  
  #endif
--- 4141,4182 ----
   **/
  
  static int
! //e1000_clean(struct net_device *poll_dev, int *budget)
! e1000_clean(struct napi_struct *napi, int budget)
  {
!   struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
!   struct net_device *poll_dev = adapter->netdev;
!   int tx_cleaned = 0, work_done = 0;
! 
!   /* Must NOT use netdev_priv macro here. */
!   adapter = poll_dev->priv;
! 
!   /* e1000_clean is called per-cpu.  This lock protects
!    * tx_ring[0] from being cleaned by multiple cpus
!    * simultaneously.  A failure obtaining the lock means
!    * tx_ring[0] is currently being cleaned anyway. */
!   if (spin_trylock(&adapter->tx_queue_lock)) {
!     tx_cleaned = e1000_clean_tx_irq(adapter,
! 				    &adapter->tx_ring[0]);
!     spin_unlock(&adapter->tx_queue_lock);
!   }
  
!   adapter->clean_rx(adapter, &adapter->rx_ring[0],
! 		    &work_done, budget);
  
!   if (tx_cleaned)
!     work_done = budget;
! 
!   /* If budget not fully consumed, exit the polling mode */
!   if (work_done < budget) {
!     if (likely(adapter->itr_setting & 3))
!       e1000_set_itr(adapter);
!     netif_rx_complete(poll_dev, napi);
!     e1000_irq_enable(adapter);
!   }
! 
!   return work_done;
  
  }
  
  #endif
***************
*** 5176,5185 ****
  	uint32_t vfta, index;
  
  	e1000_irq_disable(adapter);
! 
! 	if (adapter->vlgrp)
! 		adapter->vlgrp->vlan_devices[vid] = NULL;
! 
  	e1000_irq_enable(adapter);
  
  	if ((adapter->hw.mng_cookie.status &
--- 5182,5188 ----
  	uint32_t vfta, index;
  
  	e1000_irq_disable(adapter);
! 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
  	e1000_irq_enable(adapter);
  
  	if ((adapter->hw.mng_cookie.status &
***************
*** 5205,5211 ****
  	if (adapter->vlgrp) {
  		uint16_t vid;
  		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
! 			if (!adapter->vlgrp->vlan_devices[vid])
  				continue;
  			e1000_vlan_rx_add_vid(adapter->netdev, vid);
  		}
--- 5208,5214 ----
  	if (adapter->vlgrp) {
  		uint16_t vid;
  		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
! 		 if (!vlan_group_get_device(adapter->vlgrp, vid))	
  				continue;
  			e1000_vlan_rx_add_vid(adapter->netdev, vid);
  		}
