From: David Woodhouse <david.woodho...@intel.com>

This is a minor optimisation, but as a side-effect it means we can know
precisely which descriptors were already in the ring when we last
prodded it to run.

This will give us a better chance to catch the case where we get a
TxEmpty interrupt when it hasn't actually finished the descriptors we
*know* it should have seen, before it ends up being a full-blown TX
timeout and reset.

Since QEMU's emulation doesn't give TxEmpty interrupts, *always* bash on
TxPoll until we see the first TxEmpty interrupt and cp->txempty_seen
gets set.

Signed-off-by: David Woodhouse <david.woodho...@intel.com>
---
I'm actually having second thoughts about this one since realising that
QEMU doesn't implement it correctly. The workaround isn't *that* horrid
but it's not clear it's enough of a performance win — or whether it's
entirely necessary for catching my TX stall.

 drivers/net/ethernet/realtek/8139cp.c | 37 +++++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 6feff9f..67a4fcf 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -340,6 +340,9 @@ struct cp_private {
 
        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
+       unsigned                tx_running;
+       unsigned                txempty_seen;
+
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];
        u32                     tx_opts[CP_TX_RING_SIZE];
@@ -611,6 +614,22 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                handled |= cp_tx(cp);
 
+       if ((status & TxEmpty) && cp->tx_running) {
+               handled = 1;
+               /* Qemu's emulation doesn't give TxEmpty interrupts */
+               cp->txempty_seen = 1;
+               if (cp->tx_head == cp->tx_tail) {
+                       /* Out of descriptors and we have nothing more for it.
+                          Let it stop. */
+                       cp->tx_running = 0;
+               } else {
+                       /* The hardware raced with us adding a new descriptor,
+                          and we didn't get the TxEmpty IRQ in time so we
+                          didn't prod it. Prod it now to restart. */
+                       cpw8(TxPoll, NormalTxPoll);
+               }
+       }
+
        if (status & LinkChg) {
                handled = 1;
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
@@ -796,8 +815,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
                cp->tx_skb[entry] = skb;
                cp->tx_opts[entry] = flags;
-               netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-                         entry, skb->len);
+               netif_dbg(cp, tx_queued, cp->dev,
+                         "tx queued, slot %d, skblen %d r %d\n",
+                         entry, skb->len, cp->tx_running);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor, ctrl;
@@ -886,8 +906,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                wmb();
 
                cp->tx_opts[first_entry] = ctrl;
-               netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
-                         first_entry, entry, skb->len);
+               netif_dbg(cp, tx_queued, cp->dev,
+                         "tx queued, slots %d-%d, skblen %d r %d\n",
+                         first_entry, entry, skb->len, cp->tx_running);
        }
        cp->tx_head = NEXT_TX(entry);
 
@@ -895,11 +916,13 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);
 
+       if (!cp->tx_running || !cp->txempty_seen) {
+               cpw8(TxPoll, NormalTxPoll);
+               cp->tx_running = 1;
+       }
 out_unlock:
        spin_unlock_irqrestore(&cp->lock, intr_flags);
 
-       cpw8(TxPoll, NormalTxPoll);
-
        return NETDEV_TX_OK;
 out_dma_error:
        dev_kfree_skb_any(skb);
@@ -989,6 +1012,7 @@ static void cp_stop_hw (struct cp_private *cp)
 
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
+       cp->tx_running = 0;
 
        netdev_reset_queue(cp->dev);
 }
@@ -1041,6 +1065,7 @@ static inline void cp_start_hw (struct cp_private *cp)
         * This variant appears to work fine.
         */
        cpw8(Cmd, RxOn | TxOn);
+       cp->tx_running = 0;
 
        netdev_reset_queue(cp->dev);
 }
-- 
2.4.3

-- 
David Woodhouse                            Open Source Technology Centre
david.woodho...@intel.com                              Intel Corporation

Attachment: smime.p7s
Description: S/MIME cryptographic signature

Reply via email to