On Thu, Oct 09, 2025 at 02:43:03PM -0400, Nick Holland wrote:
> this time, without the ACPI dumps, to reduce size of the
> message.
> 
> Nick.
> 
> -------- Forwarded Message --------
> Subject: ixl issue on current snapshots
> Date: Thu, 9 Oct 2025 14:18:00 -0400
> From: Nick Holland <[email protected]>
> To: [email protected] <[email protected]>
> 
> 
> > Synopsis:   ixl/carp stops working a few minutes after boot on very recent 
> > snapshots
> > Category:   amd64
> > Environment:
>       System      : OpenBSD 7.8
>       Details     : OpenBSD 7.8-beta (GENERIC.MP) #35: Thu Sep 18 16:01:31 
> MDT 2025
>                        
> [email protected]:/usr/src/sys/arch/amd64/compile/GENERIC.MP
> 
>       Architecture: OpenBSD.amd64
>       Machine     : amd64
> > Description:
>       After upgrading to the Oct 4 snapshot, the ixl(4) port that is part
>       of a carp(4) failover system stops working.  A second ixl port that is 
> not
>       part of a carp group (but also gets little traffic) seems to keep 
> working.
>       The Sep 18 snapshot works properly, the next snapshot, Sep 28, shows the
>       problem.
> 
>       Sep 18: good
>       Sep 28: bad
>       Oct  4: bad
>       Oct  8: bad
> 
>       System will come up well enough to start serving, but will soon (within
>       a couple minutes) stop responding to new traffic.  Old connections
>       sometimes stay active for some time, but new connections cannot be
>       made.  At the moment, I can ping my personal mail server from this 
> machine
>       (they aren't even in the same country), but a machine on the same subnet
>       does not respond to pings.  CARP fails over properly to the machine 
> running
>       an older snapshot.
 
Please share the kstat ixlX::: output.

Looking at your timeline please try to revert the last diff to if_ixl.c
from Sep 17.

Diff below is what got committed (use patch -R).
-- 
:wq Claudio

commit 3740669c66c2a45c58f3253e921ac46892cda543
Author: jan <[email protected]>
Date:   Wed Sep 17 12:54

    ixl/ice(4): use 128 segments for DMA maps of TSO packets
    
    This avoids unnecessary m_defrag() calls and gain some performance.
    
    with tweaks for kettenis and bluhm
    
    ok kettenis, bluhm

diff --git sys/dev/pci/if_ixl.c sys/dev/pci/if_ixl.c
index 6d32e19549d..c61c2a51a7b 100644
--- sys/dev/pci/if_ixl.c
+++ sys/dev/pci/if_ixl.c
@@ -900,6 +900,7 @@ struct ixl_rx_wb_desc_32 {
 } __packed __aligned(16);
 
 #define IXL_TX_PKT_DESCS               8
+#define IXL_TX_TSO_PKT_DESCS           128
 #define IXL_TX_QUEUE_ALIGN             128
 #define IXL_RX_QUEUE_ALIGN             128
 
@@ -1142,6 +1143,7 @@ struct ixl_chip;
 struct ixl_tx_map {
        struct mbuf             *txm_m;
        bus_dmamap_t             txm_map;
+       bus_dmamap_t             txm_map_tso;
        unsigned int             txm_eop;
 };
 
@@ -2584,6 +2586,12 @@ ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
                    &txm->txm_map) != 0)
                        goto uncreate;
 
+               if (bus_dmamap_create(sc->sc_dmat,
+                   MAXMCLBYTES, IXL_TX_TSO_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
+                   BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
+                   &txm->txm_map_tso) != 0)
+                       goto uncreate;
+
                txm->txm_eop = -1;
                txm->txm_m = NULL;
        }
@@ -2600,10 +2608,10 @@ uncreate:
        for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
                txm = &maps[i];
 
-               if (txm->txm_map == NULL)
-                       continue;
-
-               bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
+               if (txm->txm_map != NULL)
+                       bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
+               if (txm->txm_map_tso != NULL)
+                       bus_dmamap_destroy(sc->sc_dmat, txm->txm_map_tso);
        }
 
        ixl_dmamem_free(sc, &txr->txr_mem);
@@ -2680,7 +2688,10 @@ ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
                if (txm->txm_m == NULL)
                        continue;
 
-               map = txm->txm_map;
+               if (ISSET(txm->txm_m->m_pkthdr.csum_flags, M_TCP_TSO))
+                       map = txm->txm_map_tso;
+               else
+                       map = txm->txm_map;
                bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, map);
@@ -2739,6 +2750,7 @@ ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
                txm = &maps[i];
 
                bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
+               bus_dmamap_destroy(sc->sc_dmat, txm->txm_map_tso);
        }
 
        ixl_dmamem_free(sc, &txr->txr_mem);
@@ -2885,7 +2897,7 @@ ixl_start(struct ifqueue *ifq)
 
        for (;;) {
                /* We need one extra descriptor for TSO packets. */
-               if (free <= (IXL_TX_PKT_DESCS + 1)) {
+               if (free <= (IXL_TX_TSO_PKT_DESCS + 1)) {
                        ifq_set_oactive(ifq);
                        break;
                }
@@ -2897,7 +2909,10 @@ ixl_start(struct ifqueue *ifq)
                offload = ixl_tx_setup_offload(m, txr, prod);
 
                txm = &txr->txr_maps[prod];
-               map = txm->txm_map;
+               if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO))
+                       map = txm->txm_map_tso;
+               else
+                       map = txm->txm_map;
 
                if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
                        prod++;
@@ -2988,7 +3003,10 @@ ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
                if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
                        break;
 
-               map = txm->txm_map;
+               if (ISSET(txm->txm_m->m_pkthdr.csum_flags, M_TCP_TSO))
+                       map = txm->txm_map_tso;
+               else
+                       map = txm->txm_map;
 
                bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);

Reply via email to