Author: vmaffione
Date: Wed Feb  6 10:00:40 2019
New Revision: 343834
URL: https://svnweb.freebsd.org/changeset/base/343834

Log:
  MFC r343549
  
  netmap: add notifications on kloop stop
  
  On sync-kloop stop, send a wake-up signal to the kloop, so that
  waiting for the timeout is not needed.
  Also, improve logging in netmap_freebsd.c.

Modified:
  stable/11/sys/dev/netmap/netmap_bdg.c
  stable/11/sys/dev/netmap/netmap_freebsd.c
  stable/11/sys/dev/netmap/netmap_kloop.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/dev/netmap/netmap_bdg.c
==============================================================================
--- stable/11/sys/dev/netmap/netmap_bdg.c       Wed Feb  6 09:59:20 2019        
(r343833)
+++ stable/11/sys/dev/netmap/netmap_bdg.c       Wed Feb  6 10:00:40 2019        
(r343834)
@@ -1141,8 +1141,8 @@ netmap_bwrap_intr_notify(struct netmap_kring *kring, i
                goto put_out;
        if (kring->nr_hwcur == kring->nr_hwtail) {
                if (netmap_verbose)
-                       nm_prerr("how strange, interrupt with no packets on %s",
-                           na->name);
+                       nm_prlim(1, "interrupt with no packets on %s",
+                               kring->name);
                goto put_out;
        }
 

Modified: stable/11/sys/dev/netmap/netmap_freebsd.c
==============================================================================
--- stable/11/sys/dev/netmap/netmap_freebsd.c   Wed Feb  6 09:59:20 2019        
(r343833)
+++ stable/11/sys/dev/netmap/netmap_freebsd.c   Wed Feb  6 10:00:40 2019        
(r343834)
@@ -238,7 +238,7 @@ nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *dat
        static int notsupported = 0;
        if (!notsupported) {
                notsupported = 1;
-               D("inet4 segmentation not supported");
+               nm_prerr("inet4 segmentation not supported");
        }
 #endif
 }
@@ -254,7 +254,7 @@ nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *
        static int notsupported = 0;
        if (!notsupported) {
                notsupported = 1;
-               D("inet6 segmentation not supported");
+               nm_prerr("inet6 segmentation not supported");
        }
 #endif
 }
@@ -286,8 +286,9 @@ freebsd_generic_rx_handler(struct ifnet *ifp, struct m
 {
        int stolen;
 
-       if (!NM_NA_VALID(ifp)) {
-               RD(1, "Warning: got RX packet for invalid emulated adapter");
+       if (unlikely(!NM_NA_VALID(ifp))) {
+               nm_prlim(1, "Warning: RX packet intercepted, but no"
+                               " emulated adapter");
                return;
        }
 
@@ -313,15 +314,16 @@ nm_os_catch_rx(struct netmap_generic_adapter *gna, int
        nm_os_ifnet_lock();
        if (intercept) {
                if (gna->save_if_input) {
-                       D("cannot intercept again");
-                       ret = EINVAL; /* already set */
+                       nm_prerr("RX on %s already intercepted", na->name);
+                       ret = EBUSY; /* already set */
                        goto out;
                }
                gna->save_if_input = ifp->if_input;
                ifp->if_input = freebsd_generic_rx_handler;
        } else {
-               if (!gna->save_if_input){
-                       D("cannot restore");
+               if (!gna->save_if_input) {
+                       nm_prerr("Failed to undo RX intercept on %s",
+                               na->name);
                        ret = EINVAL;  /* not saved */
                        goto out;
                }
@@ -390,11 +392,11 @@ nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
         * we need to copy from the cluster to the netmap buffer.
         */
        if (MBUF_REFCNT(m) != 1) {
-               D("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
+               nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
                panic("in generic_xmit_frame");
        }
        if (m->m_ext.ext_size < len) {
-               RD(5, "size %d < len %d", m->m_ext.ext_size, len);
+               nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len);
                len = m->m_ext.ext_size;
        }
        bcopy(a->addr, m->m_data, len);
@@ -457,7 +459,6 @@ nm_os_generic_set_features(struct netmap_generic_adapt
 void
 nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct 
netmap_adapter *na)
 {
-       ND("called");
        mit->mit_pending = 0;
        mit->mit_ring_idx = idx;
        mit->mit_na = na;
@@ -467,21 +468,19 @@ nm_os_mitigation_init(struct nm_generic_mit *mit, int 
 void
 nm_os_mitigation_start(struct nm_generic_mit *mit)
 {
-       ND("called");
 }
 
 
 void
 nm_os_mitigation_restart(struct nm_generic_mit *mit)
 {
-       ND("called");
 }
 
 
 int
 nm_os_mitigation_active(struct nm_generic_mit *mit)
 {
-       ND("called");
+
        return 0;
 }
 
@@ -489,12 +488,12 @@ nm_os_mitigation_active(struct nm_generic_mit *mit)
 void
 nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
 {
-       ND("called");
 }
 
 static int
 nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
 {
+
        return EINVAL;
 }
 
@@ -557,7 +556,7 @@ nm_vi_free_index(uint8_t val)
                }
        }
        if (lim == nm_vi_indices.active)
-               D("funny, index %u didn't found", val);
+               nm_prerr("Index %u not found", val);
        mtx_unlock(&nm_vi_indices.lock);
 }
 #undef NM_VI_MAX
@@ -595,7 +594,7 @@ nm_os_vi_persist(const char *name, struct ifnet **ret)
 
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
-               D("if_alloc failed");
+               nm_prerr("if_alloc failed");
                return ENOMEM;
        }
        if_initname(ifp, name, IF_DUNIT_NONE);
@@ -636,7 +635,7 @@ struct nm_os_extmem {
 void
 nm_os_extmem_delete(struct nm_os_extmem *e)
 {
-       D("freeing %zx bytes", (size_t)e->size);
+       nm_prinf("freeing %zx bytes", (size_t)e->size);
        vm_map_remove(kernel_map, e->kva, e->kva + e->size);
        nm_os_free(e);
 }
@@ -686,7 +685,7 @@ nm_os_extmem_create(unsigned long p, struct nmreq_pool
        rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
                        &obj, &index, &prot, &wired);
        if (rv != KERN_SUCCESS) {
-               D("address %lx not found", p);
+               nm_prerr("address %lx not found", p);
                goto out_free;
        }
        /* check that we are given the whole vm_object ? */
@@ -705,13 +704,13 @@ nm_os_extmem_create(unsigned long p, struct nmreq_pool
                        VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
                        VM_PROT_READ | VM_PROT_WRITE, 0);
        if (rv != KERN_SUCCESS) {
-               D("vm_map_find(%zx) failed", (size_t)e->size);
+               nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
                goto out_rel;
        }
        rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        if (rv != KERN_SUCCESS) {
-               D("vm_map_wire failed");
+               nm_prerr("vm_map_wire failed");
                goto out_rem;
        }
 
@@ -793,7 +792,7 @@ nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev,
 {
        int rid;
 
-       D("ptn_memdev_driver iomap");
+       nm_prinf("ptn_memdev_driver iomap");
 
        rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
        *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
@@ -812,7 +811,7 @@ nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev,
        *nm_paddr = rman_get_start(ptn_dev->pci_mem);
        *nm_addr = rman_get_virtual(ptn_dev->pci_mem);
 
-       D("=== BAR %d start %lx len %lx mem_size %lx ===",
+       nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
                        PTNETMAP_MEM_PCI_BAR,
                        (unsigned long)(*nm_paddr),
                        (unsigned long)rman_get_size(ptn_dev->pci_mem),
@@ -830,7 +829,7 @@ nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev
 void
 nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
 {
-       D("ptn_memdev_driver iounmap");
+       nm_prinf("ptn_memdev_driver iounmap");
 
        if (ptn_dev->pci_mem) {
                bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
@@ -866,8 +865,6 @@ ptn_memdev_attach(device_t dev)
        int rid;
        uint16_t mem_id;
 
-       D("ptn_memdev_driver attach");
-
        ptn_dev = device_get_softc(dev);
        ptn_dev->dev = dev;
 
@@ -891,7 +888,7 @@ ptn_memdev_attach(device_t dev)
        }
        netmap_mem_get(ptn_dev->nm_mem);
 
-       D("ptn_memdev_driver probe OK - host_mem_id: %d", mem_id);
+       nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);
 
        return (0);
 }
@@ -902,10 +899,11 @@ ptn_memdev_detach(device_t dev)
 {
        struct ptnetmap_memdev *ptn_dev;
 
-       D("ptn_memdev_driver detach");
        ptn_dev = device_get_softc(dev);
 
        if (ptn_dev->nm_mem) {
+               nm_prinf("ptnetmap memdev detached, host memid %u",
+                       netmap_mem_get_id(ptn_dev->nm_mem));
                netmap_mem_put(ptn_dev->nm_mem);
                ptn_dev->nm_mem = NULL;
        }
@@ -926,7 +924,6 @@ ptn_memdev_detach(device_t dev)
 static int
 ptn_memdev_shutdown(device_t dev)
 {
-       D("ptn_memdev_driver shutdown");
        return bus_generic_shutdown(dev);
 }
 
@@ -951,7 +948,7 @@ netmap_dev_pager_ctor(void *handle, vm_ooffset_t size,
        struct netmap_vm_handle_t *vmh = handle;
 
        if (netmap_verbose)
-               D("handle %p size %jd prot %d foff %jd",
+               nm_prinf("handle %p size %jd prot %d foff %jd",
                        handle, (intmax_t)size, prot, (intmax_t)foff);
        if (color)
                *color = 0;
@@ -968,7 +965,7 @@ netmap_dev_pager_dtor(void *handle)
        struct netmap_priv_d *priv = vmh->priv;
 
        if (netmap_verbose)
-               D("handle %p", handle);
+               nm_prinf("handle %p", handle);
        netmap_dtor(priv);
        free(vmh, M_DEVBUF);
        dev_rel(dev);
@@ -987,7 +984,7 @@ netmap_dev_pager_fault(vm_object_t object, vm_ooffset_
        vm_memattr_t memattr;
        vm_pindex_t pidx;
 
-       ND("object %p offset %jd prot %d mres %p",
+       nm_prdis("object %p offset %jd prot %d mres %p",
                        object, (intmax_t)offset, prot, mres);
        memattr = object->memattr;
        pidx = OFF_TO_IDX(offset);
@@ -1043,7 +1040,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *fo
        vm_object_t obj;
 
        if (netmap_verbose)
-               D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
+               nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
                    (intmax_t )*foff, (intmax_t )objsize, objp, prot);
 
        vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
@@ -1068,7 +1065,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *fo
                &netmap_cdev_pager_ops, objsize, prot,
                *foff, NULL);
        if (obj == NULL) {
-               D("cdev_pager_allocate failed");
+               nm_prerr("cdev_pager_allocate failed");
                error = EINVAL;
                goto err_deref;
        }
@@ -1102,7 +1099,7 @@ static int
 netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
 {
        if (netmap_verbose)
-               D("dev %p fflag 0x%x devtype %d td %p",
+               nm_prinf("dev %p fflag 0x%x devtype %d td %p",
                        dev, fflag, devtype, td);
        return 0;
 }
@@ -1253,11 +1250,11 @@ nm_os_kctx_worker_start(struct nm_kctx *nmk)
                goto err;
        }
 
-       D("nm_kthread started td %p", nmk->worker);
+       nm_prinf("nm_kthread started td %p", nmk->worker);
 
        return 0;
 err:
-       D("nm_kthread start failed err %d", error);
+       nm_prerr("nm_kthread start failed err %d", error);
        nmk->worker = NULL;
        return error;
 }
@@ -1335,7 +1332,7 @@ netmap_knrdetach(struct knote *kn)
        struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
        struct selinfo *si = &priv->np_si[NR_RX]->si;
 
-       D("remove selinfo %p", si);
+       nm_prinf("remove selinfo %p", si);
        knlist_remove(&si->si_note, kn, /*islocked=*/0);
 }
 
@@ -1345,7 +1342,7 @@ netmap_knwdetach(struct knote *kn)
        struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
        struct selinfo *si = &priv->np_si[NR_TX]->si;
 
-       D("remove selinfo %p", si);
+       nm_prinf("remove selinfo %p", si);
        knlist_remove(&si->si_note, kn, /*islocked=*/0);
 }
 
@@ -1416,17 +1413,17 @@ netmap_kqfilter(struct cdev *dev, struct knote *kn)
        int ev = kn->kn_filter;
 
        if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
-               D("bad filter request %d", ev);
+               nm_prerr("bad filter request %d", ev);
                return 1;
        }
        error = devfs_get_cdevpriv((void**)&priv);
        if (error) {
-               D("device not yet setup");
+               nm_prerr("device not yet setup");
                return 1;
        }
        na = priv->np_na;
        if (na == NULL) {
-               D("no netmap adapter for this file descriptor");
+               nm_prerr("no netmap adapter for this file descriptor");
                return 1;
        }
        /* the si is indicated in the priv */
@@ -1533,7 +1530,7 @@ netmap_loader(__unused struct module *module, int even
                 * then the module can not be unloaded.
                 */
                if (netmap_use_count) {
-                       D("netmap module can not be unloaded - 
netmap_use_count: %d",
+                       nm_prerr("netmap module can not be unloaded - 
netmap_use_count: %d",
                                        netmap_use_count);
                        error = EBUSY;
                        break;

Modified: stable/11/sys/dev/netmap/netmap_kloop.c
==============================================================================
--- stable/11/sys/dev/netmap/netmap_kloop.c     Wed Feb  6 09:59:20 2019        
(r343833)
+++ stable/11/sys/dev/netmap/netmap_kloop.c     Wed Feb  6 10:00:40 2019        
(r343834)
@@ -567,16 +567,12 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct n
                /* Poll for notifications coming from the netmap rings bound to
                 * this file descriptor. */
                {
-                       NM_SELINFO_T *si[NR_TXRX];
-
                        NMG_LOCK();
-                       si[NR_RX] = nm_si_user(priv, NR_RX) ? &na->si[NR_RX] :
-                               &na->rx_rings[priv->np_qfirst[NR_RX]]->si;
-                       si[NR_TX] = nm_si_user(priv, NR_TX) ? &na->si[NR_TX] :
-                               &na->tx_rings[priv->np_qfirst[NR_TX]]->si;
+                       poll_wait(priv->np_filp, priv->np_si[NR_TX],
+                           &poll_ctx->wait_table);
+                       poll_wait(priv->np_filp, priv->np_si[NR_RX],
+                           &poll_ctx->wait_table);
                        NMG_UNLOCK();
-                       poll_wait(priv->np_filp, si[NR_TX], 
&poll_ctx->wait_table);
-                       poll_wait(priv->np_filp, si[NR_RX], 
&poll_ctx->wait_table);
                }
 #else   /* SYNC_KLOOP_POLL */
                opt->nro_status = EOPNOTSUPP;
@@ -657,7 +653,7 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct n
                        /* If a poll context is present, yield to the scheduler
                         * waiting for a notification to come either from
                         * netmap or the application. */
-                       schedule_timeout(msecs_to_jiffies(20000));
+                       schedule_timeout(msecs_to_jiffies(3000));
                } else
 #endif /* SYNC_KLOOP_POLL */
                {
@@ -708,12 +704,31 @@ out:
 int
 netmap_sync_kloop_stop(struct netmap_priv_d *priv)
 {
+       struct netmap_adapter *na;
        bool running = true;
        int err = 0;
 
+       if (priv->np_nifp == NULL) {
+               return ENXIO;
+       }
+       mb(); /* make sure following reads are not from cache */
+
+       na = priv->np_na;
+       if (!nm_netmap_on(na)) {
+               return ENXIO;
+       }
+
+       /* Set the kloop stopping flag. */
        NMG_LOCK();
        priv->np_kloop_state |= NM_SYNC_KLOOP_STOPPING;
        NMG_UNLOCK();
+
+       /* Send a notification to the kloop, in case it is blocked in
+        * schedule_timeout(). We can use either RX or TX, because the
+        * kloop is waiting on both. */
+       nm_os_selwakeup(priv->np_si[NR_RX]);
+
+       /* Wait for the kloop to actually terminate. */
        while (running) {
                usleep_range(1000, 1500);
                NMG_LOCK();
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to