Author: raj
Date: Tue Oct 14 07:24:18 2008
New Revision: 183867
URL: http://svn.freebsd.org/changeset/base/183867

Log:
  Marvell Gigabit Ethernet controller driver.
  
  This supports 1Gbps Ethernet engine found on ARM-based SOCs (Orion, Kirkwood,
  Discovery), as well as on system controllers for PowerPC processors (MV64430,
  MV6446x).
  
  The following advanced features are supported:
  
    - multicast
    - VLAN tagging
    - IP/TCP/UDP checksum calculation offloading
    - polling
    - interrupt coalescing
  
  Obtained from:        Marvell, Semihalf

Added:
  head/sys/dev/mge/
  head/sys/dev/mge/if_mge.c   (contents, props changed)
  head/sys/dev/mge/if_mgevar.h   (contents, props changed)

Added: head/sys/dev/mge/if_mge.c
==============================================================================
--- /dev/null   00:00:00 1970   (empty, because file is newly added)
+++ head/sys/dev/mge/if_mge.c   Tue Oct 14 07:24:18 2008        (r183867)
@@ -0,0 +1,1757 @@
+/*-
+ * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
+ * All rights reserved.
+ *
+ * Developed by Semihalf.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of MARVELL nor the names of contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#endif
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+
+#include <net/ethernet.h>
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+
+#include <sys/sockio.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
+#define  MGE_VER2      1
+#endif
+
+#define        MV_PHY_ADDR_BASE        8
+
+#include <dev/mge/if_mgevar.h>
+#include <arm/mv/mvreg.h>
+
+#include "miibus_if.h"
+
+/* PHY registers are in the address space of the first mge unit */
+static struct mge_softc *sc_mge0 = NULL;
+
+static int mge_probe(device_t dev);
+static int mge_attach(device_t dev);
+static int mge_detach(device_t dev);
+static int mge_shutdown(device_t dev);
+static int mge_suspend(device_t dev);
+static int mge_resume(device_t dev);
+
+static int mge_miibus_readreg(device_t dev, int phy, int reg);
+static void mge_miibus_writereg(device_t dev, int phy, int reg, int value);
+
+static int mge_ifmedia_upd(struct ifnet *ifp);
+static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
+
+static void mge_init(void *arg);
+static void mge_init_locked(void *arg);
+static void mge_start(struct ifnet *ifp);
+static void mge_start_locked(struct ifnet *ifp);
+static void mge_watchdog(struct mge_softc *sc);
+static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
+
+static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
+static void mge_intr_rx(void *arg);
+static void mge_intr_rx_locked(struct mge_softc *sc, int count);
+static void mge_intr_tx(void *arg);
+static void mge_intr_tx_locked(struct mge_softc *sc);
+static void mge_intr_misc(void *arg);
+static void mge_intr_sum(void *arg);
+static void mge_intr_err(void *arg);
+static void mge_stop(struct mge_softc *sc);
+static void mge_tick(void *msc);
+static uint32_t mge_set_port_serial_control(uint32_t media);
+static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
+static void mge_set_mac_address(struct mge_softc *sc);
+static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
+    uint8_t queue);
+static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
+static int mge_allocate_dma(struct mge_softc *sc);
+static int mge_alloc_desc_dma(struct mge_softc *sc,
+    struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t 
*buffer_tag);
+static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
+    struct mbuf **mbufp, bus_addr_t *paddr);
+static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int 
error);
+static void mge_free_dma(struct mge_softc *sc);
+static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, 
uint32_t size,
+    bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
+static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
+    uint32_t status, uint16_t bufsize);
+static void mge_offload_setup_descriptor(struct mge_softc *sc,
+    struct mge_desc_wrapper *dw);
+static uint8_t mge_crc8(uint8_t *data, int size);
+static void mge_setup_multicast(struct mge_softc *sc);
+static void mge_set_rxic(struct mge_softc *sc);
+static void mge_set_txic(struct mge_softc *sc);
+static void mge_add_sysctls(struct mge_softc *sc);
+static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
+
+static device_method_t mge_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe,         mge_probe),
+       DEVMETHOD(device_attach,        mge_attach),
+       DEVMETHOD(device_detach,        mge_detach),
+       DEVMETHOD(device_shutdown,      mge_shutdown),
+       DEVMETHOD(device_suspend,       mge_suspend),
+       DEVMETHOD(device_resume,        mge_resume),
+       /* MII interface */
+       DEVMETHOD(miibus_readreg,       mge_miibus_readreg),
+       DEVMETHOD(miibus_writereg,      mge_miibus_writereg),
+       { 0, 0 }
+};
+
+static driver_t mge_driver = {
+       "mge",
+       mge_methods,
+       sizeof(struct mge_softc),
+};
+
+static devclass_t mge_devclass;
+
+DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0);
+DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
+MODULE_DEPEND(mge, ether, 1, 1, 1);
+MODULE_DEPEND(mge, miibus, 1, 1, 1);
+
+static struct resource_spec res_spec[] = {
+       { SYS_RES_MEMORY, 0, RF_ACTIVE },
+       { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+       { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
+       { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
+       { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
+       { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
+       { -1, 0 }
+};
+
+static struct {
+       driver_intr_t *handler;
+       char * description;
+} mge_intrs[MGE_INTR_COUNT] = {
+       { mge_intr_rx,  "GbE receive interrupt" },
+       { mge_intr_tx,  "GbE transmit interrupt" },
+       { mge_intr_misc,"GbE misc interrupt" },
+       { mge_intr_sum, "GbE summary interrupt" },
+       { mge_intr_err, "GbE error interrupt" },
+};
+
+/*
+ * Read the MAC address currently programmed into the MAC_ADDR_L/H
+ * registers into addr[0..5]; addr[0] is the most significant byte.
+ */
+static void
+mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
+{
+       uint32_t mac_l, mac_h;
+
+       /* XXX use currently programmed MAC address; eventually this info will
+        * be provided by the loader */
+
+       mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
+       mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
+
+       addr[0] = (mac_h & 0xff000000) >> 24;
+       addr[1] = (mac_h & 0x00ff0000) >> 16;
+       addr[2] = (mac_h & 0x0000ff00) >> 8;
+       addr[3] = (mac_h & 0x000000ff);
+       addr[4] = (mac_l & 0x0000ff00) >> 8;
+       addr[5] = (mac_l & 0x000000ff);
+}
+
+/*
+ * Program the interface's link-level address (IF_LLADDR) into the
+ * MAC_ADDR_L/H registers and install the matching unicast filter entry.
+ * Caller must hold both driver locks (asserted below).
+ *
+ * NOTE(review): if_mac is plain (possibly signed) char; bytes >= 0x80
+ * sign-extend before the shifts here -- the high bits are masked off by
+ * the 32-bit register write for mac_h, but confirm mac_l cannot carry
+ * sign-extension garbage into bits 16..31 if the register is wider.
+ */
+static void
+mge_set_mac_address(struct mge_softc *sc)
+{
+       char *if_mac;
+       uint32_t mac_l, mac_h;
+
+       MGE_GLOBAL_LOCK_ASSERT(sc);
+
+       if_mac = (char *)IF_LLADDR(sc->ifp);
+
+       mac_l = (if_mac[4] << 8) | (if_mac[5]);
+       mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
+           (if_mac[2] << 8) | (if_mac[3] << 0);
+
+       MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
+       MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
+
+       mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
+}
+
+/*
+ * Set up the unicast DA filter table: enable exactly one entry, selected
+ * by the low 4 bits of the MAC address' last byte, directing matching
+ * frames to the given RX queue; all other entries are cleared.
+ */
+static void
+mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
+{
+       uint32_t reg_idx, reg_off, reg_val, i;
+
+       /* Only the low nibble indexes the filter table */
+       last_byte &= 0xf;
+       reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
+       reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
+       /* bit0 = pass, bits 1.. = destination queue, per-byte field */
+       reg_val = (1 | (queue << 1)) << reg_off;
+
+       for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
+               if ( i == reg_idx)
+                       MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
+               else
+                       MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
+       }
+}
+
+/*
+ * Enable or disable promiscuous reception based on IFF_PROMISC.
+ * Enabling sets the UPM bit and opens every unicast/multicast filter
+ * entry toward the given queue; disabling clears UPM and the multicast
+ * filters and reinstalls the station address filter.
+ */
+static void
+mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
+{
+       uint32_t port_config;
+       uint32_t reg_val, i;
+
+       /* Enable or disable promiscuous mode as needed */
+       if (sc->ifp->if_flags & IFF_PROMISC) {
+               port_config = MGE_READ(sc, MGE_PORT_CONFIG);
+               port_config |= PORT_CONFIG_UPM;
+               MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
+
+               /* Replicate the pass+queue field into all four byte lanes */
+               reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
+                  (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
+
+               for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
+                       MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
+                       MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
+               }
+
+               for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
+                       MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
+
+       } else {
+               port_config = MGE_READ(sc, MGE_PORT_CONFIG);
+               port_config &= ~PORT_CONFIG_UPM;
+               MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
+
+               for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
+                       MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
+                       MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
+               }
+
+               /* Restores the unicast filter for the station address */
+               mge_set_mac_address(sc);
+       }
+}
+
+/*
+ * busdma load callback: store the (single) segment's bus address into
+ * the caller-supplied u_int32_t pointed to by arg.  The nseg == 1
+ * invariant is enforced by KASSERT; error is intentionally unused here.
+ */
+static void
+mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+       u_int32_t *paddr;
+
+       KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
+       paddr = arg;
+
+       *paddr = segs->ds_addr;
+}
+
+/*
+ * Allocate a fresh mbuf cluster for an RX descriptor and DMA-load it.
+ * If *mbufp already holds a buffer, its map is synced and unloaded first
+ * (the old mbuf itself is not freed here -- ownership passes to the
+ * caller via the hardware having consumed it).  On success *mbufp and
+ * *paddr are updated.  Returns ENOBUFS if no cluster is available;
+ * panics if the DMA load fails or returns multiple segments.
+ */
+static int
+mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
+    bus_addr_t *paddr)
+{
+       struct mbuf *new_mbuf;
+       bus_dma_segment_t seg[1];
+       int error;
+       int nsegs;
+
+       KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
+
+       new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+       if (new_mbuf == NULL)
+               return (ENOBUFS);
+       new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
+
+       if (*mbufp) {
+               bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
+               bus_dmamap_unload(tag, map);
+       }
+
+       error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
+           BUS_DMA_NOWAIT);
+       KASSERT(nsegs == 1, ("Too many segments returned!"));
+       if (nsegs != 1 || error)
+               panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
+
+       bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
+
+       (*mbufp) = new_mbuf;
+       (*paddr) = seg->ds_addr;
+       return (0);
+}
+
+/*
+ * Allocate and DMA-load `size` hardware descriptors into the wrapper
+ * table `tab`, link them into a circular list (walked in reverse so each
+ * descriptor's next_desc can point at the one allocated just before it;
+ * the last entry is then pointed back at the first), and create a busdma
+ * tag plus per-descriptor maps for the mbuf buffers.
+ *
+ * Returns 0 on success, ENXIO on any allocation failure.
+ * NOTE(review): on failure, descriptors allocated in earlier iterations
+ * are not released here -- presumably the caller's detach path frees
+ * them via mge_free_desc(); verify that this holds on all error paths.
+ */
+static int
+mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
+    uint32_t size, bus_dma_tag_t *buffer_tag)
+{
+       struct mge_desc_wrapper *dw;
+       bus_addr_t desc_paddr;
+       int i, error;
+
+       desc_paddr = 0;
+       for (i = size - 1; i >= 0; i--) {
+               dw = &(tab[i]);
+               error = bus_dmamem_alloc(sc->mge_desc_dtag,
+                   (void**)&(dw->mge_desc),
+                   BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
+                   &(dw->desc_dmap));
+
+               if (error) {
+                       if_printf(sc->ifp, "failed to allocate DMA memory\n");
+                       dw->mge_desc = NULL;
+                       return (ENXIO);
+               }
+
+               error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
+                   dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
+                   &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
+
+               if (error) {
+                       if_printf(sc->ifp, "can't load descriptor\n");
+                       bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
+                           dw->desc_dmap);
+                       dw->mge_desc = NULL;
+                       return (ENXIO);
+               }
+
+               /* Chain descriptors */
+               dw->mge_desc->next_desc = desc_paddr;
+               desc_paddr = dw->mge_desc_paddr;
+       }
+       /* Close the ring: last descriptor points back at the first */
+       tab[size - 1].mge_desc->next_desc = desc_paddr;
+
+       /* Allocate a busdma tag for mbufs. */
+       error = bus_dma_tag_create(NULL,        /* parent */
+           8, 0,                               /* alignment, boundary */
+           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
+           BUS_SPACE_MAXADDR,                  /* highaddr */
+           NULL, NULL,                         /* filtfunc, filtfuncarg */
+           MCLBYTES, 1,                        /* maxsize, nsegments */
+           MCLBYTES, 0,                        /* maxsegsz, flags */
+           NULL, NULL,                         /* lockfunc, lockfuncarg */
+           buffer_tag);                        /* dmat */
+       if (error) {
+               if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
+               return (ENXIO);
+       }
+
+       /* Create TX busdma maps */
+       for (i = 0; i < size; i++) {
+               dw = &(tab[i]);
+               error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
+               if (error) {
+                       if_printf(sc->ifp, "failed to create map for mbuf\n");
+                       return (ENXIO);
+               }
+
+               dw->buffer = (struct mbuf*)NULL;
+               dw->mge_desc->buffer = (bus_addr_t)NULL;
+       }
+
+       return (0);
+}
+
+/*
+ * Create the descriptor busdma tag, allocate the TX and RX descriptor
+ * rings, and attach a freshly loaded mbuf cluster to every RX
+ * descriptor.  Records the ring start addresses in the softc.
+ *
+ * NOTE(review): the return values of bus_dma_tag_create(),
+ * mge_alloc_desc_dma() and mge_new_rxbuf() are all ignored here, and
+ * the function unconditionally returns 0 -- a failed allocation would
+ * go unnoticed by mge_attach().  Flagging for follow-up.
+ */
+static int
+mge_allocate_dma(struct mge_softc *sc)
+{
+       int error;
+       struct mge_desc_wrapper *dw;
+       int num, i;
+
+
+       num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
+
+       /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
+       error = bus_dma_tag_create(NULL,        /* parent */
+           16, 0,                              /* alignment, boundary */
+           BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
+           BUS_SPACE_MAXADDR,                  /* highaddr */
+           NULL, NULL,                         /* filtfunc, filtfuncarg */
+           sizeof(struct mge_desc), 1,         /* maxsize, nsegments */
+           sizeof(struct mge_desc), 0,         /* maxsegsz, flags */
+           NULL, NULL,                         /* lockfunc, lockfuncarg */
+           &sc->mge_desc_dtag);                /* dmat */
+
+
+       mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
+           &sc->mge_tx_dtag);
+       mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
+           &sc->mge_rx_dtag);
+
+       for (i = 0; i < MGE_RX_DESC_NUM; i++) {
+               dw = &(sc->mge_rx_desc[i]);
+               mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
+                   &dw->mge_desc->buffer);
+       }
+
+       sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
+       sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
+
+       return (0);
+}
+
+/*
+ * Tear down a descriptor ring: destroy each buffer map (unloading it
+ * and freeing the attached mbuf first when free_mbufs is set, as for
+ * the RX ring), then unload and free the descriptor DMA memory itself.
+ */
+static void
+mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
+    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
+{
+       struct mge_desc_wrapper *dw;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               /* Free RX mbuf */
+               dw = &(tab[i]);
+
+               if (dw->buffer_dmap) {
+                       if (free_mbufs) {
+                               bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
+                       }
+                       bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
+                       if (free_mbufs)
+                               m_freem(dw->buffer);
+               }
+               /* Free RX descriptors */
+               if (dw->desc_dmap) {
+                       bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
+                       bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
+                           dw->desc_dmap);
+               }
+       }
+}
+
+/*
+ * Release all DMA state: both descriptor rings (RX mbufs included),
+ * the per-ring mbuf tags, and finally the shared descriptor tag.
+ */
+static void
+mge_free_dma(struct mge_softc *sc)
+{
+       /* Free descriptors and mbufs */
+       mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
+       mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
+
+       /* Destroy mbuf dma tag */
+       bus_dma_tag_destroy(sc->mge_tx_dtag);
+       bus_dma_tag_destroy(sc->mge_rx_dtag);
+       /* Destroy descriptors tag */
+       bus_dma_tag_destroy(sc->mge_desc_dtag);
+}
+
+/*
+ * Recover from an RX resource error: free and rebuild the whole RX
+ * descriptor ring, repoint the hardware at its start and re-enable the
+ * RX queue.  Takes and releases the receive lock itself.
+ *
+ * NOTE(review): mge_alloc_desc_dma() creates a brand-new mbuf busdma
+ * tag into &sc->mge_rx_dtag each call, while mge_free_desc() does not
+ * destroy the old one -- looks like the previous tag leaks on every
+ * reinit; confirm against bus_dma(9).
+ */
+static void
+mge_reinit_rx(struct mge_softc *sc)
+{
+       struct mge_desc_wrapper *dw;
+       int i;
+
+       MGE_RECEIVE_LOCK(sc);
+
+       mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
+
+       mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
+           &sc->mge_rx_dtag);
+
+       for (i = 0; i < MGE_RX_DESC_NUM; i++) {
+               dw = &(sc->mge_rx_desc[i]);
+               mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
+               &dw->mge_desc->buffer);
+       }
+
+       sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
+       sc->rx_desc_curr = 0;
+
+       MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
+           sc->rx_desc_start);
+
+       /* Enable RX queue */
+       MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
+
+       MGE_RECEIVE_UNLOCK(sc);
+}
+
#ifdef DEVICE_POLLING
+static poll_handler_t mge_poll;
+
+/*
+ * DEVICE_POLLING handler: under the global lock, optionally check and
+ * acknowledge the port interrupt cause registers (POLL_AND_CHECK_STATUS,
+ * including RX resource-error recovery), then service TX completions and
+ * up to `count` RX frames.
+ */
+static void
+mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+       struct mge_softc *sc = ifp->if_softc;
+       uint32_t int_cause, int_cause_ext;
+
+       MGE_GLOBAL_LOCK(sc);
+
+       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+               MGE_GLOBAL_UNLOCK(sc);
+               return;
+       }
+
+       if (cmd == POLL_AND_CHECK_STATUS) {
+               int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
+               int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
+
+               /* Check for resource error */
+               if (int_cause & MGE_PORT_INT_RXERRQ0)
+                       mge_reinit_rx(sc);
+
+               if (int_cause || int_cause_ext) {
+                       /* Ack by writing back the complement of the cause */
+                       MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
+                       MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
+               }
+       }
+
+       mge_intr_tx_locked(sc);
+       mge_intr_rx_locked(sc, count);
+
+       MGE_GLOBAL_UNLOCK(sc);
+}
+#endif /* DEVICE_POLLING */
+
+/*
+ * device_attach: initialize locks, bus resources, DMA rings, the ifnet,
+ * PHY (via miibus) and interrupt handlers for one mge unit.
+ * Returns 0 on success or an errno; most error paths unwind through
+ * mge_detach().
+ */
+static int
+mge_attach(device_t dev)
+{
+       struct mge_softc *sc;
+       struct ifnet *ifp;
+       uint8_t hwaddr[ETHER_ADDR_LEN];
+       int i, error ;
+
+       sc = device_get_softc(dev);
+       sc->dev = dev;
+
+       /* Record unit 0; PHY registers live in the first unit's space */
+       if (device_get_unit(dev) == 0)
+               sc_mge0 = sc;
+
+       /* Initialize mutexes */
+       mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", 
MTX_DEF);
+       mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", 
MTX_DEF);
+
+       /* Allocate IO and IRQ resources */
+       error = bus_alloc_resources(dev, res_spec, sc->res);
+       if (error) {
+               device_printf(dev, "could not allocate resources\n");
+               mge_detach(dev);
+               return (ENXIO);
+       }
+
+       /* Allocate DMA, buffers, buffer descriptors */
+       error = mge_allocate_dma(sc);
+       if (error) {
+               mge_detach(dev);
+               return (ENXIO);
+       }
+
+       sc->tx_desc_curr = 0;
+       sc->rx_desc_curr = 0;
+       sc->tx_desc_used_idx = 0;
+
+       /* Configure defaults for interrupts coalescing */
+       sc->rx_ic_time = 768;
+       sc->tx_ic_time = 768;
+       mge_add_sysctls(sc);
+
+       /* Allocate network interface */
+       ifp = sc->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "if_alloc() failed\n");
+               mge_detach(dev);
+               return (ENOMEM);
+       }
+
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_softc = sc;
+       ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
+       ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+       ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
+
+#ifdef DEVICE_POLLING
+       /* Advertise that polling is supported */
+       ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+       ifp->if_init = mge_init;
+       ifp->if_start = mge_start;
+       ifp->if_ioctl = mge_ioctl;
+
+       ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
+       IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+       IFQ_SET_READY(&ifp->if_snd);
+
+       mge_get_mac_address(sc, hwaddr);
+       ether_ifattach(ifp, hwaddr);
+       callout_init(&sc->wd_callout, 0);
+
+       /* Probe PHY(s) */
+       error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, 
mge_ifmedia_sts);
+       if (error) {
+               device_printf(dev, "MII failed to find PHY\n");
+               if_free(ifp);
+               sc->ifp = NULL;
+               mge_detach(dev);
+               return (error);
+       }
+       sc->mii = device_get_softc(sc->miibus);
+
+       /* Attach interrupt handlers */
+       /* NOTE(review): only the first two mge_intrs entries are wired up
+        * although res_spec allocates five IRQs -- confirm intentional.
+        * On failure, earlier resources are not unwound here. */
+       for (i = 0; i < 2; ++i) {
+               error = bus_setup_intr(dev, sc->res[1 + i],
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
+                   sc, &sc->ih_cookie[i]);
+               if (error) {
+                       device_printf(dev, "could not setup %s\n",
+                           mge_intrs[i].description);
+                       ether_ifdetach(sc->ifp);
+                       return (error);
+               }
+       }
+
+       return (0);
+}
+
+/*
+ * device_detach: stop the controller, drain the watchdog callout, tear
+ * down interrupt handlers, detach the ifnet and release DMA/bus
+ * resources and locks.  Also used as the unwind path from mge_attach(),
+ * so it tolerates sc->ifp being NULL.
+ *
+ * NOTE(review): the teardown loop runs even when attach failed before
+ * bus_setup_intr(); presumably ih_cookie[] is zeroed softc memory then
+ * -- confirm bus_teardown_intr() handles a NULL cookie gracefully.
+ */
+static int
+mge_detach(device_t dev)
+{
+       struct mge_softc *sc;
+       int error,i;
+
+       sc = device_get_softc(dev);
+
+       /* Stop controller and free TX queue */
+       if (sc->ifp)
+               mge_shutdown(dev);
+
+       /* Wait for stopping ticks */
+        callout_drain(&sc->wd_callout);
+
+       /* Stop and release all interrupts */
+       for (i = 0; i < 2; ++i) {
+               error = bus_teardown_intr(dev, sc->res[1 + i], 
sc->ih_cookie[i]);
+               if (error)
+                       device_printf(dev, "could not release %s\n",
+                           mge_intrs[i].description);
+       }
+
+       /* Detach network interface */
+       if (sc->ifp) {
+               ether_ifdetach(sc->ifp);
+               if_free(sc->ifp);
+       }
+
+       /* Free DMA resources */
+       mge_free_dma(sc);
+
+       /* Free IO memory handler */
+       bus_release_resources(dev, res_spec, sc->res);
+
+       /* Destroy mutexes */
+       mtx_destroy(&sc->receive_lock);
+       mtx_destroy(&sc->transmit_lock);
+
+       return (0);
+}
+
+/*
+ * ifmedia status callback: poll the PHY via MII and report the active
+ * media and link status.  Serialized with the transmit lock.
+ */
+static void
+mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+       struct mge_softc *sc = ifp->if_softc;
+       struct mii_data *mii;
+
+       MGE_TRANSMIT_LOCK(sc);
+
+       mii = sc->mii;
+       mii_pollstat(mii);
+
+       ifmr->ifm_active = mii->mii_media_active;
+       ifmr->ifm_status = mii->mii_media_status;
+
+       MGE_TRANSMIT_UNLOCK(sc);
+}
+
+/*
+ * Translate an ifmedia word into the PORT_SERIAL_CTRL register value:
+ * base bits (reserved bit 9, forced link fail, 1552-byte MRU) plus the
+ * speed/autoneg bits for the selected Ethernet subtype and full-duplex
+ * when IFM_FDX is set.  IFM_AUTO adds no speed-forcing bits.
+ */
+static uint32_t
+mge_set_port_serial_control(uint32_t media)
+{
+       uint32_t port_config;
+
+       port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
+           PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
+
+       if (IFM_TYPE(media) == IFM_ETHER) {
+               switch(IFM_SUBTYPE(media)) {
+                       case IFM_AUTO:
+                               break;
+                       case IFM_1000_T:
+                               port_config  |= (PORT_SERIAL_GMII_SPEED_1000 |
+                                   PORT_SERIAL_AUTONEG | 
PORT_SERIAL_AUTONEG_FC |
+                                   PORT_SERIAL_SPEED_AUTONEG);
+                               break;
+                       case IFM_100_TX:
+                               port_config  |= (PORT_SERIAL_MII_SPEED_100 |
+                                   PORT_SERIAL_AUTONEG | 
PORT_SERIAL_AUTONEG_FC |
+                                   PORT_SERIAL_SPEED_AUTONEG);
+                               break;
+                       case IFM_10_T:
+                               port_config  |= (PORT_SERIAL_AUTONEG |
+                                   PORT_SERIAL_AUTONEG_FC |
+                                   PORT_SERIAL_SPEED_AUTONEG);
+                               break;
+               }
+               if (media & IFM_FDX)
+                       port_config |= PORT_SERIAL_FULL_DUPLEX;
+       }
+       return (port_config);
+}
+
+/*
+ * ifmedia change callback: if the interface is up, latch the requested
+ * media word, push it to the PHY and reinitialize the port under the
+ * global lock.  Always returns 0.
+ */
+static int
+mge_ifmedia_upd(struct ifnet *ifp)
+{
+       struct mge_softc *sc = ifp->if_softc;
+
+       if (ifp->if_flags & IFF_UP) {
+               MGE_GLOBAL_LOCK(sc);
+
+               sc->mge_media_status = sc->mii->mii_media.ifm_media;
+               mii_mediachg(sc->mii);
+               mge_init_locked(sc);
+
+               MGE_GLOBAL_UNLOCK(sc);
+       }
+
+       return (0);
+}
+
+/*
+ * if_init entry point: lock wrapper around mge_init_locked().
+ */
+static void
+mge_init(void *arg)
+{
+       struct mge_softc *sc = arg;
+
+       MGE_GLOBAL_LOCK(sc);
+
+       mge_init_locked(arg);
+
+       MGE_GLOBAL_UNLOCK(sc);
+}
+
+/*
+ * Full (re)initialization of the port, called with the global lock
+ * held: stop the engine, program MAC/multicast filters, TX queue
+ * arbitration, MTU, port/SDMA configuration and descriptor ring
+ * pointers, refill the RX ring, enable the port, busy-wait (bounded)
+ * for link-up, set interrupt coalescing, and mark the ifnet running
+ * with the watchdog armed.
+ */
+static void
+mge_init_locked(void *arg)
+{
+       struct mge_softc *sc = arg;
+       struct mge_desc_wrapper *dw;
+       volatile uint32_t reg_val;
+       int i, count;
+
+
+       MGE_GLOBAL_LOCK_ASSERT(sc);
+
+       /* Stop interface */
+       mge_stop(sc);
+
+       /* Disable interrupts */
+       mge_intrs_ctrl(sc, 0);
+
+       /* Set MAC address */
+       mge_set_mac_address(sc);
+
+       /* Setup multicast filters */
+       mge_setup_multicast(sc);
+
+#if defined(MGE_VER2)
+       /* Kirkwood/Discovery parts: RGMII mode, fixed-priority queue 0 */
+       MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
+       MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
+#endif
+       /* Initialize TX queue configuration registers */
+       MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), MGE_TX_TOKEN_Q0_DFLT);
+       MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), MGE_TX_TOKEN_Q0_DFLT);
+       MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), MGE_TX_ARB_Q0_DFLT);
+
+       for (i = 1; i < 7; i++) {
+               MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), MGE_TX_TOKEN_Q1_7_DFLT);
+               MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), MGE_TX_TOKEN_Q1_7_DFLT);
+               MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), MGE_TX_ARB_Q1_7_DFLT);
+       }
+
+       /* Set default MTU */
+       MGE_WRITE(sc, MGE_MTU, MGE_MTU_DEFAULT);
+
+       /* Port configuration */
+       MGE_WRITE(sc, MGE_PORT_CONFIG,
+           PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
+           PORT_CONFIG_ARO_RXQ(0));
+       MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0);
+
+       /* Setup port configuration */
+       reg_val = mge_set_port_serial_control(sc->mge_media_status);
+       MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
+
+       /* Setup SDMA configuration */
+       MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP |
+           MGE_SDMA_TX_BYTE_SWAP |
+           MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
+           MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
+
+       MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
+
+       MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
+       MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
+           sc->rx_desc_start);
+
+       /* Reset descriptor indexes */
+       sc->tx_desc_curr = 0;
+       sc->rx_desc_curr = 0;
+       sc->tx_desc_used_idx = 0;
+
+       /* Enable RX descriptors */
+       for (i = 0; i < MGE_RX_DESC_NUM; i++) {
+               dw = &sc->mge_rx_desc[i];
+               dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
+               dw->mge_desc->buff_size = MCLBYTES;
+               bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       }
+
+       /* Enable RX queue */
+       MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
+
+       /* Enable port */
+       reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
+       reg_val |= PORT_SERIAL_ENABLE;
+       MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
+       /* Bounded wait (~0x100000 * 100us worst case) for link-up */
+       count = 0x100000;
+       for (;;) {
+               reg_val = MGE_READ(sc, MGE_PORT_STATUS);
+               if (reg_val & MGE_STATUS_LINKUP)
+                       break;
+               DELAY(100);
+               if (--count == 0) {
+                       if_printf(sc->ifp, "Timeout on link-up\n");
+                       break;
+               }
+       }
+
+       /* Setup interrupts coalescing */
+       mge_set_rxic(sc);
+       mge_set_txic(sc);
+
+       /* Enable interrupts */
+#ifdef DEVICE_POLLING
+       /*
+        * ...only if polling is not turned on. Disable interrupts explicitly
+        * if polling is enabled.
+        */
+       if (sc->ifp->if_capenable & IFCAP_POLLING)
+               mge_intrs_ctrl(sc, 0);
+       else
+#endif /* DEVICE_POLLING */
+       mge_intrs_ctrl(sc, 1);
+
+       /* Activate network interface */
+       sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+       sc->wd_timer = 0;
+
+       /* Schedule watchdog timeout */
+       callout_reset(&sc->wd_callout, hz, mge_tick, sc);
+}
+
+/*
+ * Error interrupt handler -- currently only logs that it fired.
+ */
+static void
+mge_intr_err(void *arg)
+{
+       struct mge_softc *sc = arg;
+       struct ifnet *ifp;
+
+       ifp = sc->ifp;
+       if_printf(ifp, "%s\n", __FUNCTION__);
+}
+
+/*
+ * Miscellaneous interrupt handler -- currently only logs that it fired.
+ */
+static void
+mge_intr_misc(void *arg)
+{
+       struct mge_softc *sc = arg;
+       struct ifnet *ifp;
+
+       ifp = sc->ifp;
+       if_printf(ifp, "%s\n", __FUNCTION__);
+}
+
+/*
+ * RX interrupt handler: under the receive lock (and bypassed entirely
+ * when DEVICE_POLLING is active), recover from RX resource errors, ack
+ * the RX-related cause bits, and drain completed frames without a
+ * budget (count == -1).
+ */
+static void
+mge_intr_rx(void *arg) {
+       struct mge_softc *sc = arg;
+       uint32_t int_cause, int_cause_ext;
+
+       MGE_RECEIVE_LOCK(sc);
+
+#ifdef DEVICE_POLLING
+       if (sc->ifp->if_capenable & IFCAP_POLLING) {
+               MGE_RECEIVE_UNLOCK(sc);
+               return;
+       }
+#endif
+
+       /* Get interrupt cause */
+       int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
+       int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
+
+       /* Check for resource error */
+       if (int_cause & MGE_PORT_INT_RXERRQ0) {
+               mge_reinit_rx(sc);
+               MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
+                   int_cause & ~MGE_PORT_INT_RXERRQ0);
+       }
+
+       int_cause &= MGE_PORT_INT_RXQ0;
+       int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
+
+       if (int_cause || int_cause_ext) {
+               /* Ack by writing back the complement of the cause bits */
+               MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
+               MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
+               mge_intr_rx_locked(sc, -1);
+       }
+
+       MGE_RECEIVE_UNLOCK(sc);
+}
+
+
+static void
+mge_intr_rx_locked(struct mge_softc *sc, int count)
+{
+       struct ifnet *ifp = sc->ifp;
+       uint32_t status;
+       uint16_t bufsize;
+       struct mge_desc_wrapper* dw;
+       struct mbuf *mb;
+
+       MGE_RECEIVE_LOCK_ASSERT(sc);
+
+       while(count != 0) {
+               dw = &sc->mge_rx_desc[sc->rx_desc_curr];
+               bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
+                   BUS_DMASYNC_POSTREAD);
+
+               /* Get status */
+               status = dw->mge_desc->cmd_status;
+               bufsize = dw->mge_desc->buff_size;
+               if ((status & MGE_DMA_OWNED) != 0)
+                       break;
+
+               sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
+               if (dw->mge_desc->byte_count &&
+                   ~(status & MGE_ERR_SUMMARY)) {
+
+                       bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
+                           BUS_DMASYNC_POSTREAD);
+
+                       mb = m_devget(dw->buffer->m_data,
+                           dw->mge_desc->byte_count - ETHER_CRC_LEN,
+                           0, ifp, NULL);
+
+                       mb->m_len -= 2;
+                       mb->m_pkthdr.len -= 2;
+                       mb->m_data += 2;
+
+                       mge_offload_process_frame(ifp, mb, status,
+                           bufsize);
+
+                       MGE_RECEIVE_UNLOCK(sc);
+                       (*ifp->if_input)(ifp, mb);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
_______________________________________________
svn-src-all@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "[EMAIL PROTECTED]"

Reply via email to