Hi,

the Allwinner A10 and A20 SoCs include an EMAC, but it's only used in
a few devices like the Cubieboard.  The A20, like the A31 and A80,
contains a GMAC, which is a Synopsys Designware controller.  It is
commonly used on the Banana Pi, Lamobo R1, Cubieboard 2 and others.
This is the complete diff I worked on to add support for the GMAC.
It's based on NetBSD's DWC driver plus some glue code.

Unfortunately this diff currently (probably) only works on the
Cubieboard 2 or similarly wired devices.  Not every board is wired
the same way, and we currently have no way to distinguish between
A20-based devices.  Once we are able to parse a device tree, it will
be much easier to set up the clock and TX delay per board.
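
To sketch what I mean, this is roughly how the clock setup could look
once we have per-board information.  It is only an illustration, not
part of the diff; the helper name and the example values are made up,
the register bits are the ones added to sxiccmu.c below, and the
(0x3 << 10) TX delay is the value from the commented-out Banana Pi
path:

/* Hypothetical helper, not part of this diff. */
void
sxiccmu_setup_gmac_clk(struct sxiccmu_softc *sc, int rgmii, int txdelay)
{
	if (rgmii) {
		/* internal RGMII TX clock plus TX delay, e.g. Banana Pi */
		SXICMS4(sc, CCMU_GMAC_CLK_REG,
		    CCMU_GMAC_CLK_PIT | CCMU_GMAC_CLK_TCS,
		    CCMU_GMAC_CLK_PIT | CCMU_GMAC_CLK_TCS_INT_RGMII |
		    (txdelay << 10));
	} else {
		/* external MII transmit clock, e.g. Cubieboard 2 */
		SXICMS4(sc, CCMU_GMAC_CLK_REG,
		    CCMU_GMAC_CLK_PIT | CCMU_GMAC_CLK_TCS,
		    CCMU_GMAC_CLK_TCS_MII);
	}
}

A Cubieboard 2 would then use (0, 0) and a Banana Pi (1, 3), instead
of switching between the two SXICMS4() calls by hand as sxiccmu.c does
in the diff below.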

Patrick

diff --git sys/arch/armv7/conf/GENERIC sys/arch/armv7/conf/GENERIC
index b55771d..6858d75 100644
--- sys/arch/armv7/conf/GENERIC
+++ sys/arch/armv7/conf/GENERIC
@@ -93,6 +93,7 @@ sxidog*               at sunxi?               # watchdog timer
 sxirtc*                at sunxi?               # Real Time Clock
 sxiuart*       at sunxi?               # onboard UARTs
 sxie*          at sunxi?
+awge*          at sunxi?
 ahci*          at sunxi?               # AHCI/SATA (shim)
 ehci*          at sunxi?               # EHCI (shim)
 usb*           at ehci?        #flags 0x1
diff --git sys/arch/armv7/conf/RAMDISK sys/arch/armv7/conf/RAMDISK
index 8c0c22b..d857d2b 100644
--- sys/arch/armv7/conf/RAMDISK
+++ sys/arch/armv7/conf/RAMDISK
@@ -91,6 +91,7 @@ sxidog*               at sunxi?               # watchdog timer
 sxirtc*                at sunxi?               # Real Time Clock
 sxiuart*       at sunxi?               # onboard UARTs
 sxie*          at sunxi?
+awge*          at sunxi?
 ahci*          at sunxi?               # AHCI/SATA (shim)
 ehci*          at sunxi?               # EHCI (shim)
 usb*           at ehci?        #flags 0x1
diff --git sys/arch/armv7/sunxi/files.sunxi sys/arch/armv7/sunxi/files.sunxi
index 80c4ba4..96f8832 100644
--- sys/arch/armv7/sunxi/files.sunxi
+++ sys/arch/armv7/sunxi/files.sunxi
@@ -48,3 +48,7 @@ file  arch/armv7/sunxi/sxiuart.c              sxiuart
 device sxie: ether, ifnet, mii, ifmedia
 attach sxie at sunxi
 file   arch/armv7/sunxi/sxie.c                 sxie
+
+# A20 GMAC
+attach awge at sunxi with sxige
+file   arch/armv7/sunxi/sxige.c                sxige
diff --git sys/arch/armv7/sunxi/sun7i.c sys/arch/armv7/sunxi/sun7i.c
index 53978f3..73ecd24 100644
--- sys/arch/armv7/sunxi/sun7i.c
+++ sys/arch/armv7/sunxi/sun7i.c
@@ -100,12 +100,11 @@ struct armv7_dev sxia20_devs[] = {
          .irq = { UART7_IRQ }
        },
 
-       /* EMAC */
-       { .name = "sxie",
+       /* GMAC */
+       { .name = "awge",
          .unit = 0,
-         .mem = {      { EMAC_ADDR, EMAC_SIZE },
-                       { SXIESRAM_ADDR, SXIESRAM_SIZE } },
-         .irq = { EMAC_IRQ}
+         .mem = { { GMAC_ADDR, GMAC_SIZE } },
+         .irq = { GMAC_IRQ }
        },
 
        /* SATA/AHCI */
diff --git sys/arch/armv7/sunxi/sunxi.c sys/arch/armv7/sunxi/sunxi.c
index dac0348..256169e 100644
--- sys/arch/armv7/sunxi/sunxi.c
+++ sys/arch/armv7/sunxi/sunxi.c
@@ -77,7 +77,7 @@ struct board_dev sun7i_devs[] = {
        { "sxiuart",    5 },
        { "sxiuart",    6 },
        { "sxiuart",    7 },
-       { "sxie",       0 },
+       { "awge",       0 },
        { "ahci",       0 },
        { "ehci",       0 },
        { "ehci",       1 },
diff --git sys/arch/armv7/sunxi/sxiccmu.c sys/arch/armv7/sunxi/sxiccmu.c
index ddfc415..16481a1 100644
--- sys/arch/armv7/sunxi/sxiccmu.c
+++ sys/arch/armv7/sunxi/sxiccmu.c
@@ -64,6 +64,24 @@
 #define        CCMU_AHB_GATING_EMAC            (1 << 17)
 #define        CCMU_AHB_GATING_SATA            (1 << 25)
 
+#define        CCMU_AHB_GATING1                0x64
+#define        CCMU_AHB_GATING_MALI400         (1 << 20)
+#define        CCMU_AHB_GATING_MP              (1 << 18)
+#define        CCMU_AHB_GATING_GMAC            (1 << 17)
+#define        CCMU_AHB_GATING_DE_FE1          (1 << 15)
+#define        CCMU_AHB_GATING_DE_FE0          (1 << 14)
+#define        CCMU_AHB_GATING_DE_BE1          (1 << 13)
+#define        CCMU_AHB_GATING_DE_BE0          (1 << 12)
+#define        CCMU_AHB_GATING_HDMI            (1 << 11)
+#define        CCMU_AHB_GATING_CSI1            (1 << 9)
+#define        CCMU_AHB_GATING_CSI0            (1 << 8)
+#define        CCMU_AHB_GATING_LCD1            (1 << 5)
+#define        CCMU_AHB_GATING_LCD0            (1 << 4)
+#define        CCMU_AHB_GATING_TVE1            (1 << 3)
+#define        CCMU_AHB_GATING_TVE0            (1 << 2)
+#define        CCMU_AHB_GATING_TVD             (1 << 1)
+#define        CCMU_AHB_GATING_VE              (1 << 0)
+
 #define        CCMU_APB_GATING0                0x68
 #define        CCMU_APB_GATING_PIO             (1 << 5)
 #define        CCMU_APB_GATING1                0x6c
@@ -89,6 +107,20 @@
 #define        CCMU_USB1_RESET                 (1 << 1)
 #define        CCMU_USB0_RESET                 (1 << 0)
 
+#define        CCMU_GMAC_CLK_REG               0x164
+#define        CCMU_GMAC_CLK_TXC_DIV           (0x3 << 8)
+#define        CCMU_GMAC_CLK_TXC_DIV_1000      0
+#define        CCMU_GMAC_CLK_TXC_DIV_100       1
+#define        CCMU_GMAC_CLK_TXC_DIV_10        2
+#define        CCMU_GMAC_CLK_RXDC              (0x7 << 5)
+#define        CCMU_GMAC_CLK_RXIE              (1 << 4)
+#define        CCMU_GMAC_CLK_TXIE              (1 << 3)
+#define        CCMU_GMAC_CLK_PIT               (1 << 2)
+#define        CCMU_GMAC_CLK_TCS               (0x3 << 0)
+#define        CCMU_GMAC_CLK_TCS_MII           0
+#define        CCMU_GMAC_CLK_TCS_EXT_125       1
+#define        CCMU_GMAC_CLK_TCS_INT_RGMII     2
+
 struct sxiccmu_softc {
        struct device           sc_dev;
        bus_space_tag_t         sc_iot;
@@ -167,6 +199,15 @@ sxiccmu_enablemodule(int mod)
        case CCMU_EMAC:
                SXISET4(sc, CCMU_AHB_GATING0, CCMU_AHB_GATING_EMAC);
                break;
+       case CCMU_GMAC:
+               SXISET4(sc, CCMU_AHB_GATING1, CCMU_AHB_GATING_GMAC);
+               // assume MII mode
+               SXICMS4(sc, CCMU_GMAC_CLK_REG, CCMU_GMAC_CLK_PIT|CCMU_GMAC_CLK_TCS,
+                   CCMU_GMAC_CLK_TCS_MII);
+               // assume RGMII mode - bpi
+               //SXICMS4(sc, CCMU_GMAC_CLK_REG, CCMU_GMAC_CLK_PIT|CCMU_GMAC_CLK_TCS,
+               //    CCMU_GMAC_CLK_PIT | CCMU_GMAC_CLK_TCS_INT_RGMII | (0x3 << 10));
+               break;
        case CCMU_DMA:
                SXISET4(sc, CCMU_AHB_GATING0, CCMU_AHB_GATING_DMA);
                break;
diff --git sys/arch/armv7/sunxi/sxiccmuvar.h sys/arch/armv7/sunxi/sxiccmuvar.h
index cf68e7c..15a07bf 100644
--- sys/arch/armv7/sunxi/sxiccmuvar.h
+++ sys/arch/armv7/sunxi/sxiccmuvar.h
@@ -25,6 +25,7 @@ enum CCMU_MODULES {
        CCMU_OHCI1,
        CCMU_AHCI,
        CCMU_EMAC,
+       CCMU_GMAC,
        CCMU_DMA,
        CCMU_UART0,
        CCMU_UART1,
diff --git sys/arch/armv7/sunxi/sxige.c sys/arch/armv7/sunxi/sxige.c
new file mode 100644
index 0000000..5da0e98
--- /dev/null
+++ sys/arch/armv7/sunxi/sxige.c
@@ -0,0 +1,120 @@
+/*     $OpenBSD$       */
+/*
+ * Copyright (c) 2016 Patrick Wildt <patr...@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+
+#include <machine/bus.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <armv7/armv7/armv7var.h>
+#include <armv7/sunxi/sunxireg.h>
+#include <armv7/sunxi/sxiccmuvar.h>
+#include <armv7/sunxi/sxipiovar.h>
+
+#include <dev/ic/dwc_gmac_var.h>
+#include <dev/ic/dwc_gmac_reg.h>
+
+#define SXIGE_PWRPIN   ((7<<5)+23)     /* port 7 (PH), pin 23: PHY power */
+
+void           sxige_attach(struct device *, struct device *, void *);
+static int     sxige_intr(void *);
+
+struct sxige_softc {
+       struct dwc_gmac_softc    sc_core;
+       void                    *sc_ih;
+};
+
+struct cfattach sxige_ca = {
+       sizeof(struct sxige_softc),
+       NULL,
+       sxige_attach,
+};
+
+void
+sxige_attach(struct device *parent, struct device *self, void *args)
+{
+       struct armv7_attach_args *aa = args;
+       struct sxige_softc *sxisc = (struct sxige_softc *)self;
+       struct dwc_gmac_softc *sc = &sxisc->sc_core;
+       int i = 0;
+
+       sc->sc_bst = aa->aa_iot;
+       sc->sc_dmat = aa->aa_dmat;
+
+       printf("\n");
+
+       if (bus_space_map(sc->sc_bst, aa->aa_dev->mem[0].addr,
+           aa->aa_dev->mem[0].size, 0, &sc->sc_bsh))
+               panic("%s: bus_space_map failed!", __func__);
+
+       /* configure port A pins 0-16 for the GMAC (mux function 5) */
+       for (i = 0; i <= 16; i++) {
+               sxipio_setcfg((0<<5)+i, 5);
+               sxipio_setdrv((0<<5)+i, 0);
+               //sxipio_setpull((0<<5)+i, 3); -- bpi
+               sxipio_setpull((0<<5)+i, 0);
+       }
+
+       /* enable clock */
+       sxiccmu_enablemodule(CCMU_GMAC);
+       delay(5000);
+
+       /* power up phy */
+       sxipio_setcfg(SXIGE_PWRPIN, SXIPIO_OUTPUT);
+       sxipio_setpin(SXIGE_PWRPIN);
+
+       sxisc->sc_ih = arm_intr_establish(aa->aa_dev->irq[0], IPL_NET,
+           sxige_intr, sc, sc->sc_dev.dv_xname);
+       if (sxisc->sc_ih == NULL) {
+               printf(": unable to establish interrupt\n");
+               goto clrpwr;
+       }
+
+       dwc_gmac_attach(sc, GMAC_MII_CLK_150_250M_DIV102);
+
+       return;
+clrpwr:
+       sxipio_clrpin(SXIGE_PWRPIN);
+       sxiccmu_disablemodule(CCMU_GMAC);
+       bus_space_unmap(sc->sc_bst, sc->sc_bsh, aa->aa_dev->mem[0].size);
+}
+
+static int
+sxige_intr(void *arg)
+{
+       struct dwc_gmac_softc *sc = arg;
+
+       return dwc_gmac_intr(sc);
+}
diff --git sys/arch/armv7/sunxi/sxipio.c sys/arch/armv7/sunxi/sxipio.c
index 9a49343..1f56aac 100644
--- sys/arch/armv7/sunxi/sxipio.c
+++ sys/arch/armv7/sunxi/sxipio.c
@@ -263,7 +263,7 @@ sxipio_setcfg(int pin, int mux)
        bit = pin - (port << 5);
        reg = SXIPIO_CFG(port, bit >> 3);
        off = (bit & 7) << 2;
-       cmask = 7 << off;
+       cmask = 0xf << off;
        mask = mux << off;
 
        s = splhigh();
@@ -273,6 +273,48 @@ sxipio_setcfg(int pin, int mux)
        splx(s);
 }
 
+void
+sxipio_setdrv(int pin, int drv)
+{
+       struct sxipio_softc *sc = sxipio_sc;
+       uint32_t bit, cmask, mask, off, reg, port;
+       int s;
+
+       port = pin >> 5;
+       bit = pin - (port << 5);
+       reg = SXIPIO_DRV(port, bit >> 4);
+       off = (bit & 15) << 1;
+       cmask = 0x3 << off;
+       mask = drv << off;
+
+       s = splhigh();
+
+       SXICMS4(sc, reg, cmask, mask);
+
+       splx(s);
+}
+
+void
+sxipio_setpull(int pin, int pull)
+{
+       struct sxipio_softc *sc = sxipio_sc;
+       uint32_t bit, cmask, mask, off, reg, port;
+       int s;
+
+       port = pin >> 5;
+       bit = pin - (port << 5);
+       reg = SXIPIO_PUL(port, bit >> 4);
+       off = (bit & 15) << 1;
+       cmask = 0x3 << off;
+       mask = pull << off;
+
+       s = splhigh();
+
+       SXICMS4(sc, reg, cmask, mask);
+
+       splx(s);
+}
+
 int
 sxipio_getpin(int pin)
 {
diff --git sys/arch/armv7/sunxi/sxipiovar.h sys/arch/armv7/sunxi/sxipiovar.h
index 275a876..ea4fcbb 100644
--- sys/arch/armv7/sunxi/sxipiovar.h
+++ sys/arch/armv7/sunxi/sxipiovar.h
@@ -33,6 +33,8 @@
 
 int sxipio_getcfg(int);
 void sxipio_setcfg(int, int);
+void sxipio_setdrv(int, int);
+void sxipio_setpull(int, int);
 int sxipio_getpin(int);
 void sxipio_setpin(int);
 void sxipio_clrpin(int);
diff --git sys/conf/files sys/conf/files
index 2f98fa1..8d6b1b8 100644
--- sys/conf/files
+++ sys/conf/files
@@ -317,6 +317,11 @@ file       dev/ic/i82596.c                 ie & (ie_pci | ie_eisa | ie_gsc)
 device gem: ether, ifnet, ifmedia, mii
 file   dev/ic/gem.c                    gem
 
+# Synopsys Designware GMAC core, as found on the Allwinner A20
+# and other SoCs
+device awge: ether, ifnet, ifmedia, mii
+file   dev/ic/dwc_gmac.c               awge
+
 device ti: ether, ifnet, ifmedia, mii, firmload
 file   dev/ic/ti.c                     ti
 
diff --git sys/dev/ic/dwc_gmac.c sys/dev/ic/dwc_gmac.c
new file mode 100644
index 0000000..a4a60a8
--- /dev/null
+++ sys/dev/ic/dwc_gmac.c
@@ -0,0 +1,1486 @@
+/* $OpenBSD$ */
+/* $NetBSD: dwc_gmac.c,v 1.34 2015/08/21 20:12:29 jmcneill Exp $ */
+
+/*-
+ * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry and Martin Husemann.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This driver supports the Synopsys Designware GMAC core, as found
+ * on Allwinner A20 cores and others.
+ *
+ * Real documentation does not seem to be available; the marketing product
+ * documents can be found here:
+ *
+ *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
+ */
+
+/*__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.34 2015/08/21 20:12:29 jmcneill Exp $");*/
+
+/* #define     DWC_GMAC_DEBUG  1 */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/evcount.h>
+#include <sys/socket.h>
+#include <sys/timeout.h>
+#include <sys/mbuf.h>
+#include <machine/intr.h>
+#include <machine/bus.h>
+
+#include "bpfilter.h"
+
+#include <net/if.h>
+#include <net/if_media.h>
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/ic/dwc_gmac_reg.h>
+#include <dev/ic/dwc_gmac_var.h>
+
+int dwc_gmac_ifmedia_upd(struct ifnet *);
+void dwc_gmac_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+
+int dwc_gmac_miibus_read_reg(struct device *, int, int);
+void dwc_gmac_miibus_write_reg(struct device *, int, int, int);
+void dwc_gmac_miibus_statchg(struct device *);
+
+int dwc_gmac_reset(struct dwc_gmac_softc *sc);
+void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
+                        uint8_t enaddr[ETHER_ADDR_LEN]);
+int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
+void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
+int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
+void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
+void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
+int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
+void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
+void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
+void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
+int dwc_gmac_init(struct ifnet *ifp);
+void dwc_gmac_stop(struct ifnet *ifp, int disable);
+void dwc_gmac_start(struct ifnet *ifp);
+int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
+int dwc_gmac_ioctl(struct ifnet *, u_long, caddr_t);
+void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
+void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
+void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
+int dwc_gmac_ifflags_cb(struct arpcom *);
+static uint32_t        bitrev32(uint32_t x);
+
+#define        TX_DESC_OFFSET(N)       ((AWGE_RX_RING_COUNT+(N)) \
+                                   *sizeof(struct dwc_gmac_dev_dmadesc))
+#define        TX_NEXT(N)              (((N)+1) & (AWGE_TX_RING_COUNT-1))
+
+#define RX_DESC_OFFSET(N)      ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
+#define        RX_NEXT(N)              (((N)+1) & (AWGE_RX_RING_COUNT-1))
+
+
+
+#define        GMAC_DEF_DMA_INT_MASK   (GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
+                               GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
+                               GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)
+
+#define        GMAC_DMA_INT_ERRORS     (GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
+                               GMAC_DMA_INT_FBE|       \
+                               GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
+                               GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
+                               GMAC_DMA_INT_TJE)
+
+#define        AWIN_DEF_MAC_INTRMASK   \
+       (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |       \
+       AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
+
+
+#ifdef DWC_GMAC_DEBUG
+void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
+void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
+void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
+void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
+void dwc_dump_status(struct dwc_gmac_softc *sc);
+void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
+#endif
+
+struct cfdriver awge_cd = {
+       NULL, "awge", DV_IFNET
+};
+
+void
+dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
+{
+       uint8_t enaddr[ETHER_ADDR_LEN];
+       struct mii_data * const mii = &sc->sc_mii;
+       struct ifnet * const ifp = &sc->sc_ac.ac_if;
+       uint32_t maclo, machi;
+       int s;
+
+       mtx_init(&sc->sc_mdio_lock, IPL_NET);
+       sc->sc_mii_clk = mii_clk & 7;
+
+       /*
+        * If we did not get an externally configured address,
+        * try to read one from the current filter setup,
+        * before resetting the chip.
+        */
+       maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_MAC_ADDR0LO);
+       machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_MAC_ADDR0HI);
+
+       if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
+               ether_fakeaddr(&sc->sc_ac.ac_if);
+               memcpy(enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
+       } else {
+               enaddr[0] = maclo & 0x0ff;
+               enaddr[1] = (maclo >> 8) & 0x0ff;
+               enaddr[2] = (maclo >> 16) & 0x0ff;
+               enaddr[3] = (maclo >> 24) & 0x0ff;
+               enaddr[4] = machi & 0x0ff;
+               enaddr[5] = (machi >> 8) & 0x0ff;
+       }
+
+       /*
+        * Init chip and do initial setup
+        */
+       if (dwc_gmac_reset(sc) != 0)
+               return; /* not much to cleanup, haven't attached yet */
+       dwc_gmac_write_hwaddr(sc, enaddr);
+       printf("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
+           ether_sprintf(enaddr));
+       memcpy(sc->sc_ac.ac_enaddr, enaddr, ETHER_ADDR_LEN);
+
+       /*
+        * Allocate Tx and Rx rings
+        */
+       if (dwc_gmac_alloc_dma_rings(sc) != 0) {
+               printf("%s: could not allocate DMA rings\n",
+                   sc->sc_dev.dv_xname);
+               goto fail;
+       }
+               
+       if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
+               printf("%s: could not allocate Tx ring\n",
+                   sc->sc_dev.dv_xname);
+               goto fail;
+       }
+
+       mtx_init(&sc->sc_rxq.r_mtx, IPL_NET);
+       if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
+               printf("%s: could not allocate Rx ring\n",
+                   sc->sc_dev.dv_xname);
+               goto fail;
+       }
+
+       /*
+        * Prepare interface data
+        */
+       ifp->if_softc = sc;
+       strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = dwc_gmac_ioctl;
+       ifp->if_start = dwc_gmac_start;
+       ifp->if_capabilities = IFCAP_VLAN_MTU;
+       IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
+       IFQ_SET_READY(&ifp->if_snd);
+
+       /*
+        * Attach MII subdevices
+        */
+       mii->mii_ifp = ifp;
+       mii->mii_readreg = dwc_gmac_miibus_read_reg;
+       mii->mii_writereg = dwc_gmac_miibus_write_reg;
+       mii->mii_statchg = dwc_gmac_miibus_statchg;
+
+       ifmedia_init(&mii->mii_media, 0, dwc_gmac_ifmedia_upd,
+           dwc_gmac_ifmedia_sts);
+       mii_attach((void *)sc, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
+           MIIF_DOPAUSE);
+
+       if (LIST_EMPTY(&mii->mii_phys)) {
+               printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
+               ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
+               ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
+       } else {
+               ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
+       }
+
+       /*
+        * Ready, attach interface
+        */
+       if_attach(ifp);
+       ether_ifattach(ifp);
+
+       /*
+        * Enable interrupts
+        */
+       s = splnet();
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
+           AWIN_DEF_MAC_INTRMASK);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
+           GMAC_DEF_DMA_INT_MASK);
+       splx(s);
+
+       return;
+
+fail:
+       dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
+       dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
+}
+
+int
+dwc_gmac_ifmedia_upd(struct ifnet *ifp)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       struct mii_data *mii = &sc->sc_mii;
+       int err;
+       if (mii->mii_instance) {
+               struct mii_softc *miisc;
+
+               LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
+                   mii_phy_reset(miisc);
+       }
+       err = mii_mediachg(mii);
+       return (err);
+}
+
+void
+dwc_gmac_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       struct mii_data *mii = &sc->sc_mii;
+
+       mii_pollstat(mii);
+
+       ifmr->ifm_active = mii->mii_media_active;
+       ifmr->ifm_status = mii->mii_media_status;
+}
+
+int
+dwc_gmac_reset(struct dwc_gmac_softc *sc)
+{
+       size_t cnt;
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
+       for (cnt = 0; cnt < 3000; cnt++) {
+               if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
+                   & GMAC_BUSMODE_RESET) == 0)
+                       return 0;
+               delay(10);
+       }
+
+       printf("%s: reset timed out\n", sc->sc_dev.dv_xname);
+       return EIO;
+}
+
+void
+dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
+    uint8_t enaddr[ETHER_ADDR_LEN])
+{
+       uint32_t lo, hi;
+
+       lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
+           | (enaddr[3] << 24);
+       hi = enaddr[4] | (enaddr[5] << 8);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
+}
+
+int
+dwc_gmac_miibus_read_reg(struct device *self, int phy, int reg)
+{
+       struct dwc_gmac_softc * const sc = (struct dwc_gmac_softc *)self;
+       uint16_t mii;
+       size_t cnt;
+       int rv = 0;
+
+       mii = ((phy & GMAC_MII_PHY_MASK) << GMAC_MII_PHY_SHIFT)
+           | ((reg & GMAC_MII_REG_MASK) << GMAC_MII_REG_SHIFT)
+           | ((sc->sc_mii_clk & GMAC_MII_CLKMASK_MASK)
+             << GMAC_MII_CLKMASK_SHIFT)
+           | GMAC_MII_BUSY;
+
+       mtx_enter(&sc->sc_mdio_lock);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
+
+       for (cnt = 0; cnt < 1000; cnt++) {
+               if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+                   AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
+                       rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+                           AWIN_GMAC_MAC_MIIDATA);
+                       break;
+               }
+               delay(10);
+       }
+
+       mtx_leave(&sc->sc_mdio_lock);
+
+       return rv;
+}
+
+void
+dwc_gmac_miibus_write_reg(struct device *self, int phy, int reg, int val)
+{
+       struct dwc_gmac_softc * const sc = (struct dwc_gmac_softc *)self;
+       uint16_t mii;
+       size_t cnt;
+
+       mii = ((phy & GMAC_MII_PHY_MASK) << GMAC_MII_PHY_SHIFT)
+           | ((reg & GMAC_MII_REG_MASK) << GMAC_MII_REG_SHIFT)
+           | ((sc->sc_mii_clk & GMAC_MII_CLKMASK_MASK)
+             << GMAC_MII_CLKMASK_SHIFT)
+           | GMAC_MII_BUSY | GMAC_MII_WRITE;
+
+       mtx_enter(&sc->sc_mdio_lock);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
+
+       for (cnt = 0; cnt < 1000; cnt++) {
+               if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+                   AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
+                       break;
+               delay(10);
+       }
+       
+       mtx_leave(&sc->sc_mdio_lock);
+}
+
+int
+dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
+       struct dwc_gmac_rx_ring *ring)
+{
+       struct dwc_gmac_rx_data *data;
+       bus_addr_t physaddr;
+       const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
+       int error, i, next;
+
+       ring->r_cur = ring->r_next = 0;
+       memset(ring->r_desc, 0, descsize);
+
+       /*
+        * Pre-allocate Rx buffers and populate Rx ring.
+        */
+       for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
+               struct dwc_gmac_dev_dmadesc *desc;
+
+               data = &sc->sc_rxq.r_data[i];
+
+               MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
+               if (data->rd_m == NULL) {
+                       printf("%s: could not allocate rx mbuf #%d\n",
+                           sc->sc_dev.dv_xname, i);
+                       error = ENOMEM;
+                       goto fail;
+               }
+               error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
+                   MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
+               if (error != 0) {
+                       printf("%s: could not create DMA map\n",
+                           sc->sc_dev.dv_xname);
+                       data->rd_map = NULL;
+                       goto fail;
+               }
+               MCLGET(data->rd_m, M_DONTWAIT);
+               if (!(data->rd_m->m_flags & M_EXT)) {
+                       printf("%s: could not allocate mbuf cluster #%d\n",
+                           sc->sc_dev.dv_xname, i);
+                       error = ENOMEM;
+                       goto fail;
+               }
+
+               error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
+                   mtod(data->rd_m, void *), MCLBYTES, NULL,
+                   BUS_DMA_READ | BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("%s: could not load rx buf DMA map #%d",
+                           sc->sc_dev.dv_xname, i);
+                       goto fail;
+               }
+               physaddr = data->rd_map->dm_segs[0].ds_addr;
+
+               desc = &sc->sc_rxq.r_desc[i];
+               desc->ddesc_data = htole32(physaddr);
+               next = RX_NEXT(i);
+               desc->ddesc_next = htole32(ring->r_physaddr 
+                   + next * sizeof(*desc));
+               desc->ddesc_cntl = htole32(
+                   ((AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
+                   << DDESC_CNTL_SIZE1SHIFT) | DDESC_CNTL_RXCHAIN);
+               desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
+       }
+
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
+           AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
+           BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
+           ring->r_physaddr);
+
+       return 0;
+
+fail:
+       dwc_gmac_free_rx_ring(sc, ring);
+       return error;
+}
+
+void
+dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
+       struct dwc_gmac_rx_ring *ring)
+{
+       struct dwc_gmac_dev_dmadesc *desc;
+       int i;
+
+       for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
+               desc = &sc->sc_rxq.r_desc[i];
+               desc->ddesc_cntl = htole32(
+                   ((AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
+                   << DDESC_CNTL_SIZE1SHIFT) | DDESC_CNTL_RXCHAIN);
+               desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
+       }
+
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
+           AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
+           BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+       ring->r_cur = ring->r_next = 0;
+       /* reset DMA address to start of ring */
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
+           sc->sc_rxq.r_physaddr);
+}
+
+int
+dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
+{
+       const size_t descsize = AWGE_TOTAL_RING_COUNT *
+               sizeof(struct dwc_gmac_dev_dmadesc);
+       int error, nsegs;
+       caddr_t rings;
+
+       error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
+           BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
+       if (error != 0) {
+               printf("%s: could not create desc DMA map\n", sc->sc_dev.dv_xname);
+               sc->sc_dma_ring_map = NULL;
+               goto fail;
+       }
+
+       error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
+           &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
+       if (error != 0) {
+               printf("%s: could not allocate DMA memory\n", sc->sc_dev.dv_xname);
+               goto fail;
+       }
+
+       error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
+           descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
+       if (error != 0) {
+               printf("%s: could not map DMA memory\n", sc->sc_dev.dv_xname);
+               goto fail;
+       }
+
+       error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
+           descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
+       if (error != 0) {
+               printf("%s: could not load desc DMA map\n", sc->sc_dev.dv_xname);
+               goto fail;
+       }
+
+       /* give first AWGE_RX_RING_COUNT to the RX side */
+       sc->sc_rxq.r_desc = (struct dwc_gmac_dev_dmadesc *)rings;
+       sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
+
+       /* and next rings to the TX side */
+       sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
+       sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr + 
+           AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
+
+       return 0;
+
+fail:
+       dwc_gmac_free_dma_rings(sc);
+       return error;
+}
+
+void
+dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
+{
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
+           sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+       bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
+       bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_rxq.r_desc,
+           AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
+       bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
+}
+
+void
+dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
+{
+       struct dwc_gmac_rx_data *data;
+       int i;
+
+       if (ring->r_desc == NULL)
+               return;
+
+
+       for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
+               data = &ring->r_data[i];
+
+               if (data->rd_map != NULL) {
+                       bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
+                           AWGE_RX_RING_COUNT
+                               *sizeof(struct dwc_gmac_dev_dmadesc),
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(sc->sc_dmat, data->rd_map);
+                       bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
+               }
+               if (data->rd_m != NULL)
+                       m_freem(data->rd_m);
+       }
+}
+
+int
+dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
+       struct dwc_gmac_tx_ring *ring)
+{
+       int i, error = 0;
+
+       ring->t_queued = 0;
+       ring->t_cur = ring->t_next = 0;
+
+       memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+           TX_DESC_OFFSET(0),
+           AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
+           BUS_DMASYNC_POSTWRITE);
+
+       for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
+               error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
+                   AWGE_TX_RING_COUNT, MCLBYTES, 0,
+                   BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
+                   &ring->t_data[i].td_map);
+               if (error != 0) {
+                       printf("%s: could not create TX DMA map #%d\n",
+                           sc->sc_dev.dv_xname, i);
+                       ring->t_data[i].td_map = NULL;
+                       goto fail;
+               }
+               ring->t_desc[i].ddesc_next = htole32(
+                   ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
+                   *TX_NEXT(i));
+       }
+
+       return 0;
+
+fail:
+       dwc_gmac_free_tx_ring(sc, ring);
+       return error;
+}
+
+void
+dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
+{
+       /* 'end' is pointing one descriptor beyond the last we want to sync */
+       if (end > start) {
+               bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+                   TX_DESC_OFFSET(start),
+                   TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
+                   ops);
+               return;
+       }
+       /* sync from 'start' to end of ring */
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+           TX_DESC_OFFSET(start),
+           TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
+           ops);
+       /* sync from start of ring to 'end' */
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+           TX_DESC_OFFSET(0),
+           TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
+           ops);
+}
+
+void
+dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
+       struct dwc_gmac_tx_ring *ring)
+{
+       int i;
+
+       for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
+               struct dwc_gmac_tx_data *data = &ring->t_data[i];
+
+               if (data->td_m != NULL) {
+                       bus_dmamap_sync(sc->sc_dmat, data->td_active,
+                           0, data->td_active->dm_mapsize,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(sc->sc_dmat, data->td_active);
+                       m_freem(data->td_m);
+                       data->td_m = NULL;
+               }
+       }
+
+       bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+           TX_DESC_OFFSET(0),
+           AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
+           BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
+           sc->sc_txq.t_physaddr);
+
+       ring->t_queued = 0;
+       ring->t_cur = ring->t_next = 0;
+}
+
+void
+dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
+       struct dwc_gmac_tx_ring *ring)
+{
+       int i;
+
+       /* unload the maps */
+       for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
+               struct dwc_gmac_tx_data *data = &ring->t_data[i];
+
+               if (data->td_m != NULL) {
+                       bus_dmamap_sync(sc->sc_dmat, data->td_active,
+                           0, data->td_map->dm_mapsize,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(sc->sc_dmat, data->td_active);
+                       m_freem(data->td_m);
+                       data->td_m = NULL;
+               }
+       }
+
+       /* and actually free them */
+       for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
+               struct dwc_gmac_tx_data *data = &ring->t_data[i];
+
+               bus_dmamap_destroy(sc->sc_dmat, data->td_map);
+       }
+}
+
+void
+dwc_gmac_miibus_statchg(struct device *dev)
+{
+       struct dwc_gmac_softc * const sc = (struct dwc_gmac_softc *)dev;
+       struct mii_data * const mii = &sc->sc_mii;
+       uint32_t conf, flow;
+
+       /*
+        * Set MII or GMII interface based on the speed
+        * negotiated by the PHY.                                           
+        */
+       conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
+       conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
+           |AWIN_GMAC_MAC_CONF_FULLDPLX);
+       conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
+           | AWIN_GMAC_MAC_CONF_DISABLERXOWN
+           | AWIN_GMAC_MAC_CONF_DISABLEJABBER
+           | AWIN_GMAC_MAC_CONF_ACS
+           | AWIN_GMAC_MAC_CONF_RXENABLE
+           | AWIN_GMAC_MAC_CONF_TXENABLE;
+       switch (IFM_SUBTYPE(mii->mii_media_active)) {
+       case IFM_10_T:
+               conf |= AWIN_GMAC_MAC_CONF_MIISEL;
+               break;
+       case IFM_100_TX:
+               conf |= AWIN_GMAC_MAC_CONF_FES100 |
+                       AWIN_GMAC_MAC_CONF_MIISEL;
+               break;
+       case IFM_1000_T:
+               break;
+       }
+
+       flow = 0;
+       if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
+               conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
+               flow |= ((0x200 & AWIN_GMAC_MAC_FLOWCTRL_PAUSE_MASK)
+                   << AWIN_GMAC_MAC_FLOWCTRL_PAUSE_SHIFT);
+       }
+       if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
+               flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
+       }
+       if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
+               flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
+       }
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_MAC_FLOWCTRL, flow);
+
+#ifdef DWC_GMAC_DEBUG
+       printf("%s: setting MAC conf register: %08x\n",
+           sc->sc_dev.dv_xname, conf);
+#endif
+
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_MAC_CONF, conf);
+}
+
+int
+dwc_gmac_init(struct ifnet *ifp)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       uint32_t ffilt;
+
+       if (ifp->if_flags & IFF_RUNNING)
+               return 0;
+
+       dwc_gmac_stop(ifp, 0);
+
+       /*
+        * Configure DMA burst/transfer mode and RX/TX priorities.
+        * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
+        */
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
+           GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
+           2 << GMAC_BUSMODE_RPBL_SHIFT |
+           2 << GMAC_BUSMODE_PBL_SHIFT);
+
+       /*
+        * Set up address filter
+        */
+       ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
+       if (ifp->if_flags & IFF_PROMISC) {
+               ffilt |= AWIN_GMAC_MAC_FFILT_PR;
+       } else {
+               ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
+       }
+       if (ifp->if_flags & IFF_BROADCAST) {
+               ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
+       } else {
+               ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
+       }
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
+
+       /*
+        * Set up multicast filter
+        */
+       dwc_gmac_setmulti(sc);
+
+       /*
+        * Set up dma pointer for RX and TX ring
+        */
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
+           sc->sc_rxq.r_physaddr);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
+           sc->sc_txq.t_physaddr);
+
+       /*
+        * Start RX/TX part
+        */
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
+           GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);
+
+       ifq_clr_oactive(&ifp->if_snd);
+       ifp->if_flags |= IFF_RUNNING;
+
+       return 0;
+}
+
+void
+dwc_gmac_start(struct ifnet *ifp)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       int old = sc->sc_txq.t_queued;
+       int start = sc->sc_txq.t_cur;
+       struct mbuf *m_head = NULL;
+
+       if (ifq_is_oactive(&ifp->if_snd) || !(ifp->if_flags & IFF_RUNNING))
+               return;
+
+       for (;;) {
+               m_head = ifq_deq_begin(&ifp->if_snd);
+               if (m_head == NULL)
+                       break;
+               if (dwc_gmac_queue(sc, m_head) != 0) {
+                       ifq_deq_rollback(&ifp->if_snd, m_head);
+                       ifq_set_oactive(&ifp->if_snd);
+                       break;
+               }
+
+               ifq_deq_commit(&ifp->if_snd, m_head);
+
+#if NBPFILTER > 0
+               if (ifp->if_bpf)
+                       bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
+#endif
+
+               if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
+                       ifq_set_oactive(&ifp->if_snd);
+                       break;
+               }
+       }
+
+       if (sc->sc_txq.t_queued != old) {
+               /* packets have been queued, kick it off */
+               dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
+                   BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+               bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+                   AWIN_GMAC_DMA_TXPOLL, ~0U);
+#ifdef DWC_GMAC_DEBUG
+               dwc_dump_status(sc);
+#endif
+       }
+}
+
+void
+dwc_gmac_stop(struct ifnet *ifp, int disable)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+
+       ifp->if_flags &= ~IFF_RUNNING;
+       ifq_clr_oactive(&ifp->if_snd);
+
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_DMA_OPMODE,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+               AWIN_GMAC_DMA_OPMODE)
+               & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_DMA_OPMODE,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+               AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
+
+       mii_down(&sc->sc_mii);
+       dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
+       dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
+}
+
+/*
+ * Add m0 to the TX ring
+ */
+int
+dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
+{
+       struct dwc_gmac_dev_dmadesc *desc = NULL;
+       struct dwc_gmac_tx_data *data = NULL;
+       bus_dmamap_t map;
+       uint32_t flags, len, status;
+       int error, i, first;
+
+#ifdef DWC_GMAC_DEBUG
+       printf("%s: dwc_gmac_queue: adding mbuf chain %p\n",
+           sc->sc_dev.dv_xname, m0);
+#endif
+
+       first = sc->sc_txq.t_cur;
+       map = sc->sc_txq.t_data[first].td_map;
+
+       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
+           BUS_DMA_WRITE|BUS_DMA_NOWAIT);
+       if (error != 0) {
+               printf("%s: could not map mbuf (len: %d, error %d)\n",
+                   sc->sc_dev.dv_xname, m0->m_pkthdr.len, error);
+               return error;
+       }
+
+       if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
+               bus_dmamap_unload(sc->sc_dmat, map);
+               return ENOBUFS;
+       }
+
+       flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
+       status = 0;
+       for (i = 0; i < map->dm_nsegs; i++) {
+               data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
+               desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
+
+               desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
+               len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
+                   << DDESC_CNTL_SIZE1SHIFT;
+
+#ifdef DWC_GMAC_DEBUG
+               printf("%s: enqueuing desc #%d data %08lx "
+                   "len %lu (flags: %08x, len: %08x)\n",
+                   sc->sc_dev.dv_xname, sc->sc_txq.t_cur,
+                   (unsigned long)map->dm_segs[i].ds_addr,
+                   (unsigned long)map->dm_segs[i].ds_len,
+                   flags, len);
+#endif
+
+               desc->ddesc_cntl = htole32(len|flags);
+               flags &= ~DDESC_CNTL_TXFIRST;
+
+               /*
+                * Defer passing ownership of the first descriptor
+                * until we are done.
+                */
+               desc->ddesc_status = htole32(status);
+               status |= DDESC_STATUS_OWNEDBYDEV;
+
+               sc->sc_txq.t_queued++;
+               sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
+       }
+
+       desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);
+
+       data->td_m = m0;
+       data->td_active = map;
+
+       bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
+           BUS_DMASYNC_PREWRITE);
+
+       /* Pass first to device */
+       sc->sc_txq.t_desc[first].ddesc_status =
+           htole32(DDESC_STATUS_OWNEDBYDEV);
+
+       return 0;
+}
+
+/*
+ * If the interface is up and running, only modify the receive
+ * filter when setting promiscuous or debug mode.  Otherwise fall
+ * through to ether_ioctl, which will reset the chip.
+ */
+int
+dwc_gmac_ifflags_cb(struct arpcom *ac)
+{
+       struct ifnet *ifp = &ac->ac_if;
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       int change = ifp->if_flags ^ sc->sc_if_flags;
+
+       if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
+               return ENETRESET;
+       if ((change & IFF_PROMISC) != 0)
+               dwc_gmac_setmulti(sc);
+       return 0;
+}
+
+int
+dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+       struct dwc_gmac_softc *sc = ifp->if_softc;
+       struct ifreq *ifr = (struct ifreq *)data;
+       int s, error = 0;
+
+       s = splnet();
+
+       switch(cmd) {
+       case SIOCSIFADDR:
+               ifp->if_flags |= IFF_UP;
+               if (!(ifp->if_flags & IFF_RUNNING))
+                       dwc_gmac_init(ifp);
+               break;
+
+       case SIOCSIFFLAGS:
+               if (ifp->if_flags & IFF_UP) {
+                       if (ifp->if_flags & IFF_RUNNING)
+                               error = ENETRESET;
+                       else
+                               dwc_gmac_init(ifp);
+               } else {
+                       if (ifp->if_flags & IFF_RUNNING)
+                               dwc_gmac_stop(ifp, 0);
+               }
+               break;
+
+       case SIOCGIFMEDIA:
+       case SIOCSIFMEDIA:
+               error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
+               break;
+
+       default:
+               error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
+       }
+
+       if (error == ENETRESET) {
+               if (ifp->if_flags & IFF_RUNNING)
+                       dwc_gmac_ifflags_cb(&sc->sc_ac);
+               error = 0;
+       }
+
+       sc->sc_if_flags = sc->sc_ac.ac_if.if_flags;
+       splx(s);
+       return error;
+}
+
+void
+dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
+{
+       struct ifnet *ifp = &sc->sc_ac.ac_if;
+       struct dwc_gmac_tx_data *data;
+       struct dwc_gmac_dev_dmadesc *desc;
+       uint32_t status;
+       int i, nsegs;
+
+       for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
+#ifdef DWC_GMAC_DEBUG
+               printf("%s: dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
+                   sc->sc_dev.dv_xname, i, sc->sc_txq.t_queued);
+#endif
+
+               /*
+                * i+1 does not need to be a valid descriptor,
+                * this is just a special notion to just sync
+                * a single tx descriptor (i)
+                */
+               dwc_gmac_txdesc_sync(sc, i, i+1,
+                   BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+               desc = &sc->sc_txq.t_desc[i];
+               status = le32toh(desc->ddesc_status);
+               if (status & DDESC_STATUS_OWNEDBYDEV)
+                       break;
+
+               data = &sc->sc_txq.t_data[i];
+               if (data->td_m == NULL)
+                       continue;
+
+               ifp->if_opackets++;
+               nsegs = data->td_active->dm_nsegs;
+               bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
+                   data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+               bus_dmamap_unload(sc->sc_dmat, data->td_active);
+
+#ifdef DWC_GMAC_DEBUG
+               printf("%s: dwc_gmac_tx_intr: done with packet at desc #%d, "
+                   "freeing mbuf %p\n", sc->sc_dev.dv_xname, i, data->td_m);
+#endif
+
+               m_freem(data->td_m);
+               data->td_m = NULL;
+
+               sc->sc_txq.t_queued -= nsegs;
+       }
+
+       sc->sc_txq.t_next = i;
+
+       if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
+               ifq_clr_oactive(&ifp->if_snd);
+       }
+}
+
+void
+dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
+{
+       struct ifnet *ifp = &sc->sc_ac.ac_if;
+       struct dwc_gmac_dev_dmadesc *desc;
+       struct dwc_gmac_rx_data *data;
+       bus_addr_t physaddr;
+       uint32_t status;
+       struct mbuf *m, *mnew;
+       int i, len, error;
+       struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+
+       for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
+               bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+                   RX_DESC_OFFSET(i), sizeof(*desc),
+                   BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+               desc = &sc->sc_rxq.r_desc[i];
+               data = &sc->sc_rxq.r_data[i];
+
+               status = le32toh(desc->ddesc_status);
+               if (status & DDESC_STATUS_OWNEDBYDEV)
+                       break;
+
+               if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
+#ifdef DWC_GMAC_DEBUG
+                       printf("%s: RX error: descriptor status %08x, skipping\n",
+                           sc->sc_dev.dv_xname, status);
+#endif
+                       ifp->if_ierrors++;
+                       goto skip;
+               }
+
+               len = (status >> DDESC_STATUS_FRMLENSHIFT)
+                   & DDESC_STATUS_FRMLENMSK;
+
+#ifdef DWC_GMAC_DEBUG
+               printf("%s: rx int: device is done with descriptor #%d, len: %d\n",
+                   sc->sc_dev.dv_xname, i, len);
+#endif
+
+               /*
+                * Try to get a new mbuf before passing this one
+                * up, if that fails, drop the packet and reuse
+                * the existing one.
+                */
+               MGETHDR(mnew, M_DONTWAIT, MT_DATA);
+               if (mnew == NULL) {
+                       ifp->if_ierrors++;
+                       goto skip;
+               }
+               MCLGET(mnew, M_DONTWAIT);
+               if ((mnew->m_flags & M_EXT) == 0) {
+                       m_freem(mnew);
+                       ifp->if_ierrors++;
+                       goto skip;
+               }
+
+               /* unload old DMA map */
+               bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
+                   data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+               bus_dmamap_unload(sc->sc_dmat, data->rd_map);
+
+               /* and reload with new mbuf */
+               error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
+                   mtod(mnew, void*), MCLBYTES, NULL,
+                   BUS_DMA_READ | BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       m_freem(mnew);
+                       /* try to reload old mbuf */
+                       error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
+                           mtod(data->rd_m, void*), MCLBYTES, NULL,
+                           BUS_DMA_READ | BUS_DMA_NOWAIT);
+                       if (error != 0) {
+                               panic("%s: could not load old rx mbuf",
+                                   sc->sc_dev.dv_xname);
+                       }
+                       ifp->if_ierrors++;
+                       goto skip;
+               }
+               physaddr = data->rd_map->dm_segs[0].ds_addr;
+
+               /*
+                * New mbuf loaded, update RX ring and continue
+                */
+               m = data->rd_m;
+               data->rd_m = mnew;
+               desc->ddesc_data = htole32(physaddr);
+
+               /* finalize mbuf */
+#ifdef __STRICT_ALIGNMENT
+               {
+                       struct mbuf *m0;
+                       m0 = m_devget(mtod(m, caddr_t), len, ETHER_ALIGN);
+                       m_freem(m);
+                       if (m0 == NULL) {
+                               ifp->if_ierrors++;
+                               goto skip;
+                       }
+                       m = m0;
+               }
+#else
+               m->m_pkthdr.len = m->m_len = len;
+#endif
+
+               ml_enqueue(&ml, m);
+
+skip:
+               bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
+                   data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+               desc->ddesc_cntl = htole32(
+                   ((AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
+                   << DDESC_CNTL_SIZE1SHIFT) | DDESC_CNTL_RXCHAIN);
+               desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
+               bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
+                   RX_DESC_OFFSET(i), sizeof(*desc),
+                   BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+       }
+
+       /* update RX pointer */
+       sc->sc_rxq.r_cur = i;
+
+       if_input(ifp, &ml);
+}
+
+/*
+ * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
+ */
+static uint32_t
+bitrev32(uint32_t x)
+{
+       x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
+       x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
+       x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
+       x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
+
+       return (x >> 16) | (x << 16);
+}
+
+void
+dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
+{
+       struct ifnet * const ifp = &sc->sc_ac.ac_if;
+       struct ether_multi *enm;
+       struct ether_multistep step;
+       uint32_t hashes[2] = { 0, 0 };
+       uint32_t ffilt, h;
+       int mcnt, s;
+
+       s = splnet();
+
+       ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
+       
+       if (ifp->if_flags & IFF_PROMISC) {
+               ffilt |= AWIN_GMAC_MAC_FFILT_PR;
+               goto special_filter;
+       }
+
+       ifp->if_flags &= ~IFF_ALLMULTI;
+       ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);
+
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
+
+       ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
+       mcnt = 0;
+       while (enm != NULL) {
+               if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
+                   ETHER_ADDR_LEN) != 0) {
+                       ffilt |= AWIN_GMAC_MAC_FFILT_PM;
+                       ifp->if_flags |= IFF_ALLMULTI;
+                       goto special_filter;
+               }
+
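+               /*
+                * The hash index is the top 6 bits of the bit-reversed,
+                * complemented CRC-32 of the address: bit 5 selects the
+                * HTLOW/HTHIGH word, bits 4..0 the bit within it.
+                */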
+               h = bitrev32(
+                       ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
+                   ) >> 26;
+               hashes[h >> 5] |= (1 << (h & 0x1f));
+
+               mcnt++;
+               ETHER_NEXT_MULTI(step, enm);
+       }
+
+       if (mcnt)
+               ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
+       else
+               ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
+
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
+           hashes[0]);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
+           hashes[1]);
+       sc->sc_if_flags = sc->sc_ac.ac_if.if_flags;
+
+       splx(s);
+
+#ifdef DWC_GMAC_DEBUG
+       dwc_gmac_dump_ffilt(sc, ffilt);
+#endif
+       return;
+
+special_filter:
+#ifdef DWC_GMAC_DEBUG
+       dwc_gmac_dump_ffilt(sc, ffilt);
+#endif
+       /* no MAC hashes, ALLMULTI or PROMISC */
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
+           ffilt);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
+           0xffffffff);
+       bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
+           0xffffffff);
+       sc->sc_if_flags = sc->sc_ac.ac_if.if_flags;
+       splx(s);
+}
+
+int
+dwc_gmac_intr(struct dwc_gmac_softc *sc)
+{
+       uint32_t status, dma_status;
+       int rv = 0;
+
+       status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
+       if (status & AWIN_GMAC_MII_IRQ) {
+               (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+                   AWIN_GMAC_MII_STATUS);
+               rv = 1;
+               mii_pollstat(&sc->sc_mii);
+       }
+
+       dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+           AWIN_GMAC_DMA_STATUS);
+
+       if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
+               rv = 1;
+
+       if (dma_status & GMAC_DMA_INT_TIE)
+               dwc_gmac_tx_intr(sc);
+
+       if (dma_status & GMAC_DMA_INT_RIE)
+               dwc_gmac_rx_intr(sc);
+
+       /*
+        * Check error conditions
+        */
+       if (dma_status & GMAC_DMA_INT_ERRORS) {
+               sc->sc_ac.ac_if.if_oerrors++;
+#ifdef DWC_GMAC_DEBUG
+               dwc_dump_and_abort(sc, "interrupt error condition");
+#endif
+       }
+
+       /* ack interrupt */
+       if (dma_status)
+               bus_space_write_4(sc->sc_bst, sc->sc_bsh,
+                   AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
+
+       /*
+        * Get more packets
+        */
+       if (rv)
+               sc->sc_ac.ac_if.if_start(&sc->sc_ac.ac_if);
+
+       return rv;
+}
+
+#ifdef DWC_GMAC_DEBUG
+void
+dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
+{
+       printf("%s: busmode: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
+       printf("%s: tx poll: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
+       printf("%s: rx poll: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
+       printf("%s: rx descriptors: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
+       printf("%s: tx descriptors: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
+       printf("%s: status: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
+       printf("%s: op mode: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
+       printf("%s: int enable: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
+       printf("%s: cur tx: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
+       printf("%s: cur rx: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
+       printf("%s: cur tx buffer: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
+       printf("%s: cur rx buffer: %08x\n", sc->sc_dev.dv_xname,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
+}
+
+void
+dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
+{
+       int i;
+
+       printf("%s: TX queue: cur=%d, next=%d, queued=%d\n",
+          sc->sc_dev.dv_xname, sc->sc_txq.t_cur,
+          sc->sc_txq.t_next, sc->sc_txq.t_queued);
+       printf("%s: TX DMA descriptors:\n", sc->sc_dev.dv_xname);
+       for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
+               struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
+               printf("#%d (%08lx): status: %08x cntl: %08x "
+                   "data: %08x next: %08x\n",
+                   i, sc->sc_txq.t_physaddr +
+                       i*sizeof(struct dwc_gmac_dev_dmadesc),
+                   le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
+                   le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
+       }
+}
+
+void
+dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
+{
+       int i;
+
+       printf("%s: RX queue: cur=%d, next=%d\n", sc->sc_dev.dv_xname,
+           sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
+       printf("%s: RX DMA descriptors:\n", sc->sc_dev.dv_xname);
+       for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
+               struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
+               printf("#%d (%08lx): status: %08x cntl: %08x "
+                   "data: %08x next: %08x\n",
+                   i, sc->sc_rxq.r_physaddr +
+                       i*sizeof(struct dwc_gmac_dev_dmadesc),
+                   le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
+                   le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
+       }
+}
+
+void
+dwc_dump_status(struct dwc_gmac_softc *sc)
+{
+       uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+            AWIN_GMAC_MAC_INTR);
+       uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
+            AWIN_GMAC_DMA_STATUS);
+       char buf[200];
+
+       /* print interrupt state */
+       snprintb(buf, sizeof(buf), "\177\20"
+           "b\x10""NI\0"
+           "b\x0f""AI\0"
+           "b\x0e""ER\0"
+           "b\x0d""FB\0"
+           "b\x0a""ET\0"
+           "b\x09""RW\0"
+           "b\x08""RS\0"
+           "b\x07""RU\0"
+           "b\x06""RI\0"
+           "b\x05""UN\0"
+           "b\x04""OV\0"
+           "b\x03""TJ\0"
+           "b\x02""TU\0"
+           "b\x01""TS\0"
+           "b\x00""TI\0"
+           "\0", dma_status);
+       printf("%s: INTR status: %08x, DMA status: %s\n",
+           sc->sc_dev.dv_xname, status, buf);
+}
+
+void
+dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
+{
+       dwc_dump_status(sc);
+       dwc_gmac_dump_ffilt(sc,
+           bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
+       dwc_gmac_dump_dma(sc);
+       dwc_gmac_dump_tx_desc(sc);
+       dwc_gmac_dump_rx_desc(sc);
+
+       panic("%s", msg);
+}
+
+void
+dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
+{
+       char buf[200];
+
+       /* print filter setup */
+       snprintb(buf, sizeof(buf), "\177\20"
+           "b\x1f""RA\0"
+           "b\x0a""HPF\0"
+           "b\x09""SAF\0"
+           "b\x08""SAIF\0"
+           "b\x05""DBF\0"
+           "b\x04""PM\0"
+           "b\x03""DAIF\0"
+           "b\x02""HMC\0"
+           "b\x01""HUC\0"
+           "b\x00""PR\0"
+           "\0", ffilt);
+       printf("%s: FFILT: %s\n", sc->sc_dev.dv_xname, buf);
+}
+#endif
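
dwc_gmac_var.h below only exports dwc_gmac_attach() and dwc_gmac_intr(), so
the sunxi front end mostly has to map the registers, set up clock and pins,
hook the interrupt and pick an MDC divider.  A rough, untested sketch of
what such glue could look like (this is not the sxige.c from this diff;
the softc layout, the armv7_attach_args usage, the omitted match/config
boilerplate and the GMAC_MII_CLK_* choice are all assumptions):

	struct sxige_softc {
		struct dwc_gmac_softc	sc_core;	/* must come first */
		void			*sc_ih;
	};

	int	sxige_intr(void *);

	void
	sxige_attach(struct device *parent, struct device *self, void *aux)
	{
		struct sxige_softc *sc = (struct sxige_softc *)self;
		struct armv7_attach_args *aa = aux;

		sc->sc_core.sc_dmat = aa->aa_dmat;
		sc->sc_core.sc_bst = aa->aa_iot;
		if (bus_space_map(sc->sc_core.sc_bst, aa->aa_dev->mem[0].addr,
		    aa->aa_dev->mem[0].size, 0, &sc->sc_core.sc_bsh))
			panic("%s: bus_space_map failed",
			    sc->sc_core.sc_dev.dv_xname);

		/* GMAC clock/pin setup (sxiccmu/sxipio) would go here */

		sc->sc_ih = arm_intr_establish(aa->aa_dev->irq[0], IPL_NET,
		    sxige_intr, sc, sc->sc_core.sc_dev.dv_xname);

		/* the divider depends on the bus clock feeding the MAC */
		dwc_gmac_attach(&sc->sc_core, GMAC_MII_CLK_150_250M_DIV102);
	}

	int
	sxige_intr(void *arg)
	{
		struct sxige_softc *sc = arg;

		return dwc_gmac_intr(&sc->sc_core);
	}
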
diff --git sys/dev/ic/dwc_gmac_reg.h sys/dev/ic/dwc_gmac_reg.h
new file mode 100644
index 0000000..599c064
--- /dev/null
+++ sys/dev/ic/dwc_gmac_reg.h
@@ -0,0 +1,227 @@
+/* $OpenBSD$ */
+/* $NetBSD: dwc_gmac_reg.h,v 1.15 2015/11/21 16:04:11 martin Exp $ */
+
+/*-
+ * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry and Martin Husemann.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define        AWIN_GMAC_MAC_CONF              0x0000
+#define        AWIN_GMAC_MAC_FFILT             0x0004
+#define        AWIN_GMAC_MAC_HTHIGH            0x0008
+#define        AWIN_GMAC_MAC_HTLOW             0x000c
+#define        AWIN_GMAC_MAC_MIIADDR           0x0010
+#define        AWIN_GMAC_MAC_MIIDATA           0x0014
+#define        AWIN_GMAC_MAC_FLOWCTRL          0x0018
+#define        AWIN_GMAC_MAC_VLANTAG           0x001c
+#define        AWIN_GMAC_MAC_VERSION           0x0020  /* not always implemented? */
+#define        AWIN_GMAC_MAC_INTR              0x0038
+#define        AWIN_GMAC_MAC_INTMASK           0x003c
+#define        AWIN_GMAC_MAC_ADDR0HI           0x0040
+#define        AWIN_GMAC_MAC_ADDR0LO           0x0044
+#define        AWIN_GMAC_MII_STATUS            0x00D8
+
+#define        AWIN_GMAC_MAC_CONF_DISABLEJABBER (1 << 22) /* jabber disable */
+#define        AWIN_GMAC_MAC_CONF_FRAMEBURST   (1 << 21) /* allow TX frameburst when
+                                                    in half duplex mode */
+#define        AWIN_GMAC_MAC_CONF_MIISEL       (1 << 15) /* select MII phy */
+#define        AWIN_GMAC_MAC_CONF_FES100       (1 << 14) /* 100 mbit mode */
+#define        AWIN_GMAC_MAC_CONF_DISABLERXOWN (1 << 13) /* do not receive our own
+                                                    TX frames in half duplex
+                                                    mode */
+#define        AWIN_GMAC_MAC_CONF_FULLDPLX     (1 << 11) /* select full duplex */
+#define        AWIN_GMAC_MAC_CONF_ACS          (1 << 7)  /* auto pad/CRC stripping */
+#define        AWIN_GMAC_MAC_CONF_TXENABLE     (1 << 3)  /* enable TX dma engine */
+#define        AWIN_GMAC_MAC_CONF_RXENABLE     (1 << 2)  /* enable RX dma engine */
+
+#define        AWIN_GMAC_MAC_FFILT_RA          (1U << 31) /* receive all mode */
+#define        AWIN_GMAC_MAC_FFILT_HPF         (1 << 10) /* hash or perfect filter */
+#define        AWIN_GMAC_MAC_FFILT_SAF         (1 << 9)  /* source address filter */
+#define        AWIN_GMAC_MAC_FFILT_SAIF        (1 << 8)  /* inverse filtering */
+#define        AWIN_GMAC_MAC_FFILT_DBF         (1 << 5)  /* disable broadcast frames */
+#define        AWIN_GMAC_MAC_FFILT_PM          (1 << 4)  /* promiscuous multicast */
+#define        AWIN_GMAC_MAC_FFILT_DAIF        (1 << 3)  /* DA inverse filtering */
+#define        AWIN_GMAC_MAC_FFILT_HMC         (1 << 2)  /* multicast hash compare */
+#define        AWIN_GMAC_MAC_FFILT_HUC         (1 << 1)  /* unicast hash compare */
+#define        AWIN_GMAC_MAC_FFILT_PR          (1 << 0)  /* promiscuous mode */
+
+#define        AWIN_GMAC_MAC_INT_LPI           (1 << 10)
+#define        AWIN_GMAC_MAC_INT_TSI           (1 << 9)
+#define        AWIN_GMAC_MAC_INT_ANEG          (1 << 2)
+#define        AWIN_GMAC_MAC_INT_LINKCHG       (1 << 1)
+#define        AWIN_GMAC_MAC_INT_RGSMII        (1 << 0)
+
+#define        AWIN_GMAC_MAC_FLOWCTRL_PAUSE_SHIFT      16
+#define        AWIN_GMAC_MAC_FLOWCTRL_PAUSE_MASK       0xffff
+#define        AWIN_GMAC_MAC_FLOWCTRL_RFE      (1 << 2)
+#define        AWIN_GMAC_MAC_FLOWCTRL_TFE      (1 << 1)
+#define        AWIN_GMAC_MAC_FLOWCTRL_BUSY     (1 << 0)
+
+#define        AWIN_GMAC_DMA_BUSMODE           0x1000
+#define        AWIN_GMAC_DMA_TXPOLL            0x1004
+#define        AWIN_GMAC_DMA_RXPOLL            0x1008
+#define        AWIN_GMAC_DMA_RX_ADDR           0x100c
+#define        AWIN_GMAC_DMA_TX_ADDR           0x1010
+#define        AWIN_GMAC_DMA_STATUS            0x1014
+#define        AWIN_GMAC_DMA_OPMODE            0x1018
+#define        AWIN_GMAC_DMA_INTENABLE         0x101c
+#define        AWIN_GMAC_DMA_CUR_TX_DESC       0x1048
+#define        AWIN_GMAC_DMA_CUR_RX_DESC       0x104c
+#define        AWIN_GMAC_DMA_CUR_TX_BUFADDR    0x1050
+#define        AWIN_GMAC_DMA_CUR_RX_BUFADDR    0x1054
+#define        AWIN_GMAC_DMA_HWFEATURES        0x1058  /* not always implemented? */
+
+#define        GMAC_MII_PHY_SHIFT              11
+#define        GMAC_MII_PHY_MASK               0x1f
+#define        GMAC_MII_REG_SHIFT              6
+#define        GMAC_MII_REG_MASK               0x1f
+
+#define        GMAC_MII_BUSY                   (1 << 0)
+#define        GMAC_MII_WRITE                  (1 << 1)
+#define        GMAC_MII_CLK_60_100M_DIV42      0x0
+#define        GMAC_MII_CLK_100_150M_DIV62     0x1
+#define        GMAC_MII_CLK_25_35M_DIV16       0x2
+#define        GMAC_MII_CLK_35_60M_DIV26       0x3
+#define        GMAC_MII_CLK_150_250M_DIV102    0x4
+#define        GMAC_MII_CLK_250_300M_DIV124    0x5
+#define        GMAC_MII_CLK_DIV4               0x8
+#define        GMAC_MII_CLK_DIV6               0x9
+#define        GMAC_MII_CLK_DIV8               0xa
+#define        GMAC_MII_CLK_DIV10              0xb
+#define        GMAC_MII_CLK_DIV12              0xc
+#define        GMAC_MII_CLK_DIV14              0xd
+#define        GMAC_MII_CLK_DIV16              0xe
+#define        GMAC_MII_CLK_DIV18              0xf
+#define        GMAC_MII_CLKMASK_SHIFT          2
+#define        GMAC_MII_CLKMASK_MASK           0xf
+
+#define        GMAC_BUSMODE_4PBL               (1 << 24)
+#define        GMAC_BUSMODE_RPBL_SHIFT         17
+#define        GMAC_BUSMODE_RPBL_MASK          0x3f
+#define        GMAC_BUSMODE_FIXEDBURST         (1 << 16)
+#define        GMAC_BUSMODE_PRIORXTX_SHIFT     14
+#define        GMAC_BUSMODE_PRIORXTX_MASK      0x3
+#define        GMAC_BUSMODE_PRIORXTX_41        3
+#define        GMAC_BUSMODE_PRIORXTX_31        2
+#define        GMAC_BUSMODE_PRIORXTX_21        1
+#define        GMAC_BUSMODE_PRIORXTX_11        0
+#define        GMAC_BUSMODE_PBL_SHIFT          8
+#define        GMAC_BUSMODE_PBL_MASK           0x3f /* possible DMA
+                                               burst len */
+#define        GMAC_BUSMODE_RESET              (1 << 0)
+
+#define        AWIN_GMAC_MII_IRQ               (1 << 0)
+
+
+#define        GMAC_DMA_OP_DISABLECSDROP       (1 << 26) /* disable dropping of
+                                                    frames with TCP/IP
+                                                    checksum errors */
+#define        GMAC_DMA_OP_RXSTOREFORWARD      (1 << 25) /* start RX when a
+                                                   full frame is available */
+#define        GMAC_DMA_OP_DISABLERXFLUSH      (1 << 24) /* Do not drop frames
+                                                    when out of RX descr. */
+#define        GMAC_DMA_OP_TXSTOREFORWARD      (1 << 21) /* start TX when a
+                                                   full frame is available */
+#define        GMAC_DMA_OP_FLUSHTX             (1 << 20) /* flush TX fifo */
+#define        GMAC_DMA_OP_TXSTART             (1 << 13) /* start TX DMA engine */
+#define        GMAC_DMA_OP_RXSTART             (1 << 1)  /* start RX DMA engine */
+
+#define        GMAC_DMA_INT_NIE                (1 << 16) /* Normal/Summary */
+#define        GMAC_DMA_INT_AIE                (1 << 15) /* Abnormal/Summary */
+#define        GMAC_DMA_INT_ERE                (1 << 14) /* Early receive */
+#define        GMAC_DMA_INT_FBE                (1 << 13) /* Fatal bus error */
+#define        GMAC_DMA_INT_ETE                (1 << 10) /* Early transmit */
+#define        GMAC_DMA_INT_RWE                (1 << 9)  /* Receive watchdog */
+#define        GMAC_DMA_INT_RSE                (1 << 8)  /* Receive stopped */
+#define        GMAC_DMA_INT_RUE                (1 << 7)  /* Receive buffer unavail. */
+#define        GMAC_DMA_INT_RIE                (1 << 6)  /* Receive interrupt */
+#define        GMAC_DMA_INT_UNE                (1 << 5)  /* Tx underflow */
+#define        GMAC_DMA_INT_OVE                (1 << 4)  /* Receive overflow */
+#define        GMAC_DMA_INT_TJE                (1 << 3)  /* Transmit jabber */
+#define        GMAC_DMA_INT_TUE                (1 << 2)  /* Transmit buffer unavail. */
+#define        GMAC_DMA_INT_TSE                (1 << 1)  /* Transmit stopped */
+#define        GMAC_DMA_INT_TIE                (1 << 0)  /* Transmit interrupt */
+
+#define        GMAC_DMA_INT_MASK               0x1ffff   /* all possible intr bits */
+
+struct dwc_gmac_dev_dmadesc {
+       uint32_t ddesc_status;
+/* both: */
+#define        DDESC_STATUS_OWNEDBYDEV         (1U << 31)
+
+/* for RX descriptors */
+#define        DDESC_STATUS_DAFILTERFAIL       (1 << 30)
+#define        DDESC_STATUS_FRMLENMSK          0x3fff
+#define        DDESC_STATUS_FRMLENSHIFT        16
+#define        DDESC_STATUS_RXERROR            (1 << 15)
+#define        DDESC_STATUS_RXTRUNCATED        (1 << 14)
+#define        DDESC_STATUS_SAFILTERFAIL       (1 << 13)
+#define        DDESC_STATUS_RXIPC_GIANTFRAME   (1 << 12)
+#define        DDESC_STATUS_RXDAMAGED          (1 << 11)
+#define        DDESC_STATUS_RXVLANTAG          (1 << 10)
+#define        DDESC_STATUS_RXFIRST            (1 << 9)
+#define        DDESC_STATUS_RXLAST             (1 << 8)
+#define        DDESC_STATUS_RXIPC_GIANT        (1 << 7)
+#define        DDESC_STATUS_RXCOLLISION        (1 << 6)
+#define        DDESC_STATUS_RXFRAMEETHER       (1 << 5)
+#define        DDESC_STATUS_RXWATCHDOG         (1 << 4)
+#define        DDESC_STATUS_RXMIIERROR         (1 << 3)
+#define        DDESC_STATUS_RXDRIBBLING        (1 << 2)
+#define        DDESC_STATUS_RXCRC              (1 << 1)
+
+       uint32_t ddesc_cntl;
+
+/* for TX descriptors */
+#define        DDESC_CNTL_TXINT                (1U << 31)
+#define        DDESC_CNTL_TXLAST               (1 << 30)
+#define        DDESC_CNTL_TXFIRST              (1 << 29)
+#define        DDESC_CNTL_TXCHECKINSCTRL       (3 << 27) /* bits 28:27 */
+
+#define            DDESC_TXCHECK_DISABLED      0
+#define            DDESC_TXCHECK_IP            1
+#define            DDESC_TXCHECK_IP_NO_PSE     2
+#define            DDESC_TXCHECK_FULL          3
+
+#define        DDESC_CNTL_TXCRCDIS             (1 << 26)
+#define        DDESC_CNTL_TXRINGEND            (1 << 25)
+#define        DDESC_CNTL_TXCHAIN              (1 << 24)
+#define        DDESC_CNTL_TXDISPAD             (1 << 23)
+
+/* for RX descriptors */
+#define        DDESC_CNTL_RXINTDIS             (1U << 31)
+#define        DDESC_CNTL_RXRINGEND            (1 << 25)
+#define        DDESC_CNTL_RXCHAIN              (1 << 24)
+
+/* both */
+#define        DDESC_CNTL_SIZE1MASK            0x7ff   /* 11-bit buffer size */
+#define        DDESC_CNTL_SIZE1SHIFT           0
+#define        DDESC_CNTL_SIZE2MASK            0x7ff
+#define        DDESC_CNTL_SIZE2SHIFT           11
+
+       uint32_t ddesc_data;    /* pointer to buffer data */
+       uint32_t ddesc_next;    /* link to next descriptor */
+};
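
The struct above is the usual Synopsys "normal" descriptor, used here in
chained mode: ddesc_next holds the physical address of the next descriptor
and ddesc_data the buffer.  Just to illustrate how the RX status bits are
meant to be consumed (a hypothetical helper, not part of the diff, roughly
what the RX interrupt path does):

	static inline int
	dwc_gmac_rx_len(const struct dwc_gmac_dev_dmadesc *desc)
	{
		uint32_t status = le32toh(desc->ddesc_status);

		if (status & DDESC_STATUS_OWNEDBYDEV)
			return -1;	/* still owned by the DMA engine */
		if (status & (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED))
			return -1;	/* caller should count an input error */

		/*
		 * Frame length lives in bits 29:16; whether the 4-byte FCS
		 * is included depends on the ACS bit in AWIN_GMAC_MAC_CONF.
		 */
		return (status >> DDESC_STATUS_FRMLENSHIFT) &
		    DDESC_STATUS_FRMLENMSK;
	}
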
diff --git sys/dev/ic/dwc_gmac_var.h sys/dev/ic/dwc_gmac_var.h
new file mode 100644
index 0000000..5aba017
--- /dev/null
+++ sys/dev/ic/dwc_gmac_var.h
@@ -0,0 +1,95 @@
+/* $OpenBSD$ */
+/* $NetBSD: dwc_gmac_var.h,v 1.6 2014/11/22 18:31:03 jmcneill Exp $ */
+
+/*-
+ * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry and Martin Husemann.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * Each DMA descriptor is 16 bytes, so more of them could be squeezed into
+ * the ring memory.  However, on TX we probably will not need that many,
+ * and on RX we allocate a full mbuf cluster per descriptor, so secondary
+ * memory consumption would grow rapidly.
+ * With 256 + 256 descriptors the rings occupy 8 kByte of dma memory and
+ * the RX mbuf clusters consume 512 kByte of RAM.
+ * XXX Maybe fine-tune later, or reconsider unsharing of RX/TX dmamap.
+ */
+#define                AWGE_RX_RING_COUNT      256
+#define                AWGE_TX_RING_COUNT      256
+#define                AWGE_TOTAL_RING_COUNT   \
+                       (AWGE_RX_RING_COUNT + AWGE_TX_RING_COUNT)
+
+#define                AWGE_MAX_PACKET         0x7ff
+
+
+
+struct dwc_gmac_rx_data {
+       bus_dmamap_t    rd_map;
+       struct mbuf     *rd_m;
+};
+
+struct dwc_gmac_tx_data {
+       bus_dmamap_t    td_map;
+       bus_dmamap_t    td_active;
+       struct mbuf     *td_m;
+};
+
+struct dwc_gmac_tx_ring {
+       bus_addr_t                      t_physaddr; /* PA of TX ring start */
+       struct dwc_gmac_dev_dmadesc     *t_desc;    /* VA of TX ring start */
+       struct dwc_gmac_tx_data t_data[AWGE_TX_RING_COUNT];
+       int                             t_cur, t_next, t_queued;
+};
+
+struct dwc_gmac_rx_ring {
+       bus_addr_t                      r_physaddr; /* PA of RX ring start */
+       struct dwc_gmac_dev_dmadesc     *r_desc;    /* VA of RX ring start */
+       struct dwc_gmac_rx_data r_data[AWGE_RX_RING_COUNT];
+       int                             r_cur, r_next;
+       struct mutex                    r_mtx;
+};
+
+struct dwc_gmac_softc {
+       struct device sc_dev;
+       bus_space_tag_t sc_bst;
+       bus_space_handle_t sc_bsh;
+       bus_dma_tag_t sc_dmat;
+       struct arpcom sc_ac;
+       struct mii_data sc_mii;
+       struct mutex sc_mdio_lock;
+       bus_dmamap_t sc_dma_ring_map;           /* common dma memory for RX */
+       bus_dma_segment_t sc_dma_ring_seg;      /* and TX ring */
+       struct dwc_gmac_rx_ring sc_rxq;
+       struct dwc_gmac_tx_ring sc_txq;
+       short sc_if_flags;                      /* shadow of ether flags */
+       uint16_t sc_mii_clk;
+};
+
+void dwc_gmac_attach(struct dwc_gmac_softc*, uint32_t /*mii_clk*/);
+int dwc_gmac_intr(struct dwc_gmac_softc*);
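
The MIIADDR/MIIDATA pair works the usual Synopsys way: PHY number, register
number and the clock-range divider (sc_mii_clk, one of the GMAC_MII_CLK_*
values from dwc_gmac_reg.h) are written to MIIADDR together with
GMAC_MII_BUSY (plus GMAC_MII_WRITE for writes), then you poll until BUSY
clears and read MIIDATA.  An illustrative sketch only; the driver's real
MII routines live earlier in dwc_gmac.c and may differ in details such as
the timeout:

	static int
	gmac_mii_read(struct dwc_gmac_softc *sc, int phy, int reg)
	{
		uint32_t addr;
		int i;

		addr = ((phy & GMAC_MII_PHY_MASK) << GMAC_MII_PHY_SHIFT) |
		    ((reg & GMAC_MII_REG_MASK) << GMAC_MII_REG_SHIFT) |
		    ((sc->sc_mii_clk & GMAC_MII_CLKMASK_MASK) <<
		    GMAC_MII_CLKMASK_SHIFT) | GMAC_MII_BUSY;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR, addr);

		for (i = 0; i < 1000; i++) {
			if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY) == 0)
				return bus_space_read_4(sc->sc_bst,
				    sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA) & 0xffff;
			delay(10);
		}
		return 0;	/* timed out */
	}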
