From: Jennifer Averett <jennifer.aver...@oarcorp.com>

---
 buildset/default.ini                          |    1 +
 freebsd/sys/dev/mii/tiphy.h                   |   57 +
 freebsd/sys/dev/xdma/xdma.c                   |  501 ++++++++
 freebsd/sys/dev/xdma/xdma.h                   |  285 +++++
 freebsd/sys/dev/xdma/xdma_bank.c              |  100 ++
 freebsd/sys/dev/xdma/xdma_mbuf.c              |  151 +++
 freebsd/sys/dev/xdma/xdma_queue.c             |  126 ++
 freebsd/sys/dev/xdma/xdma_sg.c                |  660 ++++++++++
 freebsd/sys/dev/xdma/xdma_sglist.c            |  103 ++
 freebsd/sys/dev/xilinx/axidma.c               |  666 ++++++++++
 freebsd/sys/dev/xilinx/axidma.h               |   96 ++
 freebsd/sys/dev/xilinx/if_xae.c               | 1111 +++++++++++++++++
 freebsd/sys/dev/xilinx/if_xaereg.h            |  122 ++
 freebsd/sys/dev/xilinx/if_xaevar.h            |   80 ++
 .../sys/microblaze/include/machine/in_cksum.h |   82 ++
 freebsd/sys/microblaze/microblaze/in_cksum.c  |  255 ++++
 libbsd.py                                     |   43 +
 rtemsbsd/include/bsp/nexus-devices.h          |   10 +-
 rtemsbsd/include/rtems/bsd/local/xdma_if.h    |  144 +++
 rtemsbsd/local/xdma_if.c                      |   57 +
 rtemsbsd/rtems/rtems-kernel-vmem.c            |   17 +
 21 files changed, 4666 insertions(+), 1 deletion(-)
 create mode 100644 freebsd/sys/dev/mii/tiphy.h
 create mode 100644 freebsd/sys/dev/xdma/xdma.c
 create mode 100644 freebsd/sys/dev/xdma/xdma.h
 create mode 100644 freebsd/sys/dev/xdma/xdma_bank.c
 create mode 100644 freebsd/sys/dev/xdma/xdma_mbuf.c
 create mode 100644 freebsd/sys/dev/xdma/xdma_queue.c
 create mode 100644 freebsd/sys/dev/xdma/xdma_sg.c
 create mode 100644 freebsd/sys/dev/xdma/xdma_sglist.c
 create mode 100644 freebsd/sys/dev/xilinx/axidma.c
 create mode 100644 freebsd/sys/dev/xilinx/axidma.h
 create mode 100644 freebsd/sys/dev/xilinx/if_xae.c
 create mode 100644 freebsd/sys/dev/xilinx/if_xaereg.h
 create mode 100644 freebsd/sys/dev/xilinx/if_xaevar.h
 create mode 100644 freebsd/sys/microblaze/include/machine/in_cksum.h
 create mode 100644 freebsd/sys/microblaze/microblaze/in_cksum.c
 create mode 100644 rtemsbsd/include/rtems/bsd/local/xdma_if.h
 create mode 100644 rtemsbsd/local/xdma_if.c
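
For reviewers who have not used the xDMA framework before: the if_xae driver added
below is a consumer of the scatter-gather API declared in
freebsd/sys/dev/xdma/xdma.h. The sketch that follows is not part of the patch and
only illustrates the expected calling sequence for a transmit channel, based on
the declarations in that header; the "my_nic" softc, the queue sizes, the segment
widths, and the device-side address are placeholders rather than the values
if_xae actually uses.

/*
 * Illustrative only -- not part of this patch.  The "my_nic" names are
 * placeholders; only the xdma_* calls come from dev/xdma/xdma.h.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/mbuf.h>

#include <machine/bus.h>

#include <dev/xdma/xdma.h>

struct my_nic_softc {
	device_t		dev;
	xdma_controller_t	*xdma_tx;	/* Engine for the "tx" dma-names entry. */
	xdma_channel_t		*xchan_tx;	/* Virtual channel on that engine. */
	void			*ih_tx;		/* Interrupt handler cookie. */
	bus_addr_t		tx_dev_addr;	/* Device-side address (engine specific). */
};

/* Completion callback, run from xdma_callback() in the engine's interrupt path. */
static int
my_nic_tx_done(void *arg, xdma_transfer_status_t *status)
{
	struct my_nic_softc *sc = arg;
	xdma_transfer_status_t st;
	struct mbuf *m;

	/* Reclaim completed requests from the channel's out queue. */
	while (xdma_dequeue_mbuf(sc->xchan_tx, &m, &st) == 0)
		m_freem(m);

	return (0);
}

static int
my_nic_dma_attach(struct my_nic_softc *sc)
{

	/* Resolve the DMA engine through the FDT "dmas"/"dma-names" properties. */
	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	/* Allocate a virtual channel that maps buffers with bus_dma. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, XCHAN_CAP_BUSDMA);
	if (sc->xchan_tx == NULL)
		return (ENXIO);

	xdma_setup_intr(sc->xchan_tx, my_nic_tx_done, sc, &sc->ih_tx);

	/* 512 requests, up to 8 segments of at most MCLBYTES each. */
	return (xdma_prep_sg(sc->xchan_tx, 512, MCLBYTES, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR));
}

/* Transmit path: queue one mbuf chain and kick the engine. */
static int
my_nic_transmit(struct my_nic_softc *sc, struct mbuf *m)
{

	if (xdma_enqueue_mbuf(sc->xchan_tx, &m, sc->tx_dev_addr, 4, 4,
	    XDMA_MEM_TO_DEV) != 0)
		return (ENOBUFS);

	return (xdma_queue_submit(sc->xchan_tx));
}

Completion callbacks are invoked from xdma_callback(), which the hardware driver
(axidma.c in this patch) calls from its interrupt handler and which then
resubmits the queue for scatter-gather channels.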

diff --git a/buildset/default.ini b/buildset/default.ini
index 454cc74e..1113c26f 100644
--- a/buildset/default.ini
+++ b/buildset/default.ini
@@ -23,6 +23,7 @@ dev_nic = on
 dev_nic_broadcomm = on
 dev_nic_dc = on
 dev_nic_e1000 = on
+dev_nic_xilinx = on
 dev_nic_fxp = on
 dev_nic_re = on
 dev_nic_smc = on
diff --git a/freebsd/sys/dev/mii/tiphy.h b/freebsd/sys/dev/mii/tiphy.h
new file mode 100644
index 00000000..d3c35575
--- /dev/null
+++ b/freebsd/sys/dev/mii/tiphy.h
@@ -0,0 +1,57 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Texas Instruments DP83867IR/CR Robust, High Immunity
+ * 10/100/1000 Ethernet Physical Layer Transceiver.
+ */
+
+#ifndef _DEV_MII_TIPHY_H_
+#define _DEV_MII_TIPHY_H_
+
+#define        DP83867_PHYCR                   0x10    /* PHY Control Register */
+#define         PHYCR_SGMII_EN                 (1 << 11)
+#define        DP83867_CFG2                    0x14    /* Configuration Register 2 */
+#define         CFG2_SPEED_OPT_10M_EN          (1 << 6) /* Speed Optimization */
+#define         CFG2_SPEED_OPT_ENHANCED_EN     (1 << 8)
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_S   10
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_M   (0x3 << CFG2_SPEED_OPT_ATTEMPT_CNT_S)
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_1   (0 << CFG2_SPEED_OPT_ATTEMPT_CNT_S)
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_2   (1 << CFG2_SPEED_OPT_ATTEMPT_CNT_S)
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_4   (2 << CFG2_SPEED_OPT_ATTEMPT_CNT_S)
+#define         CFG2_SPEED_OPT_ATTEMPT_CNT_8   (3 << CFG2_SPEED_OPT_ATTEMPT_CNT_S)
+#define         CFG2_INTERRUPT_POLARITY        (1 << 13) /* Int pin is active low. */
+#define        DP83867_CFG4                    0x31 /* Configuration Register 4 */
+
+#endif /* !_DEV_MII_TIPHY_H_ */
diff --git a/freebsd/sys/dev/xdma/xdma.c b/freebsd/sys/dev/xdma/xdma.c
new file mode 100644
index 00000000..1cc9b10c
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma.c
@@ -0,0 +1,501 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016-2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/kobj.h>
+#include <sys/malloc.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+#ifdef __rtems__
+#define IN_XDMA_C
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <rtems/bsd/local/xdma_if.h>
+
+/*
+ * Multiple xDMA controllers may work with single DMA device,
+ * so we have global lock for physical channel management.
+ */
+static struct mtx xdma_mtx;
+
+#define        XDMA_LOCK()                     mtx_lock(&xdma_mtx)
+#define        XDMA_UNLOCK()                   mtx_unlock(&xdma_mtx)
+#define        XDMA_ASSERT_LOCKED()            mtx_assert(&xdma_mtx, MA_OWNED)
+
+#define        FDT_REG_CELLS   4
+
+/*
+ * Allocate virtual xDMA channel.
+ */
+xdma_channel_t *
+xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
+{
+       xdma_channel_t *xchan;
+       int ret;
+
+       xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
+       xchan->xdma = xdma;
+       xchan->caps = caps;
+
+       XDMA_LOCK();
+
+       /* Request a real channel from hardware driver. */
+       ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't request hardware channel.\n", __func__);
+               XDMA_UNLOCK();
+               free(xchan, M_XDMA);
+
+               return (NULL);
+       }
+
+       TAILQ_INIT(&xchan->ie_handlers);
+
+       mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
+       mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
+       mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
+       mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
+       mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
+
+       TAILQ_INIT(&xchan->bank);
+       TAILQ_INIT(&xchan->queue_in);
+       TAILQ_INIT(&xchan->queue_out);
+       TAILQ_INIT(&xchan->processing);
+
+       TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
+
+       XDMA_UNLOCK();
+
+       return (xchan);
+}
+
+int
+xdma_channel_free(xdma_channel_t *xchan)
+{
+       xdma_controller_t *xdma;
+       int err;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       XDMA_LOCK();
+
+       /* Free the real DMA channel. */
+       err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
+       if (err != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't free real hw channel.\n", __func__);
+               XDMA_UNLOCK();
+               return (-1);
+       }
+
+       if (xchan->flags & XCHAN_TYPE_SG)
+               xdma_channel_free_sg(xchan);
+
+       xdma_teardown_all_intr(xchan);
+
+       mtx_destroy(&xchan->mtx_lock);
+       mtx_destroy(&xchan->mtx_qin_lock);
+       mtx_destroy(&xchan->mtx_qout_lock);
+       mtx_destroy(&xchan->mtx_bank_lock);
+       mtx_destroy(&xchan->mtx_proc_lock);
+
+       TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
+
+       free(xchan, M_XDMA);
+
+       XDMA_UNLOCK();
+
+       return (0);
+}
+
+int
+xdma_setup_intr(xdma_channel_t *xchan,
+    int (*cb)(void *, xdma_transfer_status_t *),
+    void *arg, void **ihandler)
+{
+       struct xdma_intr_handler *ih;
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       /* Sanity check. */
+       if (cb == NULL) {
+               device_printf(xdma->dev,
+                   "%s: Can't setup interrupt handler.\n",
+                   __func__);
+
+               return (-1);
+       }
+
+       ih = malloc(sizeof(struct xdma_intr_handler),
+           M_XDMA, M_WAITOK | M_ZERO);
+       ih->cb = cb;
+       ih->cb_user = arg;
+
+       XCHAN_LOCK(xchan);
+       TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
+       XCHAN_UNLOCK(xchan);
+
+       if (ihandler != NULL)
+               *ihandler = ih;
+
+       return (0);
+}
+
+int
+xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
+{
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       /* Sanity check. */
+       if (ih == NULL) {
+               device_printf(xdma->dev,
+                   "%s: Can't teardown interrupt.\n", __func__);
+               return (-1);
+       }
+
+       TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+       free(ih, M_XDMA);
+
+       return (0);
+}
+
+int
+xdma_teardown_all_intr(xdma_channel_t *xchan)
+{
+       struct xdma_intr_handler *ih_tmp;
+       struct xdma_intr_handler *ih;
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+               TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+               free(ih, M_XDMA);
+       }
+
+       return (0);
+}
+
+int
+xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
+{
+       xdma_controller_t *xdma;
+       int ret;
+
+       xdma = xchan->xdma;
+
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       XCHAN_LOCK(xchan);
+       ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't request a transfer.\n", __func__);
+               XCHAN_UNLOCK(xchan);
+
+               return (-1);
+       }
+       XCHAN_UNLOCK(xchan);
+
+       return (0);
+}
+
+int
+xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
+{
+       xdma_controller_t *xdma;
+       int ret;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't process command.\n", __func__);
+               return (-1);
+       }
+
+       return (0);
+}
+
+void
+xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
+{
+       struct xdma_intr_handler *ih_tmp;
+       struct xdma_intr_handler *ih;
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
+               if (ih->cb != NULL)
+                       ih->cb(ih->cb_user, status);
+
+       if (xchan->flags & XCHAN_TYPE_SG)
+               xdma_queue_submit(xchan);
+}
+
+#ifdef FDT
+/*
+ * Notify the DMA driver we have machine-dependent data in FDT.
+ */
+static int
+xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
+{
+       uint32_t ret;
+
+       ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
+           cells, ncells, (void **)&xdma->data);
+
+       return (ret);
+}
+
+static int
+xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
+{
+       pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
+       pcell_t *regp;
+       int addr_cells, size_cells;
+       int i, reg_len, ret, tuple_size, tuples;
+       u_long mem_start, mem_size;
+
+       if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
+           &size_cells)) != 0)
+               return (ret);
+
+       if (addr_cells > 2)
+               return (ERANGE);
+
+       tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
+       reg_len = OF_getproplen(memory, "reg");
+       if (reg_len <= 0 || reg_len > sizeof(reg))
+               return (ERANGE);
+
+       if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
+               return (ENXIO);
+
+       tuples = reg_len / tuple_size;
+       regp = (pcell_t *)&reg;
+       for (i = 0; i < tuples; i++) {
+               ret = fdt_data_to_res(regp, addr_cells, size_cells,
+                   &mem_start, &mem_size);
+               if (ret != 0)
+                       return (ret);
+
+               vmem_add(vmem, mem_start, mem_size, 0);
+               regp += addr_cells + size_cells;
+       }
+
+       return (0);
+}
+
+vmem_t *
+xdma_get_memory(device_t dev)
+{
+       phandle_t mem_node, node;
+       pcell_t mem_handle;
+       vmem_t *vmem;
+
+       node = ofw_bus_get_node(dev);
+       if (node <= 0) {
+               device_printf(dev,
+                   "%s called on not ofw based device.\n", __func__);
+               return (NULL);
+       }
+
+       if (!OF_hasprop(node, "memory-region"))
+               return (NULL);
+
+       if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
+           sizeof(mem_handle)) <= 0)
+               return (NULL);
+
+       vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
+           PAGE_SIZE, M_BESTFIT | M_WAITOK);
+       if (vmem == NULL)
+               return (NULL);
+
+       mem_node = OF_node_from_xref(mem_handle);
+       if (xdma_handle_mem_node(vmem, mem_node) != 0) {
+               vmem_destroy(vmem);
+               return (NULL);
+       }
+
+       return (vmem);
+}
+
+void
+xdma_put_memory(vmem_t *vmem)
+{
+
+       vmem_destroy(vmem);
+}
+
+void
+xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
+{
+
+       xchan->vmem = vmem;
+}
+
+/*
+ * Allocate xdma controller.
+ */
+xdma_controller_t *
+xdma_ofw_get(device_t dev, const char *prop)
+{
+       phandle_t node, parent;
+       xdma_controller_t *xdma;
+       device_t dma_dev;
+       pcell_t *cells;
+       int ncells;
+       int error;
+       int ndmas;
+       int idx;
+
+       node = ofw_bus_get_node(dev);
+       if (node <= 0)
+               device_printf(dev,
+                   "%s called on not ofw based device.\n", __func__);
+
+       error = ofw_bus_parse_xref_list_get_length(node,
+           "dmas", "#dma-cells", &ndmas);
+       if (error) {
+               device_printf(dev,
+                   "%s can't get dmas list.\n", __func__);
+               return (NULL);
+       }
+
+       if (ndmas == 0) {
+               device_printf(dev,
+                   "%s dmas list is empty.\n", __func__);
+               return (NULL);
+       }
+
+       error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
+       if (error != 0) {
+               device_printf(dev,
+                   "%s can't find string index.\n", __func__);
+               return (NULL);
+       }
+
+       error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
+           idx, &parent, &ncells, &cells);
+       if (error != 0) {
+               device_printf(dev,
+                   "%s can't get dma device xref.\n", __func__);
+               return (NULL);
+       }
+
+       dma_dev = OF_device_from_xref(parent);
+       if (dma_dev == NULL) {
+               device_printf(dev,
+                   "%s can't get dma device.\n", __func__);
+               return (NULL);
+       }
+
+       xdma = malloc(sizeof(struct xdma_controller),
+           M_XDMA, M_WAITOK | M_ZERO);
+       xdma->dev = dev;
+       xdma->dma_dev = dma_dev;
+
+       TAILQ_INIT(&xdma->channels);
+
+       xdma_ofw_md_data(xdma, cells, ncells);
+       free(cells, M_OFWPROP);
+
+       return (xdma);
+}
+#endif
+
+/*
+ * Free xDMA controller object.
+ */
+int
+xdma_put(xdma_controller_t *xdma)
+{
+
+       XDMA_LOCK();
+
+       /* Ensure no channels allocated. */
+       if (!TAILQ_EMPTY(&xdma->channels)) {
+               device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
+               return (-1);
+       }
+
+       free(xdma->data, M_DEVBUF);
+       free(xdma, M_XDMA);
+
+       XDMA_UNLOCK();
+
+       return (0);
+}
+
+static void
+xdma_init(void)
+{
+
+       mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
+}
+
+SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
diff --git a/freebsd/sys/dev/xdma/xdma.h b/freebsd/sys/dev/xdma/xdma.h
new file mode 100644
index 00000000..39e5b48b
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma.h
@@ -0,0 +1,285 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2016-2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_XDMA_XDMA_H_
+#define _DEV_XDMA_XDMA_H_
+
+#include <sys/proc.h>
+#include <sys/vmem.h>
+#ifdef __rtems__
+#include <dev/ofw/openfirm.h>
+#endif
+
+enum xdma_direction {
+       XDMA_MEM_TO_MEM,
+       XDMA_MEM_TO_DEV,
+       XDMA_DEV_TO_MEM,
+       XDMA_DEV_TO_DEV,
+};
+
+enum xdma_operation_type {
+       XDMA_MEMCPY,
+       XDMA_CYCLIC,
+       XDMA_FIFO,
+       XDMA_SG,
+};
+
+enum xdma_request_type {
+       XR_TYPE_PHYS,
+       XR_TYPE_VIRT,
+       XR_TYPE_MBUF,
+       XR_TYPE_BIO,
+};
+
+enum xdma_command {
+       XDMA_CMD_BEGIN,
+       XDMA_CMD_PAUSE,
+       XDMA_CMD_TERMINATE,
+};
+
+struct xdma_transfer_status {
+       uint32_t        transferred;
+       int             error;
+};
+
+typedef struct xdma_transfer_status xdma_transfer_status_t;
+
+struct xdma_controller {
+       device_t dev;           /* DMA consumer device_t. */
+       device_t dma_dev;       /* A real DMA device_t. */
+       void *data;             /* OFW MD part. */
+       vmem_t *vmem;           /* Bounce memory. */
+
+       /* List of virtual channels allocated. */
+       TAILQ_HEAD(xdma_channel_list, xdma_channel)     channels;
+};
+
+typedef struct xdma_controller xdma_controller_t;
+
+struct xchan_buf {
+       bus_dmamap_t                    map;
+       uint32_t                        nsegs;
+       uint32_t                        nsegs_left;
+       vm_offset_t                     vaddr;
+       vm_offset_t                     paddr;
+       vm_size_t                       size;
+};
+
+struct xdma_request {
+       struct mbuf                     *m;
+       struct bio                      *bp;
+       enum xdma_operation_type        operation;
+       enum xdma_request_type          req_type;
+       enum xdma_direction             direction;
+       bus_addr_t                      src_addr;
+       bus_addr_t                      dst_addr;
+       uint8_t                         src_width;
+       uint8_t                         dst_width;
+       bus_size_t                      block_num;
+       bus_size_t                      block_len;
+       xdma_transfer_status_t          status;
+       void                            *user;
+       TAILQ_ENTRY(xdma_request)       xr_next;
+       struct xchan_buf                buf;
+};
+
+struct xdma_sglist {
+       bus_addr_t                      src_addr;
+       bus_addr_t                      dst_addr;
+       size_t                          len;
+       uint8_t                         src_width;
+       uint8_t                         dst_width;
+       enum xdma_direction             direction;
+       bool                            first;
+       bool                            last;
+};
+
+struct xdma_channel {
+       xdma_controller_t               *xdma;
+       vmem_t                          *vmem;
+
+       uint32_t                        flags;
+#define        XCHAN_BUFS_ALLOCATED            (1 << 0)
+#define        XCHAN_SGLIST_ALLOCATED          (1 << 1)
+#define        XCHAN_CONFIGURED                (1 << 2)
+#define        XCHAN_TYPE_CYCLIC               (1 << 3)
+#define        XCHAN_TYPE_MEMCPY               (1 << 4)
+#define        XCHAN_TYPE_FIFO                 (1 << 5)
+#define        XCHAN_TYPE_SG                   (1 << 6)
+
+       uint32_t                        caps;
+#define        XCHAN_CAP_BUSDMA                (1 << 0)
+#define        XCHAN_CAP_NOSEG                 (1 << 1)
+#define        XCHAN_CAP_NOBUFS                (1 << 2)
+
+       /* A real hardware driver channel. */
+       void                            *chan;
+
+       /* Interrupt handlers. */
+       TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
+       TAILQ_ENTRY(xdma_channel)       xchan_next;
+
+       struct mtx                      mtx_lock;
+       struct mtx                      mtx_qin_lock;
+       struct mtx                      mtx_qout_lock;
+       struct mtx                      mtx_bank_lock;
+       struct mtx                      mtx_proc_lock;
+
+       /* Request queue. */
+       bus_dma_tag_t                   dma_tag_bufs;
+       struct xdma_request             *xr_mem;
+       uint32_t                        xr_num;
+
+       /* Bus dma tag options. */
+       bus_size_t                      maxsegsize;
+       bus_size_t                      maxnsegs;
+       bus_size_t                      alignment;
+       bus_addr_t                      boundary;
+       bus_addr_t                      lowaddr;
+       bus_addr_t                      highaddr;
+
+       struct xdma_sglist              *sg;
+
+       TAILQ_HEAD(, xdma_request)      bank;
+       TAILQ_HEAD(, xdma_request)      queue_in;
+       TAILQ_HEAD(, xdma_request)      queue_out;
+       TAILQ_HEAD(, xdma_request)      processing;
+};
+
+typedef struct xdma_channel xdma_channel_t;
+
+struct xdma_intr_handler {
+       int             (*cb)(void *cb_user, xdma_transfer_status_t *status);
+       void            *cb_user;
+       TAILQ_ENTRY(xdma_intr_handler)  ih_next;
+};
+
+#ifndef __rtems__
+static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+#else
+#ifdef IN_XDMA_C
+MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+#else
+MALLOC_DECLARE(M_XDMA);
+#endif
+#endif
+
+#define        XCHAN_LOCK(xchan)               mtx_lock(&(xchan)->mtx_lock)
+#define        XCHAN_UNLOCK(xchan)             mtx_unlock(&(xchan)->mtx_lock)
+#define        XCHAN_ASSERT_LOCKED(xchan)      \
+    mtx_assert(&(xchan)->mtx_lock, MA_OWNED)
+
+#define        QUEUE_IN_LOCK(xchan)            mtx_lock(&(xchan)->mtx_qin_lock)
+#define        QUEUE_IN_UNLOCK(xchan)          mtx_unlock(&(xchan)->mtx_qin_lock)
+#define        QUEUE_IN_ASSERT_LOCKED(xchan)   \
+    mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)
+
+#define        QUEUE_OUT_LOCK(xchan)           mtx_lock(&(xchan)->mtx_qout_lock)
+#define        QUEUE_OUT_UNLOCK(xchan)         mtx_unlock(&(xchan)->mtx_qout_lock)
+#define        QUEUE_OUT_ASSERT_LOCKED(xchan)  \
+    mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)
+
+#define        QUEUE_BANK_LOCK(xchan)          mtx_lock(&(xchan)->mtx_bank_lock)
+#define        QUEUE_BANK_UNLOCK(xchan)        mtx_unlock(&(xchan)->mtx_bank_lock)
+#define        QUEUE_BANK_ASSERT_LOCKED(xchan) \
+    mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)
+
+#define        QUEUE_PROC_LOCK(xchan)          mtx_lock(&(xchan)->mtx_proc_lock)
+#define        QUEUE_PROC_UNLOCK(xchan)        mtx_unlock(&(xchan)->mtx_proc_lock)
+#define        QUEUE_PROC_ASSERT_LOCKED(xchan) \
+    mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
+
+#define        XDMA_SGLIST_MAXLEN      2048
+#define        XDMA_MAX_SEG            128
+
+/* xDMA controller ops */
+xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
+int xdma_put(xdma_controller_t *xdma);
+vmem_t * xdma_get_memory(device_t dev);
+void xdma_put_memory(vmem_t *vmem);
+
+/* xDMA channel ops */
+xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
+int xdma_channel_free(xdma_channel_t *);
+int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
+void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);
+
+/* SG interface */
+int xdma_prep_sg(xdma_channel_t *, uint32_t,
+    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
+void xdma_channel_free_sg(xdma_channel_t *xchan);
+int xdma_queue_submit_sg(xdma_channel_t *xchan);
+void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
+
+/* Queue operations */
+int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
+    xdma_transfer_status_t *);
+int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
+    uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
+    xdma_transfer_status_t *status);
+int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
+    uint8_t, uint8_t, enum xdma_direction dir);
+int xdma_dequeue(xdma_channel_t *xchan, void **user,
+    xdma_transfer_status_t *status);
+int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
+int xdma_queue_submit(xdma_channel_t *xchan);
+
+/* Mbuf operations */
+uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
+uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
+
+/* Channel Control */
+int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
+
+/* Interrupt callback */
+int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
+    xdma_transfer_status_t *), void *arg, void **);
+int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
+int xdma_teardown_all_intr(xdma_channel_t *xchan);
+void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
+
+/* Sglist */
+int xchan_sglist_alloc(xdma_channel_t *xchan);
+void xchan_sglist_free(xdma_channel_t *xchan);
+int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+    uint32_t nsegs, struct xdma_request *xr);
+
+/* Requests bank */
+void xchan_bank_init(xdma_channel_t *xchan);
+int xchan_bank_free(xdma_channel_t *xchan);
+struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
+int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
+
+#endif /* !_DEV_XDMA_XDMA_H_ */
diff --git a/freebsd/sys/dev/xdma/xdma_bank.c b/freebsd/sys/dev/xdma/xdma_bank.c
new file mode 100644
index 00000000..96ddef97
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma_bank.c
@@ -0,0 +1,100 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2018-2019 Ruslan Bukin <b...@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+void
+xchan_bank_init(xdma_channel_t *xchan)
+{
+       struct xdma_request *xr;
+       xdma_controller_t *xdma;
+       int i;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       xchan->xr_mem = malloc(sizeof(struct xdma_request) * xchan->xr_num,
+           M_XDMA, M_WAITOK | M_ZERO);
+
+       for (i = 0; i < xchan->xr_num; i++) {
+               xr = &xchan->xr_mem[i];
+               TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+       }
+}
+
+int
+xchan_bank_free(xdma_channel_t *xchan)
+{
+
+       free(xchan->xr_mem, M_XDMA);
+
+       return (0);
+}
+
+struct xdma_request *
+xchan_bank_get(xdma_channel_t *xchan)
+{
+       struct xdma_request *xr;
+       struct xdma_request *xr_tmp;
+
+       QUEUE_BANK_LOCK(xchan);
+       TAILQ_FOREACH_SAFE(xr, &xchan->bank, xr_next, xr_tmp) {
+               TAILQ_REMOVE(&xchan->bank, xr, xr_next);
+               break;
+       }
+       QUEUE_BANK_UNLOCK(xchan);
+
+       return (xr);
+}
+
+int
+xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+
+       QUEUE_BANK_LOCK(xchan);
+       TAILQ_INSERT_TAIL(&xchan->bank, xr, xr_next);
+       QUEUE_BANK_UNLOCK(xchan);
+
+       return (0);
+}
diff --git a/freebsd/sys/dev/xdma/xdma_mbuf.c b/freebsd/sys/dev/xdma/xdma_mbuf.c
new file mode 100644
index 00000000..cbd32984
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma_mbuf.c
@@ -0,0 +1,151 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2017-2019 Ruslan Bukin <b...@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+int
+xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+    xdma_transfer_status_t *status)
+{
+       struct xdma_request *xr;
+       struct xdma_request *xr_tmp;
+
+       QUEUE_OUT_LOCK(xchan);
+       TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+               TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+               break;
+       }
+       QUEUE_OUT_UNLOCK(xchan);
+
+       if (xr == NULL)
+               return (-1);
+
+       *mp = xr->m;
+       status->error = xr->status.error;
+       status->transferred = xr->status.transferred;
+
+       xchan_bank_put(xchan, xr);
+
+       return (0);
+}
+
+int
+xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **mp,
+    uintptr_t addr, uint8_t src_width, uint8_t dst_width,
+    enum xdma_direction dir)
+{
+       struct xdma_request *xr;
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+
+       xr = xchan_bank_get(xchan);
+       if (xr == NULL)
+               return (-1); /* No space is available yet. */
+
+       xr->direction = dir;
+       xr->m = *mp;
+       xr->req_type = XR_TYPE_MBUF;
+       if (dir == XDMA_MEM_TO_DEV) {
+               xr->dst_addr = addr;
+               xr->src_addr = 0;
+       } else {
+               xr->src_addr = addr;
+               xr->dst_addr = 0;
+       }
+       xr->src_width = src_width;
+       xr->dst_width = dst_width;
+
+       QUEUE_IN_LOCK(xchan);
+       TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+       QUEUE_IN_UNLOCK(xchan);
+
+       return (0);
+}
+
+uint32_t
+xdma_mbuf_chain_count(struct mbuf *m0)
+{
+       struct mbuf *m;
+       uint32_t c;
+
+       c = 0;
+
+       for (m = m0; m != NULL; m = m->m_next)
+               c++;
+
+       return (c);
+}
+
+uint32_t
+xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr)
+{
+       xdma_controller_t *xdma;
+       struct mbuf *m;
+       uint32_t c;
+
+       xdma = xchan->xdma;
+
+       c = xdma_mbuf_chain_count(xr->m);
+       if (c == 1)
+               return (c); /* Nothing to do. */
+
+       if ((m = m_defrag(xr->m, M_NOWAIT)) == NULL) {
+               device_printf(xdma->dma_dev,
+                   "%s: Can't defrag mbuf\n",
+                   __func__);
+               return (c);
+       }
+
+       xr->m = m;
+       c = 1;
+
+       return (c);
+}
diff --git a/freebsd/sys/dev/xdma/xdma_queue.c b/freebsd/sys/dev/xdma/xdma_queue.c
new file mode 100644
index 00000000..22d7e2e2
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma_queue.c
@@ -0,0 +1,126 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2018-2019 Ruslan Bukin <b...@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+int
+xdma_dequeue(xdma_channel_t *xchan, void **user,
+    xdma_transfer_status_t *status)
+{
+       struct xdma_request *xr_tmp;
+       struct xdma_request *xr;
+
+       QUEUE_OUT_LOCK(xchan);
+       TAILQ_FOREACH_SAFE(xr, &xchan->queue_out, xr_next, xr_tmp) {
+               TAILQ_REMOVE(&xchan->queue_out, xr, xr_next);
+               break;
+       }
+       QUEUE_OUT_UNLOCK(xchan);
+
+       if (xr == NULL)
+               return (-1);
+
+       *user = xr->user;
+       status->error = xr->status.error;
+       status->transferred = xr->status.transferred;
+
+       xchan_bank_put(xchan, xr);
+
+       return (0);
+}
+
+int
+xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
+    uint8_t src_width, uint8_t dst_width, bus_size_t len,
+    enum xdma_direction dir, void *user)
+{
+       struct xdma_request *xr;
+       xdma_controller_t *xdma;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       xr = xchan_bank_get(xchan);
+       if (xr == NULL)
+               return (-1); /* No space is available. */
+
+       xr->user = user;
+       xr->direction = dir;
+       xr->m = NULL;
+       xr->bp = NULL;
+       xr->block_num = 1;
+       xr->block_len = len;
+       xr->req_type = XR_TYPE_VIRT;
+       xr->src_addr = src;
+       xr->dst_addr = dst;
+       xr->src_width = src_width;
+       xr->dst_width = dst_width;
+
+       QUEUE_IN_LOCK(xchan);
+       TAILQ_INSERT_TAIL(&xchan->queue_in, xr, xr_next);
+       QUEUE_IN_UNLOCK(xchan);
+
+       return (0);
+}
+
+int
+xdma_queue_submit(xdma_channel_t *xchan)
+{
+       xdma_controller_t *xdma;
+       int ret;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       ret = 0;
+
+       XCHAN_LOCK(xchan);
+
+       if (xchan->flags & XCHAN_TYPE_SG)
+               ret = xdma_queue_submit_sg(xchan);
+
+       XCHAN_UNLOCK(xchan);
+
+       return (ret);
+}
diff --git a/freebsd/sys/dev/xdma/xdma_sg.c b/freebsd/sys/dev/xdma/xdma_sg.c
new file mode 100644
index 00000000..fa69a297
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma_sg.c
@@ -0,0 +1,660 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2018-2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/rwlock.h>
+
+#include <machine/bus.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <rtems/bsd/local/xdma_if.h>
+
+struct seg_load_request {
+       struct bus_dma_segment *seg;
+       uint32_t nsegs;
+       uint32_t error;
+};
+
+static void
+xchan_bufs_free_reserved(xdma_channel_t *xchan)
+{
+       struct xdma_request *xr;
+       vm_size_t size;
+       int i;
+
+       for (i = 0; i < xchan->xr_num; i++) {
+               xr = &xchan->xr_mem[i];
+               size = xr->buf.size;
+#ifndef __rtems__    /* XXX */
+               if (xr->buf.vaddr) {
+                       pmap_kremove_device(xr->buf.vaddr, size);
+                       kva_free(xr->buf.vaddr, size);
+                       xr->buf.vaddr = 0;
+               }
+#endif
+               if (xr->buf.paddr) {
+                       vmem_free(xchan->vmem, xr->buf.paddr, size);
+                       xr->buf.paddr = 0;
+               }
+               xr->buf.size = 0;
+       }
+}
+
+static int
+xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
+{
+       xdma_controller_t *xdma;
+       struct xdma_request *xr;
+       vmem_addr_t addr;
+       vm_size_t size;
+       int i;
+
+       xdma = xchan->xdma;
+
+       if (xchan->vmem == NULL)
+               return (ENOBUFS);
+
+       for (i = 0; i < xchan->xr_num; i++) {
+               xr = &xchan->xr_mem[i];
+               size = round_page(xchan->maxsegsize);
+               if (vmem_alloc(xchan->vmem, size,
+                   M_BESTFIT | M_NOWAIT, &addr)) {
+                       device_printf(xdma->dev,
+                           "%s: Can't allocate memory\n", __func__);
+                       xchan_bufs_free_reserved(xchan);
+                       return (ENOMEM);
+               }
+               
+               xr->buf.size = size;
+               xr->buf.paddr = addr;
+#ifndef __rtems__
+               xr->buf.vaddr = kva_alloc(size);
+#else
+               xr->buf.vaddr = calloc(1,size);
+#endif
+               if (xr->buf.vaddr == 0) {
+                       device_printf(xdma->dev,
+                           "%s: Can't allocate KVA\n", __func__);
+                       xchan_bufs_free_reserved(xchan);
+                       return (ENOMEM);
+               }
+#ifndef __rtems__
+               pmap_kenter_device(xr->buf.vaddr, size, addr);
+#else
+/* XXX */
+#endif
+       }
+
+       return (0);
+}
+
+static int
+xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
+{
+       xdma_controller_t *xdma;
+       struct xdma_request *xr;
+       int err;
+       int i;
+
+       xdma = xchan->xdma;
+
+       /* Create bus_dma tag */
+       err = bus_dma_tag_create(
+           bus_get_dma_tag(xdma->dev), /* Parent tag. */
+           xchan->alignment,           /* alignment */
+           xchan->boundary,            /* boundary */
+           xchan->lowaddr,             /* lowaddr */
+           xchan->highaddr,            /* highaddr */
+           NULL, NULL,                 /* filter, filterarg */
+           xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
+           xchan->maxnsegs,            /* nsegments */
+           xchan->maxsegsize,          /* maxsegsize */
+           0,                          /* flags */
+           NULL, NULL,                 /* lockfunc, lockarg */
+           &xchan->dma_tag_bufs);
+       if (err != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't create bus_dma tag.\n", __func__);
+               return (-1);
+       }
+
+       for (i = 0; i < xchan->xr_num; i++) {
+               xr = &xchan->xr_mem[i];
+               err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
+                   &xr->buf.map);
+               if (err != 0) {
+                       device_printf(xdma->dev,
+                           "%s: Can't create buf DMA map.\n", __func__);
+
+                       /* Cleanup. */
+                       bus_dma_tag_destroy(xchan->dma_tag_bufs);
+
+                       return (-1);
+               }
+       }
+
+       return (0);
+}
+
+static int
+xchan_bufs_alloc(xdma_channel_t *xchan)
+{
+       xdma_controller_t *xdma;
+       int ret;
+
+       xdma = xchan->xdma;
+
+       if (xdma == NULL) {
+               printf("%s: Channel was not allocated properly.\n", __func__);
+               return (-1);
+       }
+
+       if (xchan->caps & XCHAN_CAP_BUSDMA)
+               ret = xchan_bufs_alloc_busdma(xchan);
+       else {
+               ret = xchan_bufs_alloc_reserved(xchan);
+       }
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't allocate bufs.\n", __func__);
+               return (-1);
+       }
+
+       xchan->flags |= XCHAN_BUFS_ALLOCATED;
+
+       return (0);
+}
+
+static int
+xchan_bufs_free(xdma_channel_t *xchan)
+{
+       struct xdma_request *xr;
+       struct xchan_buf *b;
+       int i;
+
+       if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
+               return (-1);
+
+       if (xchan->caps & XCHAN_CAP_BUSDMA) {
+               for (i = 0; i < xchan->xr_num; i++) {
+                       xr = &xchan->xr_mem[i];
+                       b = &xr->buf;
+                       bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
+               }
+               bus_dma_tag_destroy(xchan->dma_tag_bufs);
+       } else
+               xchan_bufs_free_reserved(xchan);
+
+       xchan->flags &= ~XCHAN_BUFS_ALLOCATED;
+
+       return (0);
+}
+
+void
+xdma_channel_free_sg(xdma_channel_t *xchan)
+{
+
+       xchan_bufs_free(xchan);
+       xchan_sglist_free(xchan);
+       xchan_bank_free(xchan);
+}
+
+/*
+ * Prepare xchan for a scatter-gather transfer.
+ * xr_num - xdma requests queue size,
+ * maxsegsize - maximum allowed scatter-gather list element size in bytes
+ */
+int
+xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
+    bus_size_t maxsegsize, bus_size_t maxnsegs,
+    bus_size_t alignment, bus_addr_t boundary,
+    bus_addr_t lowaddr, bus_addr_t highaddr)
+{
+       xdma_controller_t *xdma;
+       int ret;
+
+       xdma = xchan->xdma;
+
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       if (xchan->flags & XCHAN_CONFIGURED) {
+               device_printf(xdma->dev,
+                   "%s: Channel is already configured.\n", __func__);
+               return (-1);
+       }
+
+       xchan->xr_num = xr_num;
+       xchan->maxsegsize = maxsegsize;
+       xchan->maxnsegs = maxnsegs;
+       xchan->alignment = alignment;
+       xchan->boundary = boundary;
+       xchan->lowaddr = lowaddr;
+       xchan->highaddr = highaddr;
+
+       if (xchan->maxnsegs > XDMA_MAX_SEG) {
+               device_printf(xdma->dev, "%s: maxnsegs is too big\n",
+                   __func__);
+               return (-1);
+       }
+
+       xchan_bank_init(xchan);
+
+       /* Allocate sglist. */
+       ret = xchan_sglist_alloc(xchan);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't allocate sglist.\n", __func__);
+               return (-1);
+       }
+
+       /* Allocate buffers if required. */
+       if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
+               ret = xchan_bufs_alloc(xchan);
+               if (ret != 0) {
+                       device_printf(xdma->dev,
+                           "%s: Can't allocate bufs.\n", __func__);
+
+                       /* Cleanup */
+                       xchan_sglist_free(xchan);
+                       xchan_bank_free(xchan);
+
+                       return (-1);
+               }
+       }
+
+       xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);
+
+       XCHAN_LOCK(xchan);
+       ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't prepare SG transfer.\n", __func__);
+               XCHAN_UNLOCK(xchan);
+
+               return (-1);
+       }
+       XCHAN_UNLOCK(xchan);
+
+       return (0);
+}
+
+void
+xchan_seg_done(xdma_channel_t *xchan,
+    struct xdma_transfer_status *st)
+{
+       struct xdma_request *xr;
+       xdma_controller_t *xdma;
+       struct xchan_buf *b;
+
+       xdma = xchan->xdma;
+
+       xr = TAILQ_FIRST(&xchan->processing);
+       if (xr == NULL)
+               panic("request not found\n");
+
+       b = &xr->buf;
+
+       atomic_subtract_int(&b->nsegs_left, 1);
+
+       if (b->nsegs_left == 0) {
+               if (xchan->caps & XCHAN_CAP_BUSDMA) {
+                       if (xr->direction == XDMA_MEM_TO_DEV)
+                               bus_dmamap_sync(xchan->dma_tag_bufs, b->map, 
+                                   BUS_DMASYNC_POSTWRITE);
+                       else
+                               bus_dmamap_sync(xchan->dma_tag_bufs, b->map, 
+                                   BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
+               } else {
+                       if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
+                           xr->req_type == XR_TYPE_MBUF &&
+                           xr->direction == XDMA_DEV_TO_MEM)
+                               m_copyback(xr->m, 0, st->transferred,
+                                   (void *)xr->buf.vaddr);
+               }
+               xr->status.error = st->error;
+               xr->status.transferred = st->transferred;
+
+               QUEUE_PROC_LOCK(xchan);
+               TAILQ_REMOVE(&xchan->processing, xr, xr_next);
+               QUEUE_PROC_UNLOCK(xchan);
+
+               QUEUE_OUT_LOCK(xchan);
+               TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
+               QUEUE_OUT_UNLOCK(xchan);
+       }
+}
+
+static void
+xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+       struct seg_load_request *slr;
+       struct bus_dma_segment *seg;
+       int i;
+
+       slr = arg;
+       seg = slr->seg;
+
+       if (error != 0) {
+               slr->error = error;
+               return;
+       }
+
+       slr->nsegs = nsegs;
+
+       for (i = 0; i < nsegs; i++) {
+               seg[i].ds_addr = segs[i].ds_addr;
+               seg[i].ds_len = segs[i].ds_len;
+       }
+}
+
+static int
+_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
+    struct bus_dma_segment *seg)
+{
+       xdma_controller_t *xdma;
+       struct seg_load_request slr;
+       uint32_t nsegs;
+       void *addr;
+       int error;
+
+       xdma = xchan->xdma;
+
+       error = 0;
+       nsegs = 0;
+
+       switch (xr->req_type) {
+       case XR_TYPE_MBUF:
+               error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
+                   xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
+               break;
+#ifndef __rtems__
+       case XR_TYPE_BIO:
+               slr.nsegs = 0;
+               slr.error = 0;
+               slr.seg = seg;
+               error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
+                   xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+               if (slr.error != 0) {
+                       device_printf(xdma->dma_dev,
+                           "%s: bus_dmamap_load failed, err %d\n",
+                           __func__, slr.error);
+                       return (0);
+               }
+               nsegs = slr.nsegs;
+               break;
+#endif   /* XXX */
+       case XR_TYPE_VIRT:
+               switch (xr->direction) {
+               case XDMA_MEM_TO_DEV:
+                       addr = (void *)xr->src_addr;
+                       break;
+               case XDMA_DEV_TO_MEM:
+                       addr = (void *)xr->dst_addr;
+                       break;
+               default:
+                       device_printf(xdma->dma_dev,
+                           "%s: Direction is not supported\n", __func__);
+                       return (0);
+               }
+               slr.nsegs = 0;
+               slr.error = 0;
+               slr.seg = seg;
+               error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
+                   addr, (xr->block_len * xr->block_num),
+                   xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
+               if (slr.error != 0) {
+                       device_printf(xdma->dma_dev,
+                           "%s: bus_dmamap_load failed, err %d\n",
+                           __func__, slr.error);
+                       return (0);
+               }
+               nsegs = slr.nsegs;
+               break;
+       default:
+               break;
+       }
+
+       if (error != 0) {
+               if (error == ENOMEM) {
+                       /*
+                        * Out of memory. Try again later.
+                        * TODO: count errors.
+                        */
+               } else
+                       device_printf(xdma->dma_dev,
+                           "%s: bus_dmamap_load failed with err %d\n",
+                           __func__, error);
+               return (0);
+       }
+
+       if (xr->direction == XDMA_MEM_TO_DEV)
+               bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+                   BUS_DMASYNC_PREWRITE);
+       else
+               bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
+                   BUS_DMASYNC_PREREAD);
+
+       return (nsegs);
+}
+
+static int
+_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
+    struct bus_dma_segment *seg)
+{
+       xdma_controller_t *xdma;
+       struct mbuf *m;
+       uint32_t nsegs;
+
+       xdma = xchan->xdma;
+
+       m = xr->m;
+
+       nsegs = 1;
+
+       switch (xr->req_type) {
+       case XR_TYPE_MBUF:
+               if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0) {
+                       if (xr->direction == XDMA_MEM_TO_DEV)
+                               m_copydata(m, 0, m->m_pkthdr.len,
+                                   (void *)xr->buf.vaddr);
+                       seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
+               } else
+                       seg[0].ds_addr = mtod(m, bus_addr_t);
+               seg[0].ds_len = m->m_pkthdr.len;
+               break;
+       case XR_TYPE_BIO:
+       case XR_TYPE_VIRT:
+       default:
+               panic("implement me\n");
+       }
+
+       return (nsegs);
+}
+
+static int
+xdma_load_data(xdma_channel_t *xchan,
+    struct xdma_request *xr, struct bus_dma_segment *seg)
+{
+       xdma_controller_t *xdma;
+       int error;
+       int nsegs;
+
+       xdma = xchan->xdma;
+
+       error = 0;
+       nsegs = 0;
+
+       if (xchan->caps & XCHAN_CAP_BUSDMA)
+               nsegs = _xdma_load_data_busdma(xchan, xr, seg);
+       else
+               nsegs = _xdma_load_data(xchan, xr, seg);
+       if (nsegs == 0)
+               return (0); /* Try again later. */
+
+       xr->buf.nsegs = nsegs;
+       xr->buf.nsegs_left = nsegs;
+
+       return (nsegs);
+}
+
+static int
+xdma_process(xdma_channel_t *xchan,
+    struct xdma_sglist *sg)
+{
+       struct bus_dma_segment seg[XDMA_MAX_SEG];
+       struct xdma_request *xr;
+       struct xdma_request *xr_tmp;
+       xdma_controller_t *xdma;
+       uint32_t capacity;
+       uint32_t n;
+       uint32_t c;
+       int nsegs;
+       int ret;
+
+       XCHAN_ASSERT_LOCKED(xchan);
+
+       xdma = xchan->xdma;
+
+       n = 0;
+       c = 0;
+
+       ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't get DMA controller capacity.\n", __func__);
+               return (-1);
+       }
+
+       TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
+               switch (xr->req_type) {
+               case XR_TYPE_MBUF:
+                       if ((xchan->caps & XCHAN_CAP_NOSEG) ||
+                           (c > xchan->maxnsegs))
+                               c = xdma_mbuf_defrag(xchan, xr);
+                       break;
+               case XR_TYPE_BIO:
+               case XR_TYPE_VIRT:
+               default:
+                       c = 1;
+               }
+
+               if (capacity <= (c + n)) {
+                       /*
+                        * No space yet available for the entire
+                        * request in the DMA engine.
+                        */
+                       break;
+               }
+
+               if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
+                       /* Sglist is full. */
+                       break;
+               }
+
+               nsegs = xdma_load_data(xchan, xr, seg);
+               if (nsegs == 0)
+                       break;
+
+               xdma_sglist_add(&sg[n], seg, nsegs, xr);
+               n += nsegs;
+
+               QUEUE_IN_LOCK(xchan);
+               TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
+               QUEUE_IN_UNLOCK(xchan);
+
+               QUEUE_PROC_LOCK(xchan);
+               TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
+               QUEUE_PROC_UNLOCK(xchan);
+       }
+
+       return (n);
+}
+
+int
+xdma_queue_submit_sg(xdma_channel_t *xchan)
+{
+       struct xdma_sglist *sg;
+       xdma_controller_t *xdma;
+       uint32_t sg_n;
+       int ret;
+
+       xdma = xchan->xdma;
+       KASSERT(xdma != NULL, ("xdma is NULL"));
+
+       XCHAN_ASSERT_LOCKED(xchan);
+
+       sg = xchan->sg;
+
+       if ((xchan->caps & XCHAN_CAP_NOBUFS) == 0 &&
+          (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't submit a transfer: no bufs\n",
+                   __func__);
+               return (-1);
+       }
+
+       sg_n = xdma_process(xchan, sg);
+       if (sg_n == 0)
+               return (0); /* Nothing to submit */
+
+       /* Now submit sglist to DMA engine driver. */
+       ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
+       if (ret != 0) {
+               device_printf(xdma->dev,
+                   "%s: Can't submit an sglist.\n", __func__);
+               return (-1);
+       }
+
+       return (0);
+}
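
For context, peripheral drivers do not call xdma_queue_submit_sg() directly; they enqueue a request and then call xdma_queue_submit(), which ends up here for scatter-gather channels. A minimal TX-side sketch based on the if_xae.c usage later in this patch (the helper name tx_one_mbuf is illustrative; error handling trimmed):

    static int
    tx_one_mbuf(xdma_channel_t *xchan_tx, struct mbuf **m)
    {
            int error;

            /* Queue the mbuf as a memory-to-device request (src/dst width 4). */
            error = xdma_enqueue_mbuf(xchan_tx, m, 0, 4, 4, XDMA_MEM_TO_DEV);
            if (error != 0)
                    return (error); /* Request queue full; retry later. */

            /* Build the sglist and hand it to the DMA engine driver. */
            xdma_queue_submit(xchan_tx);

            return (0);
    }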
diff --git a/freebsd/sys/dev/xdma/xdma_sglist.c b/freebsd/sys/dev/xdma/xdma_sglist.c
new file mode 100644
index 00000000..8c3c5ab0
--- /dev/null
+++ b/freebsd/sys/dev/xdma/xdma_sglist.c
@@ -0,0 +1,103 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2017-2018 Ruslan Bukin <b...@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+int
+xchan_sglist_alloc(xdma_channel_t *xchan)
+{
+       uint32_t sz;
+
+       if (xchan->flags & XCHAN_SGLIST_ALLOCATED)
+               return (-1);
+
+       sz = (sizeof(struct xdma_sglist) * XDMA_SGLIST_MAXLEN);
+       xchan->sg = malloc(sz, M_XDMA, M_WAITOK | M_ZERO);
+       xchan->flags |= XCHAN_SGLIST_ALLOCATED;
+
+       return (0);
+}
+
+void
+xchan_sglist_free(xdma_channel_t *xchan)
+{
+
+       if (xchan->flags & XCHAN_SGLIST_ALLOCATED)
+               free(xchan->sg, M_XDMA);
+
+       xchan->flags &= ~XCHAN_SGLIST_ALLOCATED;
+}
+
+int
+xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
+    uint32_t nsegs, struct xdma_request *xr)
+{
+       int i;
+
+       if (nsegs == 0)
+               return (-1);
+
+       for (i = 0; i < nsegs; i++) {
+               sg[i].src_width = xr->src_width;
+               sg[i].dst_width = xr->dst_width;
+
+               if (xr->direction == XDMA_MEM_TO_DEV) {
+                       sg[i].src_addr = seg[i].ds_addr;
+                       sg[i].dst_addr = xr->dst_addr;
+               } else {
+                       sg[i].src_addr = xr->src_addr;
+                       sg[i].dst_addr = seg[i].ds_addr;
+               }
+               sg[i].len = seg[i].ds_len;
+               sg[i].direction = xr->direction;
+
+               sg[i].first = 0;
+               sg[i].last = 0;
+       }
+
+       sg[0].first = 1;
+       sg[nsegs - 1].last = 1;
+
+       return (0);
+}
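
A worked example of the output of xdma_sglist_add() may help: first/last mark frame boundaries per request, not per segment, and the AXI DMA driver below translates them into the TXSOF/TXEOF descriptor control bits. Illustrative values for one MEM_TO_DEV mbuf that bus_dma split into three segments, added at sg[0]:

    /*
     *   i  src_addr        dst_addr      len    first  last
     *   0  seg[0].ds_addr  xr->dst_addr  1024   1      0
     *   1  seg[1].ds_addr  xr->dst_addr   430   0      0
     *   2  seg[2].ds_addr  xr->dst_addr    60   0      1
     */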
diff --git a/freebsd/sys/dev/xilinx/axidma.c b/freebsd/sys/dev/xilinx/axidma.c
new file mode 100644
index 00000000..00ae2f1d
--- /dev/null
+++ b/freebsd/sys/dev/xilinx/axidma.c
@@ -0,0 +1,666 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Xilinx AXI DMA controller driver. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/local/opt_platform.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+#include <dev/xilinx/axidma.h>
+
+#include <rtems/bsd/local/xdma_if.h>
+
+#define AXIDMA_DEBUG
+#undef AXIDMA_DEBUG
+
+#ifdef __rtems__
+#include <sys/endian.h>
+#endif
+
+#ifdef AXIDMA_DEBUG
+#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+#define        AXIDMA_NCHANNELS        2
+#define        AXIDMA_DESCS_NUM        512
+#define        AXIDMA_TX_CHAN          0
+#define        AXIDMA_RX_CHAN          1
+
+extern struct bus_space memmap_bus;
+
+struct axidma_fdt_data {
+       int id;
+};
+
+struct axidma_channel {
+       struct axidma_softc     *sc;
+       xdma_channel_t          *xchan;
+       bool                    used;
+       int                     idx_head;
+       int                     idx_tail;
+
+       struct axidma_desc      **descs;
+       vm_paddr_t              *descs_phys;
+       uint32_t                descs_num;
+
+       vm_size_t               mem_size;
+       vm_offset_t             mem_paddr;
+       vm_offset_t             mem_vaddr;
+
+       uint32_t                descs_used_count;
+};
+
+struct axidma_softc {
+       device_t                dev;
+       struct resource         *res[3];
+       bus_space_tag_t         bst;
+       bus_space_handle_t      bsh;
+       void                    *ih[2];
+       struct axidma_desc      desc;
+       struct axidma_channel   channels[AXIDMA_NCHANNELS];
+};
+
+static struct resource_spec axidma_spec[] = {
+       { SYS_RES_MEMORY,       0,      RF_ACTIVE },
+       { SYS_RES_IRQ,          0,      RF_ACTIVE },
+       { SYS_RES_IRQ,          1,      RF_ACTIVE },
+       { -1, 0 }
+};
+
+#define        HWTYPE_NONE     0
+#define        HWTYPE_STD      1
+
+static struct ofw_compat_data compat_data[] = {
+       { "xlnx,eth-dma",       HWTYPE_STD },
+       { NULL,                 HWTYPE_NONE },
+};
+
+static int axidma_probe(device_t dev);
+static int axidma_attach(device_t dev);
+static int axidma_detach(device_t dev);
+
+static inline uint32_t
+axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
+{
+
+       return ((curidx + 1) % chan->descs_num);
+}
+
+static void
+axidma_intr(struct axidma_softc *sc,
+    struct axidma_channel *chan)
+{
+       xdma_transfer_status_t status;
+       xdma_transfer_status_t st;
+       struct axidma_fdt_data *data;
+       xdma_controller_t *xdma;
+       struct axidma_desc *desc;
+       struct xdma_channel *xchan;
+       uint32_t tot_copied;
+       int pending;
+       int errors;
+
+       xchan = chan->xchan;
+       xdma = xchan->xdma;
+       data = xdma->data;
+
+       pending = READ4(sc, AXI_DMASR(data->id));
+       WRITE4(sc, AXI_DMASR(data->id), pending);
+
+       errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
+                       | DMASR_DMADECOREERR | DMASR_SGINTERR
+                       | DMASR_SGSLVERR | DMASR_SGDECERR));
+
+       dprintf("%s: AXI_DMASR %x\n", __func__,
+           READ4(sc, AXI_DMASR(data->id)));
+       dprintf("%s: AXI_CURDESC %x\n", __func__,
+           READ4(sc, AXI_CURDESC(data->id)));
+       dprintf("%s: AXI_TAILDESC %x\n", __func__,
+           READ4(sc, AXI_TAILDESC(data->id)));
+
+       tot_copied = 0;
+
+       while (chan->idx_tail != chan->idx_head) {
+               desc = chan->descs[chan->idx_tail];
+               if ((desc->status & BD_STATUS_CMPLT) == 0)
+                       break;
+
+               st.error = errors;
+               st.transferred = desc->status & BD_CONTROL_LEN_M;
+               tot_copied += st.transferred;
+               xchan_seg_done(xchan, &st);
+
+               chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
+               atomic_subtract_int(&chan->descs_used_count, 1);
+       }
+
+       /* Finish operation */
+       status.error = errors;
+       status.transferred = tot_copied;
+       xdma_callback(chan->xchan, &status);
+}
+
+static void
+axidma_intr_rx(void *arg)
+{
+       struct axidma_softc *sc;
+       struct axidma_channel *chan;
+
+       dprintf("%s\n", __func__);
+
+       sc = arg;
+       chan = &sc->channels[AXIDMA_RX_CHAN];
+
+       axidma_intr(sc, chan);
+}
+
+static void
+axidma_intr_tx(void *arg)
+{
+       struct axidma_softc *sc;
+       struct axidma_channel *chan;
+
+       dprintf("%s\n", __func__);
+
+       sc = arg;
+       chan = &sc->channels[AXIDMA_TX_CHAN];
+
+       axidma_intr(sc, chan);
+}
+
+static int
+axidma_reset(struct axidma_softc *sc, int chan_id)
+{
+       int timeout;
+
+       WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);
+
+       timeout = 100;
+       do {
+               if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
+                       break;
+       } while (timeout--);
+
+       dprintf("timeout %d\n", timeout);
+
+       if (timeout == 0)
+               return (-1);
+
+       dprintf("%s: %d read control after reset: %x\n",
+           __func__, chan_id, READ4(sc, AXI_DMACR(chan_id)));
+
+       return (0);
+}
+
+static int
+axidma_probe(device_t dev)
+{
+       int hwtype;
+
+       if (!ofw_bus_status_okay(dev))
+               return (ENXIO);
+
+       hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+       if (hwtype == HWTYPE_NONE)
+               return (ENXIO);
+
+       device_set_desc(dev, "Xilinx AXI DMA");
+
+       return (BUS_PROBE_DEFAULT);
+}
+
+static int
+axidma_attach(device_t dev)
+{
+       struct axidma_softc *sc;
+       phandle_t xref, node;
+       int err;
+
+       sc = device_get_softc(dev);
+       sc->dev = dev;
+
+       if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
+               device_printf(dev, "could not allocate resources.\n");
+               return (ENXIO);
+       }
+
+       /* CSR memory interface */
+       sc->bst = rman_get_bustag(sc->res[0]);
+       sc->bsh = rman_get_bushandle(sc->res[0]);
+
+       /* Setup interrupt handler */
+       err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
+           NULL, axidma_intr_tx, sc, &sc->ih[0]);
+       if (err) {
+               device_printf(dev, "Unable to alloc interrupt resource.\n");
+               return (ENXIO);
+       }
+
+       /* Setup interrupt handler */
+       err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
+           NULL, axidma_intr_rx, sc, &sc->ih[1]);
+       if (err) {
+               device_printf(dev, "Unable to alloc interrupt resource.\n");
+               return (ENXIO);
+       }
+
+       node = ofw_bus_get_node(dev);
+       xref = OF_xref_from_node(node);
+       OF_device_register_xref(xref, dev);
+
+       return (0);
+}
+
+static int
+axidma_detach(device_t dev)
+{
+       struct axidma_softc *sc;
+
+       sc = device_get_softc(dev);
+
+       bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
+       bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
+       bus_release_resources(dev, axidma_spec, sc->res);
+
+       return (0);
+}
+
+static int
+axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
+{
+       struct xdma_channel *xchan;
+       int nsegments;
+
+       nsegments = chan->descs_num;
+       xchan = chan->xchan;
+
+       free(chan->descs, M_DEVBUF);
+       free(chan->descs_phys, M_DEVBUF);
+#ifndef __rtems__
+       pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
+       kva_free(chan->mem_vaddr, chan->mem_size);
+#endif
+       vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);
+
+       return (0);
+}
+
+static int
+axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
+    uint32_t desc_size)
+{
+       struct axidma_channel *chan;
+       int nsegments;
+       int i;
+
+       chan = (struct axidma_channel *)xchan->chan;
+       nsegments = chan->descs_num;
+
+       chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
+           M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (chan->descs == NULL) {
+               device_printf(sc->dev,
+                   "%s: Can't allocate memory.\n", __func__);
+               return (-1);
+       }
+
+       chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
+           M_DEVBUF, M_NOWAIT | M_ZERO);
+       chan->mem_size = desc_size * nsegments;
+       if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
+           &chan->mem_paddr)) {
+               device_printf(sc->dev, "Failed to allocate memory.\n");
+               return (-1);
+       }
+#ifndef __rtems__
+       chan->mem_vaddr = kva_alloc(chan->mem_size);
+       pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);
+#else
+       chan->mem_vaddr = calloc(1, chan->mem_size);
+#endif
+
+       device_printf(sc->dev, "Allocated chunk %lx %d\n",
+           chan->mem_paddr, chan->mem_size);
+
+       for (i = 0; i < nsegments; i++) {
+               chan->descs[i] = (struct axidma_desc *)
+                   ((uint64_t)chan->mem_vaddr + desc_size * i);
+               chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
+       }
+
+       return (0);
+}
+
+static int
+axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
+{
+       xdma_controller_t *xdma;
+       struct axidma_fdt_data *data;
+       struct axidma_channel *chan;
+       struct axidma_softc *sc;
+
+       sc = device_get_softc(dev);
+
+       if (xchan->caps & XCHAN_CAP_BUSDMA) {
+               device_printf(sc->dev,
+                   "Error: busdma operation is not implemented.");
+               return (-1);
+       }
+
+       xdma = xchan->xdma;
+       data = xdma->data;
+
+       chan = &sc->channels[data->id];
+       if (chan->used == false) {
+               if (axidma_reset(sc, data->id) != 0)
+                       return (-1);
+               chan->xchan = xchan;
+               xchan->chan = (void *)chan;
+               chan->sc = sc;
+               chan->used = true;
+               chan->idx_head = 0;
+               chan->idx_tail = 0;
+               chan->descs_used_count = 0;
+               chan->descs_num = AXIDMA_DESCS_NUM;
+
+               return (0);
+       }
+
+       return (-1);
+}
+
+static int
+axidma_channel_free(device_t dev, struct xdma_channel *xchan)
+{
+       struct axidma_channel *chan;
+       struct axidma_softc *sc;
+
+       sc = device_get_softc(dev);
+
+       chan = (struct axidma_channel *)xchan->chan;
+
+       axidma_desc_free(sc, chan);
+
+       chan->used = false;
+
+       return (0);
+}
+
+static int
+axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
+    uint32_t *capacity)
+{
+       struct axidma_channel *chan;
+       uint32_t c;
+
+       chan = (struct axidma_channel *)xchan->chan;
+
+       /* At least one descriptor must be left empty. */
+       c = (chan->descs_num - chan->descs_used_count - 1);
+
+       *capacity = c;
+
+       return (0);
+}
+
+static int
+axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
+    struct xdma_sglist *sg, uint32_t sg_n)
+{
+       xdma_controller_t *xdma;
+       struct axidma_fdt_data *data;
+       struct axidma_channel *chan;
+       struct axidma_desc *desc;
+       struct axidma_softc *sc;
+       uint32_t src_addr;
+       uint32_t dst_addr;
+       uint32_t addr;
+       uint32_t len;
+       uint32_t tmp;
+       int i;
+       int tail;
+
+       dprintf("%s: sg_n %d\n", __func__, sg_n);
+
+       sc = device_get_softc(dev);
+
+       chan = (struct axidma_channel *)xchan->chan;
+       xdma = xchan->xdma;
+       data = xdma->data;
+
+       if (sg_n == 0)
+               return (0);
+
+       tail = chan->idx_head;
+
+       tmp = 0;
+
+       for (i = 0; i < sg_n; i++) {
+               src_addr = (uint32_t)sg[i].src_addr;
+               dst_addr = (uint32_t)sg[i].dst_addr;
+               len = (uint32_t)sg[i].len;
+
+               dprintf("%s(%d): src %x dst %x len %d\n", __func__,
+                   data->id, src_addr, dst_addr, len);
+
+               desc = chan->descs[chan->idx_head];
+               if (sg[i].direction == XDMA_MEM_TO_DEV)
+                       desc->phys = src_addr;
+               else
+                       desc->phys = dst_addr;
+               desc->status = 0;
+               desc->control = len;
+               if (sg[i].first == 1)
+                       desc->control |= BD_CONTROL_TXSOF;
+               if (sg[i].last == 1)
+                       desc->control |= BD_CONTROL_TXEOF;
+
+               tmp = chan->idx_head;
+
+               atomic_add_int(&chan->descs_used_count, 1);
+               chan->idx_head = axidma_next_desc(chan, chan->idx_head);
+       }
+
+       dprintf("%s(%d): _curdesc(msb) %x\n", __func__, data->id,
+           READ8(sc, AXI_CURDESC_MSB(data->id)));
+       dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
+           READ8(sc, AXI_CURDESC(data->id)));
+       dprintf("%s(%d): status %x\n", __func__, data->id,
+           READ4(sc, AXI_DMASR(data->id)));
+
+       addr = chan->descs_phys[tmp];
+       WRITE8(sc, AXI_TAILDESC(data->id), addr);
+
+       return (0);
+}
+
+static int
+axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
+{
+       xdma_controller_t *xdma;
+       struct axidma_fdt_data *data;
+       struct axidma_channel *chan;
+       struct axidma_desc *desc;
+       struct axidma_softc *sc;
+       uint32_t addr;
+       uint32_t reg;
+       int ret;
+       int i;
+
+       sc = device_get_softc(dev);
+
+       chan = (struct axidma_channel *)xchan->chan;
+       xdma = xchan->xdma;
+       data = xdma->data;
+
+       dprintf("%s(%d)\n", __func__, data->id);
+
+       ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
+       if (ret != 0) {
+               device_printf(sc->dev,
+                   "%s: Can't allocate descriptors.\n", __func__);
+               return (-1);
+       }
+
+       for (i = 0; i < chan->descs_num; i++) {
+               desc = chan->descs[i];
+               bzero(desc, sizeof(struct axidma_desc));
+
+               if (i == (chan->descs_num - 1))
+                       desc->next = chan->descs_phys[0];
+               else
+                       desc->next = chan->descs_phys[i + 1];
+               desc->status = 0;
+               desc->control = 0;
+#ifndef __rtems__
+               dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
+                   data->id, i, (uint64_t)desc, le32toh(desc->next));
+#else
+               dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
+                   data->id, i, desc, le32toh(desc->next));
+#endif
+       }
+
+       addr = chan->descs_phys[0];
+       WRITE8(sc, AXI_CURDESC(data->id), addr);
+
+       reg = READ4(sc, AXI_DMACR(data->id));
+       reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
+       WRITE4(sc, AXI_DMACR(data->id), reg);
+       reg |= DMACR_RS;
+       WRITE4(sc, AXI_DMACR(data->id), reg);
+#ifdef __rtems__
+       dprintf("%s: %d read control: %x\n",
+           __func__, data->id, READ4(sc, AXI_DMACR(data->id)));
+#endif
+       return (0);
+}
+
+static int
+axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
+{
+       struct axidma_channel *chan;
+       struct axidma_softc *sc;
+
+       sc = device_get_softc(dev);
+
+       chan = (struct axidma_channel *)xchan->chan;
+
+       switch (cmd) {
+       case XDMA_CMD_BEGIN:
+       case XDMA_CMD_TERMINATE:
+       case XDMA_CMD_PAUSE:
+               /* TODO: implement me */
+               return (-1);
+       }
+
+       return (0);
+}
+
+#ifdef FDT
+static int
+axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
+{
+       struct axidma_fdt_data *data;
+
+       if (ncells != 1)
+               return (-1);
+
+       data = malloc(sizeof(struct axidma_fdt_data),
+           M_DEVBUF, (M_WAITOK | M_ZERO));
+       data->id = cells[0];
+
+       *ptr = data;
+
+       return (0);
+}
+#endif
+
+static device_method_t axidma_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe,                 axidma_probe),
+       DEVMETHOD(device_attach,                axidma_attach),
+       DEVMETHOD(device_detach,                axidma_detach),
+
+       /* xDMA Interface */
+       DEVMETHOD(xdma_channel_alloc,           axidma_channel_alloc),
+       DEVMETHOD(xdma_channel_free,            axidma_channel_free),
+       DEVMETHOD(xdma_channel_control,         axidma_channel_control),
+
+       /* xDMA SG Interface */
+       DEVMETHOD(xdma_channel_capacity,        axidma_channel_capacity),
+       DEVMETHOD(xdma_channel_prep_sg,         axidma_channel_prep_sg),
+       DEVMETHOD(xdma_channel_submit_sg,       axidma_channel_submit_sg),
+
+#ifdef FDT
+       DEVMETHOD(xdma_ofw_md_data,             axidma_ofw_md_data),
+#endif
+
+       DEVMETHOD_END
+};
+
+static driver_t axidma_driver = {
+       "axidma",
+       axidma_methods,
+       sizeof(struct axidma_softc),
+};
+
+static devclass_t axidma_devclass;
+
+EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, axidma_devclass, 0, 0,
+    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
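
A note on the descriptor ring bookkeeping above: idx_head and idx_tail wrap modulo descs_num, and axidma_channel_capacity() always keeps one buffer descriptor unused so that idx_head == idx_tail can only mean an empty ring. With the defaults in this file the arithmetic looks like this (the in-flight count is illustrative):

    /*
     * descs_num        = AXIDMA_DESCS_NUM = 512
     * descs_used_count = 100                   (BDs currently in flight)
     * capacity         = 512 - 100 - 1 = 411   (free BDs reported back to xdma_sg.c)
     *
     * axidma_next_desc() advances an index as (curidx + 1) % 512,
     * so index 511 wraps back to 0.
     */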
diff --git a/freebsd/sys/dev/xilinx/axidma.h b/freebsd/sys/dev/xilinx/axidma.h
new file mode 100644
index 00000000..8e5d8387
--- /dev/null
+++ b/freebsd/sys/dev/xilinx/axidma.h
@@ -0,0 +1,96 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_XILINX_AXIDMA_H_
+#define _DEV_XILINX_AXIDMA_H_
+
+#define        AXI_DMACR(n)            (0x00 + 0x30 * (n)) /* DMA Control register */
+#define         DMACR_RS               (1 << 0) /* Run / Stop. */
+#define         DMACR_RESET            (1 << 2) /* Soft reset the AXI DMA core. */
+#define         DMACR_IOC_IRQEN        (1 << 12) /* Interrupt on Complete (IOC) Interrupt Enable. */
+#define         DMACR_DLY_IRQEN        (1 << 13) /* Interrupt on Delay Timer Interrupt Enable. */
+#define         DMACR_ERR_IRQEN        (1 << 14) /* Interrupt on Error Interrupt Enable. */
+#define        AXI_DMASR(n)            (0x04 + 0x30 * (n)) /* DMA Status register */
+#define         DMASR_HALTED           (1 << 0)
+#define         DMASR_IDLE             (1 << 1)
+#define         DMASR_SGINCLD          (1 << 3) /* Scatter Gather Enabled */
+#define         DMASR_DMAINTERR        (1 << 4) /* DMA Internal Error. */
+#define         DMASR_DMASLVERR        (1 << 5) /* DMA Slave Error. */
+#define         DMASR_DMADECOREERR     (1 << 6) /* Decode Error. */
+#define         DMASR_SGINTERR         (1 << 8) /* Scatter Gather Internal Error. */
+#define         DMASR_SGSLVERR         (1 << 9) /* Scatter Gather Slave Error. */
+#define         DMASR_SGDECERR         (1 << 10) /* Scatter Gather Decode Error. */
+#define         DMASR_IOC_IRQ          (1 << 12) /* Interrupt on Complete. */
+#define         DMASR_DLY_IRQ          (1 << 13) /* Interrupt on Delay. */
+#define         DMASR_ERR_IRQ          (1 << 14) /* Interrupt on Error. */
+#define        AXI_CURDESC(n)          (0x08 + 0x30 * (n)) /* Current Descriptor Pointer. Lower 32 bits of the address. */
+#define        AXI_CURDESC_MSB(n)      (0x0C + 0x30 * (n)) /* Current Descriptor Pointer. Upper 32 bits of address. */
+#define        AXI_TAILDESC(n)         (0x10 + 0x30 * (n)) /* Tail Descriptor Pointer. Lower 32 bits. */
+#define        AXI_TAILDESC_MSB(n)     (0x14 + 0x30 * (n)) /* Tail Descriptor Pointer. Upper 32 bits of address. */
+#define        AXI_SG_CTL              0x2C /* Scatter/Gather User and Cache */
+
+#define        READ4(_sc, _reg)        \
+       bus_space_read_4(_sc->bst, _sc->bsh, _reg)
+#define        WRITE4(_sc, _reg, _val) \
+       bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
+#define        READ8(_sc, _reg)        \
+       bus_space_read_8(_sc->bst, _sc->bsh, _reg)
+#define        WRITE8(_sc, _reg, _val) \
+       bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)
+
+struct axidma_desc {
+       uint32_t next;
+       uint32_t reserved1;
+       uint32_t phys;
+       uint32_t reserved2;
+       uint32_t reserved3;
+       uint32_t reserved4;
+       uint32_t control;
+#define        BD_CONTROL_TXSOF        (1 << 27) /* Start of Frame. */
+#define        BD_CONTROL_TXEOF        (1 << 26) /* End of Frame. */
+#define        BD_CONTROL_LEN_S        0       /* Buffer Length. */
+#define        BD_CONTROL_LEN_M        (0x3ffffff << BD_CONTROL_LEN_S)
+       uint32_t status;
+#define        BD_STATUS_CMPLT         (1 << 31)
+#define        BD_STATUS_TRANSFERRED_S 0
+#define        BD_STATUS_TRANSFERRED_M (0x7fffff << BD_STATUS_TRANSFERRED_S)
+       uint32_t app0;
+       uint32_t app1;
+       uint32_t app2;
+       uint32_t app3;
+       uint32_t app4;
+       uint32_t reserved[3];
+};
+
+#endif /* !_DEV_XILINX_AXIDMA_H_ */
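
For orientation, the per-channel macros above use a 0x30 stride, so the two channels this driver uses (AXIDMA_TX_CHAN = 0, AXIDMA_RX_CHAN = 1 in axidma.c) resolve to:

    /*
     *   AXI_DMACR(0)    = 0x00        AXI_DMACR(1)    = 0x30
     *   AXI_DMASR(0)    = 0x04        AXI_DMASR(1)    = 0x34
     *   AXI_CURDESC(0)  = 0x08        AXI_CURDESC(1)  = 0x38
     *   AXI_TAILDESC(0) = 0x10        AXI_TAILDESC(1) = 0x40
     *
     * i.e. channel 0 lines up with the MM2S (TX) block and channel 1
     * with the S2MM (RX) block of the AXI DMA register map.
     */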
diff --git a/freebsd/sys/dev/xilinx/if_xae.c b/freebsd/sys/dev/xilinx/if_xae.c
new file mode 100644
index 00000000..0582a588
--- /dev/null
+++ b/freebsd/sys/dev/xilinx/if_xae.c
@@ -0,0 +1,1111 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+
+#include <machine/bus.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+#include <dev/mii/tiphy.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/xilinx/if_xaereg.h>
+#include <dev/xilinx/if_xaevar.h>
+
+#include <rtems/bsd/local/miibus_if.h>
+
+#define        READ4(_sc, _reg) \
+       bus_read_4((_sc)->res[0], _reg)
+#define        WRITE4(_sc, _reg, _val) \
+       bus_write_4((_sc)->res[0], _reg, _val)
+
+#define        READ8(_sc, _reg) \
+       bus_read_8((_sc)->res[0], _reg)
+#define        WRITE8(_sc, _reg, _val) \
+       bus_write_8((_sc)->res[0], _reg, _val)
+
+#define        XAE_LOCK(sc)                    mtx_lock(&(sc)->mtx)
+#define        XAE_UNLOCK(sc)                  mtx_unlock(&(sc)->mtx)
+#define        XAE_ASSERT_LOCKED(sc)           mtx_assert(&(sc)->mtx, MA_OWNED)
+#define        XAE_ASSERT_UNLOCKED(sc)         mtx_assert(&(sc)->mtx, MA_NOTOWNED)
+
+#define XAE_DEBUG
+#undef XAE_DEBUG
+#ifdef __rtems__
+#define DEBUG_MII
+#undef DEBUG_MII
+#endif
+
+#ifdef XAE_DEBUG
+#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+#define        RX_QUEUE_SIZE           64
+#define        TX_QUEUE_SIZE           64
+#define        NUM_RX_MBUF             16
+#define        BUFRING_SIZE            8192
+#define        MDIO_CLK_DIV_DEFAULT    29
+
+#define        PHY1_RD(sc, _r)         \
+       xae_miibus_read_reg(sc->dev, 1, _r)
+#define        PHY1_WR(sc, _r, _v)     \
+       xae_miibus_write_reg(sc->dev, 1, _r, _v)
+
+#define        PHY_RD(sc, _r)          \
+       xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
+#define        PHY_WR(sc, _r, _v)      \
+       xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
+
+/* Use this macro to access regs > 0x1f */
+#define WRITE_TI_EREG(sc, reg, data) {                                 \
+       PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);                       \
+       PHY_WR(sc, MII_MMDAADR, reg);                                   \
+       PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);   \
+       PHY_WR(sc, MII_MMDAADR, data);                                  \
+}
+
+/* Not documented, Xilinx VCU118 workaround */
+#define         CFG4_SGMII_TMR                 0x160 /* bits 8:7 MUST be '10' */
+#define        DP83867_SGMIICTL1               0xD3 /* not documented register */
+#define         SGMIICTL1_SGMII_6W             (1 << 14) /* no idea what it is */
+
+static struct resource_spec xae_spec[] = {
+       { SYS_RES_MEMORY,       0,      RF_ACTIVE },
+       { SYS_RES_IRQ,          0,      RF_ACTIVE },
+       { -1, 0 }
+};
+
+static void xae_stop_locked(struct xae_softc *sc);
+static void xae_setup_rxfilter(struct xae_softc *sc);
+
+static int
+xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
+{
+       struct mbuf *m;
+       int i;
+
+       for (i = 0; i < n; i++) {
+               m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+               if (m == NULL) {
+                       device_printf(sc->dev,
+                           "%s: Can't alloc rx mbuf\n", __func__);
+                       return (-1);
+               }
+
+               m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
+               xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
+       }
+
+       return (0);
+}
+
+static int
+xae_get_phyaddr(phandle_t node, int *phy_addr)
+{
+       phandle_t phy_node;
+       pcell_t phy_handle, phy_reg;
+
+       if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
+           sizeof(phy_handle)) <= 0)
+               return (ENXIO);
+
+       phy_node = OF_node_from_xref(phy_handle);
+
+       if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
+           sizeof(phy_reg)) <= 0)
+               return (ENXIO);
+
+       *phy_addr = phy_reg;
+
+       return (0);
+}
+
+static int
+xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
+{
+       xdma_transfer_status_t st;
+       struct xae_softc *sc;
+       struct ifnet *ifp;
+       struct mbuf *m;
+       int err;
+
+#ifdef __rtems__
+       dprintf("%s\n", __func__);
+#endif
+       sc = arg;
+
+       XAE_LOCK(sc);
+
+       ifp = sc->ifp;
+
+       for (;;) {
+               err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
+               if (err != 0) {
+                       break;
+               }
+
+               if (st.error != 0) {
+                       if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+               }
+
+               m_freem(m);
+       }
+
+       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+       XAE_UNLOCK(sc);
+
+       return (0);
+}
+
+static int
+xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
+{
+       xdma_transfer_status_t st;
+       struct xae_softc *sc;
+       struct ifnet *ifp;
+       struct mbuf *m;
+       int err;
+       uint32_t cnt_processed;
+
+       sc = arg;
+
+       dprintf("%s\n", __func__);
+
+       XAE_LOCK(sc);
+
+       ifp = sc->ifp;
+
+       cnt_processed = 0;
+       for (;;) {
+               err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
+               if (err != 0) {
+                       break;
+               }
+               cnt_processed++;
+
+               if (st.error != 0) {
+                       if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+                       m_freem(m);
+                       continue;
+               }
+
+               m->m_pkthdr.len = m->m_len = st.transferred;
+               m->m_pkthdr.rcvif = ifp;
+               XAE_UNLOCK(sc);
+               (*ifp->if_input)(ifp, m);
+               XAE_LOCK(sc);
+       }
+
+       xae_rx_enqueue(sc, cnt_processed);
+
+       XAE_UNLOCK(sc);
+
+       return (0);
+}
+
+static void
+xae_qflush(struct ifnet *ifp)
+{
+       struct xae_softc *sc;
+
+       sc = ifp->if_softc;
+}
+
+static int
+xae_transmit_locked(struct ifnet *ifp)
+{
+       struct xae_softc *sc;
+       struct mbuf *m;
+       struct buf_ring *br;
+       int error;
+       int enq;
+
+       dprintf("%s\n", __func__);
+
+       sc = ifp->if_softc;
+       br = sc->br;
+
+       enq = 0;
+
+       while ((m = drbr_peek(ifp, br)) != NULL) {
+               error = xdma_enqueue_mbuf(sc->xchan_tx,
+                   &m, 0, 4, 4, XDMA_MEM_TO_DEV);
+               if (error != 0) {
+                       /* No space in request queue available yet. */
+                       drbr_putback(ifp, br, m);
+                       break;
+               }
+
+               drbr_advance(ifp, br);
+
+               enq++;
+
+               /* If anyone is interested give them a copy. */
+               ETHER_BPF_MTAP(ifp, m);
+        }
+
+       if (enq > 0)
+               xdma_queue_submit(sc->xchan_tx);
+
+       return (0);
+}
+
+static int
+xae_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+       struct xae_softc *sc;
+       int error;
+
+       dprintf("%s\n", __func__);
+
+       sc = ifp->if_softc;
+
+       XAE_LOCK(sc);
+
+       error = drbr_enqueue(ifp, sc->br, m);
+       if (error) {
+               XAE_UNLOCK(sc);
+               return (error);
+       }
+
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING) {
+               XAE_UNLOCK(sc);
+               return (0);
+       }
+
+       if (!sc->link_is_up) {
+               XAE_UNLOCK(sc);
+               return (0);
+       }
+
+       error = xae_transmit_locked(ifp);
+
+       XAE_UNLOCK(sc);
+
+       return (error);
+}
+
+static void
+xae_stop_locked(struct xae_softc *sc)
+{
+       struct ifnet *ifp;
+       uint32_t reg;
+
+       XAE_ASSERT_LOCKED(sc);
+
+       ifp = sc->ifp;
+       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       callout_stop(&sc->xae_callout);
+
+       /* Stop the transmitter */
+       reg = READ4(sc, XAE_TC);
+       reg &= ~TC_TX;
+       WRITE4(sc, XAE_TC, reg);
+
+       /* Stop the receiver. */
+       reg = READ4(sc, XAE_RCW1);
+       reg &= ~RCW1_RX;
+       WRITE4(sc, XAE_RCW1, reg);
+}
+
+static uint64_t
+xae_stat(struct xae_softc *sc, int counter_id)
+{
+       uint64_t new, old;
+       uint64_t delta;
+
+       KASSERT(counter_id < XAE_MAX_COUNTERS,
+               ("counter %d is out of range", counter_id));
+
+       new = READ8(sc, XAE_STATCNT(counter_id));
+       old = sc->counters[counter_id];
+
+       if (new >= old)
+               delta = new - old;
+       else
+               delta = UINT64_MAX - old + new;
+       sc->counters[counter_id] = new;
+       return (delta);
+}
+
+static void
+xae_harvest_stats(struct xae_softc *sc)
+{
+       struct ifnet *ifp;
+
+       ifp = sc->ifp;
+
+       if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
+       if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
+       if_inc_counter(ifp, IFCOUNTER_IERRORS,
+           xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
+           xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
+           xae_stat(sc, RX_ALIGNMENT_ERRORS));
+
+       if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
+       if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
+       if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
+       if_inc_counter(ifp, IFCOUNTER_OERRORS,
+           xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
+
+       if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
+           xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
+           xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
+           xae_stat(sc, TX_LATE_COLLISIONS) +
+           xae_stat(sc, TX_EXCESS_COLLISIONS));
+}
+
+static void
+xae_tick(void *arg)
+{
+       struct xae_softc *sc;
+       struct ifnet *ifp;
+       int link_was_up;
+
+       sc = arg;
+
+       XAE_ASSERT_LOCKED(sc);
+
+       ifp = sc->ifp;
+
+       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+               return;
+
+       /* Gather stats from hardware counters. */
+       xae_harvest_stats(sc);
+
+       /* Check the media status. */
+       link_was_up = sc->link_is_up;
+       mii_tick(sc->mii_softc);
+       if (sc->link_is_up && !link_was_up)
+               xae_transmit_locked(sc->ifp);
+
+       /* Schedule another check one second from now. */
+       callout_reset(&sc->xae_callout, hz, xae_tick, sc);
+}
+
+static void
+xae_init_locked(struct xae_softc *sc)
+{
+       struct ifnet *ifp;
+
+       XAE_ASSERT_LOCKED(sc);
+
+       ifp = sc->ifp;
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+               return;
+
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+       xae_setup_rxfilter(sc);
+
+       /* Enable the transmitter */
+       WRITE4(sc, XAE_TC, TC_TX);
+
+       /* Enable the receiver. */
+       WRITE4(sc, XAE_RCW1, RCW1_RX);
+
+       /*
+        * Call mii_mediachg() which will call back into xae_miibus_statchg()
+        * to set up the remaining config registers based on current media.
+        */
+       mii_mediachg(sc->mii_softc);
+       callout_reset(&sc->xae_callout, hz, xae_tick, sc);
+}
+
+static void
+xae_init(void *arg)
+{
+       struct xae_softc *sc;
+
+       sc = arg;
+
+       XAE_LOCK(sc);
+       xae_init_locked(sc);
+       XAE_UNLOCK(sc);
+}
+
+static void
+xae_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
+{
+       struct xae_softc *sc;
+       struct mii_data *mii;
+
+       sc = ifp->if_softc;
+       mii = sc->mii_softc;
+       XAE_LOCK(sc);
+       mii_pollstat(mii);
+       ifmr->ifm_active = mii->mii_media_active;
+       ifmr->ifm_status = mii->mii_media_status;
+       XAE_UNLOCK(sc);
+}
+
+static int
+xae_media_change_locked(struct xae_softc *sc)
+{
+
+       return (mii_mediachg(sc->mii_softc));
+}
+
+static int
+xae_media_change(struct ifnet * ifp)
+{
+       struct xae_softc *sc;
+       int error;
+
+       sc = ifp->if_softc;
+
+       XAE_LOCK(sc);
+       error = xae_media_change_locked(sc);
+       XAE_UNLOCK(sc);
+
+       return (error);
+}
+
+static void
+xae_setup_rxfilter(struct xae_softc *sc)
+{
+       struct ifmultiaddr *ifma;
+       struct ifnet *ifp;
+       uint32_t reg;
+       uint8_t *ma;
+       int i;
+
+#ifdef __rtems__
+       dprintf("%s\n", __func__);
+#endif
+       XAE_ASSERT_LOCKED(sc);
+
+       ifp = sc->ifp;
+
+       /*
+        * Set the multicast (group) filter hash.
+        */
+       if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
+               reg = READ4(sc, XAE_FFC);
+               reg |= FFC_PM;
+               WRITE4(sc, XAE_FFC, reg);
+       } else {
+               reg = READ4(sc, XAE_FFC);
+               reg &= ~FFC_PM;
+               WRITE4(sc, XAE_FFC, reg);
+
+               if_maddr_rlock(ifp);
+
+               i = 0;
+               CK_STAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
+                       if (ifma->ifma_addr->sa_family != AF_LINK)
+                               continue;
+
+                       if (i >= XAE_MULTICAST_TABLE_SIZE)
+                               break;
+
+                       ma = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+
+                       reg = READ4(sc, XAE_FFC) & 0xffffff00;
+                       reg |= i++;
+                       WRITE4(sc, XAE_FFC, reg);
+
+                       reg = (ma[0]);
+                       reg |= (ma[1] << 8);
+                       reg |= (ma[2] << 16);
+                       reg |= (ma[3] << 24);
+                       WRITE4(sc, XAE_FFV(0), reg);
+
+                       reg = ma[4];
+                       reg |= ma[5] << 8;
+                       WRITE4(sc, XAE_FFV(1), reg);
+               }
+               if_maddr_runlock(ifp);
+       }
+
+       /*
+        * Set the primary address.
+        */
+       reg = sc->macaddr[0];
+       reg |= (sc->macaddr[1] << 8);
+       reg |= (sc->macaddr[2] << 16);
+       reg |= (sc->macaddr[3] << 24);
+       WRITE4(sc, XAE_UAW0, reg);
+
+       reg = sc->macaddr[4];
+       reg |= (sc->macaddr[5] << 8);
+       WRITE4(sc, XAE_UAW1, reg);
+}
+
+static int
+xae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+       struct xae_softc *sc;
+       struct mii_data *mii;
+       struct ifreq *ifr;
+       int mask, error;
+
+       sc = ifp->if_softc;
+       ifr = (struct ifreq *)data;
+
+       error = 0;
+       switch (cmd) {
+       case SIOCSIFFLAGS:
+               XAE_LOCK(sc);
+               if (ifp->if_flags & IFF_UP) {
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                               if ((ifp->if_flags ^ sc->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI))
+                                       xae_setup_rxfilter(sc);
+                       } else {
+                               if (!sc->is_detaching)
+                                       xae_init_locked(sc);
+                       }
+               } else {
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+                               xae_stop_locked(sc);
+               }
+               sc->if_flags = ifp->if_flags;
+               XAE_UNLOCK(sc);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       XAE_LOCK(sc);
+                       xae_setup_rxfilter(sc);
+                       XAE_UNLOCK(sc);
+               }
+               break;
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               mii = sc->mii_softc;
+               error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+               break;
+       case SIOCSIFCAP:
+               mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+               if (mask & IFCAP_VLAN_MTU) {
+                       /* No work to do except acknowledge the change took */
+                       ifp->if_capenable ^= IFCAP_VLAN_MTU;
+               }
+               break;
+
+       default:
+               error = ether_ioctl(ifp, cmd, data);
+               break;
+       }
+
+       return (error);
+}
+
+static void
+xae_intr(void *arg)
+{
+
+}
+
+static int
+xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
+{
+       phandle_t node;
+       int len;
+
+       node = ofw_bus_get_node(sc->dev);
+
+       /* Check if there is property */
+       if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
+               return (EINVAL);
+
+       if (len != ETHER_ADDR_LEN)
+               return (EINVAL);
+
+       OF_getprop(node, "local-mac-address", hwaddr,
+           ETHER_ADDR_LEN);
+
+       return (0);
+}
+
+static int
+mdio_wait(struct xae_softc *sc)
+{
+       uint32_t reg;
+       int timeout;
+
+       timeout = 200;
+
+       do {
+               reg = READ4(sc, XAE_MDIO_CTRL);
+               if (reg & MDIO_CTRL_READY)
+                       break;
+               DELAY(1);
+       } while (timeout--);
+
+       if (timeout <= 0) {
+               printf("Failed to get MDIO ready\n");
+               return (1);
+       }
+
+       return (0);
+}
+
+static int
+xae_miibus_read_reg(device_t dev, int phy, int reg)
+{
+       struct xae_softc *sc;
+       uint32_t mii;
+       int rv;
+
+       sc = device_get_softc(dev);
+
+       if (mdio_wait(sc))
+               return (0);
+
+       mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
+       mii |= (reg << MDIO_TX_REGAD_S);
+       mii |= (phy << MDIO_TX_PHYAD_S);
+
+       WRITE4(sc, XAE_MDIO_CTRL, mii);
+
+       if (mdio_wait(sc))
+               return (0);
+
+       rv = READ4(sc, XAE_MDIO_READ);
+
+#if defined(__rtems__) && defined(DEBUG_MII)
+       dprintf("%s: reg: %d phy %d val 0x%x\n", __func__, reg, phy, rv);
+#endif
+       return (rv);
+}
+
+static int
+xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
+{
+       struct xae_softc *sc;
+       uint32_t mii;
+
+       sc = device_get_softc(dev);
+
+       if (mdio_wait(sc))
+               return (1);
+
+       mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
+       mii |= (reg << MDIO_TX_REGAD_S);
+       mii |= (phy << MDIO_TX_PHYAD_S);
+
+       WRITE4(sc, XAE_MDIO_WRITE, val);
+       WRITE4(sc, XAE_MDIO_CTRL, mii);
+
+       if (mdio_wait(sc))
+               return (1);
+
+#if defined(__rtems__) && defined(DEBUG_MII)
+       dprintf("%s: reg: %d phy %d val 0x%x\n", __func__, reg, phy, val);
+#endif
+       return (0);
+}
+
+static void
+xae_phy_fixup(struct xae_softc *sc)
+{
+       uint32_t reg;
+       device_t dev;
+
+       dev = sc->dev;
+
+       do {
+               WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
+               PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);
+
+               reg = PHY_RD(sc, DP83867_CFG2);
+               reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
+               reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
+               reg |= CFG2_INTERRUPT_POLARITY;
+               reg |= CFG2_SPEED_OPT_ENHANCED_EN;
+               reg |= CFG2_SPEED_OPT_10M_EN;
+               PHY_WR(sc, DP83867_CFG2, reg);
+
+               WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
+               PHY_WR(sc, MII_BMCR,
+                   BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
+       } while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);
+
+       do {
+               PHY1_WR(sc, MII_BMCR,
+                   BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
+               DELAY(40000);
+       } while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
+}
+
+static int
+setup_xdma(struct xae_softc *sc)
+{
+       device_t dev;
+       vmem_t *vmem;
+       int error;
+
+       dev = sc->dev;
+       /* Get xDMA controller */   
+       sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
+       if (sc->xdma_tx == NULL) {
+               device_printf(dev, "Could not find DMA controller.\n");
+               return (ENXIO);
+       }
+
+       sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
+       if (sc->xdma_rx == NULL) {
+               device_printf(dev, "Could not find DMA controller.\n");
+               return (ENXIO);
+       }
+       /* Alloc xDMA TX virtual channel. */
+       sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
+       if (sc->xchan_tx == NULL) {
+               device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
+               return (ENXIO);
+       }
+
+       /* Setup interrupt handler. */
+       error = xdma_setup_intr(sc->xchan_tx,
+           xae_xdma_tx_intr, sc, &sc->ih_tx);
+       if (error) {
+               device_printf(sc->dev,
+                   "Can't setup xDMA TX interrupt handler.\n");
+               return (ENXIO);
+       }
+
+       /* Alloc xDMA RX virtual channel. */
+       sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
+       if (sc->xchan_rx == NULL) {
+               device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
+               return (ENXIO);
+       }
+
+       /* Setup interrupt handler. */
+       error = xdma_setup_intr(sc->xchan_rx,
+           xae_xdma_rx_intr, sc, &sc->ih_rx);
+       if (error) {
+               device_printf(sc->dev,
+                   "Can't setup xDMA RX interrupt handler.\n");
+               return (ENXIO);
+       }
+
+       /* Setup bounce buffer */
+       vmem = xdma_get_memory(dev);
+       if (vmem) {
+               xchan_set_memory(sc->xchan_tx, vmem);
+               xchan_set_memory(sc->xchan_rx, vmem);
+       }
+
+       xdma_prep_sg(sc->xchan_tx,
+           TX_QUEUE_SIZE,      /* xchan requests queue size */
+           MCLBYTES,   /* maxsegsize */
+           8,          /* maxnsegs */
+           16,         /* alignment */
+           0,          /* boundary */
+           BUS_SPACE_MAXADDR_32BIT,
+           BUS_SPACE_MAXADDR);
+
+       xdma_prep_sg(sc->xchan_rx,
+           RX_QUEUE_SIZE,      /* xchan requests queue size */
+           MCLBYTES,   /* maxsegsize */
+           1,          /* maxnsegs */
+           16,         /* alignment */
+           0,          /* boundary */
+           BUS_SPACE_MAXADDR_32BIT,
+           BUS_SPACE_MAXADDR);
+
+       return (0);
+}
+
+static int
+xae_probe(device_t dev)
+{
+       if (!ofw_bus_status_okay(dev))
+               return (ENXIO);
+       if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
+               return (ENXIO);
+       device_set_desc(dev, "Xilinx AXI Ethernet");
+
+       return (BUS_PROBE_DEFAULT);
+}
+#define XAE_RAF_NEWFNCENBL_MASK        0x00000800 /**< New function mode */
+
+static int
+xae_attach(device_t dev)
+{
+       struct xae_softc *sc;
+       struct ifnet *ifp;
+       phandle_t node;
+       uint32_t reg;
+       int error;
+
+       sc = device_get_softc(dev);
+       sc->dev = dev;
+       node = ofw_bus_get_node(dev);
+
+       if (setup_xdma(sc) != 0) {
+               device_printf(dev, "Could not setup xDMA.\n");
+               return (ENXIO);
+       }
+
+       mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
+           MTX_NETWORK_LOCK, MTX_DEF);
+
+       sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
+           M_NOWAIT, &sc->mtx);
+       if (sc->br == NULL)
+               return (ENOMEM);
+
+       if (bus_alloc_resources(dev, xae_spec, sc->res)) {
+               device_printf(dev, "could not allocate resources\n");
+               return (ENXIO);
+       }
+
+       /* Memory interface */
+       sc->bst = rman_get_bustag(sc->res[0]);
+       sc->bsh = rman_get_bushandle(sc->res[0]);
+
+       device_printf(sc->dev, "Identification: %x\n",
+           READ4(sc, XAE_IDENT));
+
+       /* Get MAC addr */
+       if (xae_get_hwaddr(sc, sc->macaddr)) {
+               device_printf(sc->dev, "can't get mac\n");
+               return (ENXIO);
+       }
+
+       /* Enable MII clock */
+       reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
+       reg |= MDIO_SETUP_ENABLE;
+       WRITE4(sc, XAE_MDIO_SETUP, reg);
+       if (mdio_wait(sc))
+               return (ENXIO);
+
+       callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
+
+       /* Setup interrupt handler. */
+       error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
+           NULL, xae_intr, sc, &sc->intr_cookie);
+       if (error != 0) {
+               device_printf(dev, "could not setup interrupt handler.\n");
+               return (ENXIO);
+       }
+
+       /* Set up the ethernet interface. */
+       sc->ifp = ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "could not allocate ifp.\n");
+               return (ENXIO);
+       }
+
+       ifp->if_softc = sc;
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_capabilities = IFCAP_VLAN_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+       ifp->if_transmit = xae_transmit;
+       ifp->if_qflush = xae_qflush;
+       ifp->if_ioctl = xae_ioctl;
+       ifp->if_init = xae_init;
+       IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
+       ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
+       IFQ_SET_READY(&ifp->if_snd);
+
+       if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
+               return (ENXIO);
+
+       /* Attach the mii driver. */
+       error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
+           xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
+           MII_OFFSET_ANY, 0);
+       if (error != 0) {
+               device_printf(dev, "PHY attach failed\n");
+               return (ENXIO);
+       }
+       sc->mii_softc = device_get_softc(sc->miibus);
+
+       /* vcu118 PHY workaround; applied unconditionally here. */
+       /* if (OF_getproplen(node, "xlnx,vcu118") >= 0) */
+       xae_phy_fixup(sc);
+
+       /* All ready to run, attach the ethernet interface. */
+       ether_ifattach(ifp, sc->macaddr);
+       sc->is_attached = true;
+
+       xae_rx_enqueue(sc, NUM_RX_MBUF);
+       xdma_queue_submit(sc->xchan_rx);
+
+
+       return (0);
+}
+
+static int
+xae_detach(device_t dev)
+{
+       struct xae_softc *sc;
+       struct ifnet *ifp;
+
+       sc = device_get_softc(dev);
+
+       KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
+           device_get_nameunit(dev)));
+
+       ifp = sc->ifp;
+
+       /* Only cleanup if attach succeeded. */
+       if (device_is_attached(dev)) {
+               XAE_LOCK(sc);
+               xae_stop_locked(sc);
+               XAE_UNLOCK(sc);
+               callout_drain(&sc->xae_callout);
+               ether_ifdetach(ifp);
+       }
+
+       if (sc->miibus != NULL)
+               device_delete_child(dev, sc->miibus);
+
+       if (ifp != NULL)
+               if_free(ifp);
+
+       mtx_destroy(&sc->mtx);
+
+       bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
+
+       bus_release_resources(dev, xae_spec, sc->res);
+
+       xdma_channel_free(sc->xchan_tx);
+       xdma_channel_free(sc->xchan_rx);
+       xdma_put(sc->xdma_tx);
+       xdma_put(sc->xdma_rx);
+
+       return (0);
+}
+
+static void
+xae_miibus_statchg(device_t dev)
+{
+       struct xae_softc *sc;
+       struct mii_data *mii;
+       uint32_t reg;
+
+       /*
+        * Called by the MII bus driver when the PHY establishes
+        * link to set the MAC interface registers.
+        */
+
+       sc = device_get_softc(dev);
+
+       XAE_ASSERT_LOCKED(sc);
+
+       mii = sc->mii_softc;
+
+       if (mii->mii_media_status & IFM_ACTIVE)
+               sc->link_is_up = true;
+       else
+               sc->link_is_up = false;
+
+       switch (IFM_SUBTYPE(mii->mii_media_active)) {
+       case IFM_1000_T:
+       case IFM_1000_SX:
+               reg = SPEED_1000;
+               break;
+       case IFM_100_TX:
+               reg = SPEED_100;
+               break;
+       case IFM_10_T:
+               reg = SPEED_10;
+               break;
+       case IFM_NONE:
+               sc->link_is_up = false;
+               return;
+       default:
+               sc->link_is_up = false;
+               device_printf(dev, "Unsupported media %u\n",
+                   IFM_SUBTYPE(mii->mii_media_active));
+               return;
+       }
+
+       WRITE4(sc, XAE_SPEED, reg);
+}
+
+static device_method_t xae_methods[] = {
+       DEVMETHOD(device_probe,         xae_probe),
+       DEVMETHOD(device_attach,        xae_attach),
+       DEVMETHOD(device_detach,        xae_detach),
+
+       /* MII Interface */
+       DEVMETHOD(miibus_readreg,       xae_miibus_read_reg),
+       DEVMETHOD(miibus_writereg,      xae_miibus_write_reg),
+       DEVMETHOD(miibus_statchg,       xae_miibus_statchg),
+
+       { 0, 0 }
+};
+
+driver_t xae_driver = {
+       "xae",
+       xae_methods,
+       sizeof(struct xae_softc),
+};
+
+static devclass_t xae_devclass;
+
+DRIVER_MODULE(xae, simplebus, xae_driver, xae_devclass, 0, 0);
+DRIVER_MODULE(miibus, xae, miibus_driver, miibus_devclass, 0, 0);
+
+MODULE_DEPEND(xae, ether, 1, 1, 1);
+MODULE_DEPEND(xae, miibus, 1, 1, 1);
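
Note on the MII glue above: once mii_attach() succeeds, PHY drivers reach
xae_miibus_read_reg()/xae_miibus_write_reg() through the MIIBUS kobj
interface resolved by the DEVMETHOD table, not by direct calls. A rough,
illustrative sketch only (the link check below is hypothetical, not part
of the driver):

    /*
     * Sketch: reading the PHY status register through the miibus
     * interface; the framework dispatches MIIBUS_READREG() to
     * xae_miibus_read_reg() on the xae device.
     */
    int bmsr;

    bmsr = MIIBUS_READREG(sc->dev, sc->phy_addr, MII_BMSR);
    if (bmsr & BMSR_LINK)
            device_printf(sc->dev, "PHY reports link\n");
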
diff --git a/freebsd/sys/dev/xilinx/if_xaereg.h b/freebsd/sys/dev/xilinx/if_xaereg.h
new file mode 100644
index 00000000..1c4f0493
--- /dev/null
+++ b/freebsd/sys/dev/xilinx/if_xaereg.h
@@ -0,0 +1,122 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_XILINX_IF_XAE_H_
+#define _DEV_XILINX_IF_XAE_H_
+
+#define        XAE_RAF         0x00000 /* Reset and Address Filter RW */
+#define        XAE_TPF         0x00004 /* Transmit Pause Frame RW */
+#define        XAE_IFGP        0x00008 /* Transmit Inter Frame Gap Adjustment RW */
+#define        XAE_IS          0x0000C /* Interrupt Status register RW */
+#define        XAE_IP          0x00010 /* Interrupt Pending register RO */
+#define        XAE_IE          0x00014 /* Interrupt Enable register RW */
+#define        XAE_TTAG        0x00018 /* Transmit VLAN Tag RW */
+#define        XAE_RTAG        0x0001C /* Receive VLAN Tag  RW */
+#define        XAE_UAWL        0x00020 /* Unicast Address Word Lower RW */
+#define        XAE_UAWU        0x00024 /* Unicast Address Word Upper RW */
+#define        XAE_TPID0       0x00028 /* VLAN TPID Word 0 RW */
+#define        XAE_TPID1       0x0002C /* VLAN TPID Word 1 RW */
+#define        XAE_PPST        0x00030 /* PCS PMA Status register RO */
+#define        XAE_STATCNT(n)  (0x00200 + 0x8 * (n)) /* Statistics Counters RO */
+#define        XAE_RCW0        0x00400 /* Receive Configuration Word 0 Register RW */
+#define        XAE_RCW1        0x00404 /* Receive Configuration Word 1 Register RW */
+#define         RCW1_RX        (1 << 28) /* Receive Enable */
+#define        XAE_TC          0x00408 /* Transmitter Configuration register RW */
+#define         TC_TX          (1 << 28) /* Transmit Enable */
+#define        XAE_FCC         0x0040C /* Flow Control Configuration register RW */
+#define         FCC_FCRX       (1 << 29) /* Flow Control Enable (RX) */
+#define        XAE_SPEED       0x00410 /* MAC Speed Configuration Word RW */
+#define         SPEED_CONF_S   30
+#define         SPEED_10       (0 << SPEED_CONF_S)
+#define         SPEED_100      (1 << SPEED_CONF_S)
+#define         SPEED_1000     (2 << SPEED_CONF_S)
+#define        XAE_RX_MAXFRAME 0x00414 /* RX Max Frame Configuration RW */
+#define        XAE_TX_MAXFRAME 0x00418 /* TX Max Frame Configuration RW */
+#define        XAE_TX_TIMESTMP 0x0041C /* TX timestamp adjust control register RW */
+#define        XAE_IDENT       0x004F8 /* Identification register RO */
+#define        XAE_ABILITY     0x004FC /* Ability register RO */
+#define        XAE_MDIO_SETUP  0x00500 /* MDIO Setup register RW */
+#define         MDIO_SETUP_ENABLE      (1 << 6) /* MDIO Enable */
+#define         MDIO_SETUP_CLK_DIV_S   0 /* Clock Divide */
+#define        XAE_MDIO_CTRL   0x00504 /* MDIO Control RW */
+#define         MDIO_TX_REGAD_S        16 /* This controls the register address being accessed. */
+#define         MDIO_TX_REGAD_M        (0x1f << MDIO_TX_REGAD_S)
+#define         MDIO_TX_PHYAD_S        24 /* This controls the PHY address being accessed. */
+#define         MDIO_TX_PHYAD_M        (0x1f << MDIO_TX_PHYAD_S)
+#define         MDIO_CTRL_TX_OP_S      14 /* Type of access performed. */
+#define         MDIO_CTRL_TX_OP_M      (0x3 << MDIO_CTRL_TX_OP_S)
+#define         MDIO_CTRL_TX_OP_READ   (0x2 << MDIO_CTRL_TX_OP_S)
+#define         MDIO_CTRL_TX_OP_WRITE  (0x1 << MDIO_CTRL_TX_OP_S)
+#define         MDIO_CTRL_INITIATE     (1 << 11) /* Start an MDIO transfer. */
+#define         MDIO_CTRL_READY        (1 << 7) /* MDIO is ready for a new xfer */
+#define        XAE_MDIO_WRITE  0x00508 /* MDIO Write Data RW */
+#define        XAE_MDIO_READ   0x0050C /* MDIO Read Data RO */
+#define        XAE_INT_STATUS  0x00600 /* Interrupt Status Register RW */
+#define        XAE_INT_PEND    0x00610 /* Interrupt Pending Register RO */
+#define        XAE_INT_ENABLE  0x00620 /* Interrupt Enable Register RW */
+#define        XAE_INT_CLEAR   0x00630 /* Interrupt Clear Register RW */
+#define        XAE_UAW0        0x00700 /* Unicast Address Word 0 register (UAW0) RW */
+#define        XAE_UAW1        0x00704 /* Unicast Address Word 1 register (UAW1) RW */
+#define        XAE_FFC         0x00708 /* Frame Filter Control RW */
+#define         FFC_PM         (1 << 31) /* Promiscuous Mode */
+#define        XAE_FFV(n)      (0x00710 + 0x4 * (n)) /* Frame Filter Value RW */
+#define        XAE_FFMV(n)     (0x00750 + 0x4 * (n)) /* Frame Filter Mask Value RW */
+#define        XAE_TX_VLAN(n)  (0x04000 + 0x4 * (n)) /* Transmit VLAN Data Table RW */
+#define        XAE_RX_VLAN(n)  (0x08000 + 0x4 * (n)) /* Receive VLAN Data Table RW */
+#define        XAE_AVB(n)      (0x10000 + 0x4 * (n)) /* Ethernet AVB RW */
+#define        XAE_MAT(n)      (0x20000 + 0x4 * (n)) /* Multicast Address Table RW */
+
+#define        XAE_MULTICAST_TABLE_SIZE        4
+
+/* RX statistical counters. */
+#define        RX_BYTES                        0
+#define        RX_GOOD_FRAMES                  18
+#define        RX_FRAME_CHECK_SEQ_ERROR        19
+#define        RX_GOOD_MCASTS                  21
+#define        RX_LEN_OUT_OF_RANGE             23
+#define        RX_ALIGNMENT_ERRORS             40
+
+/* TX statistical counters. */
+#define        TX_BYTES                        1
+#define        TX_GOOD_FRAMES                  27
+#define        TX_GOOD_MCASTS                  29
+#define        TX_GOOD_UNDERRUN_ERRORS         30
+#define        TX_SINGLE_COLLISION_FRAMES      34
+#define        TX_MULTI_COLLISION_FRAMES       35
+#define        TX_LATE_COLLISIONS              37
+#define        TX_EXCESS_COLLISIONS            38
+
+#define        XAE_MAX_COUNTERS                43
+
+#endif /* _DEV_XILINX_IF_XAE_H_ */
diff --git a/freebsd/sys/dev/xilinx/if_xaevar.h b/freebsd/sys/dev/xilinx/if_xaevar.h
new file mode 100644
index 00000000..30396537
--- /dev/null
+++ b/freebsd/sys/dev/xilinx/if_xaevar.h
@@ -0,0 +1,80 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ruslan Bukin <b...@bsdpad.com>
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory (Department of Computer Science and
+ * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
+ * DARPA SSITH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef        _DEV_XILINX_IF_XAEVAR_H_
+#define        _DEV_XILINX_IF_XAEVAR_H_
+
+#include <dev/xdma/xdma.h>
+
+/*
+ * Driver data and defines.
+ */
+#define        RX_DESC_COUNT   1024
+#define        TX_DESC_COUNT   1024
+
+struct xae_softc {
+       struct resource         *res[2];
+       bus_space_tag_t         bst;
+       bus_space_handle_t      bsh;
+       device_t                dev;
+       uint8_t                 macaddr[ETHER_ADDR_LEN];
+       device_t                miibus;
+       struct mii_data *       mii_softc;
+       struct ifnet            *ifp;
+       int                     if_flags;
+       struct mtx              mtx;
+       void *                  intr_cookie;
+       struct callout          xae_callout;
+       boolean_t               link_is_up;
+       boolean_t               is_attached;
+       boolean_t               is_detaching;
+       int                     phy_addr;
+
+       /* xDMA TX */
+       xdma_controller_t       *xdma_tx;
+       xdma_channel_t          *xchan_tx;
+       void                    *ih_tx;
+
+       /* xDMA RX */
+       xdma_controller_t       *xdma_rx;
+       xdma_channel_t          *xchan_rx;
+       void                    *ih_rx;
+
+       struct buf_ring         *br;
+
+       /* Counters */
+       uint64_t                counters[XAE_MAX_COUNTERS];
+};
+
+#endif /* _DEV_XILINX_IF_XAEVAR_H_ */
diff --git a/freebsd/sys/microblaze/include/machine/in_cksum.h b/freebsd/sys/microblaze/include/machine/in_cksum.h
new file mode 100644
index 00000000..fd34e13d
--- /dev/null
+++ b/freebsd/sys/microblaze/include/machine/in_cksum.h
@@ -0,0 +1,82 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from tahoe:     in_cksum.c      1.2     86/01/05
+ *     from:           @(#)in_cksum.c  1.3 (Berkeley) 1/19/91
+ *     from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define        _MACHINE_IN_CKSUM_H_    1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len)       in_cksum_skip(m, len, 0)
+
+#if defined(IPVERSION) && (IPVERSION == 4)
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+       int __tmpsum;
+       __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+       ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define        in_cksum_update(ip) \
+       do { \
+               int __tmpsum; \
+               __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+               ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+       } while(0)
+
+#endif
+#endif
+
+#ifdef _KERNEL
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *ip);
+#endif
+u_short        in_addword(u_short sum, u_short b);
+u_short        in_pseudo(u_int sum, u_int b, u_int c);
+u_short        in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
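
The in_cksum_update() helper above is the usual incremental-update rule
(cf. RFC 1624) specialized for the common case where only the TTL has been
decremented by one: the TTL occupies the high-order byte of its 16-bit
word, so the stored complement must be increased by 0x100 with end-around
carry. A minimal usage sketch, assuming ip points to a header whose
checksum was valid before the change:

    /* Forwarding-path style TTL decrement without a full recompute. */
    ip->ip_ttl--;           /* lowers the TTL/protocol word by 0x100 */
    in_cksum_update(ip);    /* folds 0x100 back into ip_sum */
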
diff --git a/freebsd/sys/microblaze/microblaze/in_cksum.c b/freebsd/sys/microblaze/microblaze/in_cksum.c
new file mode 100644
index 00000000..22e19d83
--- /dev/null
+++ b/freebsd/sys/microblaze/microblaze/in_cksum.c
@@ -0,0 +1,255 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ * Copyright (c) 1996
+ *     Matt Thomas <m...@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)in_cksum.c  8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ *    (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
+#define REDUCE32                                                         \
+    {                                                                    \
+       q_util.q = sum;                                                   \
+       sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3];      \
+    }
+#define REDUCE16                                                         \
+    {                                                                    \
+       q_util.q = sum;                                                   \
+       l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+       sum = l_util.s[0] + l_util.s[1];                                  \
+       ADDCARRY(sum);                                                    \
+    }
+
+static const u_int32_t in_masks[] = {
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+       /*0 bytes*/ /*1 byte*/  /*2 bytes*/ /*3 bytes*/
+       0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+       0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+       0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+       0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+#else
+       /*0 bytes*/ /*1 byte*/  /*2 bytes*/ /*3 bytes*/
+       0x00000000, 0xFF000000, 0xFFFF0000, 0xFFFFFF00, /* offset 0 */
+       0x00000000, 0x00FF0000, 0x00FFFF00, 0x00FFFFFF, /* offset 1 */
+       0x00000000, 0x0000FF00, 0x0000FFFF, 0x0000FFFF, /* offset 2 */
+       0x00000000, 0x000000FF, 0x000000FF, 0x000000FF, /* offset 3 */
+#endif
+};
+
+union l_util {
+       u_int16_t s[2];
+       u_int32_t l;
+};
+union q_util {
+       u_int16_t s[4];
+       u_int32_t l[2];
+       u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+       const u_int32_t *lw = (const u_int32_t *) buf;
+       u_int64_t sum = 0;
+       u_int64_t prefilled;
+       int offset;
+       union q_util q_util;
+
+       if ((3 & (long) lw) == 0 && len == 20) {
+               sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+               REDUCE32;
+               return sum;
+       }
+
+       if ((offset = 3 & (long) lw) != 0) {
+               const u_int32_t *masks = in_masks + (offset << 2);
+               lw = (u_int32_t *) (((long) lw) - offset);
+               sum = *lw++ & masks[len >= 3 ? 3 : len];
+               len -= 4 - offset;
+               if (len <= 0) {
+                       REDUCE32;
+                       return sum;
+               }
+       }
+#if 0
+       /*
+        * Force to cache line boundary.
+        */
+       offset = 32 - (0x1f & (long) lw);
+       if (offset < 32 && len > offset) {
+               len -= offset;
+               if (4 & offset) {
+                       sum += (u_int64_t) lw[0];
+                       lw += 1;
+               }
+               if (8 & offset) {
+                       sum += (u_int64_t) lw[0] + lw[1];
+                       lw += 2;
+               }
+               if (16 & offset) {
+                       sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+                       lw += 4;
+               }
+       }
+#endif
+       /*
+        * Access prefilling to start the load of the next cache line,
+        * then add the current cache line and save the prefilled value
+        * for the next loop iteration.
+        */
+       prefilled = lw[0];
+       while ((len -= 32) >= 4) {
+               u_int64_t prefilling = lw[8];
+               sum += prefilled + lw[1] + lw[2] + lw[3]
+                       + lw[4] + lw[5] + lw[6] + lw[7];
+               lw += 8;
+               prefilled = prefilling;
+       }
+       if (len >= 0) {
+               sum += prefilled + lw[1] + lw[2] + lw[3]
+                       + lw[4] + lw[5] + lw[6] + lw[7];
+               lw += 8;
+       } else {
+               len += 32;
+       }
+       while ((len -= 16) >= 0) {
+               sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+               lw += 4;
+       }
+       len += 16;
+       while ((len -= 4) >= 0) {
+               sum += (u_int64_t) *lw++;
+       }
+       len += 4;
+       if (len > 0)
+               sum += (u_int64_t) (in_masks[len] & *lw);
+       REDUCE32;
+       return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+       u_int64_t sum = a + b;
+
+       ADDCARRY(sum);
+       return (sum);
+}
+
+u_short
+#ifdef __rtems__
+/* Prototype does not match in FreeBSD code */
+in_pseudo(u_int a, u_int b, u_int c)
+#else
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+#endif
+{
+       u_int64_t sum;
+       union q_util q_util;
+       union l_util l_util;
+
+       sum = (u_int64_t) a + b + c;
+       REDUCE16;
+       return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+       u_int64_t sum = 0;
+       int mlen = 0;
+       int clen = 0;
+       caddr_t addr;
+       union q_util q_util;
+       union l_util l_util;
+
+       len -= skip;
+       for (; skip && m; m = m->m_next) {
+               if (m->m_len > skip) {
+                       mlen = m->m_len - skip;
+                       addr = mtod(m, caddr_t) + skip;
+                       goto skip_start;
+               } else {
+                       skip -= m->m_len;
+               }
+       }
+
+       for (; m && len; m = m->m_next) {
+               if (m->m_len == 0)
+                       continue;
+               mlen = m->m_len;
+               addr = mtod(m, caddr_t);
+skip_start:
+               if (len < mlen)
+                       mlen = len;
+
+               if ((clen ^ (uintptr_t) addr) & 1)
+                       sum += in_cksumdata(addr, mlen) << 8;
+               else
+                       sum += in_cksumdata(addr, mlen);
+
+               clen += mlen;
+               len -= mlen;
+       }
+       REDUCE16;
+       return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+       u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+       union q_util q_util;
+       union l_util l_util;
+       REDUCE16;
+       return (~sum & 0xffff);
+}
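
The exported entry points behave the same as on the other ports. A short
sketch of generating and checking an IPv4 header checksum with the
routines above, assuming a contiguous header ip at the start of the mbuf
chain m and a header length of hlen bytes:

    /* Generate: zero the field, then store the folded complement. */
    ip->ip_sum = 0;
    ip->ip_sum = in_cksum(m, hlen);         /* == in_cksum_skip(m, hlen, 0) */

    /* Verify on input: a correct header sums to zero. */
    if (in_cksum_hdr(ip) != 0) {
            /* corrupted header; the stack drops the packet here */
    }
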
diff --git a/libbsd.py b/libbsd.py
index 7593e9ba..2af1741b 100644
--- a/libbsd.py
+++ b/libbsd.py
@@ -177,6 +177,7 @@ class rtems(builder.Module):
                 'local/ofw_if.c',
                 'local/pcib_if.c',
                 'local/pci_if.c',
+                'local/xdma_if.c',
                 'local/usb_if.c',
                 'local/mmcbus_if.c',
                 'local/mmcbr_if.c',
@@ -1642,6 +1643,39 @@ class dev_nic_e1000(builder.Module):
             mm.generator['source']()
         )
 
+#
+# NIC xilinx
+#
+class dev_nic_xilinx(builder.Module):
+
+    def __init__(self, manager):
+        super(dev_nic_xilinx, self).__init__(manager, type(self).__name__)
+
+    def generate(self):
+        mm = self.manager
+        self.addKernelSpaceHeaderFiles(
+            [
+                'sys/dev/xilinx/if_xaereg.h',
+                'sys/dev/xilinx/if_xaevar.h',
+                'sys/dev/mii/tiphy.h',
+                'sys/dev/xdma/xdma.h',
+                'sys/dev/xilinx/axidma.h',
+            ]
+        )
+        self.addKernelSpaceSourceFiles(
+            [
+                'sys/dev/xilinx/if_xae.c',
+                'sys/dev/xdma/xdma.c',
+                'sys/dev/xdma/xdma_mbuf.c',
+                'sys/dev/xdma/xdma_queue.c',
+                'sys/dev/xdma/xdma_sg.c',
+                'sys/dev/xdma/xdma_bank.c',
+                'sys/dev/xdma/xdma_sglist.c',
+                'sys/dev/xilinx/axidma.c',
+            ],
+            mm.generator['source']()
+        )
+
 #
 # DEC Tulip aka Intel 21143
 #
@@ -5172,6 +5206,7 @@ class in_cksum(builder.Module):
         self.addCPUDependentFreeBSDHeaderFiles(
             [
                 'sys/i386/include/in_cksum.h',
+                'sys/microblaze/include/in_cksum.h',
                 'sys/mips/include/in_cksum.h',
                 'sys/powerpc/include/in_cksum.h',
                 'sys/sparc64/include/in_cksum.h',
@@ -5206,6 +5241,13 @@ class in_cksum(builder.Module):
             ],
             mm.generator['source']()
         )
+        self.addCPUDependentFreeBSDSourceFiles(
+            [ 'microblaze' ],
+            [
+                'sys/microblaze/microblaze/in_cksum.c',
+            ],
+            mm.generator['source']()
+        )
         self.addCPUDependentFreeBSDSourceFiles(
             [ 'powerpc' ],
             [
@@ -5593,6 +5635,7 @@ def load(mm):
     mm.addModule(dev_nic_re(mm))
     mm.addModule(dev_nic_fxp(mm))
     mm.addModule(dev_nic_e1000(mm))
+    mm.addModule(dev_nic_xilinx(mm))
     mm.addModule(dev_nic_dc(mm))
     mm.addModule(dev_nic_smc(mm))
     mm.addModule(dev_nic_broadcomm(mm))
diff --git a/rtemsbsd/include/bsp/nexus-devices.h b/rtemsbsd/include/bsp/nexus-devices.h
index 46df17b4..abbafbad 100644
--- a/rtemsbsd/include/bsp/nexus-devices.h
+++ b/rtemsbsd/include/bsp/nexus-devices.h
@@ -272,6 +272,14 @@ RTEMS_BSD_DRIVER_PC_LEGACY;
 RTEMS_BSD_DRIVER_PCI_DC;
 RTEMS_BSD_DRIVER_UKPHY;
 
-#endif /* LIBBSP_POWERPC_MOTOROLA_POWERPC_BSP_H */
+#elif defined(LIBBSP_MICROBLAZE_FPGA_BSP_H)
+
+RTEMS_BSD_DEFINE_NEXUS_DEVICE(ofwbus, 0, 0, NULL);
+SYSINIT_DRIVER_REFERENCE(simplebus, ofwbus);
+SYSINIT_DRIVER_REFERENCE(xae, simplebus);
+SYSINIT_DRIVER_REFERENCE(axidma, simplebus);
+RTEMS_BSD_DRIVER_E1000PHY;
+
+#endif /* LIBBSP_MICROBLAZE_FPGA_BSP_H */
 
 #endif
diff --git a/rtemsbsd/include/rtems/bsd/local/xdma_if.h b/rtemsbsd/include/rtems/bsd/local/xdma_if.h
new file mode 100644
index 00000000..e5271f60
--- /dev/null
+++ b/rtemsbsd/include/rtems/bsd/local/xdma_if.h
@@ -0,0 +1,144 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ *   xdma_if.m
+ * with
+ *   makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+
+#ifndef _xdma_if_h_
+#define _xdma_if_h_
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_REQUEST() method */
+extern struct kobjop_desc xdma_channel_request_desc;
+/** @brief A function implementing the XDMA_CHANNEL_REQUEST() method */
+typedef int xdma_channel_request_t(device_t dev, struct xdma_channel *xchan,
+                                   struct xdma_request *req);
+
+static __inline int XDMA_CHANNEL_REQUEST(device_t dev,
+                                         struct xdma_channel *xchan,
+                                         struct xdma_request *req)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_request);
+       rc = ((xdma_channel_request_t *) _m)(dev, xchan, req);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_PREP_SG() method */
+extern struct kobjop_desc xdma_channel_prep_sg_desc;
+/** @brief A function implementing the XDMA_CHANNEL_PREP_SG() method */
+typedef int xdma_channel_prep_sg_t(device_t dev, struct xdma_channel *xchan);
+
+static __inline int XDMA_CHANNEL_PREP_SG(device_t dev,
+                                         struct xdma_channel *xchan)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_prep_sg);
+       rc = ((xdma_channel_prep_sg_t *) _m)(dev, xchan);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_CAPACITY() method */
+extern struct kobjop_desc xdma_channel_capacity_desc;
+/** @brief A function implementing the XDMA_CHANNEL_CAPACITY() method */
+typedef int xdma_channel_capacity_t(device_t dev, struct xdma_channel *xchan,
+                                    uint32_t *capacity);
+
+static __inline int XDMA_CHANNEL_CAPACITY(device_t dev,
+                                          struct xdma_channel *xchan,
+                                          uint32_t *capacity)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_capacity);
+       rc = ((xdma_channel_capacity_t *) _m)(dev, xchan, capacity);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_SUBMIT_SG() method */
+extern struct kobjop_desc xdma_channel_submit_sg_desc;
+/** @brief A function implementing the XDMA_CHANNEL_SUBMIT_SG() method */
+typedef int xdma_channel_submit_sg_t(device_t dev, struct xdma_channel *xchan,
+                                     struct xdma_sglist *sg, uint32_t sg_n);
+
+static __inline int XDMA_CHANNEL_SUBMIT_SG(device_t dev,
+                                           struct xdma_channel *xchan,
+                                           struct xdma_sglist *sg,
+                                           uint32_t sg_n)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_submit_sg);
+       rc = ((xdma_channel_submit_sg_t *) _m)(dev, xchan, sg, sg_n);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_OFW_MD_DATA() method */
+extern struct kobjop_desc xdma_ofw_md_data_desc;
+/** @brief A function implementing the XDMA_OFW_MD_DATA() method */
+typedef int xdma_ofw_md_data_t(device_t dev, pcell_t *cells, int ncells,
+                               void **data);
+
+static __inline int XDMA_OFW_MD_DATA(device_t dev, pcell_t *cells, int ncells,
+                                     void **data)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_ofw_md_data);
+       rc = ((xdma_ofw_md_data_t *) _m)(dev, cells, ncells, data);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_ALLOC() method */
+extern struct kobjop_desc xdma_channel_alloc_desc;
+/** @brief A function implementing the XDMA_CHANNEL_ALLOC() method */
+typedef int xdma_channel_alloc_t(device_t dev, struct xdma_channel *xchan);
+
+static __inline int XDMA_CHANNEL_ALLOC(device_t dev, struct xdma_channel *xchan)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_alloc);
+       rc = ((xdma_channel_alloc_t *) _m)(dev, xchan);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_FREE() method */
+extern struct kobjop_desc xdma_channel_free_desc;
+/** @brief A function implementing the XDMA_CHANNEL_FREE() method */
+typedef int xdma_channel_free_t(device_t dev, struct xdma_channel *xchan);
+
+static __inline int XDMA_CHANNEL_FREE(device_t dev, struct xdma_channel *xchan)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_free);
+       rc = ((xdma_channel_free_t *) _m)(dev, xchan);
+       return (rc);
+}
+
+/** @brief Unique descriptor for the XDMA_CHANNEL_CONTROL() method */
+extern struct kobjop_desc xdma_channel_control_desc;
+/** @brief A function implementing the XDMA_CHANNEL_CONTROL() method */
+typedef int xdma_channel_control_t(device_t dev, struct xdma_channel *xchan,
+                                   int cmd);
+
+static __inline int XDMA_CHANNEL_CONTROL(device_t dev,
+                                         struct xdma_channel *xchan, int cmd)
+{
+       kobjop_t _m;
+       int rc;
+       KOBJOPLOOKUP(((kobj_t)dev)->ops,xdma_channel_control);
+       rc = ((xdma_channel_control_t *) _m)(dev, xchan, cmd);
+       return (rc);
+}
+
+#endif /* _xdma_if_h_ */
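
These generated stubs only dispatch; the real work lives in whichever DMA
engine driver registers the methods. For orientation, a sketch of how a
backend advertises the xdma_if methods (the names mirror the axidma driver
added by this patch, but the table below is illustrative only):

    static device_method_t axidma_methods[] = {
            /* Device interface */
            DEVMETHOD(device_probe,           axidma_probe),
            DEVMETHOD(device_attach,          axidma_attach),

            /* xDMA interface */
            DEVMETHOD(xdma_channel_alloc,     axidma_channel_alloc),
            DEVMETHOD(xdma_channel_free,      axidma_channel_free),
            DEVMETHOD(xdma_channel_prep_sg,   axidma_channel_prep_sg),
            DEVMETHOD(xdma_channel_submit_sg, axidma_channel_submit_sg),
            DEVMETHOD(xdma_channel_capacity,  axidma_channel_capacity),
            DEVMETHOD(xdma_ofw_md_data,       axidma_ofw_md_data),

            DEVMETHOD_END
    };
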
diff --git a/rtemsbsd/local/xdma_if.c b/rtemsbsd/local/xdma_if.c
new file mode 100644
index 00000000..574d9598
--- /dev/null
+++ b/rtemsbsd/local/xdma_if.c
@@ -0,0 +1,57 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ *   xdma_if.m
+ * with
+ *   makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/kernel.h>
+#include <sys/kobj.h>
+#include <machine/bus.h>
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/xdma/xdma.h>
+#include <rtems/bsd/local/xdma_if.h>
+
+struct kobjop_desc xdma_channel_request_desc = {
+       0, { &xdma_channel_request_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_prep_sg_desc = {
+       0, { &xdma_channel_prep_sg_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_capacity_desc = {
+       0, { &xdma_channel_capacity_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_submit_sg_desc = {
+       0, { &xdma_channel_submit_sg_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_ofw_md_data_desc = {
+       0, { &xdma_ofw_md_data_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_alloc_desc = {
+       0, { &xdma_channel_alloc_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_free_desc = {
+       0, { &xdma_channel_free_desc, (kobjop_t)kobj_error_method }
+};
+
+struct kobjop_desc xdma_channel_control_desc = {
+       0, { &xdma_channel_control_desc, (kobjop_t)kobj_error_method }
+};
+
diff --git a/rtemsbsd/rtems/rtems-kernel-vmem.c b/rtemsbsd/rtems/rtems-kernel-vmem.c
index f64fbd17..70708902 100644
--- a/rtemsbsd/rtems/rtems-kernel-vmem.c
+++ b/rtemsbsd/rtems/rtems-kernel-vmem.c
@@ -41,6 +41,11 @@
 
 static MALLOC_DEFINE(M_VMEM, "vmem", "VMEM buffers");
 
+int vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
+{
+       return 0;
+}
+
 int
 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
 {
@@ -48,6 +53,18 @@ vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
        return 0;
 }
 
+static int dummy_vmem;
+
+vmem_t *vmem_create(const char *name, vmem_addr_t base,
+    vmem_size_t size, vmem_size_t quantum, vmem_size_t qcache_max, int flags)
+{
+       return ((vmem_t *)&dummy_vmem);
+}
+
+void vmem_destroy(vmem_t *vm)
+{
+}
+
 void
 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
 {
-- 
2.30.2
