From: Srikanth Thokala <srikanth.thok...@intel.com>

Add logic to establish communication with the remote host, implemented
through ring buffer management and MSI/doorbell interrupts.

Cc: Arnd Bergmann <a...@arndb.de>
Cc: Greg Kroah-Hartman <gre...@linuxfoundation.org>
Reviewed-by: Mark Gross <mgr...@linux.intel.com>
Signed-off-by: Mark Gross <mgr...@linux.intel.com>
Signed-off-by: Srikanth Thokala <srikanth.thok...@intel.com>
---
 drivers/misc/xlink-pcie/local_host/Makefile |   2 +
 drivers/misc/xlink-pcie/local_host/core.c   | 806 ++++++++++++++++++++
 drivers/misc/xlink-pcie/local_host/core.h   | 247 ++++++
 drivers/misc/xlink-pcie/local_host/epf.c    | 116 ++-
 drivers/misc/xlink-pcie/local_host/epf.h    |  23 +
 drivers/misc/xlink-pcie/local_host/util.c   | 375 +++++++++
 drivers/misc/xlink-pcie/local_host/util.h   |  70 ++
 drivers/misc/xlink-pcie/local_host/xpcie.h  |  63 ++
 include/linux/xlink_drv_inf.h               |  58 ++
 9 files changed, 1752 insertions(+), 8 deletions(-)
 create mode 100644 drivers/misc/xlink-pcie/local_host/core.c
 create mode 100644 drivers/misc/xlink-pcie/local_host/core.h
 create mode 100644 drivers/misc/xlink-pcie/local_host/util.c
 create mode 100644 drivers/misc/xlink-pcie/local_host/util.h
 create mode 100644 include/linux/xlink_drv_inf.h

diff --git a/drivers/misc/xlink-pcie/local_host/Makefile 
b/drivers/misc/xlink-pcie/local_host/Makefile
index 54fc118e2dd1..28761751d43b 100644
--- a/drivers/misc/xlink-pcie/local_host/Makefile
+++ b/drivers/misc/xlink-pcie/local_host/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_XLINK_PCIE_LH_DRIVER) += mxlk_ep.o
 mxlk_ep-objs := epf.o
 mxlk_ep-objs += dma.o
+mxlk_ep-objs += core.o
+mxlk_ep-objs += util.o
diff --git a/drivers/misc/xlink-pcie/local_host/core.c 
b/drivers/misc/xlink-pcie/local_host/core.c
new file mode 100644
index 000000000000..c67ce2c3067d
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/core.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "epf.h"
+#include "core.h"
+#include "util.h"
+
+static struct xpcie *global_xpcie;
+
+static struct xpcie *intel_xpcie_core_get_by_id(u32 sw_device_id)
+{
+       /* Only one local-host device exists; match against its SW id. */
+       if (sw_device_id != xlink_sw_id)
+               return NULL;
+
+       return global_xpcie;
+}
+
+static int intel_xpcie_map_dma(struct xpcie *xpcie, struct xpcie_buf_desc *bd,
+                              int direction)
+{
+       /* Streaming DMA mappings are owned by the EPC parent device. */
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct device *dev = xpcie_epf->epf->epc->dev.parent;
+
+       bd->phys = dma_map_single(dev, bd->data, bd->length, direction);
+
+       return dma_mapping_error(dev, bd->phys);
+}
+
+static void intel_xpcie_unmap_dma(struct xpcie *xpcie,
+                                 struct xpcie_buf_desc *bd, int direction)
+{
+       /* Undo the mapping created by intel_xpcie_map_dma(). */
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct device *dev = xpcie_epf->epf->epc->dev.parent;
+
+       dma_unmap_single(dev, bd->phys, bd->length, direction);
+}
+
+/*
+ * Publish the TX/RX capability in the shared MMIO region.
+ *
+ * Layout written after struct xpcie_mmio: the capability header, the TX
+ * descriptor ring, then the RX descriptor ring, followed by a NULL
+ * capability header so the host can terminate its capability walk.
+ */
+static void intel_xpcie_set_cap_txrx(struct xpcie *xpcie)
+{
+       size_t tx_len = sizeof(struct xpcie_transfer_desc) *
+                               XPCIE_NUM_TX_DESCS;
+       size_t rx_len = sizeof(struct xpcie_transfer_desc) *
+                               XPCIE_NUM_RX_DESCS;
+       size_t hdr_len = sizeof(struct xpcie_cap_txrx);
+       u32 start = sizeof(struct xpcie_mmio);
+       struct xpcie_cap_txrx *cap;
+       struct xpcie_cap_hdr *hdr;
+       u16 next;
+
+       /* Offset of the terminating NULL capability, past both rings. */
+       next = (u16)(start + hdr_len + tx_len + rx_len);
+       /* Tell the host where the first capability begins. */
+       intel_xpcie_iowrite32(start, xpcie->mmio + XPCIE_MMIO_CAP_OFF);
+       cap = (void *)xpcie->mmio + start;
+       memset(cap, 0, sizeof(struct xpcie_cap_txrx));
+       cap->hdr.id = XPCIE_CAP_TXRX;
+       cap->hdr.next = next;
+       cap->fragment_size = XPCIE_FRAGMENT_SIZE;
+       cap->tx.ring = start + hdr_len;
+       cap->tx.ndesc = XPCIE_NUM_TX_DESCS;
+       cap->rx.ring = start + hdr_len + tx_len;
+       cap->rx.ndesc = XPCIE_NUM_RX_DESCS;
+
+       /* Terminate the capability list. */
+       hdr = (struct xpcie_cap_hdr *)((void *)xpcie->mmio + next);
+       hdr->id = XPCIE_CAP_NULL;
+}
+
+/*
+ * Tear down the data streams: wake any sleeping readers/writers, zero
+ * every transfer descriptor visible to the host and release the buffer
+ * pools.
+ *
+ * stop_flag is raised and the waitqueues are woken *before* taking the
+ * locks so blocked intel_xpcie_core_read()/write() callers bail out and
+ * drop them; only then can cleanup proceed under both mutexes.
+ */
+static void intel_xpcie_txrx_cleanup(struct xpcie *xpcie)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct device *dma_dev = xpcie_epf->epf->epc->dev.parent;
+       struct xpcie_interface *inf = &xpcie->interfaces[0];
+       struct xpcie_stream *tx = &xpcie->tx;
+       struct xpcie_stream *rx = &xpcie->rx;
+       struct xpcie_transfer_desc *td;
+       int index;
+
+       xpcie->stop_flag = true;
+       xpcie->no_tx_buffer = false;
+       inf->data_avail = true;
+       wake_up_interruptible(&xpcie->tx_waitq);
+       wake_up_interruptible(&inf->rx_waitq);
+       mutex_lock(&xpcie->wlock);
+       mutex_lock(&inf->rlock);
+
+       /* Invalidate every descriptor in both rings. */
+       for (index = 0; index < rx->pipe.ndesc; index++) {
+               td = rx->pipe.tdr + index;
+               intel_xpcie_set_td_address(td, 0);
+               intel_xpcie_set_td_length(td, 0);
+       }
+       for (index = 0; index < tx->pipe.ndesc; index++) {
+               td = tx->pipe.tdr + index;
+               intel_xpcie_set_td_address(td, 0);
+               intel_xpcie_set_td_length(td, 0);
+       }
+
+       intel_xpcie_list_cleanup(&xpcie->tx_pool);
+       intel_xpcie_list_cleanup(&xpcie->rx_pool);
+
+       /* The TX pool is carved from one coherent area (see txrx_init). */
+       if (xpcie_epf->tx_virt) {
+               dma_free_coherent(dma_dev, xpcie_epf->tx_size,
+                                 xpcie_epf->tx_virt, xpcie_epf->tx_phys);
+       }
+
+       mutex_unlock(&inf->rlock);
+       mutex_unlock(&xpcie->wlock);
+}
+
+/*
+ * The RX/TX are named for Remote Host, in Local Host
+ * RX/TX is reversed.
+ */
+/*
+ * The RX/TX are named for Remote Host, in Local Host
+ * RX/TX is reversed.
+ *
+ * Binds the local streams to the rings advertised in @cap and fills the
+ * RX/TX buffer pools (~32MB each, carved into fragment_size chunks).
+ *
+ * Returns 0 on success or a negative errno (the error from
+ * of_reserved_mem_device_init(), or -ENOMEM on allocation failure).
+ */
+static int intel_xpcie_txrx_init(struct xpcie *xpcie,
+                                struct xpcie_cap_txrx *cap)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct device *dma_dev = xpcie_epf->epf->epc->dev.parent;
+       struct xpcie_stream *tx = &xpcie->tx;
+       struct xpcie_stream *rx = &xpcie->rx;
+       int tx_pool_size, rx_pool_size;
+       struct xpcie_buf_desc *bd;
+       int index, ndesc, rc;
+
+       xpcie->txrx = cap;
+       xpcie->fragment_size = cap->fragment_size;
+       xpcie->stop_flag = false;
+
+       /* Local RX consumes the host's TX ring, and vice versa. */
+       rx->pipe.ndesc = cap->tx.ndesc;
+       rx->pipe.head = &cap->tx.head;
+       rx->pipe.tail = &cap->tx.tail;
+       rx->pipe.tdr = (void *)xpcie->mmio + cap->tx.ring;
+
+       tx->pipe.ndesc = cap->rx.ndesc;
+       tx->pipe.head = &cap->rx.head;
+       tx->pipe.tail = &cap->rx.tail;
+       tx->pipe.tdr = (void *)xpcie->mmio + cap->rx.ring;
+
+       intel_xpcie_list_init(&xpcie->rx_pool);
+       rx_pool_size = roundup(SZ_32M, xpcie->fragment_size);
+       ndesc = rx_pool_size / xpcie->fragment_size;
+
+       /* Initialize reserved memory resources */
+       rc = of_reserved_mem_device_init(dma_dev);
+       if (rc) {
+               dev_err(dma_dev, "Could not get reserved memory\n");
+               goto error;
+       }
+
+       /* Everything past this point can only fail with -ENOMEM. */
+       rc = -ENOMEM;
+
+       for (index = 0; index < ndesc; index++) {
+               bd = intel_xpcie_alloc_bd(xpcie->fragment_size);
+               if (bd) {
+                       intel_xpcie_list_put(&xpcie->rx_pool, bd);
+               } else {
+                       dev_err(xpcie_to_dev(xpcie),
+                               "failed to alloc all rx pool descriptors\n");
+                       goto error;
+               }
+       }
+
+       intel_xpcie_list_init(&xpcie->tx_pool);
+       tx_pool_size = roundup(SZ_32M, xpcie->fragment_size);
+       ndesc = tx_pool_size / xpcie->fragment_size;
+
+       /*
+        * TX buffers come from a single coherent allocation carved into
+        * fragment_size chunks, so no per-buffer mapping is needed later.
+        */
+       xpcie_epf->tx_size = tx_pool_size;
+       xpcie_epf->tx_virt = dma_alloc_coherent(dma_dev,
+                                               xpcie_epf->tx_size,
+                                               &xpcie_epf->tx_phys,
+                                               GFP_KERNEL);
+       if (!xpcie_epf->tx_virt)
+               goto error;
+
+       for (index = 0; index < ndesc; index++) {
+               bd = intel_xpcie_alloc_bd_reuse(xpcie->fragment_size,
+                                               xpcie_epf->tx_virt +
+                                               (index *
+                                                xpcie->fragment_size),
+                                               xpcie_epf->tx_phys +
+                                               (index *
+                                                xpcie->fragment_size));
+               if (bd) {
+                       intel_xpcie_list_put(&xpcie->tx_pool, bd);
+               } else {
+                       dev_err(xpcie_to_dev(xpcie),
+                               "failed to alloc all tx pool descriptors\n");
+                       goto error;
+               }
+       }
+
+       return 0;
+
+error:
+       /*
+        * NOTE(review): memory claimed via of_reserved_mem_device_init()
+        * is not released here or in txrx_cleanup() - confirm whether
+        * of_reserved_mem_device_release() is required on teardown.
+        */
+       intel_xpcie_txrx_cleanup(xpcie);
+
+       return rc;
+}
+
+static int intel_xpcie_discover_txrx(struct xpcie *xpcie)
+{
+       struct xpcie_cap_txrx *cap;
+
+       /* Locate the txrx capability we published and bind to it. */
+       cap = intel_xpcie_cap_find(xpcie, 0, XPCIE_CAP_TXRX);
+       if (!cap) {
+               dev_err(xpcie_to_dev(xpcie), "xpcie txrx info not found\n");
+               return -EIO;
+       }
+
+       return intel_xpcie_txrx_init(xpcie, cap);
+}
+
+static void intel_xpcie_start_tx(struct xpcie *xpcie, unsigned long delay)
+{
+       /*
+        * Use only one WQ for both Rx and Tx
+        *
+        * Synchronous Read and Writes to DDR is found to result in memory
+        * mismatch errors in stability tests due to silicon bug in A0 SoC.
+        */
+       struct workqueue_struct *wq = xpcie->legacy_a0 ? xpcie->rx_wq :
+                                                        xpcie->tx_wq;
+
+       queue_delayed_work(wq, &xpcie->tx_event, delay);
+}
+
+static void intel_xpcie_start_rx(struct xpcie *xpcie, unsigned long delay)
+{
+       /* Rx work always runs on the dedicated ordered rx workqueue. */
+       queue_delayed_work(xpcie->rx_wq, &xpcie->rx_event, delay);
+}
+
+/*
+ * Work handler that moves data from the remote host into local buffers.
+ *
+ * Walks the descriptor ring filled by the host (host TX == local RX),
+ * stages one DMA linked-list entry per descriptor, performs a single
+ * linked-list DMA transfer, then dispatches the filled buffers to their
+ * interface and advances the shared ring head.
+ */
+static void intel_xpcie_rx_event_handler(struct work_struct *work)
+{
+       struct xpcie *xpcie = container_of(work, struct xpcie, rx_event.work);
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct xpcie_buf_desc *bd_head, *bd_tail, *bd;
+       u32 head, tail, ndesc, length, initial_head;
+       unsigned long delay = msecs_to_jiffies(1);
+       struct xpcie_stream *rx = &xpcie->rx;
+       int descs_num = 0, chan = 0, rc;
+       struct xpcie_dma_ll_desc *desc;
+       struct xpcie_transfer_desc *td;
+       bool reset_work = false;
+       u16 interface;
+       u64 address;
+
+       /* Nothing to receive unless the host side is up and running. */
+       if (intel_xpcie_get_host_status(xpcie) != XPCIE_STATUS_RUN)
+               return;
+
+       bd_head = NULL;
+       bd_tail = NULL;
+       ndesc = rx->pipe.ndesc;
+       tail = intel_xpcie_get_tdr_tail(&rx->pipe);
+       initial_head = intel_xpcie_get_tdr_head(&rx->pipe);
+       head = initial_head;
+
+       /* First pass: stage one DMA entry per pending ring descriptor. */
+       while (head != tail) {
+               td = rx->pipe.tdr + head;
+
+               bd = intel_xpcie_alloc_rx_bd(xpcie);
+               if (!bd) {
+                       /*
+                        * Out of rx buffers: reschedule ourselves; back off
+                        * longer when nothing at all could be staged.
+                        */
+                       reset_work = true;
+                       if (descs_num == 0) {
+                               delay = msecs_to_jiffies(10);
+                               goto task_exit;
+                       }
+                       break;
+               }
+
+               interface = intel_xpcie_get_td_interface(td);
+               length = intel_xpcie_get_td_length(td);
+               address = intel_xpcie_get_td_address(td);
+
+               bd->length = length;
+               bd->interface = interface;
+               rc = intel_xpcie_map_dma(xpcie, bd, DMA_FROM_DEVICE);
+               if (rc) {
+                       dev_err(xpcie_to_dev(xpcie),
+                               "failed to map rx bd (%d)\n", rc);
+                       intel_xpcie_free_rx_bd(xpcie, bd);
+                       break;
+               }
+
+               /* Source is the host buffer, destination our mapped bd. */
+               desc = &xpcie_epf->rx_desc_buf[chan].virt[descs_num++];
+               desc->dma_transfer_size = length;
+               desc->dst_addr = bd->phys;
+               desc->src_addr = address;
+
+               /* Keep the staged buffers on a private chain. */
+               if (bd_head)
+                       bd_tail->next = bd;
+               else
+                       bd_head = bd;
+               bd_tail = bd;
+
+               head = XPCIE_CIRCULAR_INC(head, ndesc);
+       }
+
+       if (descs_num == 0)
+               goto task_exit;
+
+       /* One linked-list DMA moves all staged fragments from the host. */
+       rc = intel_xpcie_copy_from_host_ll(xpcie, chan, descs_num);
+
+       /* Unmap everything regardless of the DMA outcome. */
+       bd = bd_head;
+       while (bd) {
+               intel_xpcie_unmap_dma(xpcie, bd, DMA_FROM_DEVICE);
+               bd = bd->next;
+       }
+
+       if (rc) {
+               dev_err(xpcie_to_dev(xpcie),
+                       "failed to DMA from host (%d)\n", rc);
+               intel_xpcie_free_rx_bd(xpcie, bd_head);
+               delay = msecs_to_jiffies(5);
+               reset_work = true;
+               goto task_exit;
+       }
+
+       /* Second pass: ack each descriptor and hand buffers to readers. */
+       head = initial_head;
+       bd = bd_head;
+       while (bd) {
+               td = rx->pipe.tdr + head;
+               bd_head = bd_head->next;
+               bd->next = NULL;
+
+               if (likely(bd->interface < XPCIE_NUM_INTERFACES)) {
+                       intel_xpcie_set_td_status(td,
+                                                 XPCIE_DESC_STATUS_SUCCESS);
+                       intel_xpcie_add_bd_to_interface(xpcie, bd);
+               } else {
+                       dev_err(xpcie_to_dev(xpcie),
+                               "detected rx desc interface failure (%u)\n",
+                               bd->interface);
+                       intel_xpcie_set_td_status(td, XPCIE_DESC_STATUS_ERROR);
+                       intel_xpcie_free_rx_bd(xpcie, bd);
+               }
+
+               bd = bd_head;
+               head = XPCIE_CIRCULAR_INC(head, ndesc);
+       }
+
+       /* Publish the new head and tell the host its data was consumed. */
+       if (head != initial_head) {
+               intel_xpcie_set_tdr_head(&rx->pipe, head);
+               intel_xpcie_raise_irq(xpcie, DATA_RECEIVED);
+       }
+
+task_exit:
+       if (reset_work)
+               intel_xpcie_start_rx(xpcie, delay);
+}
+
+/*
+ * Work handler that pushes queued write buffers to the remote host.
+ *
+ * Fills free slots of the host-owned ring (local TX) with one DMA
+ * linked-list entry per queued buffer, performs a single linked-list
+ * DMA transfer, finalizes the ring descriptors, advances the shared
+ * tail and reschedules itself while work and ring space remain.
+ */
+static void intel_xpcie_tx_event_handler(struct work_struct *work)
+{
+       struct xpcie *xpcie = container_of(work, struct xpcie, tx_event.work);
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct xpcie_buf_desc *bd_head, *bd_tail, *bd;
+       struct xpcie_stream *tx = &xpcie->tx;
+       u32 head, tail, ndesc, initial_tail;
+       struct xpcie_dma_ll_desc *desc;
+       struct xpcie_transfer_desc *td;
+       int descs_num = 0, chan = 0, rc;
+       size_t buffers = 0, bytes = 0;
+       u64 address;
+
+       /* No point transmitting unless the host side is running. */
+       if (intel_xpcie_get_host_status(xpcie) != XPCIE_STATUS_RUN)
+               return;
+
+       bd_head = NULL;
+       bd_tail = NULL;
+       ndesc = tx->pipe.ndesc;
+       initial_tail = intel_xpcie_get_tdr_tail(&tx->pipe);
+       tail = initial_tail;
+       head = intel_xpcie_get_tdr_head(&tx->pipe);
+
+       /* add new entries */
+       while (XPCIE_CIRCULAR_INC(tail, ndesc) != head) {
+               bd = intel_xpcie_list_get(&xpcie->write);
+               if (!bd)
+                       break;
+
+               /* Destination is the host buffer advertised in the ring. */
+               td = tx->pipe.tdr + tail;
+               address = intel_xpcie_get_td_address(td);
+
+               desc = &xpcie_epf->tx_desc_buf[chan].virt[descs_num++];
+               desc->dma_transfer_size = bd->length;
+               desc->src_addr = bd->phys;
+               desc->dst_addr = address;
+
+               /* Track staged buffers on a private chain. */
+               if (bd_head)
+                       bd_tail->next = bd;
+               else
+                       bd_head = bd;
+               bd_tail = bd;
+
+               tail = XPCIE_CIRCULAR_INC(tail, ndesc);
+       }
+
+       if (descs_num == 0)
+               goto task_exit;
+
+       /* One linked-list DMA pushes all staged fragments to the host. */
+       rc = intel_xpcie_copy_to_host_ll(xpcie, chan, descs_num);
+
+       /* On success, finalize length/interface/status in the ring. */
+       tail = initial_tail;
+       bd = bd_head;
+       while (bd) {
+               if (rc) {
+                       bd = bd->next;
+                       continue;
+               }
+
+               td = tx->pipe.tdr + tail;
+               intel_xpcie_set_td_status(td, XPCIE_DESC_STATUS_SUCCESS);
+               intel_xpcie_set_td_length(td, bd->length);
+               intel_xpcie_set_td_interface(td, bd->interface);
+
+               bd = bd->next;
+               tail = XPCIE_CIRCULAR_INC(tail, ndesc);
+       }
+
+       if (rc) {
+               dev_err(xpcie_to_dev(xpcie),
+                       "failed to DMA to host (%d)\n", rc);
+               /* Requeue the untransmitted buffers for a later retry. */
+               intel_xpcie_list_put_head(&xpcie->write, bd_head);
+               return;
+       }
+
+       intel_xpcie_free_tx_bd(xpcie, bd_head);
+
+       /* Publish the new tail and notify the host. */
+       if (intel_xpcie_get_tdr_tail(&tx->pipe) != tail) {
+               intel_xpcie_set_tdr_tail(&tx->pipe, tail);
+               intel_xpcie_raise_irq(xpcie, DATA_SENT);
+       }
+
+task_exit:
+       /* Keep going while writers queued more and ring space remains. */
+       intel_xpcie_list_info(&xpcie->write, &bytes, &buffers);
+       if (buffers) {
+               xpcie->tx_pending = true;
+               head = intel_xpcie_get_tdr_head(&tx->pipe);
+               if (XPCIE_CIRCULAR_INC(tail, ndesc) != head)
+                       intel_xpcie_start_tx(xpcie, 0);
+       } else {
+               xpcie->tx_pending = false;
+       }
+}
+
+/* Host doorbell interrupt: schedule rx/tx work as indicated. */
+static irqreturn_t intel_xpcie_core_irq_cb(int irq, void *args)
+{
+       struct xpcie *xpcie = args;
+
+       /* Host posted new data for us: start receiving. */
+       if (intel_xpcie_get_doorbell(xpcie, TO_DEVICE, DATA_SENT)) {
+               intel_xpcie_set_doorbell(xpcie, TO_DEVICE, DATA_SENT, 0);
+               intel_xpcie_start_rx(xpcie, 0);
+       }
+       /* Host drained data we sent: resume any pending transmit. */
+       if (intel_xpcie_get_doorbell(xpcie, TO_DEVICE, DATA_RECEIVED)) {
+               intel_xpcie_set_doorbell(xpcie, TO_DEVICE, DATA_RECEIVED, 0);
+               if (xpcie->tx_pending)
+                       intel_xpcie_start_tx(xpcie, 0);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* Create the rx (and, on non-A0 silicon, tx) event workqueues. */
+static int intel_xpcie_events_init(struct xpcie *xpcie)
+{
+       xpcie->rx_wq = alloc_ordered_workqueue(XPCIE_DRIVER_NAME,
+                                              WQ_MEM_RECLAIM | WQ_HIGHPRI);
+       if (!xpcie->rx_wq)
+               goto err_rx;
+
+       /* On legacy A0 silicon a single workqueue serves both directions. */
+       if (!xpcie->legacy_a0) {
+               xpcie->tx_wq = alloc_ordered_workqueue(XPCIE_DRIVER_NAME,
+                                                      WQ_MEM_RECLAIM |
+                                                      WQ_HIGHPRI);
+               if (!xpcie->tx_wq)
+                       goto err_tx;
+       }
+
+       INIT_DELAYED_WORK(&xpcie->rx_event, intel_xpcie_rx_event_handler);
+       INIT_DELAYED_WORK(&xpcie->tx_event, intel_xpcie_tx_event_handler);
+
+       return 0;
+
+err_tx:
+       destroy_workqueue(xpcie->rx_wq);
+err_rx:
+       dev_err(xpcie_to_dev(xpcie), "failed to allocate workqueue\n");
+       return -ENOMEM;
+}
+
+static void intel_xpcie_events_cleanup(struct xpcie *xpcie)
+{
+       /* Flush in-flight work before destroying the workqueues. */
+       cancel_delayed_work_sync(&xpcie->rx_event);
+       cancel_delayed_work_sync(&xpcie->tx_event);
+
+       destroy_workqueue(xpcie->rx_wq);
+       /* A dedicated tx workqueue only exists on non-A0 silicon. */
+       if (!xpcie->legacy_a0)
+               destroy_workqueue(xpcie->tx_wq);
+}
+
+/*
+ * Bring up the core transport.
+ *
+ * Publishes the txrx capability in MMIO, creates the event workqueues,
+ * initializes the streams and buffer pools, clears every doorbell and
+ * finally installs the host interrupt callback.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int intel_xpcie_core_init(struct xpcie *xpcie)
+{
+       int error;
+
+       /* Remember the single device instance for id-based lookups. */
+       global_xpcie = xpcie;
+
+       intel_xpcie_set_cap_txrx(xpcie);
+
+       error = intel_xpcie_events_init(xpcie);
+       if (error)
+               return error;
+
+       error = intel_xpcie_discover_txrx(xpcie);
+       if (error)
+               goto error_txrx;
+
+       intel_xpcie_interfaces_init(xpcie);
+
+       /* Start from a clean doorbell state in both directions. */
+       intel_xpcie_set_doorbell(xpcie, TO_DEVICE, DATA_SENT, 0);
+       intel_xpcie_set_doorbell(xpcie, TO_DEVICE, DATA_RECEIVED, 0);
+       intel_xpcie_set_doorbell(xpcie, TO_DEVICE, DEV_EVENT, NO_OP);
+       intel_xpcie_set_doorbell(xpcie, FROM_DEVICE, DATA_SENT, 0);
+       intel_xpcie_set_doorbell(xpcie, FROM_DEVICE, DATA_RECEIVED, 0);
+       intel_xpcie_set_doorbell(xpcie, FROM_DEVICE, DEV_EVENT, NO_OP);
+
+       intel_xpcie_register_host_irq(xpcie, intel_xpcie_core_irq_cb);
+
+       return 0;
+
+error_txrx:
+       intel_xpcie_events_cleanup(xpcie);
+
+       return error;
+}
+
+/* Tear down the transport; a no-op unless it reached the RUN state. */
+void intel_xpcie_core_cleanup(struct xpcie *xpcie)
+{
+       if (xpcie->status == XPCIE_STATUS_RUN) {
+               intel_xpcie_events_cleanup(xpcie);
+               intel_xpcie_interfaces_cleanup(xpcie);
+               intel_xpcie_txrx_cleanup(xpcie);
+       }
+}
+
+/*
+ * Read up to *length bytes from interface 0 into @buffer.
+ *
+ * Blocks until data is available, the timeout expires (timeout_ms != 0;
+ * 0 means wait forever) or a signal arrives. On return, *length holds
+ * the number of bytes actually copied. A partially consumed buffer
+ * descriptor is parked in inf->partial_read for the next call.
+ *
+ * Returns 0 on success, -EINVAL for a zero-length request, -ENODEV if
+ * the transport is not running, -ETIME on timeout, -EINTR when
+ * interrupted or when the transport is stopping.
+ */
+int intel_xpcie_core_read(struct xpcie *xpcie, void *buffer,
+                         size_t *length, u32 timeout_ms)
+{
+       long jiffies_timeout = (long)msecs_to_jiffies(timeout_ms);
+       struct xpcie_interface *inf = &xpcie->interfaces[0];
+       unsigned long jiffies_start = jiffies;
+       struct xpcie_buf_desc *bd;
+       long jiffies_passed = 0;
+       size_t len, remaining;
+       int ret;
+
+       if (*length == 0)
+               return -EINVAL;
+
+       if (xpcie->status != XPCIE_STATUS_RUN)
+               return -ENODEV;
+
+       len = *length;
+       remaining = len;
+       *length = 0;
+
+       ret = mutex_lock_interruptible(&inf->rlock);
+       if (ret < 0)
+               return -EINTR;
+
+       /* rlock is held on loop entry and re-taken after every sleep. */
+       do {
+               while (!inf->data_avail) {
+                       /* Drop the lock while sleeping; error paths below
+                        * return with it already released.
+                        */
+                       mutex_unlock(&inf->rlock);
+                       if (timeout_ms == 0) {
+                               ret =
+                               wait_event_interruptible(inf->rx_waitq,
+                                                        inf->data_avail);
+                       } else {
+                               ret =
+                       wait_event_interruptible_timeout(inf->rx_waitq,
+                                                        inf->data_avail,
+                                                        jiffies_timeout -
+                                                         jiffies_passed);
+                               if (ret == 0)
+                                       return -ETIME;
+                       }
+                       if (ret < 0 || xpcie->stop_flag)
+                               return -EINTR;
+
+                       ret = mutex_lock_interruptible(&inf->rlock);
+                       if (ret < 0)
+                               return -EINTR;
+               }
+
+               /* Resume a partially consumed buffer first, if any. */
+               bd = (inf->partial_read) ? inf->partial_read :
+                                          intel_xpcie_list_get(&inf->read);
+
+               while (remaining && bd) {
+                       size_t bcopy;
+
+                       bcopy = min(remaining, bd->length);
+                       memcpy(buffer, bd->data, bcopy);
+
+                       buffer += bcopy;
+                       remaining -= bcopy;
+                       bd->data += bcopy;
+                       bd->length -= bcopy;
+
+                       /* Buffer fully drained: recycle it, grab the next. */
+                       if (bd->length == 0) {
+                               intel_xpcie_free_rx_bd(xpcie, bd);
+                               bd = intel_xpcie_list_get(&inf->read);
+                       }
+               }
+
+               /* save for next time */
+               inf->partial_read = bd;
+
+               if (!bd)
+                       inf->data_avail = false;
+
+               *length = len - remaining;
+
+               jiffies_passed = (long)jiffies - (long)jiffies_start;
+       } while (remaining > 0 && (jiffies_passed < jiffies_timeout ||
+                                  timeout_ms == 0));
+
+       mutex_unlock(&inf->rlock);
+
+       return 0;
+}
+
+/*
+ * Write up to *length bytes from @buffer to the host via interface 0.
+ *
+ * Data is copied into a chain of tx pool buffers, queued on the write
+ * list and handed to the tx work. Blocks for a free tx buffer until the
+ * timeout expires (timeout_ms != 0; 0 means wait forever) or a signal
+ * arrives. On return, *length holds the number of bytes queued.
+ *
+ * Returns 0 on success, -EINVAL for a zero-length request, -ENODEV if
+ * either side is not running, -ETIME on timeout, -EINTR when
+ * interrupted or when the transport is stopping.
+ */
+int intel_xpcie_core_write(struct xpcie *xpcie, void *buffer,
+                          size_t *length, u32 timeout_ms)
+{
+       long jiffies_timeout = (long)msecs_to_jiffies(timeout_ms);
+       struct xpcie_interface *inf = &xpcie->interfaces[0];
+       unsigned long jiffies_start = jiffies;
+       struct xpcie_buf_desc *bd, *head;
+       long jiffies_passed = 0;
+       size_t remaining, len;
+       int ret;
+
+       if (*length == 0)
+               return -EINVAL;
+
+       if (xpcie->status != XPCIE_STATUS_RUN)
+               return -ENODEV;
+
+       if (intel_xpcie_get_host_status(xpcie) != XPCIE_STATUS_RUN)
+               return -ENODEV;
+
+       len = *length;
+       remaining = len;
+       *length = 0;
+
+       ret = mutex_lock_interruptible(&xpcie->wlock);
+       if (ret < 0)
+               return -EINTR;
+
+       do {
+               /* Wait (lock dropped) until a tx buffer can be had. */
+               bd = intel_xpcie_alloc_tx_bd(xpcie);
+               head = bd;
+               while (!head) {
+                       mutex_unlock(&xpcie->wlock);
+                       if (timeout_ms == 0) {
+                               ret =
+                               wait_event_interruptible(xpcie->tx_waitq,
+                                                        !xpcie->no_tx_buffer);
+                       } else {
+                               ret =
+                       wait_event_interruptible_timeout(xpcie->tx_waitq,
+                                                        !xpcie->no_tx_buffer,
+                                                        jiffies_timeout -
+                                                         jiffies_passed);
+                               if (ret == 0)
+                                       return -ETIME;
+                       }
+                       if (ret < 0 || xpcie->stop_flag)
+                               return -EINTR;
+
+                       ret = mutex_lock_interruptible(&xpcie->wlock);
+                       if (ret < 0)
+                               return -EINTR;
+
+                       bd = intel_xpcie_alloc_tx_bd(xpcie);
+                       head = bd;
+               }
+
+               /* Fill a chain of fragment-sized buffers from @buffer. */
+               while (remaining && bd) {
+                       size_t bcopy;
+
+                       bcopy = min(bd->length, remaining);
+                       memcpy(bd->data, buffer, bcopy);
+
+                       buffer += bcopy;
+                       remaining -= bcopy;
+                       bd->length = bcopy;
+                       bd->interface = inf->id;
+
+                       /* May return NULL mid-chain; outer loop retries. */
+                       if (remaining) {
+                               bd->next = intel_xpcie_alloc_tx_bd(xpcie);
+                               bd = bd->next;
+                       }
+               }
+
+               /* Queue the chain and kick the tx work. */
+               intel_xpcie_list_put(&inf->xpcie->write, head);
+               intel_xpcie_start_tx(xpcie, 0);
+
+               *length = len - remaining;
+
+               jiffies_passed = (long)jiffies - (long)jiffies_start;
+       } while (remaining > 0 && (jiffies_passed < jiffies_timeout ||
+                                  timeout_ms == 0));
+
+       mutex_unlock(&xpcie->wlock);
+
+       return 0;
+}
+
+/* Report the transport status of the device identified by @id. */
+int intel_xpcie_get_device_status_by_id(u32 id, u32 *status)
+{
+       struct xpcie *dev = intel_xpcie_core_get_by_id(id);
+
+       if (!dev)
+               return -ENODEV;
+
+       *status = dev->status;
+
+       return 0;
+}
+
+/*
+ * List the available devices in @id_list and return their count.
+ *
+ * The local host exposes at most one device: itself. It is reported
+ * only once a software device id has been assigned. @id_list must have
+ * room for one entry; a NULL @id_list yields a count of zero.
+ */
+u32 intel_xpcie_get_device_num(u32 *id_list)
+{
+       /* Guard against a NULL output buffer before dereferencing it. */
+       if (!id_list || !xlink_sw_id)
+               return 0;
+
+       *id_list = xlink_sw_id;
+
+       return 1;
+}
+
+/*
+ * Copy the device name for @id into @device_name (at most @name_size
+ * bytes), truncating if necessary.
+ *
+ * Always NUL-terminates the result: the original copied name_size
+ * bytes of the name when the buffer was too small, leaving it
+ * unterminated. Returns 0, -ENODEV for an unknown id, or -EINVAL for
+ * an unusable buffer.
+ */
+int intel_xpcie_get_device_name_by_id(u32 id,
+                                     char *device_name, size_t name_size)
+{
+       struct xpcie *xpcie;
+       size_t len;
+
+       xpcie = intel_xpcie_core_get_by_id(id);
+       if (!xpcie)
+               return -ENODEV;
+
+       if (!device_name || name_size == 0)
+               return -EINVAL;
+
+       memset(device_name, 0, name_size);
+       len = strlen(XPCIE_DRIVER_NAME);
+       /* Leave room for the terminating NUL on truncation. */
+       if (len >= name_size)
+               len = name_size - 1;
+       memcpy(device_name, XPCIE_DRIVER_NAME, len);
+
+       return 0;
+}
+
+/* "Connect" to device @id: succeed only if its transport is running. */
+int intel_xpcie_pci_connect_device(u32 id)
+{
+       struct xpcie *dev = intel_xpcie_core_get_by_id(id);
+
+       if (!dev)
+               return -ENODEV;
+
+       return (dev->status == XPCIE_STATUS_RUN) ? 0 : -EIO;
+}
+
+/* id-based wrapper around intel_xpcie_core_read(). */
+int intel_xpcie_pci_read(u32 id, void *data, size_t *size, u32 timeout)
+{
+       struct xpcie *dev = intel_xpcie_core_get_by_id(id);
+
+       return dev ? intel_xpcie_core_read(dev, data, size, timeout) :
+                    -ENODEV;
+}
+
+/* id-based wrapper around intel_xpcie_core_write(). */
+int intel_xpcie_pci_write(u32 id, void *data, size_t *size, u32 timeout)
+{
+       struct xpcie *dev = intel_xpcie_core_get_by_id(id);
+
+       return dev ? intel_xpcie_core_write(dev, data, size, timeout) :
+                    -ENODEV;
+}
+
+/* Reset is not implemented for the local host; report success. */
+int intel_xpcie_pci_reset_device(u32 id)
+{
+       return 0;
+}
diff --git a/drivers/misc/xlink-pcie/local_host/core.h 
b/drivers/misc/xlink-pcie/local_host/core.h
new file mode 100644
index 000000000000..84985ef41a64
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/core.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*****************************************************************************
+ *
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ ****************************************************************************/
+
+#ifndef XPCIE_CORE_HEADER_
+#define XPCIE_CORE_HEADER_
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/mempool.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/wait.h>
+
+#include <linux/xlink_drv_inf.h>
+
+/* Number of interfaces to statically allocate resources for */
+#define XPCIE_NUM_INTERFACES (1)
+
+/* max should be always power of '2' */
+#define XPCIE_CIRCULAR_INC(val, max) (((val) + 1) & ((max) - 1))
+
+#define XPCIE_FRAGMENT_SIZE    SZ_128K
+
+/* Status encoding of the transfer descriptors */
+#define XPCIE_DESC_STATUS_SUCCESS      (0)
+#define XPCIE_DESC_STATUS_ERROR                (0xFFFF)
+
+/* Layout transfer descriptors used by device and host */
+struct xpcie_transfer_desc {
+       u64 address;
+       u32 length;
+       u16 status;
+       u16 interface;
+} __packed;
+
+struct xpcie_pipe {
+       u32 old;
+       u32 ndesc;
+       u32 *head;
+       u32 *tail;
+       struct xpcie_transfer_desc *tdr;
+};
+
+struct xpcie_buf_desc {
+       struct xpcie_buf_desc *next;
+       void *head;
+       dma_addr_t phys;
+       size_t true_len;
+       void *data;
+       size_t length;
+       int interface;
+       bool own_mem;
+};
+
+struct xpcie_stream {
+       size_t frag;
+       struct xpcie_pipe pipe;
+};
+
+struct xpcie_list {
+       spinlock_t lock; /* list lock */
+       size_t bytes;
+       size_t buffers;
+       struct xpcie_buf_desc *head;
+       struct xpcie_buf_desc *tail;
+};
+
+struct xpcie_interface {
+       int id;
+       struct xpcie *xpcie;
+       struct mutex rlock; /* read lock */
+       struct xpcie_list read;
+       struct xpcie_buf_desc *partial_read;
+       bool data_avail;
+       wait_queue_head_t rx_waitq;
+};
+
+struct xpcie_debug_stats {
+       struct {
+               size_t cnts;
+               size_t bytes;
+       } tx_krn, rx_krn, tx_usr, rx_usr;
+       size_t send_ints;
+       size_t interrupts;
+       size_t rx_event_runs;
+       size_t tx_event_runs;
+};
+
+/* Defined capabilities located in mmio space */
+#define XPCIE_CAP_NULL (0)
+#define XPCIE_CAP_TXRX (1)
+
+#define XPCIE_CAP_TTL (32)
+#define XPCIE_CAP_HDR_ID       (offsetof(struct xpcie_cap_hdr, id))
+#define XPCIE_CAP_HDR_NEXT     (offsetof(struct xpcie_cap_hdr, next))
+
+/* Header at the beginning of each capability to define and link to next */
+struct xpcie_cap_hdr {
+       u16 id;
+       u16 next;
+} __packed;
+
+struct xpcie_cap_pipe {
+       u32 ring;
+       u32 ndesc;
+       u32 head;
+       u32 tail;
+} __packed;
+
+/* Transmit and Receive capability */
+struct xpcie_cap_txrx {
+       struct xpcie_cap_hdr hdr;
+       u32 fragment_size;
+       struct xpcie_cap_pipe tx;
+       struct xpcie_cap_pipe rx;
+} __packed;
+
+/* Emulate a 64-bit MMIO read as two 32-bit reads, low word first.
+ * Not atomic with respect to the peer updating the same location.
+ */
+static inline u64 _ioread64(void __iomem *addr)
+{
+       u64 low, high;
+
+       low = ioread32(addr);
+       high = ioread32(addr + sizeof(u32));
+
+       return low | (high << 32);
+}
+
+/* Emulate a 64-bit MMIO write as two 32-bit writes, low word first.
+ * Not atomic with respect to the peer reading the same location.
+ */
+static inline void _iowrite64(u64 value, void __iomem *addr)
+{
+       iowrite32(value, addr);
+       iowrite32(value >> 32, addr + sizeof(u32));
+}
+
+#define intel_xpcie_iowrite64(value, addr) \
+                       _iowrite64(value, (void __iomem *)addr)
+#define intel_xpcie_iowrite32(value, addr) \
+                       iowrite32(value, (void __iomem *)addr)
+#define intel_xpcie_iowrite16(value, addr) \
+                       iowrite16(value, (void __iomem *)addr)
+#define intel_xpcie_iowrite8(value, addr) \
+                       iowrite8(value, (void __iomem *)addr)
+#define intel_xpcie_ioread64(addr) \
+                       _ioread64((void __iomem *)addr)
+#define intel_xpcie_ioread32(addr) \
+                       ioread32((void __iomem *)addr)
+#define intel_xpcie_ioread16(addr) \
+                       ioread16((void __iomem *)addr)
+#define intel_xpcie_ioread8(addr) \
+                       ioread8((void __iomem *)addr)
+
+static inline
+void intel_xpcie_set_td_address(struct xpcie_transfer_desc *td, u64 address)
+{
+       intel_xpcie_iowrite64(address, &td->address);
+}
+
+static inline
+u64 intel_xpcie_get_td_address(struct xpcie_transfer_desc *td)
+{
+       return intel_xpcie_ioread64(&td->address);
+}
+
+static inline
+void intel_xpcie_set_td_length(struct xpcie_transfer_desc *td, u32 length)
+{
+       intel_xpcie_iowrite32(length, &td->length);
+}
+
+static inline
+u32 intel_xpcie_get_td_length(struct xpcie_transfer_desc *td)
+{
+       return intel_xpcie_ioread32(&td->length);
+}
+
+static inline
+void intel_xpcie_set_td_interface(struct xpcie_transfer_desc *td, u16 
interface)
+{
+       intel_xpcie_iowrite16(interface, &td->interface);
+}
+
+static inline
+u16 intel_xpcie_get_td_interface(struct xpcie_transfer_desc *td)
+{
+       return intel_xpcie_ioread16(&td->interface);
+}
+
+static inline
+void intel_xpcie_set_td_status(struct xpcie_transfer_desc *td, u16 status)
+{
+       intel_xpcie_iowrite16(status, &td->status);
+}
+
+static inline
+u16 intel_xpcie_get_td_status(struct xpcie_transfer_desc *td)
+{
+       return intel_xpcie_ioread16(&td->status);
+}
+
+static inline
+void intel_xpcie_set_tdr_head(struct xpcie_pipe *p, u32 head)
+{
+       intel_xpcie_iowrite32(head, p->head);
+}
+
+static inline
+u32 intel_xpcie_get_tdr_head(struct xpcie_pipe *p)
+{
+       return intel_xpcie_ioread32(p->head);
+}
+
+static inline
+void intel_xpcie_set_tdr_tail(struct xpcie_pipe *p, u32 tail)
+{
+       intel_xpcie_iowrite32(tail, p->tail);
+}
+
+static inline
+u32 intel_xpcie_get_tdr_tail(struct xpcie_pipe *p)
+{
+       return intel_xpcie_ioread32(p->tail);
+}
+
+int intel_xpcie_core_init(struct xpcie *xpcie);
+void intel_xpcie_core_cleanup(struct xpcie *xpcie);
+int intel_xpcie_core_read(struct xpcie *xpcie, void *buffer, size_t *length,
+                         u32 timeout_ms);
+int intel_xpcie_core_write(struct xpcie *xpcie, void *buffer, size_t *length,
+                          u32 timeout_ms);
+u32 intel_xpcie_get_device_num(u32 *id_list);
+struct xpcie_dev *intel_xpcie_get_device_by_id(u32 id);
+int intel_xpcie_get_device_name_by_id(u32 id, char *device_name,
+                                     size_t name_size);
+int intel_xpcie_get_device_status_by_id(u32 id, u32 *status);
+int intel_xpcie_pci_connect_device(u32 id);
+int intel_xpcie_pci_read(u32 id, void *data, size_t *size, u32 timeout);
+int intel_xpcie_pci_write(u32 id, void *data, size_t *size, u32 timeout);
+int intel_xpcie_pci_reset_device(u32 id);
+#endif /* XPCIE_CORE_HEADER_ */
diff --git a/drivers/misc/xlink-pcie/local_host/epf.c 
b/drivers/misc/xlink-pcie/local_host/epf.c
index 7019aecd6a81..7fceed9d2a4f 100644
--- a/drivers/misc/xlink-pcie/local_host/epf.c
+++ b/drivers/misc/xlink-pcie/local_host/epf.c
@@ -7,6 +7,7 @@
 
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 
 #include "epf.h"
 
@@ -19,6 +20,12 @@
 #define PCIE_REGS_PCIE_ERR_INTR_FLAGS  0x24
 #define LINK_REQ_RST_FLG               BIT(15)
 
+#define PCIE_REGS_PCIE_SYS_CFG_CORE    0x7C
+#define PCIE_CFG_PBUS_NUM_OFFSET       8
+#define PCIE_CFG_PBUS_NUM_MASK         0xFF
+#define PCIE_CFG_PBUS_DEV_NUM_OFFSET   16
+#define PCIE_CFG_PBUS_DEV_NUM_MASK     0x1F
+
 static struct pci_epf_header xpcie_header = {
        .vendorid = PCI_VENDOR_ID_INTEL,
        .deviceid = PCI_DEVICE_ID_INTEL_KEEMBAY,
@@ -35,6 +42,45 @@ static const struct pci_epf_device_id xpcie_epf_ids[] = {
        {},
 };
 
+u32 xlink_sw_id;
+
+/* DMA-read @descs_num linked-list descriptors from the host on channel
+ * @chan.  Resolves the endpoint function from the embedded xpcie
+ * handle and delegates to the DMA layer.
+ */
+int intel_xpcie_copy_from_host_ll(struct xpcie *xpcie, int chan, int descs_num)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct pci_epf *epf = xpcie_epf->epf;
+
+       return intel_xpcie_ep_dma_read_ll(epf, chan, descs_num);
+}
+
+/* DMA-write @descs_num linked-list descriptors to the host on channel
+ * @chan.  Mirror of intel_xpcie_copy_from_host_ll().
+ */
+int intel_xpcie_copy_to_host_ll(struct xpcie *xpcie, int chan, int descs_num)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct pci_epf *epf = xpcie_epf->epf;
+
+       return intel_xpcie_ep_dma_write_ll(epf, chan, descs_num);
+}
+
+/* Register the core-layer handler that the host-interrupt routine
+ * invokes for normal (non-event) doorbell traffic.
+ */
+void intel_xpcie_register_host_irq(struct xpcie *xpcie, irq_handler_t func)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+
+       xpcie_epf->core_irq_callback = func;
+}
+
+/* Notify the remote host: latch @type in the FROM_DEVICE doorbell
+ * area, then raise an MSI towards the host via the EPC.
+ */
+int intel_xpcie_raise_irq(struct xpcie *xpcie, enum xpcie_doorbell_type type)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+       struct pci_epf *epf = xpcie_epf->epf;
+
+       intel_xpcie_set_doorbell(xpcie, FROM_DEVICE, type, 1);
+
+       return pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
+}
+
 static irqreturn_t intel_xpcie_err_interrupt(int irq, void *args)
 {
        struct xpcie_epf *xpcie_epf;
@@ -55,6 +101,7 @@ static irqreturn_t intel_xpcie_host_interrupt(int irq, void 
*args)
 {
        struct xpcie_epf *xpcie_epf;
        struct xpcie *xpcie = args;
+       u8 event;
        u32 val;
 
        xpcie_epf = container_of(xpcie, struct xpcie_epf, xpcie);
@@ -62,6 +109,18 @@ static irqreturn_t intel_xpcie_host_interrupt(int irq, void 
*args)
        if (val & LBC_CII_EVENT_FLAG) {
                iowrite32(LBC_CII_EVENT_FLAG,
                          xpcie_epf->apb_base + PCIE_REGS_PCIE_INTR_FLAGS);
+
+               event = intel_xpcie_get_doorbell(xpcie, TO_DEVICE, DEV_EVENT);
+               if (unlikely(event != NO_OP)) {
+                       intel_xpcie_set_doorbell(xpcie, TO_DEVICE,
+                                                DEV_EVENT, NO_OP);
+                       if (event == REQUEST_RESET)
+                               orderly_reboot();
+                       return IRQ_HANDLED;
+               }
+
+               if (likely(xpcie_epf->core_irq_callback))
+                       xpcie_epf->core_irq_callback(irq, xpcie);
        }
 
        return IRQ_HANDLED;
@@ -231,6 +290,7 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
        struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
        const struct pci_epc_features *features;
        struct pci_epc *epc = epf->epc;
+       u32 bus_num, dev_num;
        struct device *dev;
        size_t align = SZ_16K;
        int ret;
@@ -260,12 +320,12 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
 
        if (!strcmp(xpcie_epf->stepping, "A0")) {
                xpcie_epf->xpcie.legacy_a0 = true;
-               iowrite32(1, (void __iomem *)xpcie_epf->xpcie.mmio +
-                            XPCIE_MMIO_LEGACY_A0);
+               intel_xpcie_iowrite32(1, xpcie_epf->xpcie.mmio +
+                                        XPCIE_MMIO_LEGACY_A0);
        } else {
                xpcie_epf->xpcie.legacy_a0 = false;
-               iowrite32(0, (void __iomem *)xpcie_epf->xpcie.mmio +
-                            XPCIE_MMIO_LEGACY_A0);
+               intel_xpcie_iowrite32(0, xpcie_epf->xpcie.mmio +
+                                        XPCIE_MMIO_LEGACY_A0);
        }
 
        /* Enable interrupt */
@@ -290,13 +350,46 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
        ret = intel_xpcie_ep_dma_init(epf);
        if (ret) {
                dev_err(&epf->dev, "DMA initialization failed\n");
-               goto err_free_err_irq;
+               goto err_cleanup_bars;
        }
 
+       intel_xpcie_set_device_status(&xpcie_epf->xpcie, XPCIE_STATUS_READY);
+
+       ret = ioread32(xpcie_epf->apb_base + PCIE_REGS_PCIE_SYS_CFG_CORE);
+       bus_num = (ret >> PCIE_CFG_PBUS_NUM_OFFSET) & PCIE_CFG_PBUS_NUM_MASK;
+       dev_num = (ret >> PCIE_CFG_PBUS_DEV_NUM_OFFSET) &
+                       PCIE_CFG_PBUS_DEV_NUM_MASK;
+
+       xlink_sw_id = FIELD_PREP(XLINK_DEV_INF_TYPE_MASK,
+                                XLINK_DEV_INF_PCIE) |
+                     FIELD_PREP(XLINK_DEV_PHYS_ID_MASK,
+                                bus_num << 8 | dev_num) |
+                     FIELD_PREP(XLINK_DEV_TYPE_MASK, XLINK_DEV_TYPE_KMB) |
+                     FIELD_PREP(XLINK_DEV_PCIE_ID_MASK, XLINK_DEV_PCIE_0) |
+                     FIELD_PREP(XLINK_DEV_FUNC_MASK, XLINK_DEV_FUNC_VPU);
+
+       ret = intel_xpcie_core_init(&xpcie_epf->xpcie);
+       if (ret) {
+               dev_err(&epf->dev, "Core component configuration failed\n");
+               goto err_uninit_dma;
+       }
+
+       intel_xpcie_iowrite32(XPCIE_STATUS_UNINIT,
+                             xpcie_epf->xpcie.mmio + XPCIE_MMIO_HOST_STATUS);
+       intel_xpcie_set_device_status(&xpcie_epf->xpcie, XPCIE_STATUS_RUN);
+       intel_xpcie_set_doorbell(&xpcie_epf->xpcie, FROM_DEVICE,
+                                DEV_EVENT, NO_OP);
+       memcpy(xpcie_epf->xpcie.mmio + XPCIE_MMIO_MAGIC_OFF, XPCIE_MAGIC_YOCTO,
+              strlen(XPCIE_MAGIC_YOCTO));
+
        return 0;
 
-err_free_err_irq:
-       free_irq(xpcie_epf->irq_err, &xpcie_epf->xpcie);
+err_uninit_dma:
+       intel_xpcie_set_device_status(&xpcie_epf->xpcie, XPCIE_STATUS_ERROR);
+       memcpy(xpcie_epf->xpcie.mmio + XPCIE_MMIO_MAGIC_OFF, XPCIE_MAGIC_YOCTO,
+              strlen(XPCIE_MAGIC_YOCTO));
+
+       intel_xpcie_ep_dma_uninit(epf);
 
 err_cleanup_bars:
        intel_xpcie_cleanup_bars(epf);
@@ -306,8 +399,12 @@ static int intel_xpcie_epf_bind(struct pci_epf *epf)
 
 static void intel_xpcie_epf_unbind(struct pci_epf *epf)
 {
+       struct xpcie_epf *xpcie_epf = epf_get_drvdata(epf);
        struct pci_epc *epc = epf->epc;
 
+       intel_xpcie_core_cleanup(&xpcie_epf->xpcie);
+       intel_xpcie_set_device_status(&xpcie_epf->xpcie, XPCIE_STATUS_READY);
+
        intel_xpcie_ep_dma_uninit(epf);
 
        pci_epc_stop(epc);
@@ -339,8 +436,11 @@ static void intel_xpcie_epf_shutdown(struct device *dev)
        xpcie_epf = epf_get_drvdata(epf);
 
        /* Notify host in case PCIe hot plug not supported */
-       if (xpcie_epf)
+       if (xpcie_epf && xpcie_epf->xpcie.status == XPCIE_STATUS_RUN) {
+               intel_xpcie_set_doorbell(&xpcie_epf->xpcie, FROM_DEVICE,
+                                        DEV_EVENT, DEV_SHUTDOWN);
                pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
+       }
 }
 
 static struct pci_epf_ops ops = {
diff --git a/drivers/misc/xlink-pcie/local_host/epf.h 
b/drivers/misc/xlink-pcie/local_host/epf.h
index 82410404d54e..7220cead0973 100644
--- a/drivers/misc/xlink-pcie/local_host/epf.h
+++ b/drivers/misc/xlink-pcie/local_host/epf.h
@@ -12,6 +12,7 @@
 #include <linux/pci-epf.h>
 
 #include "xpcie.h"
+#include "util.h"
 
 #define XPCIE_DRIVER_NAME "mxlk_pcie_epf"
 #define XPCIE_DRIVER_DESC "Intel(R) xLink PCIe endpoint function driver"
@@ -24,6 +25,7 @@
 #define XPCIE_NUM_RX_DESCS     (64)
 
 extern bool dma_ll_mode;
+extern u32 xlink_sw_id;
 
 struct xpcie_dma_ll_desc {
        u32 dma_ch_control1;
@@ -65,14 +67,35 @@ struct xpcie_epf {
        void __iomem *dbi_base;
        char stepping[KEEMBAY_XPCIE_STEPPING_MAXLEN];
 
+       irq_handler_t                   core_irq_callback;
+       dma_addr_t                      tx_phys;
+       void                            *tx_virt;
+       size_t                          tx_size;
+
        struct xpcie_dma_ll_desc_buf    tx_desc_buf[DMA_CHAN_NUM];
        struct xpcie_dma_ll_desc_buf    rx_desc_buf[DMA_CHAN_NUM];
 };
 
+static inline struct device *xpcie_to_dev(struct xpcie *xpcie)
+{
+       struct xpcie_epf *xpcie_epf = container_of(xpcie,
+                                                  struct xpcie_epf, xpcie);
+
+       return &xpcie_epf->epf->dev;
+}
+
 int intel_xpcie_ep_dma_init(struct pci_epf *epf);
 int intel_xpcie_ep_dma_uninit(struct pci_epf *epf);
 int intel_xpcie_ep_dma_reset(struct pci_epf *epf);
 int intel_xpcie_ep_dma_read_ll(struct pci_epf *epf, int chan, int descs_num);
 int intel_xpcie_ep_dma_write_ll(struct pci_epf *epf, int chan, int descs_num);
 
+void intel_xpcie_register_host_irq(struct xpcie *xpcie,
+                                  irq_handler_t func);
+int intel_xpcie_raise_irq(struct xpcie *xpcie,
+                         enum xpcie_doorbell_type type);
+int intel_xpcie_copy_from_host_ll(struct xpcie *xpcie,
+                                 int chan, int descs_num);
+int intel_xpcie_copy_to_host_ll(struct xpcie *xpcie,
+                               int chan, int descs_num);
 #endif /* XPCIE_EPF_HEADER_ */
diff --git a/drivers/misc/xlink-pcie/local_host/util.c 
b/drivers/misc/xlink-pcie/local_host/util.c
new file mode 100644
index 000000000000..ec808b0cd72b
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/util.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*****************************************************************************
+ *
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ ****************************************************************************/
+
+#include "util.h"
+
+void intel_xpcie_set_device_status(struct xpcie *xpcie, u32 status)
+{
+       xpcie->status = status;
+       intel_xpcie_iowrite32(status, xpcie->mmio + XPCIE_MMIO_DEV_STATUS);
+}
+
+u32 intel_xpcie_get_device_status(struct xpcie *xpcie)
+{
+       return intel_xpcie_ioread32(xpcie->mmio + XPCIE_MMIO_DEV_STATUS);
+}
+
+/* Map a (direction, type) pair to its doorbell byte offset in the
+ * shared MMIO layout (struct xpcie_mmio).
+ *
+ * NOTE(review): an invalid combination falls through to offset 0,
+ * which aliases the device_status field of the shared layout; callers
+ * must only pass the pairs enumerated below.
+ */
+static size_t intel_xpcie_doorbell_offset(struct xpcie *xpcie,
+                                         enum xpcie_doorbell_direction dirt,
+                                         enum xpcie_doorbell_type type)
+{
+       if (dirt == TO_DEVICE && type == DATA_SENT)
+               return XPCIE_MMIO_HTOD_TX_DOORBELL;
+       if (dirt == TO_DEVICE && type == DATA_RECEIVED)
+               return XPCIE_MMIO_HTOD_RX_DOORBELL;
+       if (dirt == TO_DEVICE && type == DEV_EVENT)
+               return XPCIE_MMIO_HTOD_EVENT_DOORBELL;
+       if (dirt == FROM_DEVICE && type == DATA_SENT)
+               return XPCIE_MMIO_DTOH_TX_DOORBELL;
+       if (dirt == FROM_DEVICE && type == DATA_RECEIVED)
+               return XPCIE_MMIO_DTOH_RX_DOORBELL;
+       if (dirt == FROM_DEVICE && type == DEV_EVENT)
+               return XPCIE_MMIO_DTOH_EVENT_DOORBELL;
+
+       return 0;
+}
+
+/* Write @value into the doorbell byte selected by (@dirt, @type) in
+ * the shared MMIO area.
+ */
+void intel_xpcie_set_doorbell(struct xpcie *xpcie,
+                             enum xpcie_doorbell_direction dirt,
+                             enum xpcie_doorbell_type type, u8 value)
+{
+       size_t offset = intel_xpcie_doorbell_offset(xpcie, dirt, type);
+
+       intel_xpcie_iowrite8(value, xpcie->mmio + offset);
+}
+
+/* Read the doorbell byte selected by (@dirt, @type) from the shared
+ * MMIO area.
+ */
+u8 intel_xpcie_get_doorbell(struct xpcie *xpcie,
+                           enum xpcie_doorbell_direction dirt,
+                           enum xpcie_doorbell_type type)
+{
+       size_t offset = intel_xpcie_doorbell_offset(xpcie, dirt, type);
+
+       return intel_xpcie_ioread8(xpcie->mmio + offset);
+}
+
+u32 intel_xpcie_get_host_status(struct xpcie *xpcie)
+{
+       return intel_xpcie_ioread32(xpcie->mmio + XPCIE_MMIO_HOST_STATUS);
+}
+
+void intel_xpcie_set_host_status(struct xpcie *xpcie, u32 status)
+{
+       xpcie->status = status;
+       intel_xpcie_iowrite32(status, xpcie->mmio + XPCIE_MMIO_HOST_STATUS);
+}
+
+/* Allocate a buffer descriptor owning a private data buffer of
+ * @length bytes (allocation rounded up to a whole cache line).
+ * Returns NULL on allocation failure; release with
+ * intel_xpcie_free_bd().
+ */
+struct xpcie_buf_desc *intel_xpcie_alloc_bd(size_t length)
+{
+       struct xpcie_buf_desc *bd;
+
+       bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+       if (!bd)
+               return NULL;
+
+       /* Round up so the buffer never shares a cache line with others */
+       bd->head = kzalloc(roundup(length, cache_line_size()), GFP_KERNEL);
+       if (!bd->head) {
+               kfree(bd);
+               return NULL;
+       }
+
+       bd->data = bd->head;
+       bd->length = length;
+       bd->true_len = length;
+       bd->next = NULL;
+       bd->own_mem = true;
+
+       return bd;
+}
+
+struct xpcie_buf_desc *intel_xpcie_alloc_bd_reuse(size_t length, void *virt,
+                                                 dma_addr_t phys)
+{
+       struct xpcie_buf_desc *bd;
+
+       bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+       if (!bd)
+               return NULL;
+
+       bd->head = virt;
+       bd->phys = phys;
+       bd->data = bd->head;
+       bd->length = length;
+       bd->true_len = length;
+       bd->next = NULL;
+       bd->own_mem = false;
+
+       return bd;
+}
+
+void intel_xpcie_free_bd(struct xpcie_buf_desc *bd)
+{
+       if (bd) {
+               if (bd->own_mem)
+                       kfree(bd->head);
+               kfree(bd);
+       }
+}
+
+int intel_xpcie_list_init(struct xpcie_list *list)
+{
+       spin_lock_init(&list->lock);
+       list->bytes = 0;
+       list->buffers = 0;
+       list->head = NULL;
+       list->tail = NULL;
+
+       return 0;
+}
+
+void intel_xpcie_list_cleanup(struct xpcie_list *list)
+{
+       struct xpcie_buf_desc *bd;
+
+       spin_lock(&list->lock);
+       while (list->head) {
+               bd = list->head;
+               list->head = bd->next;
+               intel_xpcie_free_bd(bd);
+       }
+
+       list->head = NULL;
+       list->tail = NULL;
+       spin_unlock(&list->lock);
+}
+
+/* Append @bd -- which may be a chain linked through ->next -- to the
+ * tail of @list, updating the byte/buffer accounting for every
+ * element under the list lock.  Returns -EINVAL for a NULL @bd.
+ */
+int intel_xpcie_list_put(struct xpcie_list *list, struct xpcie_buf_desc *bd)
+{
+       if (!bd)
+               return -EINVAL;
+
+       spin_lock(&list->lock);
+       if (list->head)
+               list->tail->next = bd;
+       else
+               list->head = bd;
+
+       /* Walk the chain to account each element and find the new tail */
+       while (bd) {
+               list->tail = bd;
+               list->bytes += bd->length;
+               list->buffers++;
+               bd = bd->next;
+       }
+       spin_unlock(&list->lock);
+
+       return 0;
+}
+
+/* Prepend @bd -- which may be a chain linked through ->next -- to the
+ * front of @list, accounting every element.  The previous head is
+ * re-attached after the last element of the chain; the tail is only
+ * set when the list was previously empty.  Returns -EINVAL for a
+ * NULL @bd.
+ */
+int intel_xpcie_list_put_head(struct xpcie_list *list,
+                             struct xpcie_buf_desc *bd)
+{
+       struct xpcie_buf_desc *old_head;
+
+       if (!bd)
+               return -EINVAL;
+
+       spin_lock(&list->lock);
+       old_head = list->head;
+       list->head = bd;
+       while (bd) {
+               list->bytes += bd->length;
+               list->buffers++;
+               if (!bd->next) {
+                       /* Last chain element: splice old list back on */
+                       list->tail = list->tail ? list->tail : bd;
+                       bd->next = old_head;
+                       break;
+               }
+               bd = bd->next;
+       }
+       spin_unlock(&list->lock);
+
+       return 0;
+}
+
+/* Detach and return the first descriptor of @list (NULL when empty),
+ * updating the accounting.  The returned descriptor is fully unlinked
+ * (->next cleared).
+ */
+struct xpcie_buf_desc *intel_xpcie_list_get(struct xpcie_list *list)
+{
+       struct xpcie_buf_desc *bd;
+
+       spin_lock(&list->lock);
+       bd = list->head;
+       if (list->head) {
+               list->head = list->head->next;
+               if (!list->head)
+                       list->tail = NULL;
+               bd->next = NULL;
+               list->bytes -= bd->length;
+               list->buffers--;
+       }
+       spin_unlock(&list->lock);
+
+       return bd;
+}
+
+void intel_xpcie_list_info(struct xpcie_list *list,
+                          size_t *bytes, size_t *buffers)
+{
+       spin_lock(&list->lock);
+       *bytes = list->bytes;
+       *buffers = list->buffers;
+       spin_unlock(&list->lock);
+}
+
+struct xpcie_buf_desc *intel_xpcie_alloc_rx_bd(struct xpcie *xpcie)
+{
+       struct xpcie_buf_desc *bd;
+
+       bd = intel_xpcie_list_get(&xpcie->rx_pool);
+       if (bd) {
+               bd->data = bd->head;
+               bd->length = bd->true_len;
+               bd->next = NULL;
+               bd->interface = 0;
+       }
+
+       return bd;
+}
+
+void intel_xpcie_free_rx_bd(struct xpcie *xpcie, struct xpcie_buf_desc *bd)
+{
+       if (bd)
+               intel_xpcie_list_put(&xpcie->rx_pool, bd);
+}
+
+struct xpcie_buf_desc *intel_xpcie_alloc_tx_bd(struct xpcie *xpcie)
+{
+       struct xpcie_buf_desc *bd;
+
+       bd = intel_xpcie_list_get(&xpcie->tx_pool);
+       if (bd) {
+               bd->data = bd->head;
+               bd->length = bd->true_len;
+               bd->next = NULL;
+               bd->interface = 0;
+       } else {
+               xpcie->no_tx_buffer = true;
+       }
+
+       return bd;
+}
+
+void intel_xpcie_free_tx_bd(struct xpcie *xpcie, struct xpcie_buf_desc *bd)
+{
+       if (!bd)
+               return;
+
+       intel_xpcie_list_put(&xpcie->tx_pool, bd);
+
+       xpcie->no_tx_buffer = false;
+       wake_up_interruptible(&xpcie->tx_waitq);
+}
+
+int intel_xpcie_interface_init(struct xpcie *xpcie, int id)
+{
+       struct xpcie_interface *inf = xpcie->interfaces + id;
+
+       inf->id = id;
+       inf->xpcie = xpcie;
+
+       inf->partial_read = NULL;
+       intel_xpcie_list_init(&inf->read);
+       mutex_init(&inf->rlock);
+       inf->data_avail = false;
+       init_waitqueue_head(&inf->rx_waitq);
+
+       return 0;
+}
+
+void intel_xpcie_interface_cleanup(struct xpcie_interface *inf)
+{
+       struct xpcie_buf_desc *bd;
+
+       intel_xpcie_free_rx_bd(inf->xpcie, inf->partial_read);
+       while ((bd = intel_xpcie_list_get(&inf->read)))
+               intel_xpcie_free_rx_bd(inf->xpcie, bd);
+
+       mutex_destroy(&inf->rlock);
+}
+
+void intel_xpcie_interfaces_cleanup(struct xpcie *xpcie)
+{
+       int index;
+
+       for (index = 0; index < XPCIE_NUM_INTERFACES; index++)
+               intel_xpcie_interface_cleanup(xpcie->interfaces + index);
+
+       intel_xpcie_list_cleanup(&xpcie->write);
+       mutex_destroy(&xpcie->wlock);
+}
+
+int intel_xpcie_interfaces_init(struct xpcie *xpcie)
+{
+       int index;
+
+       mutex_init(&xpcie->wlock);
+       intel_xpcie_list_init(&xpcie->write);
+       init_waitqueue_head(&xpcie->tx_waitq);
+       xpcie->no_tx_buffer = false;
+
+       for (index = 0; index < XPCIE_NUM_INTERFACES; index++)
+               intel_xpcie_interface_init(xpcie, index);
+
+       return 0;
+}
+
+void intel_xpcie_add_bd_to_interface(struct xpcie *xpcie,
+                                    struct xpcie_buf_desc *bd)
+{
+       struct xpcie_interface *inf;
+
+       inf = xpcie->interfaces + bd->interface;
+
+       intel_xpcie_list_put(&inf->read, bd);
+
+       mutex_lock(&inf->rlock);
+       inf->data_avail = true;
+       mutex_unlock(&inf->rlock);
+       wake_up_interruptible(&inf->rx_waitq);
+}
+
+/* Walk the capability linked list in shared MMIO starting at offset
+ * @start (or at the root advertised at XPCIE_MMIO_CAP_OFF when @start
+ * is 0) and return a pointer to the header whose id equals @id.
+ * Returns NULL at a XPCIE_CAP_NULL terminator or after XPCIE_CAP_TTL
+ * hops (corrupted or looping list).
+ */
+void *intel_xpcie_cap_find(struct xpcie *xpcie, u32 start, u16 id)
+{
+       int ttl = XPCIE_CAP_TTL;
+       void *hdr;
+       u16 id_out, next;
+
+       /* If user didn't specify start, assume start of mmio */
+       if (!start)
+               start = intel_xpcie_ioread32(xpcie->mmio + XPCIE_MMIO_CAP_OFF);
+
+       /* Read header info */
+       hdr = xpcie->mmio + start;
+
+       /* Check if we still have time to live */
+       while (ttl--) {
+               id_out = intel_xpcie_ioread16(hdr + XPCIE_CAP_HDR_ID);
+               next = intel_xpcie_ioread16(hdr + XPCIE_CAP_HDR_NEXT);
+
+               /* If cap matches, return header */
+               if (id_out == id)
+                       return hdr;
+               /* If cap is NULL, we are at the end of the list */
+               else if (id_out == XPCIE_CAP_NULL)
+                       return NULL;
+               /* If no match and no end of list, traverse the linked list */
+               else
+                       hdr = xpcie->mmio + next;
+       }
+
+       /* If we reached here, the capability list is corrupted */
+       return NULL;
+}
diff --git a/drivers/misc/xlink-pcie/local_host/util.h 
b/drivers/misc/xlink-pcie/local_host/util.h
new file mode 100644
index 000000000000..908be897a61d
--- /dev/null
+++ b/drivers/misc/xlink-pcie/local_host/util.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*****************************************************************************
+ *
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ *
+ ****************************************************************************/
+
+#ifndef XPCIE_UTIL_HEADER_
+#define XPCIE_UTIL_HEADER_
+
+#include "xpcie.h"
+
+enum xpcie_doorbell_direction {
+       TO_DEVICE,
+       FROM_DEVICE
+};
+
+enum xpcie_doorbell_type {
+       DATA_SENT,
+       DATA_RECEIVED,
+       DEV_EVENT
+};
+
+enum xpcie_event_type {
+       NO_OP,
+       REQUEST_RESET,
+       DEV_SHUTDOWN
+};
+
+void intel_xpcie_set_doorbell(struct xpcie *xpcie,
+                             enum xpcie_doorbell_direction dirt,
+                             enum xpcie_doorbell_type type, u8 value);
+u8 intel_xpcie_get_doorbell(struct xpcie *xpcie,
+                           enum xpcie_doorbell_direction dirt,
+                           enum xpcie_doorbell_type type);
+
+void intel_xpcie_set_device_status(struct xpcie *xpcie, u32 status);
+u32 intel_xpcie_get_device_status(struct xpcie *xpcie);
+u32 intel_xpcie_get_host_status(struct xpcie *xpcie);
+void intel_xpcie_set_host_status(struct xpcie *xpcie, u32 status);
+
+struct xpcie_buf_desc *intel_xpcie_alloc_bd(size_t length);
+struct xpcie_buf_desc *intel_xpcie_alloc_bd_reuse(size_t length, void *virt,
+                                                 dma_addr_t phys);
+void intel_xpcie_free_bd(struct xpcie_buf_desc *bd);
+
+int intel_xpcie_list_init(struct xpcie_list *list);
+void intel_xpcie_list_cleanup(struct xpcie_list *list);
+int intel_xpcie_list_put(struct xpcie_list *list, struct xpcie_buf_desc *bd);
+int intel_xpcie_list_put_head(struct xpcie_list *list,
+                             struct xpcie_buf_desc *bd);
+struct xpcie_buf_desc *intel_xpcie_list_get(struct xpcie_list *list);
+void intel_xpcie_list_info(struct xpcie_list *list, size_t *bytes,
+                          size_t *buffers);
+
+struct xpcie_buf_desc *intel_xpcie_alloc_rx_bd(struct xpcie *xpcie);
+void intel_xpcie_free_rx_bd(struct xpcie *xpcie, struct xpcie_buf_desc *bd);
+struct xpcie_buf_desc *intel_xpcie_alloc_tx_bd(struct xpcie *xpcie);
+void intel_xpcie_free_tx_bd(struct xpcie *xpcie, struct xpcie_buf_desc *bd);
+
+int intel_xpcie_interface_init(struct xpcie *xpcie, int id);
+void intel_xpcie_interface_cleanup(struct xpcie_interface *inf);
+void intel_xpcie_interfaces_cleanup(struct xpcie *xpcie);
+int intel_xpcie_interfaces_init(struct xpcie *xpcie);
+void intel_xpcie_add_bd_to_interface(struct xpcie *xpcie,
+                                    struct xpcie_buf_desc *bd);
+void *intel_xpcie_cap_find(struct xpcie *xpcie, u32 start, u16 id);
+#endif /* XPCIE_UTIL_HEADER_ */
diff --git a/drivers/misc/xlink-pcie/local_host/xpcie.h 
b/drivers/misc/xlink-pcie/local_host/xpcie.h
index 0745e6dfee10..8a559617daba 100644
--- a/drivers/misc/xlink-pcie/local_host/xpcie.h
+++ b/drivers/misc/xlink-pcie/local_host/xpcie.h
@@ -14,6 +14,8 @@
 #include <linux/module.h>
 #include <linux/pci_ids.h>
 
+#include "core.h"
+
 #ifndef PCI_DEVICE_ID_INTEL_KEEMBAY
 #define PCI_DEVICE_ID_INTEL_KEEMBAY 0x6240
 #endif
@@ -21,18 +23,79 @@
 #define XPCIE_IO_COMM_SIZE SZ_16K
 #define XPCIE_MMIO_OFFSET SZ_4K
 
+/* Status encoding of both device and host */
+#define XPCIE_STATUS_ERROR     (0xFFFFFFFF)
+#define XPCIE_STATUS_UNINIT    (0)
+#define XPCIE_STATUS_READY     (1)
+#define XPCIE_STATUS_RECOVERY  (2)
+#define XPCIE_STATUS_OFF       (3)
+#define XPCIE_STATUS_RUN       (4)
+
+#define XPCIE_MAGIC_STRLEN     (16)
+#define XPCIE_MAGIC_YOCTO      "VPUYOCTO"
+
 /* MMIO layout and offsets shared between device and host */
 struct xpcie_mmio {
+       u32 device_status;
+       u32 host_status;
        u8 legacy_a0;
+       u8 htod_tx_doorbell;
+       u8 htod_rx_doorbell;
+       u8 htod_event_doorbell;
+       u8 dtoh_tx_doorbell;
+       u8 dtoh_rx_doorbell;
+       u8 dtoh_event_doorbell;
+       u8 reserved;
+       u32 cap_offset;
+       u8 magic[XPCIE_MAGIC_STRLEN];
 } __packed;
 
+#define XPCIE_MMIO_DEV_STATUS  (offsetof(struct xpcie_mmio, device_status))
+#define XPCIE_MMIO_HOST_STATUS (offsetof(struct xpcie_mmio, host_status))
 #define XPCIE_MMIO_LEGACY_A0   (offsetof(struct xpcie_mmio, legacy_a0))
+#define XPCIE_MMIO_HTOD_TX_DOORBELL \
+       (offsetof(struct xpcie_mmio, htod_tx_doorbell))
+#define XPCIE_MMIO_HTOD_RX_DOORBELL \
+       (offsetof(struct xpcie_mmio, htod_rx_doorbell))
+#define XPCIE_MMIO_HTOD_EVENT_DOORBELL \
+       (offsetof(struct xpcie_mmio, htod_event_doorbell))
+#define XPCIE_MMIO_DTOH_TX_DOORBELL \
+       (offsetof(struct xpcie_mmio, dtoh_tx_doorbell))
+#define XPCIE_MMIO_DTOH_RX_DOORBELL \
+       (offsetof(struct xpcie_mmio, dtoh_rx_doorbell))
+#define XPCIE_MMIO_DTOH_EVENT_DOORBELL \
+       (offsetof(struct xpcie_mmio, dtoh_event_doorbell))
+#define XPCIE_MMIO_CAP_OFF     (offsetof(struct xpcie_mmio, cap_offset))
+#define XPCIE_MMIO_MAGIC_OFF   (offsetof(struct xpcie_mmio, magic))
 
 struct xpcie {
        u32 status;
        bool legacy_a0;
        void *mmio;
        void *bar4;
+
+       struct workqueue_struct *rx_wq;
+       struct workqueue_struct *tx_wq;
+
+       struct xpcie_interface interfaces[XPCIE_NUM_INTERFACES];
+
+       size_t fragment_size;
+       struct xpcie_cap_txrx *txrx;
+       struct xpcie_stream tx;
+       struct xpcie_stream rx;
+
+       struct mutex wlock; /* write lock */
+       struct xpcie_list write;
+       bool no_tx_buffer;
+       wait_queue_head_t tx_waitq;
+       bool tx_pending;
+       bool stop_flag;
+
+       struct xpcie_list rx_pool;
+       struct xpcie_list tx_pool;
+
+       struct delayed_work rx_event;
+       struct delayed_work tx_event;
 };
 
 #endif /* XPCIE_HEADER_ */
diff --git a/include/linux/xlink_drv_inf.h b/include/linux/xlink_drv_inf.h
new file mode 100644
index 000000000000..8ffbaafecc88
--- /dev/null
+++ b/include/linux/xlink_drv_inf.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Keem Bay XLink PCIe Driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef _XLINK_DRV_INF_H_
+#define _XLINK_DRV_INF_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define XLINK_DEV_INF_TYPE_MASK                GENMASK(27, 24)
+#define XLINK_DEV_PHYS_ID_MASK         GENMASK(23, 8)
+#define XLINK_DEV_TYPE_MASK            GENMASK(6, 4)
+#define XLINK_DEV_PCIE_ID_MASK         GENMASK(3, 1)
+#define XLINK_DEV_FUNC_MASK            GENMASK(0, 0)
+
+enum xlink_device_inf_type {
+       XLINK_DEV_INF_PCIE = 1,
+};
+
+enum xlink_device_type {
+       XLINK_DEV_TYPE_KMB = 0,
+};
+
+enum xlink_device_pcie {
+       XLINK_DEV_PCIE_0 = 0,
+};
+
+enum xlink_device_func {
+       XLINK_DEV_FUNC_VPU = 0,
+};
+
+enum _xlink_device_status {
+       _XLINK_DEV_OFF,
+       _XLINK_DEV_ERROR,
+       _XLINK_DEV_BUSY,
+       _XLINK_DEV_RECOVERY,
+       _XLINK_DEV_READY
+};
+
+int xlink_pcie_get_device_list(u32 *sw_device_id_list,
+                              u32 *num_devices);
+int xlink_pcie_get_device_name(u32 sw_device_id, char *device_name,
+                              size_t name_size);
+int xlink_pcie_get_device_status(u32 sw_device_id,
+                                u32 *device_status);
+int xlink_pcie_boot_device(u32 sw_device_id, const char *binary_name);
+int xlink_pcie_connect(u32 sw_device_id);
+int xlink_pcie_read(u32 sw_device_id, void *data, size_t *const size,
+                   u32 timeout);
+int xlink_pcie_write(u32 sw_device_id, void *data, size_t *const size,
+                    u32 timeout);
+int xlink_pcie_reset_device(u32 sw_device_id);
+#endif
-- 
2.17.1

Reply via email to