+{
+ struct platform_data *data = (struct platform_data *) platform_data;
+ struct spa *spa = data->spa;
+ struct ocxl_process_element *pe;
+
+ if (pasid > SPA_PASID_MAX)
+ return -EINVAL;
+
+ *pe_handle = pasid & SPA_PE_MASK;
+ pe = spa->spa_mem + *pe_handle;
+
+ if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID))
+ return -EINVAL;
+
+ *pid = be32_to_cpu(pe->pid);
+ *tid = be32_to_cpu(pe->tid);
+
+ memset(pe, 0, sizeof(struct ocxl_process_element));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_ocxl_remove_pe);
+
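The hunk above is the tail of the new pnv_ocxl_remove_pe() platform helper: it validates the PASID, checks that the process element is still valid, hands back the PID and TID that were programmed into it, and clears the entry. The prototype isn't visible in this excerpt, so the signature in the sketch below is an assumption inferred from the dereferences above; a caller on the generic ocxl side might look like this:

	/*
	 * Hypothetical caller -- the pnv_ocxl_remove_pe() signature is
	 * inferred from the function body above (pid/tid come back as
	 * host-endian u32, pe_handle is derived from the PASID), not
	 * taken from the actual header.
	 */
	static int remove_pe_sketch(void *platform_data, int pasid)
	{
		u32 pid, tid;
		u64 pe_handle;
		int rc;

		rc = pnv_ocxl_remove_pe(platform_data, pasid, &pid, &tid,
					&pe_handle);
		if (rc)
			return rc; /* -EINVAL: bad PASID or PE not valid */

		pr_debug("removed PE %#llx (pid %u tid %u)\n",
			 pe_handle, pid, tid);
		return 0;
	}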
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index 70f8f1c3929d..b9150da0ea6b 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
-#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 58d111afd9f6..85ba4d1b7be7 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -10,28 +10,8 @@
#include "ocxl_internal.h"
#include "trace.h"
-
-#define SPA_PASID_BITS 15
-#define SPA_PASID_MAX ((1 << SPA_PASID_BITS) - 1)
-#define SPA_PE_MASK SPA_PASID_MAX
-#define SPA_SPA_SIZE_LOG 22 /* Each SPA is 4 Mb */
-
-#define SPA_CFG_SF (1ull << (63-0))
-#define SPA_CFG_TA (1ull << (63-1))
-#define SPA_CFG_HV (1ull << (63-3))
-#define SPA_CFG_UV (1ull << (63-4))
-#define SPA_CFG_XLAT_hpt (0ull << (63-6)) /* Hashed page table (HPT) mode */
-#define SPA_CFG_XLAT_roh (2ull << (63-6)) /* Radix on HPT mode */
-#define SPA_CFG_XLAT_ror (3ull << (63-6)) /* Radix on Radix mode */
-#define SPA_CFG_PR (1ull << (63-49))
-#define SPA_CFG_TC (1ull << (63-54))
-#define SPA_CFG_DR (1ull << (63-59))
-
-#define SPA_XSL_TF (1ull << (63-3)) /* Translation fault */
-#define SPA_XSL_S (1ull << (63-38)) /* Store operation */
-
-#define SPA_PE_VALID 0x80000000
-
+#define XSL_TF (1ull << (63-3)) /* Translation fault */
+#define XSL_S (1ull << (63-38)) /* Store operation */
struct pe_data {
struct mm_struct *mm;
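The two masks that remain keep the PowerPC MSB0 convention used throughout this file: the architecture documents bit 0 as the most significant bit of the 64-bit register, so (63 - n) converts a documented bit number into an ordinary left shift. As a quick sanity check (PPC_BIT() in asm/bitops.h encodes the same idiom upstream; the name below is just an illustration, the patch defines its masks directly):

	/* "bit 3" in MSB0 numbering is LSB0 position 60 */
	#define PPC_BIT_SKETCH(n)	(1ull << (63 - (n)))

	/* XSL_TF == PPC_BIT_SKETCH(3) == 0x1000000000000000ull */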
@@ -42,32 +22,6 @@ struct pe_data {
struct rcu_head rcu;
};
-struct spa {
- struct ocxl_process_element *spa_mem;
- int spa_order;
- struct mutex spa_lock;
- struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
- char *irq_name;
- int virq;
- void __iomem *reg_dsisr;
- void __iomem *reg_dar;
- void __iomem *reg_tfc;
- void __iomem *reg_pe_handle;
- /*
- * The following field are used by the memory fault
- * interrupt handler. We can only have one interrupt at a
- * time. The NPU won't raise another interrupt until the
- * previous one has been ack'd by writing to the TFC register
- */
- struct xsl_fault {
- struct work_struct fault_work;
- u64 pe;
- u64 dsisr;
- u64 dar;
- struct pe_data pe_data;
- } xsl_fault;
-};
-
/*
* A opencapi link can be used be by several PCI functions. We have
* one link per device slot.
@@ -82,9 +36,26 @@ struct ocxl_link {
int domain;
int bus;
int dev;
+ char *irq_name;
+ int virq;
+ struct mutex pe_lock;
atomic_t irq_available;
- struct spa *spa;
void *platform_data;
+ struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
+
+ /*
+ * The following fields are used by the memory fault
+ * interrupt handler. We can only have one interrupt at a
+ * time. The NPU won't raise another interrupt until the
+ * previous one has been ack'd by writing to the TFC register
+ */
+ struct xsl_fault {
+ struct work_struct fault_work;
+ u64 pe;
+ u64 dsisr;
+ u64 dar;
+ struct pe_data pe_data;
+ } xsl_fault;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);
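With struct spa gone, the one-fault-at-a-time bookkeeping now lives in struct ocxl_link itself, and the bottom half climbs back from the embedded work_struct to the link with two container_of() steps, as the later hunks do. A minimal standalone sketch of that pattern, with the structs reduced to the relevant members:

	#include <linux/workqueue.h>

	struct link_sketch {
		struct fault_sketch {
			struct work_struct fault_work;
			u64 dar;
		} xsl_fault;
	};

	static void fault_bh(struct work_struct *w)
	{
		/* work_struct -> enclosing fault -> enclosing link */
		struct fault_sketch *f = container_of(w, struct fault_sketch,
						      fault_work);
		struct link_sketch *l = container_of(f, struct link_sketch,
						     xsl_fault);

		pr_debug("handling fault at %#llx\n", l->xsl_fault.dar);
	}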
@@ -95,18 +66,7 @@ enum xsl_response {
RESTART,
};
-
-static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
-{
- u64 reg;
-
- *dsisr = in_be64(spa->reg_dsisr);
- *dar = in_be64(spa->reg_dar);
- reg = in_be64(spa->reg_pe_handle);
- *pe = reg & SPA_PE_MASK;
-}
-
-static void ack_irq(struct spa *spa, enum xsl_response r)
+static void ack_irq(struct ocxl_link *link, enum xsl_response r)
{
u64 reg = 0;
@@ -119,9 +79,11 @@ static void ack_irq(struct spa *spa, enum xsl_response r)
WARN(1, "Invalid irq response %d\n", r);
if (reg) {
- trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
- spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
- out_be64(spa->reg_tfc, reg);
+ trace_ocxl_fault_ack(link->xsl_fault.pe,
+ link->xsl_fault.dsisr,
+ link->xsl_fault.dar,
+ reg);
+ pnv_ocxl_write_xsl_tfc(link->platform_data, reg);
}
}
@@ -132,7 +94,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
enum xsl_response r;
struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
fault_work);
- struct spa *spa = container_of(fault, struct spa, xsl_fault);
+ struct ocxl_link *link = container_of(fault, struct ocxl_link,
+ xsl_fault);
int rc;
@@ -160,7 +122,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
* just call hash_page_mm() here.
*/
access = _PAGE_PRESENT | _PAGE_READ;
- if (fault->dsisr & SPA_XSL_S)
+ if (fault->dsisr & XSL_S)
access |= _PAGE_WRITE;
if (get_region_id(fault->dar) != USER_REGION_ID)
@@ -174,25 +136,21 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work)
r = RESTART;
ack:
mmput(fault->pe_data.mm);
- ack_irq(spa, r);
+ ack_irq(link, r);
}
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
struct ocxl_link *link = (struct ocxl_link *) data;
- struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
- struct ocxl_process_element *pe;
int pid;
bool schedule = false;
- read_irq(spa, &dsisr, &dar, &pe_handle);
- trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);
+ pnv_ocxl_read_xsl_regs(link->platform_data, &dsisr, &dar,
+ &pe_handle, &pid);
+ trace_ocxl_fault(pe_handle, dsisr, dar, -1);
- WARN_ON(pe_handle > SPA_PE_MASK);
- pe = spa->spa_mem + pe_handle;
- pid = be32_to_cpu(pe->pid);
/* We could be reading all null values here if the PE is being
* removed while an interrupt kicks in. It's not supposed to
* happen if the driver notified the AFU to terminate the
@@ -200,14 +158,14 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
* acknowledging. But even if it happens, we won't find a
* memory context below and fail silently, so it should be ok.
*/
- if (!(dsisr & SPA_XSL_TF)) {
+ if (!(dsisr & XSL_TF)) {
WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
- ack_irq(spa, ADDRESS_ERROR);
+ ack_irq(link, ADDRESS_ERROR);
return IRQ_HANDLED;
}
rcu_read_lock();
- pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
+ pe_data = radix_tree_lookup(&link->pe_tree, pe_handle);
if (!pe_data) {
/*
* Could only happen if the driver didn't notify the
@@ -221,7 +179,7 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
*/
rcu_read_unlock();
pr_debug("Unknown mm context for xsl interrupt\n");
- ack_irq(spa, ADDRESS_ERROR);
+ ack_irq(link, ADDRESS_ERROR);
return IRQ_HANDLED;
}
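The removal-race comment above describes the pattern the handler relies on: pe_data is found under rcu_read_lock() (the lookup a few hunks back), and in the next hunk the mm is pinned with mmget_not_zero() before the bottom half is scheduled, which later drops it with mmput(). Condensed into a standalone helper, assuming the pe_tree/pe_data layout from this patch:

	/* Sketch only: pin the mm for a faulting PE, or return NULL */
	static struct mm_struct *pin_faulting_mm(struct radix_tree_root *pe_tree,
						 u64 pe_handle)
	{
		struct pe_data *pe_data;
		struct mm_struct *mm = NULL;

		rcu_read_lock();
		pe_data = radix_tree_lookup(pe_tree, pe_handle);
		if (pe_data && pe_data->mm && mmget_not_zero(pe_data->mm))
			mm = pe_data->mm;	/* caller must mmput() */
		rcu_read_unlock();

		return mm;
	}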
@@ -232,56 +190,35 @@ static irqreturn_t xsl_fault_handler(int irq, void *data)
*/
rcu_read_unlock();
pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
- ack_irq(spa, ADDRESS_ERROR);
+ ack_irq(link, ADDRESS_ERROR);
return IRQ_HANDLED;
}
WARN_ON(pe_data->mm->context.id != pid);
if (mmget_not_zero(pe_data->mm)) {
- spa->xsl_fault.pe = pe_handle;
- spa->xsl_fault.dar = dar;
- spa->xsl_fault.dsisr = dsisr;
- spa->xsl_fault.pe_data = *pe_data;
+ link->xsl_fault.pe = pe_handle;
+ link->xsl_fault.dar = dar;
+ link->xsl_fault.dsisr = dsisr;
+ link->xsl_fault.pe_data = *pe_data;
schedule = true;
/* mm_users count released by bottom half */
}
rcu_read_unlock();
if (schedule)
- schedule_work(&spa->xsl_fault.fault_work);
+ schedule_work(&link->xsl_fault.fault_work);
else
- ack_irq(spa, ADDRESS_ERROR);
+ ack_irq(link, ADDRESS_ERROR);
return IRQ_HANDLED;
}
-static void unmap_irq_registers(struct spa *spa)
-{
- pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
- spa->reg_pe_handle);
-}
-
-static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
-{
- return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
- &spa->reg_tfc, &spa->reg_pe_handle);
-}
-
-static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
+static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link,
+ int hwirq)
{
- struct spa *spa = link->spa;
int rc;
- int hwirq;
- rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
- if (rc)
- return rc;
-
- rc = map_irq_registers(dev, spa);
- if (rc)
- return rc;
-
- spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
- link->domain, link->bus, link->dev);
- if (!spa->irq_name) {
+ link->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
+ link->domain, link->bus, link->dev);
+ if (!link->irq_name) {
dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
rc = -ENOMEM;
goto err_xsl;
@@ -290,17 +227,17 @@ static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
* At some point, we'll need to look into allowing a higher
* number of interrupts. Could we have an IRQ domain per link?
*/
- spa->virq = irq_create_mapping(NULL, hwirq);
- if (!spa->virq) {
+ link->virq = irq_create_mapping(NULL, hwirq);
+ if (!link->virq) {
dev_err(&dev->dev,
"irq_create_mapping failed for translation interrupt\n");
rc = -EINVAL;
goto err_name;
}
- dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq,
spa->virq);
+ dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq,
link->virq);
- rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
+ rc = request_irq(link->virq, xsl_fault_handler, 0, link->irq_name,
link);
if (rc) {
dev_err(&dev->dev,
@@ -312,70 +249,26 @@ static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
return 0;
err_mapping:
- irq_dispose_mapping(spa->virq);
+ irq_dispose_mapping(link->virq);
err_name:
- kfree(spa->irq_name);
+ kfree(link->irq_name);
err_xsl:
- unmap_irq_registers(spa);
return rc;
}
static void release_xsl_irq(struct ocxl_link *link)
{
- struct spa *spa = link->spa;
-
- if (spa->virq) {
- free_irq(spa->virq, link);
- irq_dispose_mapping(spa->virq);
- }
- kfree(spa->irq_name);
- unmap_irq_registers(spa);
-}
-
-static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
-{
- struct spa *spa;
-
- spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
- if (!spa)
- return -ENOMEM;
-
- mutex_init(&spa->spa_lock);
- INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
- INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);
-
- spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
- spa->spa_mem = (struct ocxl_process_element *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
- if (!spa->spa_mem) {
- dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
- kfree(spa);
- return -ENOMEM;
- }
- pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain,
link->bus,
- link->dev, spa->spa_mem);
-
- link->spa = spa;
- return 0;
-}
-
-static void free_spa(struct ocxl_link *link)
-{
- struct spa *spa = link->spa;
-
- pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
- link->dev);
-
- if (spa && spa->spa_mem) {
- free_pages((unsigned long) spa->spa_mem, spa->spa_order);
- kfree(spa);
- link->spa = NULL;
+ if (link->virq) {
+ free_irq(link->virq, link);
+ irq_dispose_mapping(link->virq);
}
+ kfree(link->irq_name);
}
static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
struct ocxl_link *link;
+ int xsl_irq;
int rc;
link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
@@ -387,18 +280,18 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
link->bus = dev->bus->number;
link->dev = PCI_SLOT(dev->devfn);
atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
+ INIT_WORK(&link->xsl_fault.fault_work, xsl_fault_handler_bh);
- rc = alloc_spa(dev, link);
+ /* platform specific hook */
+ rc = pnv_ocxl_platform_setup(dev, PE_mask, &xsl_irq,
+ &link->platform_data);
if (rc)
goto err_free;
- rc = setup_xsl_irq(dev, link);
- if (rc)
- goto err_spa;
+ mutex_init(&link->pe_lock);
+ INIT_RADIX_TREE(&link->pe_tree, GFP_KERNEL);
- /* platform specific hook */
- rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
- &link->platform_data);
+ rc = setup_xsl_irq(dev, link, xsl_irq);
if (rc)
goto err_xsl_irq;
@@ -406,9 +299,7 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
return 0;
err_xsl_irq:
- release_xsl_irq(link);
-err_spa:
- free_spa(link);
+ pnv_ocxl_platform_release(link->platform_data);
err_free:
kfree(link);
return rc;
@@ -417,7 +308,6 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
static void free_link(struct ocxl_link *link)
{
release_xsl_irq(link);
- free_spa(link);
kfree(link);
}
@@ -455,7 +345,7 @@ static void release_xsl(struct kref *ref)
list_del(&link->list);
/* call platform code before releasing data */
- pnv_ocxl_spa_release(link->platform_data);
+ pnv_ocxl_platform_release(link->platform_data);
free_link(link);
}
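release_xsl() above is the kref release callback for the link: it runs under links_list_lock, unlinks the entry, lets the platform code tear down its data, and then frees the link. The excerpt doesn't show the kref member itself, so the "ref" field below is an assumption; the usual put pattern around such a callback looks like:

	/* Sketch, assuming a "struct kref ref" inside struct ocxl_link */
	static void put_link_sketch(struct ocxl_link *link)
	{
		mutex_lock(&links_list_lock);
		kref_put(&link->ref, release_xsl);	/* may free the link */
		mutex_unlock(&links_list_lock);
	}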
@@ -469,52 +359,20 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle)
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
-static u64 calculate_cfg_state(bool kernel)
-{
- u64 state;
-
- state = SPA_CFG_DR;
- if (mfspr(SPRN_LPCR) & LPCR_TC)
- state |= SPA_CFG_TC;
- if (radix_enabled())
- state |= SPA_CFG_XLAT_ror;
- else
- state |= SPA_CFG_XLAT_hpt;
- state |= SPA_CFG_HV;
- if (kernel) {
- if (mfmsr() & MSR_SF)
- state |= SPA_CFG_SF;
- } else {
- state |= SPA_CFG_PR;
- if (!test_tsk_thread_flag(current, TIF_32BIT))
- state |= SPA_CFG_SF;
- }
- return state;
-}
-
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
u64 amr, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
- struct spa *spa = link->spa;
- struct ocxl_process_element *pe;
int pe_handle, rc = 0;
struct pe_data *pe_data;
- BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
- if (pasid > SPA_PASID_MAX)
- return -EINVAL;
-
- mutex_lock(&spa->spa_lock);
- pe_handle = pasid & SPA_PE_MASK;
- pe = spa->spa_mem + pe_handle;
-
- if (pe->software_state) {
- rc = -EBUSY;
+ mutex_lock(&link->pe_lock);
+ rc = pnv_ocxl_set_pe(link->platform_data, mfspr(SPRN_LPID), pasid,
+ pidr, tidr, amr, &pe_handle);
+ if (rc)