[PATCH 3/5] ocxl: Tally up the LPC memory on a link & allow it to be mapped
From: Alastair D'Silva Tally up the LPC memory on an OpenCAPI link & allow it to be mapped Signed-off-by: Alastair D'Silva --- drivers/misc/ocxl/core.c | 9 + drivers/misc/ocxl/link.c | 61 +++ drivers/misc/ocxl/ocxl_internal.h | 42 + 3 files changed, 112 insertions(+) diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c index b7a09b21ab36..fdfe4e0a34e1 100644 --- a/drivers/misc/ocxl/core.c +++ b/drivers/misc/ocxl/core.c @@ -230,8 +230,17 @@ static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev) if (rc) goto err_free_pasid; + if (afu->config.lpc_mem_size || afu->config.special_purpose_mem_size) { + rc = ocxl_link_add_lpc_mem(afu->fn->link, + afu->config.lpc_mem_size + afu->config.special_purpose_mem_size); + if (rc) + goto err_free_mmio; + } + return 0; +err_free_mmio: + unmap_mmio_areas(afu); err_free_pasid: reclaim_afu_pasid(afu); err_free_actag: diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 58d111afd9f6..2874811a4398 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -84,6 +84,11 @@ struct ocxl_link { int dev; atomic_t irq_available; struct spa *spa; + struct mutex lpc_mem_lock; + u64 lpc_mem_sz; /* Total amount of LPC memory presented on the link */ + u64 lpc_mem; + int lpc_consumers; + void *platform_data; }; static struct list_head links_list = LIST_HEAD_INIT(links_list); @@ -396,6 +401,8 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l if (rc) goto err_spa; + mutex_init(&link->lpc_mem_lock); + /* platform specific hook */ rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, &link->platform_data); @@ -711,3 +718,57 @@ void ocxl_link_free_irq(void *link_handle, int hw_irq) atomic_inc(&link->irq_available); } EXPORT_SYMBOL_GPL(ocxl_link_free_irq); + +int ocxl_link_add_lpc_mem(void *link_handle, u64 size) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + u64 orig_size; + bool good = false; + + mutex_lock(&link->lpc_mem_lock); 
+ orig_size = link->lpc_mem_sz; + link->lpc_mem_sz += size; + + good = orig_size < link->lpc_mem_sz; + mutex_unlock(&link->lpc_mem_lock); + + // Check for overflow + return (good) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(ocxl_link_add_lpc_mem); + +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&link->lpc_mem_lock); + if (link->lpc_mem) { + u64 lpc_mem = link->lpc_mem; + + link->lpc_consumers++; + mutex_unlock(&link->lpc_mem_lock); + return lpc_mem; + } + + link->lpc_mem = pnv_ocxl_platform_lpc_setup(pdev, link->lpc_mem_sz); + if (link->lpc_mem) + link->lpc_consumers++; + mutex_unlock(&link->lpc_mem_lock); + + return link->lpc_mem; +} + +void ocxl_link_lpc_release(void *link_handle, struct pci_dev *pdev) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&link->lpc_mem_lock); + link->lpc_consumers--; + if (link->lpc_consumers == 0) { + pnv_ocxl_platform_lpc_release(pdev); + link->lpc_mem = 0; + } + + mutex_unlock(&link->lpc_mem_lock); +} diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index 97415afd79f3..db2647a90fc8 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -141,4 +141,46 @@ int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset); u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id); void ocxl_afu_irq_free_all(struct ocxl_context *ctx); +/** + * Increment the amount of memory required by an OpenCAPI link + * + * link_handle: The OpenCAPI link handle + * size: The amount of memory to increment by + * + * Return 0 on success, negative on overflow + */ +extern int ocxl_link_add_lpc_mem(void *link_handle, u64 size); + +/** + * Get the amount of memory required by an OpenCAPI link + * + * link_handle: The OpenCAPI link handle + * + * Return the amount of memory required by the link, this value is undefined if + * ocxl_link_add_lpc_mem failed. 
+ */ + extern u64 ocxl_link_get_lpc_mem_sz(void *link_handle); + +/** + * Map the LPC memory for an OpenCAPI device + * + * Since LPC memory belongs to a link, the whole LPC memory available + * on the link must be mapped in order to make it accessible to a device. + * + * @link_handle: The OpenCAPI link handle + * @pdev: A device that is on the link + */ +u64 ocxl_link_lpc_map(void *link_hand
Re: [PATCH 3/5] ocxl: Tally up the LPC memory on a link & allow it to be mapped
Le 17/09/2019 à 03:42, Alastair D'Silva a écrit : From: Alastair D'Silva Tally up the LPC memory on an OpenCAPI link & allow it to be mapped Signed-off-by: Alastair D'Silva --- drivers/misc/ocxl/core.c | 9 + drivers/misc/ocxl/link.c | 61 +++ drivers/misc/ocxl/ocxl_internal.h | 42 + 3 files changed, 112 insertions(+) diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c index b7a09b21ab36..fdfe4e0a34e1 100644 --- a/drivers/misc/ocxl/core.c +++ b/drivers/misc/ocxl/core.c @@ -230,8 +230,17 @@ static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev) if (rc) goto err_free_pasid; + if (afu->config.lpc_mem_size || afu->config.special_purpose_mem_size) { + rc = ocxl_link_add_lpc_mem(afu->fn->link, + afu->config.lpc_mem_size + afu->config.special_purpose_mem_size); I don't think we should count the special purpose memory, as it's not meant to be accessed through the GPU mem BAR, but I'll check. What happens when unconfiguring the AFU? We should reduce the range (see also below). Partial reconfig doesn't seem so far off, so we should take it into account. 
+ if (rc) + goto err_free_mmio; + } + return 0; +err_free_mmio: + unmap_mmio_areas(afu); err_free_pasid: reclaim_afu_pasid(afu); err_free_actag: diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 58d111afd9f6..2874811a4398 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -84,6 +84,11 @@ struct ocxl_link { int dev; atomic_t irq_available; struct spa *spa; + struct mutex lpc_mem_lock; + u64 lpc_mem_sz; /* Total amount of LPC memory presented on the link */ + u64 lpc_mem; + int lpc_consumers; + void *platform_data; }; static struct list_head links_list = LIST_HEAD_INIT(links_list); @@ -396,6 +401,8 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l if (rc) goto err_spa; + mutex_init(&link->lpc_mem_lock); + /* platform specific hook */ rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, &link->platform_data); @@ -711,3 +718,57 @@ void ocxl_link_free_irq(void *link_handle, int hw_irq) atomic_inc(&link->irq_available); } EXPORT_SYMBOL_GPL(ocxl_link_free_irq); + +int ocxl_link_add_lpc_mem(void *link_handle, u64 size) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + u64 orig_size; + bool good = false; + + mutex_lock(&link->lpc_mem_lock); + orig_size = link->lpc_mem_sz; + link->lpc_mem_sz += size; We have a choice to make here: 1. either we only support one LPC memory-carrying AFU (and the above is overkill) 2. or we support multiple AFUs with LPC memory (on the same function), but then I think the above is too simple. From the opencapi spec, each AFU can define a chunk of memory with a starting address and a size. There's no rule which says they have to be contiguous. There's no rule which says it must start at 0. So to support multiple AFUs with LPC memory, we should record the current maximum range instead of just the global size. Ultimately, we need to tell the NPU the range of permissible addresses. 
It starts at 0, so we need to take into account any initial offset and holes. I would go for option 2, to at least be consistent within ocxl and support multiple AFUs. Even though I don't think we'll see FPGA images with multiple AFUs with LPC memory any time soon. + good = orig_size < link->lpc_mem_sz; + mutex_unlock(&link->lpc_mem_lock); + + // Check for overflow + return (good) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(ocxl_link_add_lpc_mem); Does the symbol really need to be exported? IIUC, the next patch defines a higher level ocxl_afu_map_lpc_mem() which is meant to be called by a calling driver. + +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&link->lpc_mem_lock); + if (link->lpc_mem) { + u64 lpc_mem = link->lpc_mem; + + link->lpc_consumers++; + mutex_unlock(&link->lpc_mem_lock); + return lpc_mem; + } + + link->lpc_mem = pnv_ocxl_platform_lpc_setup(pdev, link->lpc_mem_sz); + if (link->lpc_mem) + link->lpc_consumers++; + mutex_unlock(&link->lpc_mem_lock); + + return link->lpc_mem; Should be cached in a temp variable, like on the fast path, otherwise it's accessed with no lock. +} + +void ocxl_link_lpc_release(void *link_handle, struct pci_dev *pdev) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&link->lpc_mem_l
Re: [PATCH 3/5] ocxl: Tally up the LPC memory on a link & allow it to be mapped
On Wed, 2019-09-18 at 16:02 +0200, Frederic Barrat wrote: > > Le 17/09/2019 à 03:42, Alastair D'Silva a écrit : > > From: Alastair D'Silva > > > > Tally up the LPC memory on an OpenCAPI link & allow it to be mapped > > > > Signed-off-by: Alastair D'Silva > > --- > > drivers/misc/ocxl/core.c | 9 + > > drivers/misc/ocxl/link.c | 61 > > +++ > > drivers/misc/ocxl/ocxl_internal.h | 42 + > > 3 files changed, 112 insertions(+) > > > > diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c > > index b7a09b21ab36..fdfe4e0a34e1 100644 > > --- a/drivers/misc/ocxl/core.c > > +++ b/drivers/misc/ocxl/core.c > > @@ -230,8 +230,17 @@ static int configure_afu(struct ocxl_afu *afu, > > u8 afu_idx, struct pci_dev *dev) > > if (rc) > > goto err_free_pasid; > > > > + if (afu->config.lpc_mem_size || afu- > > >config.special_purpose_mem_size) { > > + rc = ocxl_link_add_lpc_mem(afu->fn->link, > > + afu->config.lpc_mem_size + afu- > > >config.special_purpose_mem_size); > > I don't think we should count the special purpose memory, as it's > not > meant to be accessed through the GPU mem BAR, but I'll check. At least for OpenCAPI 3.0, there is no other in-spec way to access the memory if it is not mapped by the NPU. > > What happens when unconfiguring the AFU? We should reduce the range > (see > also below). Partial reconfig doesn't seem so far off, so we should > take > it into account. > The mapping is left until the last AFU on the link offlines it's memory, at which point we clear the mapping from the NPU. 
> > > + if (rc) > > + goto err_free_mmio; > > + } > > + > > return 0; > > > > +err_free_mmio: > > + unmap_mmio_areas(afu); > > err_free_pasid: > > reclaim_afu_pasid(afu); > > err_free_actag: > > diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c > > index 58d111afd9f6..2874811a4398 100644 > > --- a/drivers/misc/ocxl/link.c > > +++ b/drivers/misc/ocxl/link.c > > @@ -84,6 +84,11 @@ struct ocxl_link { > > int dev; > > atomic_t irq_available; > > struct spa *spa; > > + struct mutex lpc_mem_lock; > > + u64 lpc_mem_sz; /* Total amount of LPC memory presented on the > > link */ > > + u64 lpc_mem; > > + int lpc_consumers; > > + > > void *platform_data; > > }; > > static struct list_head links_list = LIST_HEAD_INIT(links_list); > > @@ -396,6 +401,8 @@ static int alloc_link(struct pci_dev *dev, int > > PE_mask, struct ocxl_link **out_l > > if (rc) > > goto err_spa; > > > > + mutex_init(&link->lpc_mem_lock); > > + > > /* platform specific hook */ > > rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, > > &link->platform_data); > > @@ -711,3 +718,57 @@ void ocxl_link_free_irq(void *link_handle, int > > hw_irq) > > atomic_inc(&link->irq_available); > > } > > EXPORT_SYMBOL_GPL(ocxl_link_free_irq); > > + > > +int ocxl_link_add_lpc_mem(void *link_handle, u64 size) > > +{ > > + struct ocxl_link *link = (struct ocxl_link *) link_handle; > > + > > + u64 orig_size; > > + bool good = false; > > + > > + mutex_lock(&link->lpc_mem_lock); > > + orig_size = link->lpc_mem_sz; > > + link->lpc_mem_sz += size; > > > We have a choice to make here: > 1. either we only support one LPC memory-carrying AFU (and the above > is > overkill) > 2. or we support multiple AFUs with LPC memory (on the same > function), > but then I think the above is too simple. > > From the opencapi spec, each AFU can define a chunk of memory with > a > starting address and a size. There's no rule which says they have to > be > contiguous. There's no rule which says it must start at 0. 
So to > support > multiple AFUs with LPC memory, we should record the current maximum > range instead of just the global size. Ultimately, we need to tell > the > NPU the range of permissible addresses. It starts at 0, so we need > to > take into account any initial offset and holes. > > I would go for option 2, to at least be consistent within ocxl and > support multiple AFUs. Even though I don't think we'll see FPGA > images > with multiple AFUs with LPC memory any time soon. > I'll rework this to take an offset & size, the NPU will map from the base address up to the largest offset + size provided across all AFUs on the link. > > > + good = orig_size < link->lpc_mem_sz; > > + mutex_unlock(&link->lpc_mem_lock); > > + > > + // Check for overflow > > + return (good) ? 0 : -EINVAL; > > +} > > +EXPORT_SYMBOL_GPL(ocxl_link_add_lpc_mem); > > Does the symbol really need to be exported? IIUC, the next patch > defines a > higher level ocxl_afu_map_lpc_mem() which is meant to be called by a > calling driver. > No, I'll remove it. > > > + > > +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev) > > +{ > > + struct ocxl_link *link = (struct ocxl_lin
Re: [PATCH 3/5] ocxl: Tally up the LPC memory on a link & allow it to be mapped
Le 19/09/2019 à 06:55, Alastair D'Silva a écrit : On Wed, 2019-09-18 at 16:02 +0200, Frederic Barrat wrote: Le 17/09/2019 à 03:42, Alastair D'Silva a écrit : From: Alastair D'Silva Tally up the LPC memory on an OpenCAPI link & allow it to be mapped Signed-off-by: Alastair D'Silva --- drivers/misc/ocxl/core.c | 9 + drivers/misc/ocxl/link.c | 61 +++ drivers/misc/ocxl/ocxl_internal.h | 42 + 3 files changed, 112 insertions(+) diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c index b7a09b21ab36..fdfe4e0a34e1 100644 --- a/drivers/misc/ocxl/core.c +++ b/drivers/misc/ocxl/core.c @@ -230,8 +230,17 @@ static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev) if (rc) goto err_free_pasid; + if (afu->config.lpc_mem_size || afu- config.special_purpose_mem_size) { + rc = ocxl_link_add_lpc_mem(afu->fn->link, + afu->config.lpc_mem_size + afu- config.special_purpose_mem_size); I don't think we should count the special purpose memory, as it's not meant to be accessed through the GPU mem BAR, but I'll check. At least for OpenCAPI 3.0, there is no other in-spec way to access the memory if it is not mapped by the NPU. Yes, that's clarified now and we should take the special purpose memory into account when defining the full range. Fred What happens when unconfiguring the AFU? We should reduce the range (see also below). Partial reconfig doesn't seem so far off, so we should take it into account. The mapping is left until the last AFU on the link offlines its memory, at which point we clear the mapping from the NPU. 
+ if (rc) + goto err_free_mmio; + } + return 0; +err_free_mmio: + unmap_mmio_areas(afu); err_free_pasid: reclaim_afu_pasid(afu); err_free_actag: diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 58d111afd9f6..2874811a4398 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -84,6 +84,11 @@ struct ocxl_link { int dev; atomic_t irq_available; struct spa *spa; + struct mutex lpc_mem_lock; + u64 lpc_mem_sz; /* Total amount of LPC memory presented on the link */ + u64 lpc_mem; + int lpc_consumers; + void *platform_data; }; static struct list_head links_list = LIST_HEAD_INIT(links_list); @@ -396,6 +401,8 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l if (rc) goto err_spa; + mutex_init(&link->lpc_mem_lock); + /* platform specific hook */ rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, &link->platform_data); @@ -711,3 +718,57 @@ void ocxl_link_free_irq(void *link_handle, int hw_irq) atomic_inc(&link->irq_available); } EXPORT_SYMBOL_GPL(ocxl_link_free_irq); + +int ocxl_link_add_lpc_mem(void *link_handle, u64 size) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + u64 orig_size; + bool good = false; + + mutex_lock(&link->lpc_mem_lock); + orig_size = link->lpc_mem_sz; + link->lpc_mem_sz += size; We have a choice to make here: 1. either we only support one LPC memory-carrying AFU (and the above is overkill) 2. or we support multiple AFUs with LPC memory (on the same function), but then I think the above is too simple. From the opencapi spec, each AFU can define a chunk of memory with a starting address and a size. There's no rule which says they have to be contiguous. There's no rule which says it must start at 0. So to support multiple AFUs with LPC memory, we should record the current maximum range instead of just the global size. Ultimately, we need to tell the NPU the range of permissible addresses. 
It starts at 0, so we need to take into account any initial offset and holes. I would go for option 2, to at least be consistent within ocxl and support multiple AFUs. Even though I don't think we'll see FPGA images with multiple AFUs with LPC memory any time soon. I'll rework this to take an offset & size, the NPU will map from the base address up to the largest offset + size provided across all AFUs on the link. + good = orig_size < link->lpc_mem_sz; + mutex_unlock(&link->lpc_mem_lock); + + // Check for overflow + return (good) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(ocxl_link_add_lpc_mem); Does the symbol really need to be exported? IIUC, the next patch defines a higher level ocxl_afu_map_lpc_mem() which is meant to be called by a calling driver. No, I'll remove it. + +u64 ocxl_link_lpc_map(void *link_handle, struct pci_dev *pdev) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&link->lpc_mem_lock); + if (link->lpc_mem) { + u64 lpc_mem = link->