On Sun, Jan 18, 2026 at 10:54:15PM +0900, Koichiro Den wrote:
> pci-epf-vntb can pack multiple memory windows into a single BAR using
> mwN_offset. With the NTB core gaining support for programming multiple
> translation ranges for a window, the EPF needs to provide the per-BAR
> subrange layout to the endpoint controller (EPC).
>
> Implement .mw_set_trans_ranges() for pci-epf-vntb. Track subranges for
> each BAR and pass them to pci_epc_set_bar() so EPC drivers can select an
> appropriate inbound mapping mode (e.g. Address Match mode on DesignWare
> controllers) when subrange mappings are required.
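
Just to illustrate for other readers (not part of the patch, and the
addresses below are made up; the field names are pci_epf_bar_submap from
this series): with two 1 MiB windows packed into one BAR via mw1_offset=0
and mw2_offset=0x100000, the EPF ends up describing the BAR to the EPC
roughly as:

        /* illustrative only: MW1 at BAR offset 0x0, MW2 at 0x100000 */
        epf_bar->submap[0].phys_addr = 0x80000000;      /* MW1 translation target */
        epf_bar->submap[0].size      = SZ_1M;
        epf_bar->submap[1].phys_addr = 0x90000000;      /* MW2 translation target */
        epf_bar->submap[1].size      = SZ_1M;
        epf_bar->num_submap          = 2;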
>
> Signed-off-by: Koichiro Den <[email protected]>
> ---
>  drivers/pci/endpoint/functions/pci-epf-vntb.c | 183 +++++++++++++++++-
>  1 file changed, 175 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
> index 39e784e21236..98128c2c5079 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
> @@ -42,6 +42,7 @@
>  #include <linux/log2.h>
>  #include <linux/module.h>
>  #include <linux/slab.h>
> +#include <linux/sort.h>
>
>  #include <linux/pci-ep-msi.h>
>  #include <linux/pci-epc.h>
> @@ -144,6 +145,10 @@ struct epf_ntb {
>
>       enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];
>
> +     /* Cache for subrange mapping */
> +     struct ntb_mw_subrange *mw_subrange[MAX_MW];
> +     unsigned int num_subrange[MAX_MW];
> +
>       struct epf_ntb_ctrl *reg;
>
>       u32 *epf_db;
> @@ -736,6 +741,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
>               ntb->epf->bar[barno].flags |= upper_32_bits(size) ?
>                               PCI_BASE_ADDRESS_MEM_TYPE_64 :
>                               PCI_BASE_ADDRESS_MEM_TYPE_32;
> +             ntb->epf->bar[barno].num_submap = 0;
>
>               ret = pci_epc_set_bar(ntb->epf->epc,
>                                     ntb->epf->func_no,
> @@ -1405,28 +1411,188 @@ static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
>       return 0;
>  }
>
> -static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
> -             dma_addr_t addr, resource_size_t size)
> +struct vntb_mw_order {
> +     u64 off;
> +     unsigned int mw;
> +};
> +
> +static int vntb_cmp_mw_order(const void *a, const void *b)
> +{
> +     const struct vntb_mw_order *ma = a;
> +     const struct vntb_mw_order *mb = b;
> +
> +     if (ma->off < mb->off)
> +             return -1;
> +     if (ma->off > mb->off)
> +             return 1;
> +     return 0;
> +}
> +
> +static int vntb_epf_mw_set_trans_ranges(struct ntb_dev *ndev, int pidx, int idx,
> +                                     unsigned int num_ranges,
> +                                     const struct ntb_mw_subrange *ranges)
>  {
>       struct epf_ntb *ntb = ntb_ndev(ndev);
> +     struct pci_epf_bar_submap *submap;
> +     struct vntb_mw_order mws[MAX_MW];
>       struct pci_epf_bar *epf_bar;
> +     struct ntb_mw_subrange *r;
>       enum pci_barno barno;
> +     struct device *dev, *epf_dev;
> +     unsigned int total_ranges = 0;
> +     unsigned int mw_cnt = 0;
> +     unsigned int cur = 0;
> +     u64 expected_off = 0;
> +     unsigned int i, j;
>       int ret;
> +
> +     dev = &ntb->ntb->dev;
> +     epf_dev = &ntb->epf->dev;
> +     barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
> +     epf_bar = &ntb->epf->bar[barno];
> +     epf_bar->barno = barno;
> +
> +     r = devm_kmemdup(epf_dev, ranges, num_ranges * sizeof(*ranges), GFP_KERNEL);

Please use size_mul(sizeof(*ranges), num_ranges) here so the multiplication cannot overflow.
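
i.e. something like this (untested; size_mul() is from <linux/overflow.h>):

        r = devm_kmemdup(epf_dev, ranges,
                         size_mul(sizeof(*ranges), num_ranges), GFP_KERNEL);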

> +     if (!r)
> +             return -ENOMEM;
> +
> +     if (ntb->mw_subrange[idx])
> +             devm_kfree(epf_dev, ntb->mw_subrange[idx]);
> +
> +     ntb->mw_subrange[idx] = r;
> +     ntb->num_subrange[idx] = num_ranges;
> +
> +     /* Defer pci_epc_set_bar() until all MWs in this BAR have range info. */
> +     for (i = 0; i < MAX_MW; i++) {
> +             enum pci_barno bar = ntb->epf_ntb_bar[BAR_MW1 + i];
> +
> +             if (bar != barno)
> +                     continue;
> +             if (!ntb->num_subrange[i])
> +                     return 0;
> +
> +             mws[mw_cnt].mw = i;
> +             mws[mw_cnt].off = ntb->mws_offset[i];
> +             mw_cnt++;
> +     }
> +
> +     sort(mws, mw_cnt, sizeof(mws[0]), vntb_cmp_mw_order, NULL);

Can we require that mws_offset be in ascending order, so the sort here isn't needed?
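
If that ordering is enforced (e.g. validated where mwN_offset is set),
the vntb_mw_order array and the sort() can go away, and the collection
loop above already visits the MWs in layout order. Rough, untested sketch:

        for (i = 0; i < MAX_MW; i++) {
                if (ntb->epf_ntb_bar[BAR_MW1 + i] != barno)
                        continue;
                if (!ntb->num_subrange[i])
                        return 0;       /* wait until every MW in this BAR has ranges */
                /* MW i is the next sub-window in the BAR; validate its
                 * subranges and append them to the submap right here */
        }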

> +
> +     /* BAR submap must cover the whole BAR with no holes. */
> +     for (i = 0; i < mw_cnt; i++) {
> +             unsigned int mw = mws[i].mw;
> +             u64 sum = 0;
> +
> +             if (mws[i].off != expected_off) {

Can we use size everywhere instead of 'off', to keep this aligned with submap?

Frank
> +                     dev_err(dev,
> +                             "BAR%d: hole/overlap at %#llx (MW%d@%#llx)\n",
> +                             barno, expected_off, mw + 1, mws[i].off);
> +                     return -EINVAL;
> +             }
> +
> +             total_ranges += ntb->num_subrange[mw];
> +             for (j = 0; j < ntb->num_subrange[mw]; j++)
> +                     sum += ntb->mw_subrange[mw][j].size;
> +
> +             if (sum != ntb->mws_size[mw]) {
> +                     dev_err(dev,
> +                             "MW%d: ranges size %#llx != window size %#llx\n",
> +                             mw + 1, sum, ntb->mws_size[mw]);
> +                     return -EINVAL;
> +             }
> +             expected_off += ntb->mws_size[mw];
> +     }
> +
> +     submap = devm_krealloc_array(epf_dev, epf_bar->submap, total_ranges,
> +                                  sizeof(*submap), GFP_KERNEL);
> +     if (!submap)
> +             return -ENOMEM;
> +
> +     epf_bar->submap = submap;
> +     epf_bar->num_submap = total_ranges;
> +     dev_dbg(dev, "Requesting BAR%d layout (%u subranges):\n",
> +             barno, total_ranges);
> +
> +     for (i = 0; i < mw_cnt; i++) {
> +             unsigned int mw = mws[i].mw;
> +
> +             dev_dbg(dev, "- MW%d\n", 1 + mw);
> +             for (j = 0; j < ntb->num_subrange[mw]; j++) {
> +                     dev_dbg(dev, "  - addr/size = %#llx/%#llx\n",
> +                             ntb->mw_subrange[mw][j].addr,
> +                             ntb->mw_subrange[mw][j].size);
> +                     submap[cur].phys_addr = ntb->mw_subrange[mw][j].addr;
> +                     submap[cur].size = ntb->mw_subrange[mw][j].size;
> +                     cur++;
> +             }
> +     }
> +
> +     ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
> +                           ntb->epf->vfunc_no, epf_bar);
> +     if (ret)
> +             dev_err(dev, "BAR%d: failed to program mappings for MW%d: %d\n",
> +                     barno, idx + 1, ret);
> +
> +     return ret;
> +}
> +
> +static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
> +                              dma_addr_t addr, resource_size_t size)
> +{
> +     struct epf_ntb *ntb = ntb_ndev(ndev);
> +     struct pci_epf_bar *epf_bar;
> +     resource_size_t bar_size;
> +     enum pci_barno barno;
>       struct device *dev;
> +     unsigned int i;
> +     int ret;
>
>       dev = &ntb->ntb->dev;
>       barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
>       epf_bar = &ntb->epf->bar[barno];
>       epf_bar->phys_addr = addr;
>       epf_bar->barno = barno;
> -     epf_bar->size = size;
>
> -     ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar);
> -     if (ret) {
> -             dev_err(dev, "failure set mw trans\n");
> -             return ret;
> +     bar_size = epf_bar->size;
> +     if (!bar_size || !size)
> +             return -EINVAL;
> +
> +     if (size != ntb->mws_size[idx])
> +             return -EINVAL;
> +
> +     /*
> +      * Even if the caller intends to map the entire MW, the MW might
> +      * actually be just a part of the BAR. In that case, redirect the
> +      * handling to vntb_epf_mw_set_trans_ranges().
> +      */
> +     if (size < bar_size) {
> +             struct ntb_mw_subrange r = {
> +                     .addr = addr,
> +                     .size = size,
> +             };
> +             return vntb_epf_mw_set_trans_ranges(ndev, pidx, idx, 1, &r);
>       }
> -     return 0;
> +
> +     /* Drop any stale cache for the BAR. */
> +     for (i = 0; i < MAX_MW; i++) {
> +             if (ntb->epf_ntb_bar[BAR_MW1 + i] != barno)
> +                     continue;
> +             devm_kfree(&ntb->epf->dev, ntb->mw_subrange[i]);
> +             ntb->mw_subrange[i] = NULL;
> +             ntb->num_subrange[i] = 0;
> +     }
> +
> +     /* Subrange mapping is not used; clear any stale mapping left from before. */
> +     devm_kfree(&ntb->epf->dev, epf_bar->submap);
> +     epf_bar->submap = NULL;
> +     epf_bar->num_submap = 0;
> +
> +     ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
> +                           ntb->epf->vfunc_no, epf_bar);
> +     if (ret)
> +             dev_err(dev, "failure set mw trans\n");
> +
> +     return ret;
>  }
>
>  static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx)
> @@ -1590,6 +1756,7 @@ static const struct ntb_dev_ops vntb_epf_ops = {
>       .db_vector_mask         = vntb_epf_db_vector_mask,
>       .db_set_mask            = vntb_epf_db_set_mask,
>       .mw_set_trans           = vntb_epf_mw_set_trans,
> +     .mw_set_trans_ranges    = vntb_epf_mw_set_trans_ranges,
>       .mw_clear_trans         = vntb_epf_mw_clear_trans,
>       .peer_mw_get_addr       = vntb_epf_peer_mw_get_addr,
>       .link_enable            = vntb_epf_link_enable,
> --
> 2.51.0
>
