Allow memory providers to configure rx queues with a custom receive
page size. It's passed in struct pp_memory_provider_params, which is
copied into the queue, so it's preserved across queue restarts. Then,
it's propagated to the driver in a new queue config parameter.

Drivers should explicitly opt into using it by setting the
QCFG_RX_PAGE_SIZE bit in supported_params, in which case they must also
implement ndo_default_qcfg, validate the requested size on queue
restart, and honour the current config in case of a reset.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 include/net/netdev_queues.h   | 10 ++++++++++
 include/net/page_pool/types.h |  1 +
 net/core/netdev_rx_queue.c    |  9 +++++++++
 3 files changed, 20 insertions(+)

diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index f6f1f71a24e1..feca25131930 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -15,6 +15,7 @@ struct netdev_config {
 };
 
 struct netdev_queue_config {
+       u32     rx_page_size;
 };
 
 /* See the netdev.yaml spec for definition of each statistic */
@@ -114,6 +115,11 @@ void netdev_stat_queue_sum(struct net_device *netdev,
                           int tx_start, int tx_end,
                           struct netdev_queue_stats_tx *tx_sum);
 
+enum {
+       /* The queue checks and honours the page size qcfg parameter */
+       QCFG_RX_PAGE_SIZE       = 0x1,
+};
+
 /**
  * struct netdev_queue_mgmt_ops - netdev ops for queue management
  *
@@ -135,6 +141,8 @@ void netdev_stat_queue_sum(struct net_device *netdev,
  *
  * @ndo_default_qcfg:  Populate queue config struct with defaults. Optional.
  *
+ * @supported_params:  Bitmask of supported parameters, see QCFG_*.
+ *
  * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
  * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
  * be called for an interface which is open.
@@ -158,6 +166,8 @@ struct netdev_queue_mgmt_ops {
                                    struct netdev_queue_config *qcfg);
        struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
                                                 int idx);
+
+       unsigned int supported_params;
 };
 
 bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 1509a536cb85..0d453484a585 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -161,6 +161,7 @@ struct memory_provider_ops;
 struct pp_memory_provider_params {
        void *mp_priv;
        const struct memory_provider_ops *mp_ops;
+       u32 rx_page_size;
 };
 
 struct page_pool {
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 86d1c0a925e3..b81cad90ba2f 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -30,12 +30,21 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
            !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start)
                return -EOPNOTSUPP;
 
+       if (WARN_ON_ONCE(qops->supported_params && !qops->ndo_default_qcfg))
+               return -EINVAL;
+
        netdev_assert_locked(dev);
 
        memset(&qcfg, 0, sizeof(qcfg));
        if (qops->ndo_default_qcfg)
                qops->ndo_default_qcfg(dev, &qcfg);
 
+       if (rxq->mp_params.rx_page_size) {
+               if (!(qops->supported_params & QCFG_RX_PAGE_SIZE))
+                       return -EOPNOTSUPP;
+               qcfg.rx_page_size = rxq->mp_params.rx_page_size;
+       }
+
        new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
        if (!new_mem)
                return -ENOMEM;
-- 
2.52.0


Reply via email to