Re: [PATCH net-next 2/4] mvpp2: use page_pool allocator

2020-07-02 Thread Matteo Croce
On Thu, Jul 2, 2020 at 9:31 AM  wrote:
>
> Hi Matteo,
>
> Thanks for working on this!
>

:)

> On Tue, Jun 30, 2020 at 08:09:28PM +0200, Matteo Croce wrote:
> > From: Matteo Croce 
> > -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
> > +/* Returns a struct page if page_pool is set, otherwise a buffer */
> > +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
> > +   struct page_pool *page_pool)
> >  {
> > + if (page_pool)
> > + return page_pool_alloc_pages(page_pool,
> > +  GFP_ATOMIC | __GFP_NOWARN);
>
> page_pool_dev_alloc_pages() can set these flags for you, instead of passing
> them explicitly
>

Ok
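
Something like this then, just dropping the explicit flags:

	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);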

> >
> > + if (priv->percpu_pools) {
> > + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
> > + if (err < 0)
> > + goto err_free_dma;
> > +
> > + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
> > + if (err < 0)
> > + goto err_unregister_rxq_short;
> > +
> > + /* Every RXQ has a pool for short and another for long packets */
> > + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
> > +  MEM_TYPE_PAGE_POOL,
> > +  priv->page_pool[rxq->logic_rxq]);
> > + if (err < 0)
> > + goto err_unregister_rxq_short;
> > +
> > + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
> > +  MEM_TYPE_PAGE_POOL,
> > +  priv->page_pool[rxq->logic_rxq + port->nrxqs]);
> > + if (err < 0)
> > + goto err_unregister_rxq_long;
>
> Since mvpp2_rxq_init() will return an error here, shouldn't we unregister the
> short queue's memory model as well?
>

Ok, I'll add another label like:

err_unregister_mem_rxq_short:
xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
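
That way a failure registering the long pool's mem model unwinds everything
registered so far. The label chain would roughly be (assuming the existing
labels each just unregister the corresponding rxq_info and fall through):

err_unregister_mem_rxq_short:
	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
err_unregister_rxq_long:
	xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
err_unregister_rxq_short:
	xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
err_free_dma:
	...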

-- 
per aspera ad upstream


Re: [PATCH net-next 2/4] mvpp2: use page_pool allocator

2020-07-02 Thread ilias.apalodimas
Hi Matteo, 

Thanks for working on this!

On Tue, Jun 30, 2020 at 08:09:28PM +0200, Matteo Croce wrote:
> From: Matteo Croce 
> 
> Use the page_pool API for memory management. This is a prerequisite for
> native XDP support.
> 
> Tested-by: Sven Auhagen 
> Signed-off-by: Matteo Croce 
> ---
>  drivers/net/ethernet/marvell/Kconfig  |   1 +
>  drivers/net/ethernet/marvell/mvpp2/mvpp2.h|   8 +
>  .../net/ethernet/marvell/mvpp2/mvpp2_main.c   | 155 +++---
>  3 files changed, 139 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
> index cd8ddd1ef6f2..ef4f35ba077d 100644
> --- a/drivers/net/ethernet/marvell/Kconfig
> +++ b/drivers/net/ethernet/marvell/Kconfig
> @@ -87,6 +87,7 @@ config MVPP2
>   depends on ARCH_MVEBU || COMPILE_TEST
>   select MVMDIO
>   select PHYLINK
> + select PAGE_POOL
>   help
> This driver supports the network interface units in the
> Marvell ARMADA 375, 7K and 8K SoCs.
> diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> index 543a310ec102..4c16c9e9c1e5 100644
> --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> @@ -15,6 +15,7 @@
>  #include 
>  #include 
>  #include 
> +#include <net/page_pool.h>
>  
>  /* Fifo Registers */
>  #define MVPP2_RX_DATA_FIFO_SIZE_REG(port)(0x00 + 4 * (port))
> @@ -820,6 +821,9 @@ struct mvpp2 {
>  
>   /* RSS Indirection tables */
>   struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
> +
> + /* page_pool allocator */
> + struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
>  };
>  
>  struct mvpp2_pcpu_stats {
> @@ -1161,6 +1165,10 @@ struct mvpp2_rx_queue {
>  
>   /* Port's logic RXQ number to which physical RXQ is mapped */
>   int logic_rxq;
> +
> + /* XDP memory accounting */
> + struct xdp_rxq_info xdp_rxq_short;
> + struct xdp_rxq_info xdp_rxq_long;
>  };
>  
>  struct mvpp2_bm_pool {
> diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> index 027de7291f92..9e2e8fb0a0b8 100644
> --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> @@ -95,6 +95,22 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
>   return cpu % priv->nthreads;
>  }
>  
> +static struct page_pool *
> +mvpp2_create_page_pool(struct device *dev, int num, int len)
> +{
> + struct page_pool_params pp_params = {
> + /* internal DMA mapping in page_pool */
> + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
> + .pool_size = num,
> + .nid = NUMA_NO_NODE,
> + .dev = dev,
> + .dma_dir = DMA_FROM_DEVICE,
> + .max_len = len,
> + };
> +
> + return page_pool_create(&pp_params);
> +}
> +
>  /* These accessors should be used to access:
>   *
>   * - per-thread registers, where each thread has its own copy of the
> @@ -327,17 +343,26 @@ static inline int mvpp2_txq_phys(int port, int txq)
>   return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
>  }
>  
> -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
> +/* Returns a struct page if page_pool is set, otherwise a buffer */
> +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
> +   struct page_pool *page_pool)
>  {
> + if (page_pool)
> + return page_pool_alloc_pages(page_pool,
> +  GFP_ATOMIC | __GFP_NOWARN);

page_pool_dev_alloc_pages() can set these flags for you, instead of passing
them explicitly

> +
>   if (likely(pool->frag_size <= PAGE_SIZE))
>   return netdev_alloc_frag(pool->frag_size);
> - else
> - return kmalloc(pool->frag_size, GFP_ATOMIC);
> +
> + return kmalloc(pool->frag_size, GFP_ATOMIC);
>  }
>  
> -static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
> +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
> + struct page_pool *page_pool, void *data)
>  {
> - if (likely(pool->frag_size <= PAGE_SIZE))
> + if (page_pool)
> + page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
> + else if (likely(pool->frag_size <= PAGE_SIZE))
>   skb_free_frag(data);
>   else
>   kfree(data);
> @@ -442,6 +467,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
>  static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
>  struct mvpp2_bm_pool *bm_pool, int buf_num)
>  {
> + struct page_pool *pp = NULL;
>   int i;
>  
>   if (buf_num > bm_pool->buf_num) {
> @@ -450,6 +476,9 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
>   buf_num = bm_pool->buf_num;