RE: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Dexuan Cui
> From: Stephen Hemminger 
> Sent: Thursday, April 15, 2021 2:15 PM
> > ...
> > +   netif_carrier_off(ndev);
> > +
> > +   get_random_bytes(apc->hashkey, MANA_HASH_KEY_SIZE);
> 
> Current practice for network drivers is to use netdev_rss_key_fill() for this.

Will change to netdev_rss_key_fill(). Thanks!
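Something like this, as an untested sketch (assuming apc->hashkey stays
MANA_HASH_KEY_SIZE bytes; netdev_rss_key_fill() is declared in
<linux/netdevice.h> and fills the buffer from the kernel's cached boot-time
RSS key instead of drawing fresh random bytes per device):

	/* before */
	get_random_bytes(apc->hashkey, MANA_HASH_KEY_SIZE);

	/* after: reuse the kernel's cached RSS key */
	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);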


Re: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Stephen Hemminger
On Wed, 14 Apr 2021 22:45:19 -0700
Dexuan Cui  wrote:

> +static int mana_probe_port(struct mana_context *ac, int port_idx,
> +struct net_device **ndev_storage)
> +{
> + struct gdma_context *gc = ac->gdma_dev->gdma_context;
> + struct mana_port_context *apc;
> + struct net_device *ndev;
> + int err;
> +
> + ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
> +  gc->max_num_queues);
> + if (!ndev)
> + return -ENOMEM;
> +
> + *ndev_storage = ndev;
> +
> + apc = netdev_priv(ndev);
> + apc->ac = ac;
> + apc->ndev = ndev;
> + apc->max_queues = gc->max_num_queues;
> + apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
> + apc->port_handle = INVALID_MANA_HANDLE;
> + apc->port_idx = port_idx;
> +
> + ndev->netdev_ops = &mana_devops;
> + ndev->ethtool_ops = &mana_ethtool_ops;
> + ndev->mtu = ETH_DATA_LEN;
> + ndev->max_mtu = ndev->mtu;
> + ndev->min_mtu = ndev->mtu;
> + ndev->needed_headroom = MANA_HEADROOM;
> + SET_NETDEV_DEV(ndev, gc->dev);
> +
> + netif_carrier_off(ndev);
> +
> + get_random_bytes(apc->hashkey, MANA_HASH_KEY_SIZE);

Current practice for network drivers is to use netdev_rss_key_fill() for this.


RE: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Haiyang Zhang
> -Original Message-
> From: Stephen Hemminger 
> Sent: Thursday, April 15, 2021 5:08 PM
> To: Dexuan Cui 
> Cc: da...@davemloft.net; k...@kernel.org; KY Srinivasan
> ; Haiyang Zhang ; Stephen
> Hemminger ; wei@kernel.org; Wei Liu
> ; net...@vger.kernel.org; l...@kernel.org;
> and...@lunn.ch; be...@petrovitsch.priv.at; rdun...@infradead.org;
> Shachar Raindel ; linux-kernel@vger.kernel.org;
> linux-hyp...@vger.kernel.org
> Subject: Re: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure
> Network Adapter (MANA)
> 
> On Wed, 14 Apr 2021 22:45:19 -0700
> Dexuan Cui  wrote:
> 
> > +static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
> > +   u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
> > +{
> > +   struct mana_query_vport_cfg_resp resp = {};
> > +   struct mana_query_vport_cfg_req req = {};
> > +   int err;
> > +
> > +   mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
> > +sizeof(req), sizeof(resp));
> > +
> > +   req.vport_index = vport_index;
> > +
> > +   err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
> > +   sizeof(resp));
> > +   if (err)
> > +   return err;
> > +
> > +   err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
> > +  sizeof(resp));
> > +   if (err)
> > +   return err;
> > +
> > +   if (resp.hdr.status)
> > +   return -EPROTO;
> > +
> > +   *max_sq = resp.max_num_sq;
> > +   *max_rq = resp.max_num_rq;
> > +   *num_indir_entry = resp.num_indirection_ent;
> > +
> > +   apc->port_handle = resp.vport;
> > +   memcpy(apc->mac_addr, resp.mac_addr, ETH_ALEN);
> 
> You could use ether_addr_copy here.
> 
> 
> > +int mana_do_attach(struct net_device *ndev, enum mana_attach_caller caller)
> > +{
> > +   struct mana_port_context *apc = netdev_priv(ndev);
> > +   struct gdma_dev *gd = apc->ac->gdma_dev;
> > +   u32 max_txq, max_rxq, max_queues;
> > +   int port_idx = apc->port_idx;
> > +   u32 num_indirect_entries;
> > +   int err;
> > +
> > +   if (caller == MANA_OPEN)
> > +   goto start_open;
> > +
> > +   err = mana_init_port_context(apc);
> > +   if (err)
> > +   return err;
> > +
> > +   err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
> > +  &num_indirect_entries);
> > +   if (err) {
> > +   netdev_err(ndev, "Failed to query info for vPort 0\n");
> > +   goto reset_apc;
> > +   }
> > +
> > +   max_queues = min_t(u32, max_txq, max_rxq);
> > +   if (apc->max_queues > max_queues)
> > +   apc->max_queues = max_queues;
> > +
> > +   if (apc->num_queues > apc->max_queues)
> > +   apc->num_queues = apc->max_queues;
> > +
> > +   memcpy(ndev->dev_addr, apc->mac_addr, ETH_ALEN);
> 
> And here use ether_addr_copy().

Thanks, I will update these.

- Haiyang


Re: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Stephen Hemminger
On Wed, 14 Apr 2021 22:45:19 -0700
Dexuan Cui  wrote:

> +static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
> + u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
> +{
> + struct mana_query_vport_cfg_resp resp = {};
> + struct mana_query_vport_cfg_req req = {};
> + int err;
> +
> + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
> +  sizeof(req), sizeof(resp));
> +
> + req.vport_index = vport_index;
> +
> + err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
> + sizeof(resp));
> + if (err)
> + return err;
> +
> + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
> +sizeof(resp));
> + if (err)
> + return err;
> +
> + if (resp.hdr.status)
> + return -EPROTO;
> +
> + *max_sq = resp.max_num_sq;
> + *max_rq = resp.max_num_rq;
> + *num_indir_entry = resp.num_indirection_ent;
> +
> + apc->port_handle = resp.vport;
> + memcpy(apc->mac_addr, resp.mac_addr, ETH_ALEN);

You could use ether_addr_copy here.


> +int mana_do_attach(struct net_device *ndev, enum mana_attach_caller caller)
> +{
> + struct mana_port_context *apc = netdev_priv(ndev);
> + struct gdma_dev *gd = apc->ac->gdma_dev;
> + u32 max_txq, max_rxq, max_queues;
> + int port_idx = apc->port_idx;
> + u32 num_indirect_entries;
> + int err;
> +
> + if (caller == MANA_OPEN)
> + goto start_open;
> +
> + err = mana_init_port_context(apc);
> + if (err)
> + return err;
> +
> + err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
> +&num_indirect_entries);
> + if (err) {
> + netdev_err(ndev, "Failed to query info for vPort 0\n");
> + goto reset_apc;
> + }
> +
> + max_queues = min_t(u32, max_txq, max_rxq);
> + if (apc->max_queues > max_queues)
> + apc->max_queues = max_queues;
> +
> + if (apc->num_queues > apc->max_queues)
> + apc->num_queues = apc->max_queues;
> +
> + memcpy(ndev->dev_addr, apc->mac_addr, ETH_ALEN);

And here use ether_addr_copy().
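
For illustration, the two spots would become something like this (untested
sketch; ether_addr_copy() from <linux/etherdevice.h> requires both buffers
to be 16-bit aligned, which is worth double-checking for how mac_addr is
laid out in the MANA structs):

	/* in mana_query_vport_cfg() */
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	/* in mana_do_attach() */
	ether_addr_copy(ndev->dev_addr, apc->mac_addr);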



RE: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Dexuan Cui
> From: Jakub Kicinski 
> Sent: Thursday, April 15, 2021 10:44 AM
>  ...
> On Wed, 14 Apr 2021 22:45:19 -0700 Dexuan Cui wrote:
> > +   buf = dma_alloc_coherent(gmi->dev, length, &dma_handle,
> > +GFP_KERNEL | __GFP_ZERO);
> 
> No need for GFP_ZERO, dma_alloc_coherent() zeroes the memory these days.

Yes, indeed. Will remove __GFP_ZERO.
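The call then shrinks to something like this (untested sketch, assuming the
surrounding error handling stays as it is today):

	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
	if (!buf)		/* allocation failure path kept as-is */
		return -ENOMEM;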

> 
> > +static int mana_gd_register_irq(struct gdma_queue *queue,
> > +   const struct gdma_queue_spec *spec)
> > ...
> > +   struct gdma_irq_context *gic;
> > +
> > +   struct gdma_context *gc;
> 
> Why the empty line?

No good reason. Will remove this line. I'll check the whole patch
for similar issues.

> 
> > +   queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> > +   if (!queue)
> > +   return -ENOMEM;
> > +
> > +   gmi = &queue->mem_info;
> > +   err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
> > +   if (err)
> > +   return err;
> 
> Leaks the memory from 'queue'?

Sorry. This is a bug I introduced when moving around some code.

> Same code in mana_gd_create_mana_eq(), ...wq_cq(), etc.

Will fix all of them, and check the code for similar issues.
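
For the queue allocation above, an untested sketch of the unwind
(hypothetical label name):

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err)
		goto free_q;

	/* ... remaining setup; later failures unwind through labels that
	 * also release the gdma memory before falling through here ...
	 */
	return 0;

free_q:
	kfree(queue);
	return err;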

> > +int mana_do_attach(struct net_device *ndev, enum mana_attach_caller caller)
> > +{
> > +   struct mana_port_context *apc = netdev_priv(ndev);
> > +   struct gdma_dev *gd = apc->ac->gdma_dev;
> > +   u32 max_txq, max_rxq, max_queues;
> > +   int port_idx = apc->port_idx;
> > +   u32 num_indirect_entries;
> > +   int err;
> > +
> > +   if (caller == MANA_OPEN)
> > +   goto start_open;
> > +
> > +   err = mana_init_port_context(apc);
> > +   if (err)
> > +   return err;
> > +
> > +   err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
> > +  &num_indirect_entries);
> > +   if (err) {
> > +   netdev_err(ndev, "Failed to query info for vPort 0\n");
> > +   goto reset_apc;
> > +   }
> > +
> > +   max_queues = min_t(u32, max_txq, max_rxq);
> > +   if (apc->max_queues > max_queues)
> > +   apc->max_queues = max_queues;
> > +
> > +   if (apc->num_queues > apc->max_queues)
> > +   apc->num_queues = apc->max_queues;
> > +
> > +   memcpy(ndev->dev_addr, apc->mac_addr, ETH_ALEN);
> > +
> > +   if (caller == MANA_PROBE)
> > +   return 0;
> > +
> > +start_open:
> 
> Why keep this as a single function? There seems to be no overlap between
> what's done for OPEN and PROBE.
> 
> Similarly detach should probably be split into clearly distinct parts.

Will improve the code. Thanks for the suggestion!

Thanks,
Dexuan


Re: [PATCH v6 net-next] net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)

2021-04-15 Thread Jakub Kicinski
On Wed, 14 Apr 2021 22:45:19 -0700 Dexuan Cui wrote:
> + buf = dma_alloc_coherent(gmi->dev, length, &dma_handle,
> +  GFP_KERNEL | __GFP_ZERO);

No need for GFP_ZERO, dma_alloc_coherent() zeroes the memory these days.

> +static int mana_gd_register_irq(struct gdma_queue *queue,
> + const struct gdma_queue_spec *spec)
> ...
> + struct gdma_irq_context *gic;
> +
> + struct gdma_context *gc;

Why the empty line?

> + queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> + if (!queue)
> + return -ENOMEM;
> +
> + gmi = &queue->mem_info;
> + err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
> + if (err)
> + return err;

Leaks the memory from 'queue'?

Same code in mana_gd_create_mana_eq(), ...wq_cq(), etc.

> +int mana_do_attach(struct net_device *ndev, enum mana_attach_caller caller)
> +{
> + struct mana_port_context *apc = netdev_priv(ndev);
> + struct gdma_dev *gd = apc->ac->gdma_dev;
> + u32 max_txq, max_rxq, max_queues;
> + int port_idx = apc->port_idx;
> + u32 num_indirect_entries;
> + int err;
> +
> + if (caller == MANA_OPEN)
> + goto start_open;
> +
> + err = mana_init_port_context(apc);
> + if (err)
> + return err;
> +
> + err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
> +  &num_indirect_entries);
> + if (err) {
> + netdev_err(ndev, "Failed to query info for vPort 0\n");
> + goto reset_apc;
> + }
> +
> + max_queues = min_t(u32, max_txq, max_rxq);
> + if (apc->max_queues > max_queues)
> + apc->max_queues = max_queues;
> +
> + if (apc->num_queues > apc->max_queues)
> + apc->num_queues = apc->max_queues;
> +
> + memcpy(ndev->dev_addr, apc->mac_addr, ETH_ALEN);
> +
> + if (caller == MANA_PROBE)
> + return 0;
> +
> +start_open:

Why keep this as a single function? There seems to be no overlap between
what's done for OPEN and PROBE.

Similarly detach should probably be split into clearly distinct parts.

> + err = mana_create_eq(apc);
> + if (err)
> + goto reset_apc;
> +
> + err = mana_create_vport(apc, ndev);
> + if (err)
> + goto destroy_eq;
> +
> + err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
> + if (err)
> + goto destroy_vport;
> +
> + err = mana_add_rx_queues(apc, ndev);
> + if (err)
> + goto destroy_vport;
> +
> + apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
> +
> + err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
> + if (err)
> + goto destroy_vport;
> +
> + mana_rss_table_init(apc);
> +
> + err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
> + if (err)
> + goto destroy_vport;
> +
> + return 0;
> +
> +destroy_vport:
> + mana_destroy_vport(apc);
> +destroy_eq:
> + mana_destroy_eq(gd->gdma_context, apc);
> +reset_apc:
> + if (caller == MANA_OPEN)
> + return err;
> + kfree(apc->rxqs);
> + apc->rxqs = NULL;
> + return err;
> +}
> +
> +int mana_attach(struct net_device *ndev)
> +{
> + struct mana_port_context *apc = netdev_priv(ndev);
> + int err;
> +
> + ASSERT_RTNL();
> +
> + err = mana_do_attach(ndev, MANA_ATTACH);
> + if (err)
> + return err;
> +
> + netif_device_attach(ndev);
> +
> + apc->port_is_up = apc->port_st_save;
> +
> + /* Ensure port state updated before txq state */
> + smp_wmb();
> +
> + if (apc->port_is_up) {
> + netif_carrier_on(ndev);
> + netif_tx_wake_all_queues(ndev);
> + }
> +
> + return 0;
> +}
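
For illustration, one possible shape of the split suggested above (untested
sketch with hypothetical helper names; the probe-time half stops after the
vPort query and MAC/queue-count setup, and the open-time half starts at EQ
creation):

/* Hypothetical: probe-time configuration only. */
static int mana_probe_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	/* hypothetical helper: query vPort, clamp queue counts, copy MAC */
	err = mana_query_and_set_port_cfg(apc, ndev);
	if (err) {
		kfree(apc->rxqs);
		apc->rxqs = NULL;
	}
	return err;
}

/* Hypothetical: everything needed to bring traffic up at open/attach time. */
static int mana_open_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	err = mana_create_eq(apc);
	if (err)
		return err;

	err = mana_create_vport(apc, ndev);
	if (err)
		goto destroy_eq;

	/* hypothetical helper: real_num_{tx,rx}_queues, RX queues, RSS setup */
	err = mana_setup_queues_and_rss(apc, ndev);
	if (err)
		goto destroy_vport;

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
destroy_eq:
	mana_destroy_eq(gd->gdma_context, apc);
	return err;
}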