Re: [PATCH 01/10] wcn36xx: fix buffer commit logic on TX path

2018-05-17 Thread Ramon Fried
On Wed, May 16, 2018 at 5:08 PM, Daniel Mack  wrote:
> When wcn36xx_dxe_tx_frame() is entered while the device is still processing
> the queue asynchronously, we are racing against the firmware code with
> updates to the buffer descriptors. Presumably, the firmware scans the ring
> buffer that holds the descriptors and scans for a valid control descriptor,
> and then assumes that the next descriptor contains the payload. If, however,
> the control descriptor is marked valid, but the payload descriptor isn't,
> the packet is not sent out.
>
> Another issue with the current code is that it lacks memory barriers before
> descriptors are marked valid. This is important because the CPU may reorder
> writes to memory, even if it is allocated as coherent DMA area, and hence
> the device may see incompletely written data.
>
> To fix this, the code in wcn36xx_dxe_tx_frame() was restructured a bit so
> that the payload descriptor is made valid before the control descriptor.
> Memory barriers are added to ensure coherency of shared memory areas.
>
> Signed-off-by: Daniel Mack 
> ---
>  drivers/net/wireless/ath/wcn36xx/dxe.c | 75 
> +-
>  1 file changed, 38 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c 
> b/drivers/net/wireless/ath/wcn36xx/dxe.c
> index bd2b946a65c9..3d1cf7dd1f8e 100644
> --- a/drivers/net/wireless/ath/wcn36xx/dxe.c
> +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
> @@ -642,8 +642,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
>  struct sk_buff *skb,
>  bool is_low)
>  {
> -   struct wcn36xx_dxe_ctl *ctl = NULL;
> -   struct wcn36xx_dxe_desc *desc = NULL;
> +   struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
> +   struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
> struct wcn36xx_dxe_ch *ch = NULL;
> unsigned long flags;
> int ret;
> @@ -651,74 +651,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
> ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
>
> spin_lock_irqsave(&ch->lock, flags);
> -   ctl = ch->head_blk_ctl;
> +   ctl_bd = ch->head_blk_ctl;
> +   ctl_skb = ctl_bd->next;
>
> /*
>  * If skb is not null that means that we reached the tail of the ring
>  * hence ring is full. Stop queues to let mac80211 back off until ring
>  * has an empty slot again.
>  */
> -   if (NULL != ctl->next->skb) {
> +   if (NULL != ctl_skb->skb) {
> ieee80211_stop_queues(wcn->hw);
> wcn->queues_stopped = true;
> spin_unlock_irqrestore(&ch->lock, flags);
> return -EBUSY;
> }
>
> -   ctl->skb = NULL;
> -   desc = ctl->desc;
> +   if (unlikely(ctl_skb->bd_cpu_addr)) {
> +   wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
> +   ret = -EINVAL;
> +   goto unlock;
> +   }
> +
> +   desc_bd = ctl_bd->desc;
> +   desc_skb = ctl_skb->desc;
> +
> +   ctl_bd->skb = NULL;
>
> /* write buffer descriptor */
> -   memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd));
> +   memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
>
> /* Set source address of the BD we send */
> -   desc->src_addr_l = ctl->bd_phy_addr;
> -
> -   desc->dst_addr_l = ch->dxe_wq;
> -   desc->fr_len = sizeof(struct wcn36xx_tx_bd);
> -   desc->ctrl = ch->ctrl_bd;
> +   desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
> +   desc_bd->dst_addr_l = ch->dxe_wq;
> +   desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
>
> wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
>
> wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
> -(char *)desc, sizeof(*desc));
> +(char *)desc_bd, sizeof(*desc_bd));
> wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
> -"BD   >>> ", (char *)ctl->bd_cpu_addr,
> +"BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
>  sizeof(struct wcn36xx_tx_bd));
>
> -   /* Set source address of the SKB we send */
> -   ctl = ctl->next;
> -   desc = ctl->desc;
> -   if (ctl->bd_cpu_addr) {
> -   wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
> -   ret = -EINVAL;
> -   goto unlock;
> -   }
> -
> -   desc->src_addr_l = dma_map_single(wcn->dev,
> - skb->data,
> - skb->len,
> - DMA_TO_DEVICE);
> -   if (dma_mapping_error(wcn->dev, desc->src_addr_l)) {
> +   desc_skb->src_addr_l = dma_map_single(wcn->dev,
> + skb->data,
> + skb->len,
> + DMA_TO_DEVICE);
> +   if (dma_mapping_error(wcn->dev, 

[PATCH 01/10] wcn36xx: fix buffer commit logic on TX path

2018-05-16 Thread Daniel Mack
When wcn36xx_dxe_tx_frame() is entered while the device is still processing
the queue asynchronously, we are racing against the firmware code with
updates to the buffer descriptors. Presumably, the firmware scans the ring
buffer that holds the descriptors and scans for a valid control descriptor,
and then assumes that the next descriptor contains the payload. If, however,
the control descriptor is marked valid, but the payload descriptor isn't,
the packet is not sent out.

Another issue with the current code is that it lacks memory barriers before
descriptors are marked valid. This is important because the CPU may reorder
writes to memory, even if it is allocated as coherent DMA area, and hence
the device may see incompletely written data.

To fix this, the code in wcn36xx_dxe_tx_frame() was restructured a bit so
that the payload descriptor is made valid before the control descriptor.
Memory barriers are added to ensure coherency of shared memory areas.

Signed-off-by: Daniel Mack 
---
 drivers/net/wireless/ath/wcn36xx/dxe.c | 75 +-
 1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c 
b/drivers/net/wireless/ath/wcn36xx/dxe.c
index bd2b946a65c9..3d1cf7dd1f8e 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -642,8 +642,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 struct sk_buff *skb,
 bool is_low)
 {
-   struct wcn36xx_dxe_ctl *ctl = NULL;
-   struct wcn36xx_dxe_desc *desc = NULL;
+   struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
+   struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
struct wcn36xx_dxe_ch *ch = NULL;
unsigned long flags;
int ret;
@@ -651,74 +651,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
 
spin_lock_irqsave(&ch->lock, flags);
-   ctl = ch->head_blk_ctl;
+   ctl_bd = ch->head_blk_ctl;
+   ctl_skb = ctl_bd->next;
 
/*
 * If skb is not null that means that we reached the tail of the ring
 * hence ring is full. Stop queues to let mac80211 back off until ring
 * has an empty slot again.
 */
-   if (NULL != ctl->next->skb) {
+   if (NULL != ctl_skb->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
 
-   ctl->skb = NULL;
-   desc = ctl->desc;
+   if (unlikely(ctl_skb->bd_cpu_addr)) {
+   wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+   ret = -EINVAL;
+   goto unlock;
+   }
+
+   desc_bd = ctl_bd->desc;
+   desc_skb = ctl_skb->desc;
+
+   ctl_bd->skb = NULL;
 
/* write buffer descriptor */
-   memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd));
+   memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
 
/* Set source address of the BD we send */
-   desc->src_addr_l = ctl->bd_phy_addr;
-
-   desc->dst_addr_l = ch->dxe_wq;
-   desc->fr_len = sizeof(struct wcn36xx_tx_bd);
-   desc->ctrl = ch->ctrl_bd;
+   desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
+   desc_bd->dst_addr_l = ch->dxe_wq;
+   desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
 
wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
 
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
-(char *)desc, sizeof(*desc));
+(char *)desc_bd, sizeof(*desc_bd));
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
-"BD   >>> ", (char *)ctl->bd_cpu_addr,
+"BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
 sizeof(struct wcn36xx_tx_bd));
 
-   /* Set source address of the SKB we send */
-   ctl = ctl->next;
-   desc = ctl->desc;
-   if (ctl->bd_cpu_addr) {
-   wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
-   ret = -EINVAL;
-   goto unlock;
-   }
-
-   desc->src_addr_l = dma_map_single(wcn->dev,
- skb->data,
- skb->len,
- DMA_TO_DEVICE);
-   if (dma_mapping_error(wcn->dev, desc->src_addr_l)) {
+   desc_skb->src_addr_l = dma_map_single(wcn->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+   if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
ret = -ENOMEM;
goto unlock;
}
 
-   ctl->skb = skb;
-   desc->dst_addr_l = ch->dxe_wq;
-   desc->fr_len = ctl->skb->len;
-
-   /* set dxe