Hi,

Mathias Nyman <mathias.ny...@linux.intel.com> writes:
> @@ -3098,24 +3136,66 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, 
> int transferred,
>       return (total_packet_count - ((transferred + trb_buff_len) / maxp));
>  }
>  
> +
>  static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 
> enqd_len,
> -                      u32 *trb_buff_len)
> +                      u32 *trb_buff_len, struct xhci_segment *seg)
>  {
> +     struct device *dev = xhci_to_hcd(xhci)->self.controller;
>       unsigned int unalign;
>       unsigned int max_pkt;
> +     u32 new_buff_len;
>  
> -     max_pkt = usb_endpoint_maxp(&urb->ep->desc); /*FIXME MATTU GET_MAX..? */
> +     max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
>       unalign = (enqd_len + *trb_buff_len) % max_pkt;
>  
>       /* we got lucky, last normal TRB data on segment is packet aligned */
>       if (unalign == 0)
>               return 0;
>  
> +     xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
> +              unalign, *trb_buff_len);
> +
>      /* is the last normal TRB alignable by splitting it */
>       if (*trb_buff_len > unalign) {
>               *trb_buff_len -= unalign;
> +             xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
>               return 0;
>       }
> +
> +     /*
> +      * We want enqd_len + trb_buff_len to sum up to a number that is
> +      * divisible by the endpoint's wMaxPacketSize. IOW:
> +      * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
> +      */
> +     new_buff_len = max_pkt - (enqd_len % max_pkt);
> +
> +     if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
> +             new_buff_len = (urb->transfer_buffer_length - enqd_len);
> +
> +     /* create a max max_pkt sized bounce buffer pointed to by last trb */
> +     if (usb_urb_dir_out(urb)) {
> +             sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
> +                                seg->bounce_buf, new_buff_len, enqd_len);
> +             seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
> +                                              max_pkt, DMA_TO_DEVICE);
> +     } else {
> +             seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
> +                                              max_pkt, DMA_FROM_DEVICE);
> +     }
> +
> +     if (dma_mapping_error(dev, seg->bounce_dma)) {
> +             /* try without aligning. Some host controllers survive */
> +             xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
> +             return 0;
> +     }
> +     *trb_buff_len = new_buff_len;
> +     seg->bounce_len = new_buff_len;
> +     seg->bounce_offs = enqd_len;
> +
> +     xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
> +
> +     /* FIXME MATTU make sure memory allocated memory is 64k aligned */
        ^^^^^^^^^^^^^^^^^^^^^
        Do you want to clean this up?

-- 
balbi

Attachment: signature.asc
Description: PGP signature

Reply via email to