On Tue, 2024-07-02 at 13:08 +0200, Paolo Abeni wrote:
> On Fri, 2024-06-28 at 00:32 +0000, Mina Almasry wrote:
> > +int net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> > +			    struct net_devmem_dmabuf_binding **out)
> > +{
> > +	struct net_devmem_dmabuf_binding *binding;
> > +	static u32 id_alloc_next;
> > +	struct scatterlist *sg;
> > +	struct dma_buf *dmabuf;
> > +	unsigned int sg_idx, i;
> > +	unsigned long virtual;
> > +	int err;
> > +
> > +	dmabuf = dma_buf_get(dmabuf_fd);
> > +	if (IS_ERR(dmabuf))
> > +		return -EBADFD;
> > +
> > +	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
> > +			       dev_to_node(&dev->dev));
> > +	if (!binding) {
> > +		err = -ENOMEM;
> > +		goto err_put_dmabuf;
> > +	}
> > +
> > +	binding->dev = dev;
> > +
> > +	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> > +			      binding, xa_limit_32b, &id_alloc_next,
> > +			      GFP_KERNEL);
> > +	if (err < 0)
> > +		goto err_free_binding;
> > +
> > +	xa_init_flags(&binding->bound_rxq_list, XA_FLAGS_ALLOC);
> > +
> > +	refcount_set(&binding->ref, 1);
> > +
> > +	binding->dmabuf = dmabuf;
> > +
> > +	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
> > +	if (IS_ERR(binding->attachment)) {
> > +		err = PTR_ERR(binding->attachment);
> > +		goto err_free_id;
> > +	}
> > +
> > +	binding->sgt =
> > +		dma_buf_map_attachment(binding->attachment, DMA_FROM_DEVICE);
> > +	if (IS_ERR(binding->sgt)) {
> > +		err = PTR_ERR(binding->sgt);
> > +		goto err_detach;
> > +	}
> > +
> > +	/* For simplicity we expect to make PAGE_SIZE allocations, but the
> > +	 * binding can be much more flexible than that. We may be able to
> > +	 * allocate MTU sized chunks here. Leave that for future work...
> > +	 */
> > +	binding->chunk_pool =
> > +		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
> > +	if (!binding->chunk_pool) {
> > +		err = -ENOMEM;
> > +		goto err_unmap;
> > +	}
> > +
> > +	virtual = 0;
> > +	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
> > +		dma_addr_t dma_addr = sg_dma_address(sg);
> > +		struct dmabuf_genpool_chunk_owner *owner;
> > +		size_t len = sg_dma_len(sg);
> > +		struct net_iov *niov;
> > +
> > +		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
> > +				     dev_to_node(&dev->dev));
>
> I'm sorry for not catching this earlier, but it looks like the above
> allocation lacks a NULL check.
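FWIW, an untested sketch of what the follow-up could look like. Note
the 'err_free_chunks' label is a name I made up here; the real fix
needs to jump to whatever label in this function unwinds the chunks
already inserted into the genpool by earlier loop iterations:

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		/* kzalloc_node() can fail under memory pressure; bail
		 * out and undo the chunks registered so far.
		 */
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}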
FTR, given the size of the series, the number of iterations it has
already gone through, and the fact that this issue does not break the
functionality, I agree to merge the series as-is and handle the
missing check with a follow-up.

Thanks,

Paolo