Le 25/04/2025 à 22:47, Mina Almasry a écrit :
Augment the dmabuf binding to be able to handle TX. In addition to all the RX
binding state, we also create the tx_vec needed for the TX path.

Provide API for sendmsg to be able to send dmabufs bound to this device:

- Provide a new dmabuf_tx_cmsg which includes the dmabuf to send from.
- MSG_ZEROCOPY with SCM_DEVMEM_DMABUF cmsg indicates send from dma-buf.

Devmem is uncopyable, so piggyback off the existing MSG_ZEROCOPY
implementation, while disabling instances where MSG_ZEROCOPY falls back
to copying.

...

@@ -270,24 +284,34 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned 
int dmabuf_fd,
                        niov->owner = &owner->area;
                        page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
                                                      
net_devmem_get_dma_addr(niov));
+                       if (direction == DMA_TO_DEVICE)
+                               binding->tx_vec[owner->area.base_virtual / 
PAGE_SIZE + i] = niov;
                }
virtual += len;
        }
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+                             binding, xa_limit_32b, &id_alloc_next,
+                             GFP_KERNEL);
+       if (err < 0)
+               goto err_free_id;
+
        return binding;
+err_free_id:
+       xa_erase(&net_devmem_dmabuf_bindings, binding->id);

Not sure this is correct now that xa_alloc_cyclic() is the last function called:
if xa_alloc_cyclic() fails, no ID has been allocated, so there is nothing for
xa_erase() to remove. I guess the last goto should be to err_free_chunks instead,
so the earlier allocations are unwound.

  err_free_chunks:
        gen_pool_for_each_chunk(binding->chunk_pool,
                                net_devmem_dmabuf_free_chunk_owner, NULL);
        gen_pool_destroy(binding->chunk_pool);
+err_tx_vec:
+       kvfree(binding->tx_vec);
  err_unmap:
        dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
                                          DMA_FROM_DEVICE);
  err_detach:
        dma_buf_detach(dmabuf, binding->attachment);
-err_free_id:
-       xa_erase(&net_devmem_dmabuf_bindings, binding->id);
  err_free_binding:
        kfree(binding);
  err_put_dmabuf:

...

diff --git a/net/core/sock.c b/net/core/sock.c
index b64df2463300b..9dd2989040357 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3017,6 +3017,12 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr 
*cmsg,
                if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg)))
                        return -EPERM;
                sockc->priority = *(u32 *)CMSG_DATA(cmsg);
+               break;
+       case SCM_DEVMEM_DMABUF:
+               if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+                       return -EINVAL;
+               sockc->dmabuf_id = *(u32 *)CMSG_DATA(cmsg);
+

Nitpick: Unneeded newline, to be consistent with the surrounding code.

                break;
        default:
                return -EINVAL;

...

CJ

Reply via email to