On Wed, Mar 10, 2021 at 06:32 AM CET, Cong Wang wrote:
> From: Cong Wang <[email protected]>
>
> We only have skb_send_sock_locked() which requires callers
> to use lock_sock(). Introduce a variant skb_send_sock()
> which locks on its own, callers do not need to lock it
> any more. This will save us from adding a ->sendmsg_locked
> for each protocol.
>
> To reuse the code, pass function pointers to __skb_send_sock()
> and build skb_send_sock() and skb_send_sock_locked() on top.
>
> Cc: John Fastabend <[email protected]>
> Cc: Daniel Borkmann <[email protected]>
> Cc: Jakub Sitnicki <[email protected]>
> Cc: Lorenz Bauer <[email protected]>
> Signed-off-by: Cong Wang <[email protected]>
> ---
> include/linux/skbuff.h | 1 +
> net/core/skbuff.c | 52 ++++++++++++++++++++++++++++++++++++------
> 2 files changed, 46 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 0503c917d773..2fc8c3657c53 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -3626,6 +3626,7 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
>  			unsigned int flags);
> int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
> int len);
> +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
> void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
> unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
> int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 545a472273a5..396586bd6ae3 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -2500,9 +2500,12 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
>  }
> EXPORT_SYMBOL_GPL(skb_splice_bits);
>
> -/* Send skb data on a socket. Socket must be locked. */
> -int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
> - int len)
> +typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg,
> + struct kvec *vec, size_t num, size_t size);
> +typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset,
> + size_t size, int flags);
> +static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
> +			   int len, sendmsg_func sendmsg, sendpage_func sendpage)
> {
> unsigned int orig_len = len;
> struct sk_buff *head = skb;
> @@ -2522,7 +2525,7 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
> memset(&msg, 0, sizeof(msg));
> msg.msg_flags = MSG_DONTWAIT;
>
> - ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
> + ret = sendmsg(sk, &msg, &kv, 1, slen);
Maybe use INDIRECT_CALLABLE_DECLARE() and INDIRECT_CALL_2() since there
are just two possibilities? Same for sendpage below.
> if (ret <= 0)
> goto error;
>
> @@ -2553,9 +2556,9 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
> slen = min_t(size_t, len, skb_frag_size(frag) - offset);
>
> while (slen) {
> -			ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
> -						     skb_frag_off(frag) + offset,
> -						     slen, MSG_DONTWAIT);
> + ret = sendpage(sk, skb_frag_page(frag),
> + skb_frag_off(frag) + offset,
> + slen, MSG_DONTWAIT);
> if (ret <= 0)
> goto error;
>
[...]