The commit b65ac44674dd ("udp: try to avoid 2 cache miss on dequeue")
leveraged the scratch area helpers for UDP over IPv4, but I forgot to
update the IPv6 code path accordingly.

This change extends the scratch area usage to the IPv6 code, syncing
the two implementations and giving some performance benefit:
IPv6 is again almost on par with IPv4, performance-wise.
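
For reference, the scratch area caches at enqueue time the few skb fields
needed at recvmsg time inside skb->cb, so the dequeue path does not need to
touch cold cache lines. Below is a minimal sketch of the idea, assuming a
layout similar to the v4 helpers introduced by b65ac44674dd; the exact field
names and placement in the tree may differ:

	/* illustrative only - cached in skb->cb when the skb is enqueued */
	struct udp_dev_scratch {
		u32 truesize;		/* rx memory accounting, read on dequeue */
	#if BITS_PER_LONG == 64
		u16 len;		/* cached skb->len */
		bool is_linear;		/* cached !skb_is_nonlinear(skb) */
		bool csum_unnecessary;	/* cached skb_csum_unnecessary(skb) */
	#endif
	};

	static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
	{
		return (struct udp_dev_scratch *)&skb->cb;
	}

	static inline unsigned int udp_skb_len(struct sk_buff *skb)
	{
		return udp_skb_scratch(skb)->len;
	}

udp_skb_csum_unnecessary() and udp_skb_is_linear() follow the same pattern;
this patch makes the v6 recvmsg path read those cached values instead of the
skb fields, as v4 already does.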

Signed-off-by: Paolo Abeni <pab...@redhat.com>
---
 net/ipv6/udp.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1d7288..450829d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -362,7 +362,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        if (!skb)
                return err;
 
-       ulen = skb->len;
+       ulen = udp_skb_len(skb);
        copied = len;
        if (copied > ulen - off)
                copied = ulen - off;
@@ -379,14 +379,18 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
        if (copied < ulen || peeking ||
            (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
-               checksum_valid = !udp_lib_checksum_complete(skb);
+               checksum_valid = udp_skb_csum_unnecessary(skb) ||
+                               !__udp_lib_checksum_complete(skb);
                if (!checksum_valid)
                        goto csum_copy_err;
        }
 
-       if (checksum_valid || skb_csum_unnecessary(skb))
-               err = skb_copy_datagram_msg(skb, off, msg, copied);
-       else {
+       if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
+               if (udp_skb_is_linear(skb))
+                       err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
+               else
+                       err = skb_copy_datagram_msg(skb, off, msg, copied);
+       } else {
                err = skb_copy_and_csum_datagram_msg(skb, off, msg);
                if (err == -EINVAL)
                        goto csum_copy_err;
-- 
2.9.4
