From: Caleb Raitto <carai...@google.com>

The driver disables tx napi if it's not certain that completions will
be processed affine with tx service.

Its heuristic does not account for some scenarios where it is, such as
when the queue pair count matches the core count but not the
hyperthread count.

Allow userspace to override the heuristic. This is an alternative to
the approach in the linked patch, which added more logic in the kernel
for these cases; the agreement was that this is better left to user
control.
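
For review convenience, a condensed restatement of when tx napi
polling ends up active after this change (hypothetical helper, not
part of the patch; it mirrors the two hunks below):

	#include <stdbool.h>

	/* Sketch only: combines the probe-time and open-time checks. */
	static bool tx_napi_enabled(bool napi_tx, bool force_napi_tx,
				    bool affinity_hint_set)
	{
		/* virtnet_alloc_queues(): the napi weight is nonzero
		 * iff either knob is set at probe time.
		 */
		if (!(napi_tx || force_napi_tx))
			return false;
		/* virtnet_napi_tx_enable(): the affinity heuristic may
		 * still veto, unless the override is set.
		 */
		if (!affinity_hint_set && !force_napi_tx)
			return false;
		return true;
	}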

Do not expand the existing napi_tx variable to a ternary value,
because doing so can break user applications that expect
boolean ('Y'/'N') instead of integer output. Add a new param instead.
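
As an illustration of the compatibility concern (hypothetical
userspace sketch, not part of the patch), a tool parsing the bool
parameter relies on the 'Y'/'N' rendering of bool module params:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen(
			"/sys/module/virtio_net/parameters/napi_tx", "r");
		int c;

		if (!f)
			return 1;
		c = fgetc(f);	/* a bool param reads back as 'Y' or 'N' */
		fclose(f);
		/* This check would misparse if napi_tx were changed to
		 * an integer param printing "0"/"1"/"2".
		 */
		printf("tx napi %s\n", c == 'Y' ? "on" : "off");
		return 0;
	}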

Link: https://patchwork.ozlabs.org/patch/725249/
Acked-by: Willem de Bruijn <will...@google.com>
Acked-by: Jon Olson <jonol...@google.com>
Signed-off-by: Caleb Raitto <carai...@google.com>
---
 drivers/net/virtio_net.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2ff08bc103a9..d9aca4e90d6b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -39,10 +39,11 @@
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
-static bool csum = true, gso = true, napi_tx;
+static bool csum = true, gso = true, napi_tx, force_napi_tx;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
 module_param(napi_tx, bool, 0644);
+module_param(force_napi_tx, bool, 0644);
 
 /* FIXME: MTU in config. */
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -1201,7 +1202,7 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
        /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
         * enable the feature if this is likely affine with the transmit path.
         */
-       if (!vi->affinity_hint_set) {
+       if (!vi->affinity_hint_set && !force_napi_tx) {
                napi->weight = 0;
                return;
        }
@@ -2646,7 +2647,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                               napi_weight);
                netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
-                                 napi_tx ? napi_weight : 0);
+                                 (napi_tx || force_napi_tx) ? napi_weight : 0);
 
                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
                ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
-- 
2.18.0.233.g985f88cf7e-goog
