Add the APIs needed for the dpif operate function to propagate flow
manipulation and query operations to the dpif-offload layer.

Signed-off-by: Eelco Chaudron <[email protected]>
---
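
For context, the calling pattern this series establishes is the one added
to dpif_operate() below.  Condensed, and with DPIF_OFFLOAD_AUTO picked
purely as an example offload type, it boils down to the following sketch;
the pointer array is duplicated because dpif_offload_operate() compacts it
down to the operations that still need software handling:

    struct dpif_op **ops_copy = xmemdup(ops, n_ops * sizeof *ops_copy);
    size_t ops_left = dpif_offload_operate(dpif, ops_copy, n_ops,
                                           DPIF_OFFLOAD_AUTO);

    if (ops_left) {
        /* Hand the leftovers to the datapath implementation itself. */
        dpif->dpif_class->operate(dpif, ops_copy, ops_left,
                                  DPIF_OFFLOAD_AUTO);
    }
    free(ops_copy);
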
 lib/dpif-offload-provider.h | 14 +++++++
 lib/dpif-offload.c          | 84 ++++++++++++++++++++++++++++++++++++-
 lib/dpif.c                  | 20 ++++++++-
 3 files changed, 116 insertions(+), 2 deletions(-)
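
Below is also a minimal sketch of how a (hypothetical)
DPIF_OFFLOAD_IMPL_HW_ONLY provider could implement the new 'operate'
callback documented in dpif-offload-provider.h.  The
example_hw_flow_put/del/get helpers are placeholders, not part of this
series; the point is the error handling: operations with a non-negative
'error' were already taken by a higher-priority provider, and anything
this provider cannot accept is marked EOPNOTSUPP so a lower-priority
provider, or the dpif layer itself, can pick it up.

    static void
    example_offload_operate(struct dpif *dpif OVS_UNUSED,
                            const struct dpif_offload *offload,
                            struct dpif_op **ops, size_t n_ops)
    {
        for (size_t i = 0; i < n_ops; i++) {
            struct dpif_op *op = ops[i];

            if (op->error >= 0) {
                /* Already handled by a higher-priority provider. */
                continue;
            }

            switch (op->type) {
            case DPIF_OP_FLOW_PUT:
                op->error = example_hw_flow_put(offload, &op->flow_put);
                break;
            case DPIF_OP_FLOW_DEL:
                op->error = example_hw_flow_del(offload, &op->flow_del);
                break;
            case DPIF_OP_FLOW_GET:
                op->error = example_hw_flow_get(offload, &op->flow_get);
                break;
            case DPIF_OP_EXECUTE:
            default:
                /* Not a flow operation; leave it for someone else. */
                op->error = EOPNOTSUPP;
                break;
            }
        }
    }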

diff --git a/lib/dpif-offload-provider.h b/lib/dpif-offload-provider.h
index abb3e2469..4d3950664 100644
--- a/lib/dpif-offload-provider.h
+++ b/lib/dpif-offload-provider.h
@@ -199,6 +199,18 @@ struct dpif_offload_class {
     void (*flow_dump_thread_destroy)(
         struct dpif_offload_flow_dump_thread *);
 
+    /* Executes each of the 'n_ops' operations in 'ops' in order if their
+     * 'error' field is negative, placing each operation's results in the
+     * 'output' members documented in comments and the 'error' member of each
+     * dpif_op. Operations with a non-negative 'error' value have already been
+     * processed by a higher priority offload provider.
+     *
+     * Note that only the DPIF_OP_FLOW_PUT/DEL/GET operations should be
+     * handled, and this is only needed for the DPIF_OFFLOAD_IMPL_HW_ONLY type
+     * of offload providers. */
+    void (*operate)(struct dpif *, const struct dpif_offload *,
+                    struct dpif_op **, size_t n_ops);
+
     /* Returns the number of flows offloaded by the offload provider. */
     uint64_t (*flow_get_n_offloaded)(const struct dpif_offload *);
 
@@ -314,6 +326,8 @@ int dpif_offload_flow_dump_next(struct dpif_flow_dump_thread *,
 void dpif_offload_flow_dump_thread_create(struct dpif_flow_dump_thread *,
                                           struct dpif_flow_dump *);
 void dpif_offload_flow_dump_thread_destroy(struct dpif_flow_dump_thread *);
+size_t dpif_offload_operate(struct dpif *, struct dpif_op **, size_t n_ops,
+                            enum dpif_offload_type offload_type);
 
 static inline void dpif_offload_assert_class(
     const struct dpif_offload *dpif_offload,
diff --git a/lib/dpif-offload.c b/lib/dpif-offload.c
index 9c4c88c55..b35b6c7ea 100644
--- a/lib/dpif-offload.c
+++ b/lib/dpif-offload.c
@@ -31,7 +31,7 @@
 
 VLOG_DEFINE_THIS_MODULE(dpif_offload);
 
-static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(1, 5);
+static struct vlog_rate_limit rl_dbg = VLOG_RATE_LIMIT_INIT(100, 100);
 
 static struct ovs_mutex dpif_offload_mutex = OVS_MUTEX_INITIALIZER;
 static struct shash dpif_offload_classes \
@@ -1120,6 +1120,88 @@ dpif_offload_get_netdev_by_port_id(struct dpif *dpif,
     return netdev;
 }
 
+/* This function tries to offload the operations to the dpif-offload
+ * providers. It returns the number of operations that were not handled;
+ * their pointers are compacted to the front of 'ops'. */
+size_t
+dpif_offload_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
+                     enum dpif_offload_type offload_type)
+{
+    struct dp_offload *dp_offload = dpif_offload_get_dp_offload(dpif);
+    const struct dpif_offload *offload;
+    size_t n_ops_left = 0;
+
+    if (!dp_offload || !dpif_offload_is_offload_enabled()) {
+        return n_ops;
+    }
+
+    for (size_t i = 0; i < n_ops; i++) {
+        ops[i]->error = -1;
+    }
+
+    LIST_FOR_EACH (offload, dpif_list_node, &dp_offload->offload_providers) {
+        if (offload->class->operate
+            && offload->class->impl_type == DPIF_OFFLOAD_IMPL_HW_ONLY) {
+
+            offload->class->operate(dpif, offload, ops, n_ops);
+
+            for (size_t i = 0; i < n_ops; i++) {
+                struct dpif_op *op = ops[i];
+
+                if (op->error == EOPNOTSUPP) {
+                    /* Not supported by this offload provider, try next one. */
+                    op->error = -1;
+                } else {
+                    VLOG_DBG("Tried offloading %d to dpif-offload provider "
+                             "%s, error %d",
+                             op->type,
+                             dpif_offload_name(offload), op->error);
+
+                    switch (op->type) {
+                    case DPIF_OP_FLOW_PUT:
+                        log_flow_put_message(dpif, &this_module,
+                                             &op->flow_put, 0);
+                        break;
+                    case DPIF_OP_FLOW_DEL:
+                        log_flow_del_message(dpif, &this_module,
+                                             &op->flow_del, 0);
+                        break;
+                    case DPIF_OP_FLOW_GET:
+                        log_flow_get_message(dpif, &this_module,
+                                             &op->flow_get, 0);
+                        break;
+                    case DPIF_OP_EXECUTE:
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    for (size_t i = 0; i < n_ops; i++) {
+        struct dpif_op *op = ops[i];
+
+        if (offload_type == DPIF_OFFLOAD_ALWAYS) {
+            /* For DPIF_OFFLOAD_ALWAYS, we should keep the error values,
+             * and mark the unprocessed ones as EOPNOTSUPP. This way, they
+             * will not be processed by the dpif layer. */
+            if (op->error < 0) {
+                op->error = EOPNOTSUPP;
+            }
+            continue;
+        }
+
+        /* For the other offload types, operations that were not handled or
+         * failed to offload should be processed by the dpif layer. */
+        if (op->error != 0 && op->error != EEXIST) {
+            op->error = 0;
+            ops[n_ops_left++] = op;
+        }
+    }
+
+    return n_ops_left;
+}
+
 
 int
 dpif_offload_netdev_flush_flows(struct netdev *netdev)
diff --git a/lib/dpif.c b/lib/dpif.c
index 8c40bec18..14c870b0e 100644
--- a/lib/dpif.c
+++ b/lib/dpif.c
@@ -1396,7 +1396,25 @@ dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
              * handle itself, without help. */
             size_t i;
 
-            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
+            /* See if we can handle some flow add/del/get through the
+             * dpif-offload provider. */
+            if (offload_type != DPIF_OFFLOAD_NEVER
+                && dpif_offload_is_offload_enabled()) {
+                struct dpif_op **ops_copy;
+                size_t ops_left;
+
+                ops_copy = xmemdup(ops, sizeof(*ops_copy) * chunk);
+
+                ops_left = dpif_offload_operate(dpif, ops_copy, chunk,
+                                                offload_type);
+                if (ops_left) {
+                    dpif->dpif_class->operate(dpif, ops_copy, ops_left,
+                                              offload_type);
+                }
+                free(ops_copy);
+            } else {
+                dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
+            }
 
             for (i = 0; i < chunk; i++) {
                 struct dpif_op *op = ops[i];
-- 
2.50.1
