Synchronous IPC cannot be invoked from within an mp handler itself,
since doing so would deadlock. This patch introduces a new API,
rte_eal_mp_task_add, which lets the work of an mp handler be delegated
to a separate task that runs in its own control thread.
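
For illustration only (not part of the diff), a minimal sketch of how a
consumer might use the new API. The handler example_mp_action, the task
do_sync_work and the request name "example_req" are hypothetical;
rte_mp_action_register() and rte_mp_request_sync() are the existing IPC
calls:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    #include <rte_common.h>
    #include <rte_eal.h>

    /* Runs in a separate control thread, so the synchronous request
     * below cannot deadlock the IPC handler thread. */
    static void do_sync_work(void *args __rte_unused)
    {
            struct rte_mp_msg req;
            struct rte_mp_reply reply;
            struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

            memset(&req, 0, sizeof(req));
            snprintf(req.name, sizeof(req.name), "%s", "example_req");

            if (rte_mp_request_sync(&req, &reply, &ts) == 0)
                    free(reply.msgs); /* caller owns the reply array */
    }

    /* Registered with rte_mp_action_register(); must not issue sync
     * IPC itself, it only queues the work. */
    static int example_mp_action(const struct rte_mp_msg *msg __rte_unused,
                    const void *peer __rte_unused)
    {
            return rte_eal_mp_task_add(do_sync_work, NULL);
    }

Note that a non-NULL args must be heap-allocated, since it is freed by
the scheduler after the task completes.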

Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 lib/librte_eal/common/eal_common_proc.c | 67 +++++++++++++++++++++++++++++++++
 lib/librte_eal/common/include/rte_eal.h | 31 +++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index fc0eb4d17..166bb0951 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -27,6 +27,7 @@
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_tailq.h>
+#include <rte_spinlock.h>
 
 #include "eal_private.h"
 #include "eal_filesystem.h"
@@ -738,6 +739,72 @@ void rte_mp_init_callback_cleanup(void)
        }
 }
 
+struct mp_task {
+       TAILQ_ENTRY(mp_task) next;
+       rte_eal_mp_task task;
+       void *args;
+};
+
+TAILQ_HEAD(mp_task_list, mp_task);
+static struct mp_task_list mp_task_list =
+       TAILQ_HEAD_INITIALIZER(mp_task_list);
+static rte_spinlock_t mp_task_lock = RTE_SPINLOCK_INITIALIZER;
+
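+/*
+ * Control-thread body: drain the task list, invoking each queued task
+ * with the lock released.
+ */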
+static void *schedule_mp_task(void *args __rte_unused)
+{
+       struct mp_task *task;
+
+       rte_spinlock_lock(&mp_task_lock);
+       while (!TAILQ_EMPTY(&mp_task_list)) {
+
+               task = TAILQ_FIRST(&mp_task_list);
+               rte_spinlock_unlock(&mp_task_lock);
+
+               task->task(task->args);
+
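+               /*
+                * Remove the task only after it has run, so that concurrent
+                * callers of rte_eal_mp_task_add() still see a non-empty list
+                * and do not spawn a second control thread.
+                */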
+               rte_spinlock_lock(&mp_task_lock);
+               TAILQ_REMOVE(&mp_task_list, task, next);
+               if (task->args)
+                       free(task->args);
+               free(task);
+       }
+
+       rte_spinlock_unlock(&mp_task_lock);
+       return NULL;
+}
+
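+/*
+ * Queue a task for execution in a control thread; the thread is created
+ * only when the list transitions from empty to non-empty.
+ */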
+int __rte_experimental
+rte_eal_mp_task_add(rte_eal_mp_task task, void *args)
+{
+       struct mp_task *t = calloc(1, sizeof(struct mp_task));
+       pthread_t tid;
+       int ret;
+
+       if (t == NULL)
+               return -ENOMEM;
+
+       t->task = task;
+       t->args = args;
+
+       rte_spinlock_lock(&mp_task_lock);
+
+       if (TAILQ_EMPTY(&mp_task_list)) {
+               TAILQ_INSERT_TAIL(&mp_task_list, t, next);
+               rte_spinlock_unlock(&mp_task_lock);
+
+               ret = rte_ctrl_thread_create(&tid,
+                       "rte_mp_handle", NULL, schedule_mp_task, NULL);
+               if (ret < 0) {
+                       RTE_LOG(ERR, EAL, "failed to create mp task thread: %s\n",
+                               strerror(-ret));
+                       rte_spinlock_lock(&mp_task_lock);
+                       TAILQ_REMOVE(&mp_task_list, t, next);
+                       rte_spinlock_unlock(&mp_task_lock);
+                       free(t);
+                       return ret;
+               }
+               return 0;
+       }
+
+       TAILQ_INSERT_TAIL(&mp_task_list, t, next);
+       rte_spinlock_unlock(&mp_task_lock);
+
+       return 0;
+}
+
 /**
  * Return -1, as fail to send message and it's caused by the local side.
  * Return 0, as fail to send message and it's caused by the remote side.
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 506f17f34..0ce49668c 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -546,6 +546,37 @@ typedef int (*rte_eal_mp_init_callback_t)(void);
 int __rte_experimental
 rte_eal_register_mp_init(rte_eal_mp_init_callback_t callback);
 
+/**
+ * Prototype of a function that performs the work of an mp request
+ * handler. It is executed in a separate control thread.
+ *
+ * @param args
+ *   Argument passed to the function pointer.
+ */
+typedef void (*rte_eal_mp_task)(void *args);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add an rte_eal_mp_task to the task list. The task will be invoked
+ * in a separate control thread, which prevents a deadlock when
+ * synchronous IPC is required inside the task.
+ *
+ * @param task
+ *   Function pointer that performs the task.
+ *
+ * @param args
+ *   Argument passed to the function pointer. If not NULL, it must be
+ *   heap-allocated; it is freed after the task completes.
+ *
+ * @return
+ *  - 0 on success.
+ *  - (<0) on failure.
+ */
+int __rte_experimental
+rte_eal_mp_task_add(rte_eal_mp_task task, void *args);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.13.6
