I commented out code in rte_eal_init() relating to threading:

int
rte_eal_init(int argc, char **argv)
{
  int fctret;
//  int i, ret;              /* only used by the threading code commented out below */
//  pthread_t thread_id;
//  static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
  const char *logid;
//  char cpuset[RTE_CPU_AFFINITY_STR_LEN];
//  char thread_name[RTE_MAX_THREAD_NAME_LEN];

//  if (!rte_atomic32_test_and_set(&run_once))
//    return -1;

  logid = strrchr(argv[0], '/');
  logid = strdup(logid ? logid + 1 : argv[0]);

//  thread_id = pthread_self();

  if (rte_eal_log_early_init() < 0)
    rte_panic("Cannot init early logs\n");

  eal_log_level_parse(argc, argv);

  /* set log level as early as possible */
  rte_set_log_level(internal_config.log_level);

  if (rte_eal_cpu_init() < 0)
    rte_panic("Cannot detect lcores\n");

  fctret = eal_parse_args(argc, argv);
  if (fctret < 0)
    exit(1);

  if (internal_config.no_hugetlbfs == 0 &&
      internal_config.process_type != RTE_PROC_SECONDARY &&
      internal_config.xen_dom0_support == 0 &&
      eal_hugepage_info_init() < 0)
    rte_panic("Cannot get hugepage information\n");

  if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
    if (internal_config.no_hugetlbfs)
      internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
  }

  if (internal_config.vmware_tsc_map == 1) {
#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
    rte_cycles_vmware_tsc_map = 1;
    RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
        "you must have monitor_control.pseudo_perfctr = TRUE\n");
#else
    RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
        "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
#endif
  }

  rte_srand(rte_rdtsc());

  rte_config_init();

//  if (rte_eal_pci_init() < 0)
//    rte_panic("Cannot init PCI\n");
//
//#ifdef VFIO_PRESENT
//  if (rte_eal_vfio_setup() < 0)
//    rte_panic("Cannot init VFIO\n");
//#endif
//
//#ifdef RTE_LIBRTE_IVSHMEM
//  if (rte_eal_ivshmem_init() < 0)
//    rte_panic("Cannot init IVSHMEM\n");
//#endif

  if (rte_eal_memory_init() < 0)
    rte_panic("Cannot init memory\n");

  /* the directories are locked during eal_hugepage_info_init */
  eal_hugedirs_unlock();

  if (rte_eal_memzone_init() < 0)
    rte_panic("Cannot init memzone\n");

  if (rte_eal_tailqs_init() < 0)
    rte_panic("Cannot init tail queues for objects\n");

//#ifdef RTE_LIBRTE_IVSHMEM
//  if (rte_eal_ivshmem_obj_init() < 0)
//    rte_panic("Cannot init IVSHMEM objects\n");
//#endif

  if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0)
    rte_panic("Cannot init logs\n");
//
//  if (rte_eal_alarm_init() < 0)
//    rte_panic("Cannot init interrupt-handling thread\n");

  if (rte_eal_timer_init() < 0)
    rte_panic("Cannot init HPET or TSC timers\n");

//  eal_check_mem_on_local_socket();

//  if (eal_plugins_init() < 0)
//    rte_panic("Cannot init plugins\n");

//  eal_thread_init_master(rte_config.master_lcore);

//  ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);

//  RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
//    rte_config.master_lcore, (int)thread_id, cpuset,
//    ret == 0 ? "" : "...");

//  if (rte_eal_dev_init() < 0)
//    rte_panic("Cannot init pmd devices\n");

//  if (rte_eal_intr_init() < 0)
//    rte_panic("Cannot init interrupt-handling thread\n");

//  RTE_LCORE_FOREACH_SLAVE(i) {
//
//    /*
//     * create communication pipes between master thread
//     * and children
//     */
//    if (pipe(lcore_config[i].pipe_master2slave) < 0)
//      rte_panic("Cannot create pipe\n");
//    if (pipe(lcore_config[i].pipe_slave2master) < 0)
//      rte_panic("Cannot create pipe\n");
//
//    lcore_config[i].state = WAIT;
//
//    /* create a thread for each lcore */
//    ret = pthread_create(&lcore_config[i].thread_id, NULL,
//             eal_thread_loop, NULL);
//    if (ret != 0)
//      rte_panic("Cannot create thread\n");
//
//    /* Set thread_name for aid in debugging. */
//    snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
//      "lcore-slave-%d", i);
//    ret = rte_thread_setname(lcore_config[i].thread_id,
//            thread_name);
//    if (ret != 0)
//      RTE_LOG(DEBUG, EAL,
//        "Cannot set name for lcore thread\n");
//  }

//  /*
//   * Launch a dummy function on all slave lcores, so that master lcore
//   * knows they are all ready when this function returns.
//   */
//  rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
//  rte_eal_mp_wait_lcore();

//  /* Probe & Initialize PCI devices */
//  if (rte_eal_pci_probe())
//    rte_panic("Cannot probe PCI\n");

  rte_eal_mcfg_complete();

  return fctret;
}

After recompiling, I no longer see the issue when using TBB in the same 
application. This works for now, but I am interested in understanding more 
about why it fixes the issue; my working theory and a less invasive 
workaround are sketched below.
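My suspicion, which I have not verified, is that the thread-affinity handling 
matters more than the thread creation itself: eal_thread_init_master() pins 
the calling thread to the master lcore, and TBB appears to size its default 
worker pool from the calling thread's CPU affinity mask. After rte_eal_init() 
returns, TBB would then see a single usable CPU and run serially. If that is 
right, saving the affinity mask before rte_eal_init() and restoring it 
afterwards should avoid patching DPDK entirely. A minimal, untested sketch 
(the wrapper is mine, not a DPDK API):

#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <rte_eal.h>

/* Save this thread's CPU affinity, run rte_eal_init(), then restore
 * the mask so threads created afterwards (e.g. TBB workers) are not
 * confined to the master lcore. */
static int
eal_init_keep_affinity(int argc, char **argv)
{
  cpu_set_t mask;
  int ret;

  if (pthread_getaffinity_np(pthread_self(), sizeof(mask), &mask) != 0)
    return -1;

  ret = rte_eal_init(argc, argv);

  if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0)
    return -1;

  return ret;
}

Restoring the mask before TBB creates its scheduler would be the important 
part, since new Linux threads inherit the affinity of the thread that 
creates them.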
It also seems like rte_eal_init() initializes a number of sub-systems. 
Perhaps more granularity at that level would be useful, along with a more 
flexible initialization API that allows programmatic control instead of only 
allowing initialization from argv. Something like the hypothetical sketch 
below is what I have in mind.
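To illustrate (these names are hypothetical; no such API exists in DPDK 
today):

/* Hypothetical sketch: describe the configuration in a struct instead
 * of argv, and let the application opt into subsystems individually. */
struct rte_eal_config {
  int use_hugetlbfs;     /* reserve memory from hugetlbfs */
  size_t memory_mb;      /* amount of memory to reserve */
  int probe_pci;         /* scan and probe PCI devices */
  int manage_lcores;     /* create and pin per-lcore threads */
};

int rte_eal_init_cfg(const struct rte_eal_config *cfg);

An application embedding SPDK could then request hugepages and PCI access 
while leaving manage_lcores off, sidestepping the interaction with its own 
threading runtime.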

> On Nov 7, 2016, at 10:39 PM, Jason Lefley <jason.lefley at aclectic.com> 
> wrote:
> 
> I am working on leveraging SPDK (which internally uses EAL from DPDK) from an 
> existing application that uses Intel's TBB. I do not need to use EAL for 
> multithreading but SPDK does require at least huge page support and PCI 
> access from EAL.
> 
> I noticed that if I try to use TBB's parallel_for() after I have called 
> rte_eal_init(), then the TBB operation doesn't actually run in parallel. If I 
> remove the call to rte_eal_init(), the TBB operations work as expected 
> however I then cannot use SPDK. Does anyone have any input regarding how I 
> can keep my existing multi-threaded application working as intended as I 
> integrate SPDK/DPDK?
> 
> Thanks
