larroy commented on a change in pull request #16654: Multithreaded Inference Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r364413881
 
 

 ##########
 File path: src/imperative/cached_op_threadsafe.cc
 ##########
 @@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <unordered_set>
+#include <iostream>
+#include "./imperative_utils.h"
+#include "../executor/exec_pass.h"
+#include "./cached_op_threadsafe.h"
+#include "../profiler/profiler.h"
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+
+namespace mxnet {
+
+DMLC_REGISTER_PARAMETER(CachedOpThreadSafeConfig);
+
+constexpr uint32_t kEidNotExist = std::numeric_limits<uint32_t>::max();
+
+
+struct CachedOpThreadSafe::GraphInfo {
+  nnvm::Graph fwd_graph;
+};
+
+struct CachedOpThreadSafe::DynamicRuntime {
+  GraphInfo info;
+  std::vector<OpStatePtr> op_states;
+};
+
+OpStatePtr CachedOpThreadSafe::GetCachedOpState(
+    const Context& ctx) {
+
+  for (const auto& i : cached_op_states_[ctx]) {
+    // only create one state per device when not using static memory
+    if (!config_.static_alloc || i.unique()) {
+      return i;
+    }
+  }
+  nnvm::Graph full_graph;
+  auto state_ptr = OpStatePtr::Create<CachedOpState>(ctx, fwd_graph_, full_graph, false);
+
+  cached_op_states_[ctx].push_back(state_ptr);
+  return state_ptr;
+}
+
+
+CachedOpThreadSafe::CachedOpThreadSafe(const nnvm::Symbol& sym,
+                                       const std::vector<std::pair<std::string,
+                                       std::string> >& flags) : CachedOp(sym, flags) {
+  using namespace nnvm;
+  using namespace imperative;
+  static const std::vector<const Op *> zero_ops{Op::Get("zeros_like"),
+                                                Op::Get("_zeros")};
+  static const auto _copy_op = Op::Get("_copy");
+  config_.Init(flags);
+
+  if (config_.static_shape) {
+      CHECK(config_.static_alloc) << "static_alloc must be True when static_shape is True";
+  }
+
+  // construct forward graph
+  {
+    NodeEntryMap<size_t> dedup_out;
+    for (const NodeEntry &nodeEntry : sym.outputs) {
+      if (dedup_out.find(nodeEntry) != dedup_out.end()) {
+        NodePtr copy_node = Node::Create();
+        copy_node->attrs.op = _copy_op;
+        copy_node->attrs.name = nodeEntry.node->attrs.name + "_copy" +
+                                std::to_string(dedup_out[nodeEntry]++);
+        copy_node->inputs.emplace_back(nodeEntry);
+        if (_copy_op->attr_parser != nullptr) {
+          _copy_op->attr_parser(&(copy_node->attrs));
+        }
+        fwd_graph_.outputs.emplace_back(std::move(copy_node));
+      } else {
+        dedup_out.emplace(nodeEntry, 0);
+        fwd_graph_.outputs.push_back(nodeEntry);
+      }
+    }
+
+    const auto &idx = fwd_graph_.indexed_graph();
+    CHECK_GE(idx.input_nodes().size(), 1)
+        << "CachedOp requires at least 1 input";
+
+    std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
+    for (const auto &i : idx.input_nodes())
+      ++ref_count[idx.entry_id(i, 0)];
+    for (const auto &i : idx.outputs())
+      ++ref_count[idx.entry_id(i)];
+    for (size_t i = 0; i < idx.num_nodes(); ++i) {
+      for (const auto &j : idx[i].inputs)
+        ++ref_count[idx.entry_id(j)];
+    }
+
+    fwd_graph_.attrs["forward_ref_count"] =
+        std::make_shared<dmlc::any>(std::move(ref_count));
+  }
+
+  // Set param indices
+  {
+    const auto& indexed_graph = fwd_graph_.indexed_graph();
+    if (config_.data_indices.ndim() || config_.param_indices.ndim()) {
+      CHECK_EQ(config_.data_indices.ndim() + config_.param_indices.ndim(),
+               indexed_graph.input_nodes().size());
+    } else {
+      std::vector<uint32_t> tmp;
+      tmp.reserve(indexed_graph.input_nodes().size());
+      for (size_t i = 0; i < indexed_graph.input_nodes().size(); ++i) {
+        tmp.emplace_back(i);
+      }
+      config_.data_indices.assign(tmp.begin(), tmp.end());
+    }
+  }
+}
+
+OpStatePtr CachedOpThreadSafe::DynamicForward(const Context& default_ctx,
+                                              const std::vector<NDArray*>& inputs,
+                                              const std::vector<NDArray*>& outputs) {
+  using namespace nnvm;
+  using namespace imperative;
+
+  {
+  auto state_ptr = GetCachedOpState(default_ctx);
+  auto op_state = OpStatePtr::Create<DynamicRuntime>();
+  auto &runtime = op_state.get_state<DynamicRuntime>();
+  {
+    auto &state = state_ptr.get_state<CachedOpState>();
+    // Need to lock the mutex on the state, this allows
+    // for multi context push of ops to dependency engine.
+    // SetForwardGraph runs infer passes on graphs as well
+    // as the planmemory pass.
+    std::lock_guard<std::mutex> lock(state.mutex);
+    SetForwardGraph(&state.info, false, inputs);
+    runtime.info.fwd_graph = state.info.fwd_graph;
+  }
+  nnvm::Graph &g = runtime.info.fwd_graph;
+  const auto &idx = g.indexed_graph();
+  size_t num_inputs = idx.input_nodes().size();
+  size_t max_nodes = runtime.info.fwd_graph.indexed_graph().num_nodes();
+  runtime.op_states.resize(max_nodes);
+  auto &states = runtime.op_states;
+
+  // Allocate entries
+  // This buff is thread local and used to store intermediate
+  // nodes in the graph
+  buff.resize(idx.num_node_entries());
 
 Review comment:
   This coding style is similar to the one in the backward pass, and I'm not a fan of it because I find it hard to read: there is a lot of back and forth between the indexed graph, indices, buffers, and arrays. I think it could be abstracted more and made easier to follow by factoring the logic out into functions where the indices don't appear. In the end this is how a big chunk of MXNet is written, so it's just another drop in the ocean, but anyway.
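
   To illustrate the kind of factoring I mean, here is a rough sketch (purely hypothetical, not code from this PR; the helper name `BindInputs` is made up), where the entry-id bookkeeping is hidden behind a named helper so the main forward path never touches raw indices:

    // Hypothetical helper, for illustration only.
    // Assumes <nnvm/graph.h> and <mxnet/ndarray.h>, already pulled in by this file.
    // Binds the graph's input entries to the caller-provided NDArrays on top of
    // the thread-local buffer, returning one pointer slot per node entry.
    std::vector<NDArray*> BindInputs(const nnvm::IndexedGraph& idx,
                                     const std::vector<NDArray*>& inputs,
                                     std::vector<NDArray>* buff) {
      std::vector<NDArray*> arrays;
      arrays.reserve(buff->size());
      // Default every entry to its slot in the thread-local buffer.
      for (auto& nd : *buff) arrays.push_back(&nd);
      // Point the graph's input entries at the user-supplied inputs instead.
      for (size_t i = 0; i < idx.input_nodes().size(); ++i) {
        arrays[idx.entry_id(idx.input_nodes()[i], 0)] = inputs[i];
      }
      return arrays;
    }

   With helpers like this (and a matching one for outputs), DynamicForward would read as a sequence of named steps, and the index arithmetic would stay local to the helpers.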

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
