AlenkaF commented on code in PR #42118:
URL: https://github.com/apache/arrow/pull/42118#discussion_r2048558050
##########
cpp/src/arrow/c/dlpack.cc:
##########
@@ -130,4 +131,71 @@ Result<DLDevice> ExportDevice(const std::shared_ptr<Array>& arr) {
   }
 }
 
+struct TensorManagerCtx {
+  std::shared_ptr<Tensor> t;
+  std::vector<int64_t> strides;
+  std::vector<int64_t> shape;
+  DLManagedTensor tensor;
+};
+
+Result<DLManagedTensor*> ExportTensor(const std::shared_ptr<Tensor>& t) {
+  // Define the DLDataType struct
+  const DataType& type = *t->type();
+  ARROW_ASSIGN_OR_RAISE(DLDataType dlpack_type, GetDLDataType(type));
+
+  // Define DLDevice struct
+  ARROW_ASSIGN_OR_RAISE(DLDevice device, ExportDevice(t));
+
+  // Create TensorManagerCtx that will serve as the owner of the DLManagedTensor
+  std::unique_ptr<TensorManagerCtx> ctx(new TensorManagerCtx);
+
+  // Define the data pointer to the DLTensor
+  // If tensor is of length 0, data pointer should be NULL
+  if (t->size() == 0) {
+    ctx->tensor.dl_tensor.data = NULL;
+  } else {
+    ctx->tensor.dl_tensor.data = t->raw_mutable_data();
+  }
+
+  ctx->tensor.dl_tensor.device = device;
+  ctx->tensor.dl_tensor.ndim = t->ndim();
+  ctx->tensor.dl_tensor.dtype = dlpack_type;
+  ctx->tensor.dl_tensor.byte_offset = 0;
+
+  std::vector<int64_t>& shape_arr = ctx->shape;
+  shape_arr.reserve(t->ndim());
+  for (auto i : t->shape()) {
+    shape_arr.emplace_back(i);
+  }
+  ctx->tensor.dl_tensor.shape = shape_arr.data();
+
+  std::vector<int64_t>& strides_arr = ctx->strides;
+  strides_arr.reserve(t->ndim());
+  auto byte_width = t->type()->byte_width();
+  for (auto i : t->strides()) {
+    strides_arr.emplace_back(i / byte_width);
+  }
+  ctx->tensor.dl_tensor.strides = strides_arr.data();
+
+  ctx->t = std::move(t);
+  ctx->tensor.manager_ctx = ctx.get();
+  ctx->tensor.deleter = [](struct DLManagedTensor* self) {
+    delete reinterpret_cast<TensorManagerCtx*>(self->manager_ctx);
+  };
+  return &ctx.release()->tensor;
+}
+
+Result<DLDevice> ExportDevice(const std::shared_ptr<Tensor>& t) {
+  // Define DLDevice struct
+  DLDevice device;
+  if (t->data()->device_type() == DeviceAllocationType::kCPU) {
+    device.device_id = 0;
+    device.device_type = DLDeviceType::kDLCPU;
+    return device;
+  } else {
+    return Status::NotImplemented(
+        "DLPack support is implemented only for buffers on CPU device.");

Review Comment:
   The only reason is that we're splitting the work across multiple PRs. There's an umbrella issue tracking this effort here: https://github.com/apache/arrow/issues/39296, and the GPU-related part is covered by this issue: https://github.com/apache/arrow/issues/45721.
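For readers following along, here is a minimal consumer-side sketch of how the `DLManagedTensor*` produced by the new `ExportTensor` could be used and then released. It is not code from this PR: it assumes `ExportTensor` ends up in the `arrow::dlpack` namespace and is declared in `arrow/c/dlpack.h` next to the existing `ExportArray`/`ExportDevice` helpers, and the `RunExample` wrapper is purely illustrative. The point it shows is the ownership contract in the diff: the `TensorManagerCtx` keeps the `arrow::Tensor` alive until the consumer invokes the capsule's `deleter`.

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

#include "arrow/buffer.h"
#include "arrow/c/dlpack.h"      // assumed to declare arrow::dlpack::ExportTensor
#include "arrow/c/dlpack_abi.h"  // vendored DLPack ABI (DLManagedTensor, DLTensor)
#include "arrow/result.h"
#include "arrow/status.h"
#include "arrow/tensor.h"
#include "arrow/type.h"

arrow::Status RunExample() {
  // Build a 2x3 row-major int64 tensor backed by a freshly allocated buffer.
  std::vector<int64_t> shape = {2, 3};
  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Buffer> buffer,
                        arrow::AllocateBuffer(2 * 3 * sizeof(int64_t)));
  auto* values = reinterpret_cast<int64_t*>(buffer->mutable_data());
  for (int64_t i = 0; i < 6; ++i) values[i] = i;

  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::Tensor> tensor,
                        arrow::Tensor::Make(arrow::int64(), buffer, shape));

  // ExportTensor (as added in this diff) hands back a DLManagedTensor*; the
  // TensorManagerCtx behind manager_ctx holds the shared_ptr<Tensor> so the
  // data stays valid until the consumer calls the deleter.
  ARROW_ASSIGN_OR_RAISE(DLManagedTensor* managed,
                        arrow::dlpack::ExportTensor(tensor));

  const DLTensor& dl = managed->dl_tensor;
  std::cout << "ndim=" << dl.ndim
            << " dtype bits=" << static_cast<int>(dl.dtype.bits)
            << " device=" << static_cast<int>(dl.device.device_type)
            << std::endl;

  // The consumer owns the capsule now; calling the deleter frees the
  // TensorManagerCtx and drops its reference to the arrow::Tensor.
  managed->deleter(managed);
  return arrow::Status::OK();
}

int main() {
  arrow::Status st = RunExample();
  if (!st.ok()) {
    std::cerr << st.ToString() << std::endl;
    return 1;
  }
  return 0;
}
```

Handing back a raw `DLManagedTensor*` with an embedded `deleter` mirrors the existing `ExportArray` path and matches what the DLPack protocol expects from producers: the consumer, not the producer, decides when the backing memory can be released.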