Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package dlpack for openSUSE:Factory checked 
in at 2023-01-30 17:11:13
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/dlpack (Old)
 and      /work/SRC/openSUSE:Factory/.dlpack.new.32243 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "dlpack"

Mon Jan 30 17:11:13 2023 rev:3 rq:1061994 version:0.8

Changes:
--------
--- /work/SRC/openSUSE:Factory/dlpack/dlpack.changes    2022-02-24 
18:24:32.154641565 +0100
+++ /work/SRC/openSUSE:Factory/.dlpack.new.32243/dlpack.changes 2023-01-30 
17:20:45.211696779 +0100
@@ -1,0 +2,9 @@
+Sat Jan 28 19:50:14 UTC 2023 - Dirk Müller <dmuel...@suse.com>
+
+- update to 0.8:
+  * Add kDLBool
+  * Add kDLHexagon
+  * Add kDLOneAPI
+  * Add DLPACK_VERSION and DLPACK_ABI_VERSION
+
+-------------------------------------------------------------------

Old:
----
  dlpack-0.6.tar.gz

New:
----
  dlpack-0.8.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ dlpack.spec ++++++
--- /var/tmp/diff_new_pack.HIfxhA/_old  2023-01-30 17:20:45.567698674 +0100
+++ /var/tmp/diff_new_pack.HIfxhA/_new  2023-01-30 17:20:45.571698696 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package dlpack
 #
-# Copyright (c) 2022 SUSE LLC
+# Copyright (c) 2023 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -17,7 +17,7 @@
 
 
 Name:           dlpack
-Version:        0.6
+Version:        0.8
 Release:        0
 Summary:        DLPack: Open In Memory Tensor Structure
 License:        Apache-2.0

++++++ dlpack-0.6.tar.gz -> dlpack-0.8.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/.github/workflows/deploy.yml 
new/dlpack-0.8/.github/workflows/deploy.yml
--- old/dlpack-0.6/.github/workflows/deploy.yml 1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/.github/workflows/deploy.yml 2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,39 @@
+name: CI
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  test_linux:
+    name: Deploy Docs
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        submodules: recursive
+
+    - name: Configuring Test Environment
+      run: |
+        sudo apt-get update
+        sudo apt-get -y install build-essential doxygen ghp-import
+        python3 -m pip install -U pip wheel
+
+    - name: Installing dependencies
+      run: |
+        python3 -m pip install -r doc_requirements.txt
+
+    - name: Generating Docs
+      run: |
+        make doc
+
+    - name: Deploying on GitHub Pages
+      run: |
+        touch docs/build/.nojekyll
+        git remote set-url origin https://x-access-token:${{ 
secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY
+        git config --global user.email "dlpack-gh-actions-bot@nomail"
+        git config --global user.name "dlpack-gh-actions-bot"
+        ghp-import -m "Generate DLPack website" -b gh-pages docs/build
+        git push origin gh-pages -f
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/.github/workflows/docs.yaml 
new/dlpack-0.8/.github/workflows/docs.yaml
--- old/dlpack-0.6/.github/workflows/docs.yaml  1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/.github/workflows/docs.yaml  2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,49 @@
+name: Build Doc
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  test_linux:
+    name: Linux
+    runs-on: ubuntu-latest
+    
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        submodules: recursive
+
+    - name: Configuring Test Environment
+      run: |
+        sudo apt-get update
+        sudo apt-get -y install build-essential doxygen
+        python3 -m pip install -U pip wheel
+        python3 -m pip install cmake ninja
+
+        python3 --version
+        python3 -m pip --version
+        doxygen --version
+        make --version
+        cmake --version
+        ninja --version
+
+    - name: Installing dependencies
+      run: |
+        python3 -m pip install -r doc_requirements.txt
+
+    - name: Testing CMakeLists.txt
+      run: |
+        mkdir build
+        cd build
+        cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=./install -DBUILD_DOCS=ON
+        ninja
+        ninja install
+
+    - name: Testing Makefile
+      run: |
+        make doc
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/CMakeLists.txt 
new/dlpack-0.8/CMakeLists.txt
--- old/dlpack-0.6/CMakeLists.txt       2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/CMakeLists.txt       2023-01-05 19:38:50.000000000 +0100
@@ -8,7 +8,7 @@
 # Set variables:
 #   * PROJECT_NAME
 #   * PROJECT_VERSION
-project(dlpack VERSION 0.1.0 LANGUAGES C CXX)
+project(dlpack VERSION 0.6 LANGUAGES C CXX)
 
 #####
 # Change the default build type from Debug to Release, while still
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/Makefile new/dlpack-0.8/Makefile
--- old/dlpack-0.6/Makefile     2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/Makefile     2023-01-05 19:38:50.000000000 +0100
@@ -1,4 +1,4 @@
-.PHONY: clean all test doc lint
+.PHONY: clean all test doc lint show_docs
 
 all: bin/mock
 
@@ -13,6 +13,10 @@
 
 doc:
        doxygen docs/Doxyfile
+       $(MAKE) -C docs html
+
+show_docs:
+       @python3 -m http.server --directory docs/build/latest
 
 lint:
        ./tests/scripts/task_lint.sh
@@ -32,4 +36,4 @@
        $(CXX) $(CXXFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
 
 clean:
-       $(RM) -rf build  */*/*/*~ */*.o */*/*.o */*/*/*.o */*.d */*/*.d 
*/*/*/*.d
+       $(RM) -rf build  */*/*/*~ */*.o */*/*.o */*/*/*.o */*.d */*/*.d 
*/*/*/*.d docs/build docs/doxygen
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/NEWS.md new/dlpack-0.8/NEWS.md
--- old/dlpack-0.6/NEWS.md      2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/NEWS.md      2023-01-05 19:38:50.000000000 +0100
@@ -3,6 +3,33 @@
 
 This file records the changes in DLPack in reverse chronological order.
 
+## v0.8
+
+- Add kDLBool
+
+
+## v0.7
+
+- Add kDLHexagon
+- Add kDLOneAPI
+- Add DLPACK_VERSION and DLPACK_ABI_VERSION
+
+
+## v0.6
+
+- Add kDLROCMHost
+- Add kDLCUDAManaged
+
+
+## v0.5
+
+Rename enum values
+- kDLGPU -> kDLCUDA
+- kDLCPUPinned -> kDLCUDAHost
+The ABI is backward compatible, as it is only change of constant name,
+exchange can still happen between the new version and old version.
+
+
 ## v0.4
 
 - OpaqueHandle type
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/README.md new/dlpack-0.8/README.md
--- old/dlpack-0.6/README.md    2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/README.md    2023-01-05 19:38:50.000000000 +0100
@@ -2,14 +2,16 @@
 
 [![Build 
Status](https://github.com/dmlc/dlpack/actions/workflows/main.yaml/badge.svg?branch=main)](https://github.com/dmlc/dlpack/actions/workflows/main.yaml)
 
-DLPack is an open in-memory tensor structure to for sharing tensor among 
frameworks. DLPack enables
+Documentation: 
[https://dmlc.github.io/dlpack/latest](https://dmlc.github.io/dlpack/latest)
+
+DLPack is an open in-memory tensor structure for sharing tensors among 
frameworks. DLPack enables
 
 - Easier sharing of operators between deep learning frameworks.
 - Easier wrapping of vendor level operator implementations, allowing 
collaboration when introducing new devices/ops.
 - Quick swapping of backend implementations, like different version of BLAS
 - For final users, this could bring more operators, and possibility of mixing 
usage between frameworks.
 
-We do not intend to implement of Tensor and Ops, but instead use this as 
common bridge
+We do not intend to implement Tensor and Ops, but instead use this as common 
bridge
 to reuse tensor and ops across frameworks.
 
 ## Proposal Procedure
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/from_numpy/Makefile 
new/dlpack-0.8/apps/from_numpy/Makefile
--- old/dlpack-0.6/apps/from_numpy/Makefile     2021-07-01 16:01:05.000000000 
+0200
+++ new/dlpack-0.8/apps/from_numpy/Makefile     1970-01-01 01:00:00.000000000 
+0100
@@ -1,2 +0,0 @@
-all: numpy_dlpack.c
-       gcc -I../../include -shared -o libmain.so -fPIC numpy_dlpack.c
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/from_numpy/main.py 
new/dlpack-0.8/apps/from_numpy/main.py
--- old/dlpack-0.6/apps/from_numpy/main.py      2021-07-01 16:01:05.000000000 
+0200
+++ new/dlpack-0.8/apps/from_numpy/main.py      1970-01-01 01:00:00.000000000 
+0100
@@ -1,107 +0,0 @@
-from __future__ import print_function
-
-import numpy as np
-import gc
-import ctypes
-
-libmain = ctypes.cdll.LoadLibrary("./libmain.so")
-
-class DLDevice(ctypes.Structure):
-  _fields_ = [("device_type", ctypes.c_int),
-              ("device_id", ctypes.c_int)]
-
-class DLDataType(ctypes.Structure):
-  _fields_ = [("type_code", ctypes.c_uint8),
-              ("bits", ctypes.c_uint8),
-              ("lanes", ctypes.c_uint16)]
-  TYPE_MAP = {
-    "bool": (1, 1, 1),
-    "int32": (0, 32, 1),
-    "int64": (0, 64, 1),
-    "uint32": (1, 32, 1),
-    "uint64": (1, 64, 1),
-    "float32": (2, 32, 1),
-    "float64": (2, 64, 1),
-  }
-
-class DLTensor(ctypes.Structure):
-  _fields_ = [("data", ctypes.c_void_p),
-              ("device", DLDevice),
-              ("ndim", ctypes.c_int),
-              ("dtype", DLDataType),
-              ("shape", ctypes.POINTER(ctypes.c_int64)),
-              ("strides", ctypes.POINTER(ctypes.c_int64)),
-              ("byte_offset", ctypes.c_uint64)]
-
-class DLManagedTensor(ctypes.Structure):
-  pass
-
-DLManagedTensorHandle = ctypes.POINTER(DLManagedTensor)
-
-DeleterFunc = ctypes.CFUNCTYPE(None, DLManagedTensorHandle)
-
-DLManagedTensor._fields_ = [("dl_tensor", DLTensor),
-                            ("manager_ctx", ctypes.c_void_p),
-                            ("deleter", DeleterFunc)]
-
-def display(array):
-  print("data =", hex(array.ctypes.data_as(ctypes.c_void_p).value))
-  print("dtype =", array.dtype)
-  print("ndim =", array.ndim)
-  print("shape =", array.shape)
-  print("strides =", array.strides)
-
-def make_manager_ctx(obj):
-  pyobj = ctypes.py_object(obj)
-  void_p = ctypes.c_void_p.from_buffer(pyobj)
-  ctypes.pythonapi.Py_IncRef(pyobj)
-  return void_p
-
-# N.B.: In practice, one should ensure that this function
-# is not destructed before the numpy array is destructed.
-@DeleterFunc
-def dl_managed_tensor_deleter(dl_managed_tensor_handle):
-  void_p = dl_managed_tensor_handle.contents.manager_ctx
-  pyobj = ctypes.cast(void_p, ctypes.py_object)
-  print("Deleting manager_ctx:")
-  display(pyobj.value)
-  ctypes.pythonapi.Py_DecRef(pyobj)
-  print("Deleter self...")
-  libmain.FreeHandle()
-  print("Done")
-
-def make_dl_tensor(array):
-  # You may check array.flags here, e.g. array.flags['C_CONTIGUOUS']
-  dl_tensor = DLTensor()
-  dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p)
-  dl_tensor.device = DLDevice(1, 0)
-  dl_tensor.ndim = array.ndim
-  dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)]
-  # For 0-dim ndarrays, strides and shape will be NULL
-  dl_tensor.shape = array.ctypes.shape_as(ctypes.c_int64)
-  dl_tensor.strides = array.ctypes.strides_as(ctypes.c_int64)
-  for i in range(array.ndim):
-    dl_tensor.strides[i] //= array.itemsize
-  dl_tensor.byte_offset = 0
-  return dl_tensor
-
-def main():
-  array = np.random.rand(3, 1, 30).astype("float32")
-  print("Created:")
-  display(array)
-  c_obj = DLManagedTensor()
-  c_obj.dl_tensor = make_dl_tensor(array)
-  c_obj.manager_ctx = make_manager_ctx(array)
-  c_obj.deleter = dl_managed_tensor_deleter
-  print("-------------------------")
-  del array
-  gc.collect()
-  libmain.Give(c_obj)
-  print("-------------------------")
-  del c_obj
-  gc.collect()
-  libmain.Finalize()
-  print("-------------------------")
-
-if __name__ == "__main__":
-  main()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/from_numpy/numpy_dlpack.c 
new/dlpack-0.8/apps/from_numpy/numpy_dlpack.c
--- old/dlpack-0.6/apps/from_numpy/numpy_dlpack.c       2021-07-01 
16:01:05.000000000 +0200
+++ new/dlpack-0.8/apps/from_numpy/numpy_dlpack.c       1970-01-01 
01:00:00.000000000 +0100
@@ -1,52 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <dlpack/dlpack.h>
-
-DLManagedTensor *given = NULL;
-
-void display(DLManagedTensor a) {
-  puts("On C side:");
-  int i;
-  int ndim = a.dl_tensor.ndim;
-  printf("data = %p\n", a.dl_tensor.data);
-  printf("device = (device_type = %d, device_id = %d)\n",
-          (int) a.dl_tensor.device.device_type,
-          (int) a.dl_tensor.device.device_id);
-  printf("dtype = (code = %d, bits = %d, lanes = %d)\n",
-          (int) a.dl_tensor.dtype.code,
-          (int) a.dl_tensor.dtype.bits,
-          (int) a.dl_tensor.dtype.lanes);
-  printf("ndim = %d\n",
-          (int) a.dl_tensor.ndim);
-  printf("shape = (");
-  for (i = 0; i < ndim; ++i) {
-    if (i != 0) {
-      printf(", ");
-    }
-    printf("%d", (int) a.dl_tensor.shape[i]);
-  }
-  printf(")\n");
-  printf("strides = (");
-  for (i = 0; i < ndim; ++i) {
-    if (i != 0) {
-      printf(", ");
-    }
-    printf("%d", (int) a.dl_tensor.strides[i]);
-  }
-  printf(")\n");
-}
-
-void Give(DLManagedTensor dl_managed_tensor) {
-  display(dl_managed_tensor);
-  given = (DLManagedTensor *) malloc(sizeof(DLManagedTensor));
-  *given = dl_managed_tensor;
-}
-
-void Finalize() {
-  given->deleter(given);
-}
-
-void FreeHandle() {
-  free(given);
-  given = NULL;
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/.gitignore 
new/dlpack-0.8/apps/numpy_dlpack/.gitignore
--- old/dlpack-0.6/apps/numpy_dlpack/.gitignore 1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/apps/numpy_dlpack/.gitignore 2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1 @@
+__pycache__
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/README.md 
new/dlpack-0.8/apps/numpy_dlpack/README.md
--- old/dlpack-0.6/apps/numpy_dlpack/README.md  1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/apps/numpy_dlpack/README.md  2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,23 @@
+# Numpy DLPack Array Conversion Example
+
+This example demonstrates how a underlying array memory can be handed off 
between
+two DLPack compatible frameworks without requiring any copies. In this case,
+we demonstrate how to convert numpy to TVM's NDArray and vice-versa with proper
+memory handling. We hope that not only is this directly useful for TVM users, 
but
+also a solid example for how similar efficient copies can be implemented in 
other
+array frameworks.
+
+## File Breakdown
+
+[dlpack.py](dlpack/dlpack.py): Contains the definition of common DLPack 
structures shared between frameworks. Mirrors the official C++ definitions.
+
+[from_numpy.py](dlpack/from_numpy.py): Demonstrates how to convert a numpy 
array into a PyCapsule containing a DLPack Tensor.
+
+[to_numpy.py](dlpack/to_numpy.py): Demonstrates how to take a PyCapsule with a 
DLPack Tensor and convert it into a numpy array.
+
+[test.py](dlpack/test.py): Shows how to_numpy and from_numpy can be used to 
convert tensor formats without copies.
+
+## Authors
+[Josh Fromm](https://github.com/jwfromm)
+
+[Junru Shao](https://github.com/junrushao1994)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/dlpack/__init__.py 
new/dlpack-0.8/apps/numpy_dlpack/dlpack/__init__.py
--- old/dlpack-0.6/apps/numpy_dlpack/dlpack/__init__.py 1970-01-01 
01:00:00.000000000 +0100
+++ new/dlpack-0.8/apps/numpy_dlpack/dlpack/__init__.py 2023-01-05 
19:38:50.000000000 +0100
@@ -0,0 +1,2 @@
+from .from_numpy import from_numpy
+from .to_numpy import to_numpy
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/dlpack/dlpack.py 
new/dlpack-0.8/apps/numpy_dlpack/dlpack/dlpack.py
--- old/dlpack-0.6/apps/numpy_dlpack/dlpack/dlpack.py   1970-01-01 
01:00:00.000000000 +0100
+++ new/dlpack-0.8/apps/numpy_dlpack/dlpack/dlpack.py   2023-01-05 
19:38:50.000000000 +0100
@@ -0,0 +1,137 @@
+import ctypes
+
+_c_str_dltensor = b"dltensor"
+
+
+class DLDeviceType(ctypes.c_int):
+    """The enum that encodes the type of the device where
+    DLTensor memory is allocated.
+    """
+    kDLCPU = 1
+    kDLCUDA = 2
+    kDLCUDAHost = 3
+    kDLOpenCL = 4
+    kDLVulkan = 7
+    kDLMetal = 8
+    kDLVPI = 9
+    kDLROCM = 10
+    kDLROCMHost = 11
+    kDLCUDAManaged = 13
+    kDLOneAPI = 14
+
+    def __str__(self):
+        return {
+            self.kDLCPU : "CPU",
+            self.kDLCUDA: "CUDA",
+            self.kDLCUDAHost: "CUDAHost",
+            self.kDLOpenCL: "OpenCL",
+            self.kDLVulkan: "Vulkan",
+            self.kDLMetal: "Metal",
+            self.kDLVPI: "VPI",
+            self.kDLROCM: "ROCM",
+            self.kDLROCMHost: "ROMCHost",
+            self.kDLCUDAManaged: "CUDAManaged",
+            self.kDLOneAPI: "oneAPI",
+            }[self.value]
+
+
+class DLDevice(ctypes.Structure):
+    """Represents the device where DLTensor memory is allocated.
+    The device is represented by the pair of fields:
+       device_type: DLDeviceType
+       device_id: c_int
+    """
+    _fields_ = [
+        ("device_type", DLDeviceType),
+        ("device_id", ctypes.c_int),
+    ]
+
+
+class DLDataTypeCode(ctypes.c_uint8):
+    """An integer that encodes the category of DLTensor elements' data type."""
+    kDLInt = 0
+    kDLUInt = 1
+    kDLFloat = 2
+    kDLOpaquePointer = 3
+    kDLBfloat = 4
+    kDLComplex = 5
+
+    def __str__(self):
+        return {
+            self.kDLInt: "int",
+            self.kDLUInt: "uint",
+            self.kDLFloat: "float",
+            self.kDLBfloat: "bfloat",
+            self.kDLComplex: "complex",
+            self.kDLOpaquePointer: "void_p"
+        }[self.value]
+
+
+class DLDataType(ctypes.Structure):
+    """Descriptor of data type for elements of DLTensor.
+    The data type is described by a triple, `DLDataType.type_code`,
+    `DLDataType.bits`, and `DLDataType.lanes`.
+
+    The element is understood as packed `lanes` repetitions of
+    elements from `type_code` data-category of width `bits`.
+    """
+    _fields_ = [
+        ("type_code", DLDataTypeCode),
+        ("bits", ctypes.c_uint8),
+        ("lanes", ctypes.c_uint16),
+    ]
+    TYPE_MAP = {
+        "bool": (DLDataTypeCode.kDLUInt, 1, 1),
+        "int8": (DLDataTypeCode.kDLInt, 8, 1),
+        "int16": (DLDataTypeCode.kDLInt, 16, 1),
+        "int32": (DLDataTypeCode.kDLInt, 32, 1),
+        "int64": (DLDataTypeCode.kDLInt, 64, 1),
+        "uint8": (DLDataTypeCode.kDLUInt, 8, 1),
+        "uint16": (DLDataTypeCode.kDLUInt, 16, 1),
+        "uint32": (DLDataTypeCode.kDLUInt, 32, 1),
+        "uint64": (DLDataTypeCode.kDLUInt, 64, 1),
+        "float16": (DLDataTypeCode.kDLFloat, 16, 1),
+        "float32": (DLDataTypeCode.kDLFloat, 32, 1),
+        "float64": (DLDataTypeCode.kDLFloat, 64, 1),
+        "complex64": (DLDataTypeCode.kDLComplex, 64, 1),
+        "complex128": (DLDataTypeCode.kDLComplex, 128, 1)
+    }
+
+
+class DLTensor(ctypes.Structure):
+    """Structure describing strided layout of DLTensor.
+    Fields are:
+       data:  void pointer
+       device: DLDevice
+       ndim: number of indices needed to reference an
+             element of the tensor
+       dtype: data type descriptor
+       shape: tuple with lengths of the corresponding
+              tensor dimensions
+       strides: tuple of numbers of array elements to
+                step in each dimension when traversing
+                the tensor
+       byte_offset: data + byte_offset gives the address of
+                tensor element with index (0,) * ndim
+    """
+    _fields_ = [
+        ("data", ctypes.c_void_p),
+        ("device", DLDevice),
+        ("ndim", ctypes.c_int),
+        ("dtype", DLDataType),
+        ("shape", ctypes.POINTER(ctypes.c_int64)),
+        ("strides", ctypes.POINTER(ctypes.c_int64)),
+        ("byte_offset", ctypes.c_uint64),
+    ]
+
+
+class DLManagedTensor(ctypes.Structure):
+    """Structure storing the pointer to the tensor descriptor,
+    deleter callable for the tensor descriptor, and pointer to
+    some additional data. These are stored in fields `dl_tensor`,
+    `deleter`, and `manager_ctx`."""
+    _fields_ = [
+        ("dl_tensor", DLTensor),
+        ("manager_ctx", ctypes.c_void_p),
+        ("deleter", ctypes.CFUNCTYPE(None, ctypes.c_void_p)),
+    ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/dlpack/from_numpy.py 
new/dlpack-0.8/apps/numpy_dlpack/dlpack/from_numpy.py
--- old/dlpack-0.6/apps/numpy_dlpack/dlpack/from_numpy.py       1970-01-01 
01:00:00.000000000 +0100
+++ new/dlpack-0.8/apps/numpy_dlpack/dlpack/from_numpy.py       2023-01-05 
19:38:50.000000000 +0100
@@ -0,0 +1,98 @@
+from typing import Callable
+import numpy as np
+import ctypes
+
+from .dlpack import DLManagedTensor, DLDevice, DLDataType, _c_str_dltensor
+
+
+ctypes.pythonapi.PyMem_RawMalloc.restype = ctypes.c_void_p
+ctypes.pythonapi.PyMem_RawFree.argtypes = [ctypes.c_void_p]
+
+ctypes.pythonapi.PyCapsule_New.restype=ctypes.py_object
+ctypes.pythonapi.PyCapsule_New.argtypes=[ctypes.c_void_p, ctypes.c_char_p, 
ctypes.c_void_p]
+
+
+class _Holder:
+    """A wrapper around a numpy array to keep track of references to the 
underlying memory.
+
+    Parameters
+    ----------
+    np_array : np.ndarray
+        The numpy array that will be converted to a DLPack tensor and must be 
managed.
+    """
+
+    def __init__(self, np_array: np.ndarray) -> None:
+        self.np_array = np_array
+        self.data = np_array.ctypes.data_as(ctypes.c_void_p)
+        self.shape = np_array.ctypes.shape_as(ctypes.c_int64)
+        self.strides = np_array.ctypes.strides_as(ctypes.c_int64)
+        for i in range(np_array.ndim):
+            self.strides[i] //= np_array.itemsize
+
+    def _as_manager_ctx(self) -> ctypes.c_void_p:
+        py_obj = ctypes.py_object(self)
+        py_obj_ptr = ctypes.pointer(py_obj)
+        ctypes.pythonapi.Py_IncRef(py_obj)
+        ctypes.pythonapi.Py_IncRef(ctypes.py_object(py_obj_ptr))
+        return ctypes.cast(py_obj_ptr, ctypes.c_void_p)
+
+
+@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+def _numpy_array_deleter(handle: ctypes.c_void_p) -> None:
+    """A function to deallocate the memory of a numpy array."""
+    dl_managed_tensor = DLManagedTensor.from_address(handle)
+    py_obj_ptr = ctypes.cast(
+        dl_managed_tensor.manager_ctx, ctypes.POINTER(ctypes.py_object)
+    )
+    py_obj = py_obj_ptr.contents
+    ctypes.pythonapi.Py_DecRef(py_obj)
+    ctypes.pythonapi.Py_DecRef(ctypes.py_object(py_obj_ptr))
+    ctypes.pythonapi.PyMem_RawFree(handle)
+
+
+@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
+def _numpy_pycapsule_deleter(handle: ctypes.c_void_p) -> None:
+    """A function to deallocate a pycapsule that wraps a numpy array."""
+    pycapsule: ctypes.py_object = ctypes.cast(handle, ctypes.py_object)
+    if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
+        dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(
+            pycapsule, _c_str_dltensor
+        )
+        _numpy_array_deleter(dl_managed_tensor)
+        ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None)
+
+
+def from_numpy(np_array: np.ndarray):
+    """Convert a numpy array to another type of dlpack compatible array.
+
+    Parameters
+    ----------
+    np_array : np.ndarray
+        The source numpy array that will be converted.
+
+    Returns
+    -------
+    pycapsule : PyCapsule
+        A pycapsule containing a DLManagedTensor that can be converted
+        to other array formats without copying the underlying memory.
+    """
+    holder = _Holder(np_array)
+    size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor))
+    dl_managed_tensor = DLManagedTensor.from_address(
+        ctypes.pythonapi.PyMem_RawMalloc(size)
+    )
+    dl_managed_tensor.dl_tensor.data = holder.data
+    dl_managed_tensor.dl_tensor.device = DLDevice(1, 0)
+    dl_managed_tensor.dl_tensor.ndim = np_array.ndim
+    dl_managed_tensor.dl_tensor.dtype = 
DLDataType.TYPE_MAP[str(np_array.dtype)]
+    dl_managed_tensor.dl_tensor.shape = holder.shape
+    dl_managed_tensor.dl_tensor.strides = holder.strides
+    dl_managed_tensor.dl_tensor.byte_offset = 0
+    dl_managed_tensor.manager_ctx = holder._as_manager_ctx()
+    dl_managed_tensor.deleter = _numpy_array_deleter
+    pycapsule = ctypes.pythonapi.PyCapsule_New(
+        ctypes.byref(dl_managed_tensor),
+        _c_str_dltensor,
+        _numpy_pycapsule_deleter,
+    )
+    return pycapsule
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/dlpack/to_numpy.py 
new/dlpack-0.8/apps/numpy_dlpack/dlpack/to_numpy.py
--- old/dlpack-0.6/apps/numpy_dlpack/dlpack/to_numpy.py 1970-01-01 
01:00:00.000000000 +0100
+++ new/dlpack-0.8/apps/numpy_dlpack/dlpack/to_numpy.py 2023-01-05 
19:38:50.000000000 +0100
@@ -0,0 +1,79 @@
+import ctypes
+import numpy as np
+from .dlpack import _c_str_dltensor, DLManagedTensor, DLTensor
+
+ctypes.pythonapi.PyCapsule_IsValid.restype = ctypes.c_int
+ctypes.pythonapi.PyCapsule_IsValid.argtypes = [ctypes.py_object, 
ctypes.c_char_p]
+
+ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
+ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, 
ctypes.c_char_p]
+
+def _array_interface_from_dl_tensor(dlt):
+    """Constructs NumPy's array_interface dictionary
+    from `dlpack.DLTensor` descriptor."""
+    assert isinstance(dlt, DLTensor)
+    shape = tuple(dlt.shape[dim] for dim in range(dlt.ndim))
+    itemsize = dlt.dtype.lanes * dlt.dtype.bits // 8
+    if dlt.strides:
+        strides = tuple(
+            dlt.strides[dim] * itemsize for dim in range(dlt.ndim)
+        )
+    else:
+        # Array is compact, make it numpy compatible.
+        strides = []
+        for i, s in enumerate(shape):
+            cumulative = 1
+            for e in range(i + 1, dlt.ndim):
+                cumulative *= shape[e]
+            strides.append(cumulative * itemsize)
+        strides = tuple(strides)
+    typestr = "|" + str(dlt.dtype.type_code)[0] + str(itemsize)
+    return dict(
+        version=3,
+        shape=shape,
+        strides=strides,
+        data=(dlt.data, True),
+        offset=dlt.byte_offset,
+        typestr=typestr,
+    )
+
+
+class _Holder:
+    """A wrapper that combines a pycapsule and array_interface for consumption 
by  numpy.
+
+    Parameters
+    ----------
+    array_interface : dict
+        A description of the underlying memory.
+
+    pycapsule : PyCapsule
+        A wrapper around the dlpack tensor that will be converted to numpy.
+    """
+
+    def __init__(self, array_interface, pycapsule) -> None:
+        self.__array_interface__ = array_interface
+        self._pycapsule = pycapsule
+
+
+def to_numpy(pycapsule) -> np.ndarray:
+    """Convert a dlpack tensor into a numpy array without copying.
+
+    Parameters
+    ----------
+    pycapsule : PyCapsule
+        A pycapsule wrapping a dlpack tensor that will be converted.
+
+    Returns
+    -------
+    np_array : np.ndarray
+        A new numpy array that uses the same underlying memory as the input
+        pycapsule.
+    """
+    assert ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor)
+    dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(
+        pycapsule, _c_str_dltensor
+    )
+    dl_managed_tensor_ptr = ctypes.cast(dl_managed_tensor, 
ctypes.POINTER(DLManagedTensor))
+    dl_managed_tensor = dl_managed_tensor_ptr.contents
+    holder = 
_Holder(_array_interface_from_dl_tensor(dl_managed_tensor.dl_tensor), pycapsule)
+    return np.ctypeslib.as_array(holder)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/test.py 
new/dlpack-0.8/apps/numpy_dlpack/test.py
--- old/dlpack-0.6/apps/numpy_dlpack/test.py    1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/apps/numpy_dlpack/test.py    2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,36 @@
+import tvm
+import numpy as np
+from dlpack import from_numpy, to_numpy
+
+
+def test_from_numpy():
+    """Test the copy free conversion of numpy to a tvm ndarray."""
+    np_array = np.random.normal(size=[10, 10])
+    np_array_ref = np_array.copy()
+    tvm_array = tvm.nd.from_dlpack(from_numpy(np_array))
+    del np_array
+    np.testing.assert_equal(actual=tvm_array.numpy(), desired=np_array_ref)
+    del tvm_array
+
+
+def test_to_numpy():
+    """Test the copy free conversion of a tvm ndarray to a numpy array"""
+    tvm_array = tvm.nd.array(np.random.normal(size=[10, 10]))
+    np_array_ref = tvm_array.numpy()
+    np_array = to_numpy(tvm_array.__dlpack__())
+    del tvm_array
+    np.testing.assert_equal(actual=np_array, desired=np_array_ref)
+    del np_array
+
+
+if __name__ == "__main__":
+    """
+    Run both tests a bunch of times to make
+    sure the conversions and memory management are stable.
+    """
+    print("### Testing from_numpy")
+    for i in range(10000):
+        test_from_numpy()
+    print("### Testing to_numpy")
+    for i in range(10000):
+        test_to_numpy()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/apps/numpy_dlpack/test_pure_numpy.py 
new/dlpack-0.8/apps/numpy_dlpack/test_pure_numpy.py
--- old/dlpack-0.6/apps/numpy_dlpack/test_pure_numpy.py 1970-01-01 
01:00:00.000000000 +0100
+++ new/dlpack-0.8/apps/numpy_dlpack/test_pure_numpy.py 2023-01-05 
19:38:50.000000000 +0100
@@ -0,0 +1,38 @@
+import numpy as np
+from dlpack import from_numpy, to_numpy
+
+
+def test_to_from_numpy_zero_copy():
+    """Test the copy free conversion of numpy array via DLPack."""
+    np_ary = np.random.normal(size=[10, 10])
+    np_ary_big = np.random.normal(size=[12, 10])
+    dlpack_capsule = from_numpy(np_ary_big)
+    reconstructed_ary = to_numpy(dlpack_capsule)
+    del dlpack_capsule
+    np_ary_big[1:11] = np_ary
+    del np_ary_big
+    np.testing.assert_equal(actual=reconstructed_ary[1:11], desired=np_ary)
+
+
+def test_to_from_numpy_memory():
+    """Test that DLPack capsule keeps the source array alive"""
+    source_array = np.random.normal(size=[10, 10])
+    np_array_ref = source_array.copy()
+    dlpack_capsule = from_numpy(source_array)
+    del source_array
+    reconstructed_array = to_numpy(dlpack_capsule)
+    del dlpack_capsule
+    np.testing.assert_equal(actual=reconstructed_array, desired=np_array_ref)
+
+
+if __name__ == "__main__":
+    """
+    Run both tests a bunch of times to make
+    sure the conversions and memory management are stable.
+    """
+    print("### Running `test_to_from_numpy_zero_copy`")
+    for i in range(10000):
+        test_to_from_numpy_zero_copy()
+    print("### Running `test_to_from_numpy_memory`")
+    for i in range(10000):
+        test_to_from_numpy_memory()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/doc_requirements.txt 
new/dlpack-0.8/doc_requirements.txt
--- old/dlpack-0.6/doc_requirements.txt 1970-01-01 01:00:00.000000000 +0100
+++ new/dlpack-0.8/doc_requirements.txt 2023-01-05 19:38:50.000000000 +0100
@@ -0,0 +1,3 @@
+Sphinx==4.4.0
+pydata-sphinx-theme==0.7.1
+breathe==4.31.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/CMakeLists.txt 
new/dlpack-0.8/docs/CMakeLists.txt
--- old/dlpack-0.6/docs/CMakeLists.txt  2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/docs/CMakeLists.txt  2023-01-05 19:38:50.000000000 +0100
@@ -6,13 +6,32 @@
 # TODO: add config file
 set(doxyfile_in ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
 set(doxyfile    ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
+set(doxygen_output_dir ${CMAKE_CURRENT_BINARY_DIR}/doxygen)
 
 configure_file(${doxyfile_in} ${doxyfile} @ONLY)
 
-add_custom_target(docs
+file(MAKE_DIRECTORY ${doxygen_output_dir})
+
+add_custom_target(Doxygen ALL
     COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
     COMMENT "Generating API documentation with Doxygen"
     VERBATIM)
 
-install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html DESTINATION 
share/${PROJECT_NAME}/docs)
\ No newline at end of file
+find_program(SPHINX_EXECUTABLE
+    NAMES sphinx-build
+    DOC "Path to sphinx-build executable")
+
+set(sphinx_source ${CMAKE_CURRENT_SOURCE_DIR}/source)
+set(sphinx_build ${CMAKE_CURRENT_BINARY_DIR}/build/latest)
+set(doxygen_xml_builddir ${doxygen_output_dir}/xml)
+
+add_custom_target(Sphinx ALL
+    COMMAND ${SPHINX_EXECUTABLE} -b html
+    -Dbreathe_projects.dlpack=${doxygen_xml_builddir}
+    ${sphinx_source} ${sphinx_build} -WT --keep-going
+    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+    COMMENT "Generating documentation with Sphinx"
+    VERBATIM)
+
+install(DIRECTORY ${sphinx_build} DESTINATION share/${PROJECT_NAME}/docs)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/Doxyfile new/dlpack-0.8/docs/Doxyfile
--- old/dlpack-0.6/docs/Doxyfile        2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/docs/Doxyfile        2023-01-05 19:38:50.000000000 +0100
@@ -38,13 +38,13 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         =
+PROJECT_NUMBER         = 0.6
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
 # quick idea about the purpose of the project. Keep the description short.
 
-PROJECT_BRIEF          =
+PROJECT_BRIEF          = "Common in-memory tensor structure and operator 
interface for deep learning and other systems"
 
 # With the PROJECT_LOGO tag one can specify an logo or icon that is included in
 # the documentation. The maximum height of the logo should not exceed 55 pixels
@@ -230,11 +230,6 @@
 
 ALIASES                =
 
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST              =
 
 # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C 
sources
 # only. Doxygen will then generate output that is more tailored for C. For
@@ -1008,13 +1003,6 @@
 
 ALPHABETICAL_INDEX     = YES
 
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX    = 5
-
 # In case all classes in a project start with a common prefix, all classes will
 # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
 # can be used to specify a prefix (or a list of prefixes) that should be 
ignored
@@ -1030,7 +1018,7 @@
 # If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
 # The default value is: YES.
 
-GENERATE_HTML          = YES
+GENERATE_HTML          = NO
 
 # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front 
of
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/Doxyfile.in 
new/dlpack-0.8/docs/Doxyfile.in
--- old/dlpack-0.6/docs/Doxyfile.in     2021-07-01 16:01:05.000000000 +0200
+++ new/dlpack-0.8/docs/Doxyfile.in     2023-01-05 19:38:50.000000000 +0100
@@ -1,14 +1,18 @@
 PROJECT_NAME           = @PROJECT_NAME@
 PROJECT_NUMBER         = @PROJECT_VERSION@
-PROJECT_BRIEF          = "RFC for common tensor and operator guideline in deep 
learning system"
+PROJECT_BRIEF          = "Common in-memory tensor structure and operator 
interface for deep learning and other systems"
 STRIP_FROM_PATH        = @PROJECT_SOURCE_DIR@ \
                          @PROJECT_BINARY_DIR@
 OUTPUT_LANGUAGE        = English
 FILE_PATTERNS          = *.h *.md
 RECURSIVE              = YES
+INPUT                  = @CMAKE_SOURCE_DIR@/include/dlpack
 IMAGE_PATH             = @CMAKE_SOURCE_DIR@/docs
 USE_MDFILE_AS_MAINPAGE = @CMAKE_SOURCE_DIR@/docs/readme.md
 JAVADOC_AUTOBRIEF      = YES
-GENERATE_HTML          = YES
+GENERATE_HTML          = NO
 GENERATE_LATEX         = NO
-OUTPUT_DIRECTORY       = @CMAKE_BINARY_DIR@/docs/doxygen
\ No newline at end of file
+GENERATE_XML           = YES
+XML_OUTPUT             = xml
+XML_PROGRAMLISTING     = YES
+OUTPUT_DIRECTORY       = @CMAKE_BINARY_DIR@/docs/doxygen
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/Makefile new/dlpack-0.8/docs/Makefile
--- old/dlpack-0.6/docs/Makefile        1970-01-01 01:00:00.000000000 +0100
+++ new/dlpack-0.8/docs/Makefile        2023-01-05 19:38:50.000000000 +0100
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?= -Dbreathe_projects.dlpack=../doxygen/xml -WT --keep-going
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build/latest
+
+# Put it first so that "make" without argument is like "make help".
+help:
+       @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+       @$(SPHINXBUILD) -b $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/README.md 
new/dlpack-0.8/docs/README.md
--- old/dlpack-0.6/docs/README.md       1970-01-01 01:00:00.000000000 +0100
+++ new/dlpack-0.8/docs/README.md       2023-01-05 19:38:50.000000000 +0100
@@ -0,0 +1,13 @@
+# Documentation for DLPack
+
+## Building Locally
+
+The following dependencies must be downloaded in order to build docs:
+
+- Doxygen (On debian distros, simply run `sudo apt -y install doxygen`)
+- Python dependencies present in the `doc_requirements.txt` file in the root 
directory of the project. Run `python3 -m pip install -r doc_requirements.txt` 
to install them.
+
+Once the dependencies are installed, docs can be built using either CMake or 
the Makefile from the root directory of the project.
+
+- Using Makefile: Run `make doc` to build the HTML pages. Run `make show_docs` 
to serve the website locally.
+- Using CMake: Build with `BUILD_DOCS` option `ON`: `mkdir -p build && cd 
build && cmake .. -DBUILD_DOCS=ON && make`
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/make.bat new/dlpack-0.8/docs/make.bat
--- old/dlpack-0.6/docs/make.bat        1970-01-01 01:00:00.000000000 +0100
+++ new/dlpack-0.8/docs/make.bat        2023-01-05 19:38:50.000000000 +0100
@@ -0,0 +1,36 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+       set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set SPHINXOPTS=-Dbreathe_projects.dlpack=../doxygen/xml -WT --keep-going
+set BUILDDIR=build/latest
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+       echo.
+       echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+       echo.installed, then set the SPHINXBUILD environment variable to point
+       echo.to the full path of the 'sphinx-build' executable. Alternatively 
you
+       echo.may add the Sphinx directory to PATH.
+       echo.
+       echo.If you don't have Sphinx installed, grab it from
+       echo.https://www.sphinx-doc.org/
+       exit /b 1
+)
+
+%SPHINXBUILD% -b %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
Binary files old/dlpack-0.6/docs/source/_static/images/DLPack_diagram.png and 
new/dlpack-0.8/docs/source/_static/images/DLPack_diagram.png differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/source/c_api.rst 
new/dlpack-0.8/docs/source/c_api.rst
--- old/dlpack-0.6/docs/source/c_api.rst        1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/docs/source/c_api.rst        2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,35 @@
+.. _c_api:
+
+C API (``dlpack.h``)
+====================
+
+Macros
+~~~~~~
+
+.. doxygendefine:: DLPACK_EXTERN_C
+
+.. doxygendefine:: DLPACK_VERSION
+
+.. doxygendefine:: DLPACK_DLL
+
+Enumerations
+~~~~~~~~~~~~
+
+.. doxygenenum:: DLDeviceType
+
+.. doxygenenum:: DLDataTypeCode
+
+Structs
+~~~~~~~
+
+.. doxygenstruct:: DLDevice
+   :members:
+
+.. doxygenstruct:: DLDataType
+   :members:
+
+.. doxygenstruct:: DLTensor
+   :members:
+
+.. doxygenstruct:: DLManagedTensor
+   :members:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/source/conf.py 
new/dlpack-0.8/docs/source/conf.py
--- old/dlpack-0.6/docs/source/conf.py  1970-01-01 01:00:00.000000000 +0100
+++ new/dlpack-0.8/docs/source/conf.py  2023-01-05 19:38:50.000000000 +0100
@@ -0,0 +1,82 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'DLPack'
+copyright = '2022, DLPack contributors'
+author = 'DLPack contributors'
+
+# The full version, including alpha/beta/rc tags
+release = '0.6.0'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.intersphinx', 'breathe']
+
+# Add any paths that contain templates here, relative to this directory.
+# templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
+
+intersphinx_mapping = {
+    "array_api": ("https://data-apis.org/array-api/latest", None),
+}
+
+html_use_index = True
+html_domain_indices = True
+
+html_theme_options = {
+    "github_url": "https://github.com/dmlc/dlpack",
+    "use_edit_page_button": True,
+    "show_toc_level": 1,
+}
+
+html_context = {
+    "github_user": "dmlc",
+    "github_repo": "dlpack",
+    "github_version": "main",
+    "doc_path": "docs/source",
+}
+
+
+# -- Breathe configuration ---------------------------------------------------
+
+# Tell breathe what to call the project. This is used to link XML files
+# generated by Doxygen by passing a command line option:
+# -Dbreathe_projects.<breathe_default_project>=</path/to/xml/index/directory>
+breathe_default_project = "dlpack"
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'pydata_sphinx_theme' # 'alabaster'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/source/index.rst 
new/dlpack-0.8/docs/source/index.rst
--- old/dlpack-0.6/docs/source/index.rst        1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/docs/source/index.rst        2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,82 @@
+Welcome to DLPack's documentation!
+==================================
+
+
+Purpose
+~~~~~~~
+
+In order for an ndarray system to interact with a variety of frameworks, a
+stable in-memory data structure is needed.
+
+DLPack is one such data structure that allows exchange between major
+frameworks. It is developed with inputs from many deep learning system core
+developers. Highlights include:
+
+* Minimum and stable: :ref:`simple header <c_api>`
+* Designed for cross hardware: CPU, CUDA, OpenCL, Vulkan, Metal, VPI, ROCm,
+  WebGPU, Hexagon
+* Already a standard with wide community adoption and support:
+
+  * `NumPy 
<https://numpy.org/doc/stable/release/1.22.0-notes.html#add-nep-47-compatible-dlpack-support>`_
+  * `CuPy 
<https://docs.cupy.dev/en/stable/reference/generated/cupy.fromDlpack.html>`_
+  * `PyTorch <https://pytorch.org/docs/stable/dlpack.html>`_
+  * `Tensorflow 
<https://www.tensorflow.org/api_docs/python/tf/experimental/dlpack/from_dlpack>`_
+  * `MXNet 
<https://mxnet.apache.org/versions/master/api/python/docs/_modules/mxnet/dlpack.html>`_
+  * `TVM 
<https://tvm.apache.org/docs/reference/api/python/contrib.html#module-tvm.contrib.dlpack>`_
+  * `mpi4py 
<https://mpi4py.readthedocs.io/en/stable/overview.html#support-for-gpu-aware-mpi>`_
+
+* Clean C ABI compatible.
+
+  * Means you can create and access it from any language.
+  * It is also essential for building JIT and AOT compilers to support these
+    data types.
+
+
+Scope
+~~~~~
+
+The main design rationale of DLPack is the minimalism. DLPack drops the
+consideration of allocator, device API and focus on the minimum data
+structure. While still considering the need for cross hardware support
+(e.g. the data field is opaque for platforms that does not support normal
+addressing).
+
+It also simplifies some of the design to remove legacy issues (e.g. everything
+assumes to be row major, strides can be used to support other case, and avoid
+the complexity to consider more layouts).
+
+
+Roadmap
+~~~~~~~
+
+* C API that could be exposed as a new Python attribute ``__dlpack_info__``
+  for returning API and ABI versions. (see `#34 
<https://github.com/dmlc/dlpack/issues/34>`_,
+  `#72 <https://github.com/dmlc/dlpack/pull/72>`_)
+* Clarify alignment requirements. (see
+  `data-apis/array-api#293 
<https://github.com/data-apis/array-api/issues/293>`_,
+  `numpy/numpy#20338 <https://github.com/numpy/numpy/issues/20338>`_,
+  `data-apis/array-api#293 (comment) 
<https://github.com/data-apis/array-api/issues/293#issuecomment-964434449>`_)
+* Adding support for boolean data type (see `#75 
<https://github.com/dmlc/dlpack/issues/75>`_)
+* Adding a read-only flag (ABI break) or making it a hard requirement in the 
spec that
+  imported arrays should be treated as read-only. (see
+  `data-apis/consortium-feedback#1 (comment) 
<https://github.com/data-apis/consortium-feedback/issues/1#issuecomment-675857753>`_,
+  `data-apis/array-api#191 
<https://github.com/data-apis/array-api/issues/191>`_)
+* Standardize C interface for stream exchange. (see `#74 
<https://github.com/dmlc/dlpack/issues/74>`_,
+  `#65 <https://github.com/dmlc/dlpack/issues/65>`_)
+
+
+DLPack Documentation
+~~~~~~~~~~~~~~~~~~~~
+
+.. toctree::
+   :maxdepth: 2
+
+   c_api
+   python_spec
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/docs/source/python_spec.rst 
new/dlpack-0.8/docs/source/python_spec.rst
--- old/dlpack-0.6/docs/source/python_spec.rst  1970-01-01 01:00:00.000000000 
+0100
+++ new/dlpack-0.8/docs/source/python_spec.rst  2023-01-05 19:38:50.000000000 
+0100
@@ -0,0 +1,156 @@
+.. _python-spec:
+
+Python Specification for DLPack
+===============================
+
+The Python specification for DLPack is a part of the
+`Python array API standard 
<https://data-apis.org/array-api/latest/index.html>`_.
+More details about the spec can be found under the :ref:`data-interchange` 
page.
+
+
+Syntax for data interchange with DLPack
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The array API will offer the following syntax for data interchange:
+
+1. A ``from_dlpack(x)`` function, which accepts (array) objects with a
+   ``__dlpack__`` method and uses that method to construct a new array
+   containing the data from ``x``.
+2. ``__dlpack__(self, stream=None)`` and ``__dlpack_device__`` methods on the
+   array object, which will be called from within ``from_dlpack``, to query
+   what device the array is on (may be needed to pass in the correct
+   stream, e.g. in the case of multiple GPUs) and to access the data.
+
+
+Semantics
+~~~~~~~~~
+
+DLPack describes the memory layout of strided, n-dimensional arrays.
+When a user calls ``y = from_dlpack(x)``, the library implementing ``x`` (the
+"producer") will provide access to the data from ``x`` to the library
+containing ``from_dlpack`` (the "consumer"). If possible, this must be
+zero-copy (i.e. ``y`` will be a *view* on ``x``). If not possible, that library
+may make a copy of the data. In both cases:
+
+- The producer keeps owning the memory
+- ``y`` may or may not be a view, therefore the user must keep the 
recommendation to
+  avoid mutating ``y`` in mind - see :ref:`copyview-mutability`.
+- Both ``x`` and ``y`` may continue to be used just like arrays created in 
other ways.
+
+If an array that is accessed via the interchange protocol lives on a
+device that the requesting library does not support, it is recommended to
+raise a ``TypeError``.
+
+Stream handling through the ``stream`` keyword applies to CUDA and ROCm 
(perhaps
+to other devices that have a stream concept as well, however those haven't been
+considered in detail). The consumer must pass the stream it will use to the
+producer; the producer must synchronize or wait on the stream when necessary.
+In the common case of the default stream being used, synchronization will be
+unnecessary so asynchronous execution is enabled.
+
+
+Implementation
+~~~~~~~~~~~~~~
+
+*Note that while this API standard largely tries to avoid discussing
+implementation details, some discussion and requirements are needed
+here because data interchange requires coordination between
+implementers on, e.g., memory management.*
+
+.. image:: /_static/images/DLPack_diagram.png
+  :alt: Diagram of DLPack structs
+
+*DLPack diagram. Dark blue are the structs it defines, light blue
+struct members, gray text enum values of supported devices and data
+types.*
+
+The ``__dlpack__`` method will produce a ``PyCapsule`` containing a
+``DLManagedTensor``, which will be consumed immediately within
+``from_dlpack`` - therefore it is consumed exactly once, and it will not be
+visible to users of the Python API.
+
+The producer must set the ``PyCapsule`` name to ``"dltensor"`` so that
+it can be inspected by name, and set ``PyCapsule_Destructor`` that calls
+the ``deleter`` of the ``DLManagedTensor`` when the ``"dltensor"``-named
+capsule is no longer needed.
+
+The consumer must transfer ownership of the ``DLManagedTensor`` from the
+capsule to its own object. It does so by renaming the capsule to
+``"used_dltensor"`` to ensure that ``PyCapsule_Destructor`` will not get
+called (ensured if ``PyCapsule_Destructor`` calls ``deleter`` only for
+capsules whose name is ``"dltensor"``), but the ``deleter`` of the
+``DLManagedTensor`` will be called by the destructor of the consumer
+library object created to own the ``DLManagedTensor`` obtained from the
+capsule. Below is an example of the capsule deleter written in the Python
+C API which is called either when the refcount on the capsule named
+``"dltensor"`` reaches zero or the consumer decides to deallocate its array:
+
+.. code-block:: C
+
+   static void dlpack_capsule_deleter(PyObject *self){
+      if (PyCapsule_IsValid(self, "used_dltensor")) {
+         return; /* Do nothing if the capsule has been consumed. */
+      }
+
+      /* an exception may be in-flight, we must save it in case we create 
another one */
+      PyObject *type, *value, *traceback;
+      PyErr_Fetch(&type, &value, &traceback);
+
+      DLManagedTensor *managed = (DLManagedTensor *)PyCapsule_GetPointer(self, 
"dltensor");
+      if (managed == NULL) {
+         PyErr_WriteUnraisable(self);
+         goto done;
+      }
+      /* the spec says the deleter can be NULL if there is no way for the 
caller to provide a reasonable destructor. */
+      if (managed->deleter) {
+         managed->deleter(managed);
+         /* TODO: is the deleter allowed to set a python exception? */
+         assert(!PyErr_Occurred());
+      }
+
+   done:
+      PyErr_Restore(type, value, traceback);
+   }
+
+Note: the capsule names ``"dltensor"`` and ``"used_dltensor"`` must be
+statically allocated.
+
+When the ``strides`` field in the ``DLTensor`` struct is ``NULL``, it 
indicates a
+row-major compact array. If the array is of size zero, the data pointer in
+``DLTensor`` should be set to either ``NULL`` or ``0``.
+
+DLPack version used must be ``0.2 <= DLPACK_VERSION < 1.0``. For further
+details on DLPack design and how to implement support for it,
+refer to `github.com/dmlc/dlpack <https://github.com/dmlc/dlpack>`_.
+
+.. warning::
+   DLPack contains a ``device_id``, which will be the device
+   ID (an integer, ``0, 1, ...``) which the producer library uses. In
+   practice this will likely be the same numbering as that of the
+   consumer, however that is not guaranteed. Depending on the hardware
+   type, it may be possible for the consumer library implementation to
+   look up the actual device from the pointer to the data - this is
+   possible for example for CUDA device pointers.
+
+   It is recommended that implementers of this array API consider and document
+   whether the ``.device`` attribute of the array returned from 
``from_dlpack`` is
+   guaranteed to be in a certain order or not.
+
+
+Reference Implementations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Several Python libraries have adopted this standard using Python C API, C++, 
Cython,
+ctypes, cffi, etc:
+
+* NumPy: `Python C API 
<https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/dlpack.c>`__
+* CuPy: `Cython 
<https://github.com/cupy/cupy/blob/master/cupy/_core/dlpack.pyx>`__
+* Tensorflow: `C++ 
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/c/eager/dlpack.cc>`__,
+  `Python wrapper using Python C API 
<https://github.com/tensorflow/tensorflow/blob/a97b01a4ff009ed84a571c138837130a311e74a7/tensorflow/python/tfe_wrapper.cc#L1562>`__,
+  `XLA 
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/python/dlpack.cc>`__
+* PyTorch: `C++ 
<https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/DLConvertor.cpp>`__,
+  `Python wrapper using Python C API 
<https://github.com/pytorch/pytorch/blob/c22b8a42e6038ed2f6a161114cf3d8faac3f6e9a/torch/csrc/Module.cpp#L355>`__
+* MXNet: `ctypes 
<https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/dlpack.py>`__
+* TVM: `ctypes 
<https://github.com/apache/tvm/blob/main/python/tvm/_ffi/_ctypes/ndarray.py>`__,
+  `Cython 
<https://github.com/apache/tvm/blob/main/python/tvm/_ffi/_cython/ndarray.pxi>`__
+* mpi4py: `Cython 
<https://github.com/mpi4py/mpi4py/blob/master/src/mpi4py/MPI/asdlpack.pxi>`_
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dlpack-0.6/include/dlpack/dlpack.h 
new/dlpack-0.8/include/dlpack/dlpack.h
--- old/dlpack-0.6/include/dlpack/dlpack.h      2021-07-01 16:01:05.000000000 
+0200
+++ new/dlpack-0.8/include/dlpack/dlpack.h      2023-01-05 19:38:50.000000000 
+0100
@@ -6,6 +6,9 @@
 #ifndef DLPACK_DLPACK_H_
 #define DLPACK_DLPACK_H_
 
+/**
+ * \brief Compatibility with C++
+ */
 #ifdef __cplusplus
 #define DLPACK_EXTERN_C extern "C"
 #else
@@ -13,7 +16,10 @@
 #endif
 
 /*! \brief The current version of dlpack */
-#define DLPACK_VERSION 60
+#define DLPACK_VERSION 80
+
+/*! \brief The current ABI version of dlpack */
+#define DLPACK_ABI_VERSION 1
 
 /*! \brief DLPACK_DLL prefix for windows */
 #ifdef _WIN32
@@ -35,7 +41,11 @@
 /*!
  * \brief The device type in DLDevice.
  */
+#ifdef __cplusplus
+typedef enum : int32_t {
+#else
 typedef enum {
+#endif
   /*! \brief CPU device */
   kDLCPU = 1,
   /*! \brief CUDA GPU device */
@@ -68,6 +78,17 @@
    * \brief CUDA managed/unified memory allocated by cudaMallocManaged
    */
   kDLCUDAManaged = 13,
+  /*!
+   * \brief Unified shared memory allocated on a oneAPI non-partitioned
+   * device. Call to oneAPI runtime is required to determine the device
+   * type, the USM allocation type and the sycl context it is bound to.
+   *
+   */
+  kDLOneAPI = 14,
+  /*! \brief GPU support for next generation WebGPU standard. */
+  kDLWebGPU = 15,
+  /*! \brief Qualcomm Hexagon DSP */
+  kDLHexagon = 16,
 } DLDeviceType;
 
 /*!
@@ -80,7 +101,7 @@
    * \brief The device index.
    * For vanilla CPU memory, pinned memory, or managed memory, this is set to 
0.
    */
-  int device_id;
+  int32_t device_id;
 } DLDevice;
 
 /*!
@@ -105,16 +126,21 @@
    * (C/C++/Python layout: compact struct per complex number)
    */
   kDLComplex = 5U,
+  /*! \brief boolean */
+  kDLBool = 6U,
 } DLDataTypeCode;
 
 /*!
- * \brief The data type the tensor can hold.
+ * \brief The data type the tensor can hold. The data type is assumed to 
follow the
+ * native endian-ness. An explicit error message should be raised when 
attempting to
+ * export an array with non-native endianness
  *
  *  Examples
- *   - float: type_code = 2, bits = 32, lanes=1
- *   - float4(vectorized 4 float): type_code = 2, bits = 32, lanes=4
- *   - int8: type_code = 0, bits = 8, lanes=1
+ *   - float: type_code = 2, bits = 32, lanes = 1
+ *   - float4(vectorized 4 float): type_code = 2, bits = 32, lanes = 4
+ *   - int8: type_code = 0, bits = 8, lanes = 1
  *   - std::complex<float>: type_code = 5, bits = 64, lanes = 1
+ *   - bool: type_code = 6, bits = 8, lanes = 1 (as per common array library 
convention, the underlying storage size of bool is 8 bits)
  */
 typedef struct {
   /*!
@@ -136,9 +162,16 @@
  */
 typedef struct {
   /*!
-   * \brief The opaque data pointer points to the allocated data. This will be
-   * CUDA device pointer or cl_mem handle in OpenCL. This pointer is always
-   * aligned to 256 bytes as in CUDA.
+   * \brief The data pointer points to the allocated data. This will be CUDA
+   * device pointer or cl_mem handle in OpenCL. It may be opaque on some device
+   * types. This pointer is always aligned to 256 bytes as in CUDA. The
+   * `byte_offset` field should be used to point to the beginning of the data.
+   *
+   * Note that as of Nov 2021, multiple libraries (CuPy, PyTorch, TensorFlow,
+   * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
+   * on CPU/CUDA/ROCm, and always use `byte_offset=0`.  This must be fixed
+   * (after which this note will be updated); at the moment it is recommended
+   * to not rely on the data pointer being correctly aligned.
    *
    * For given DLTensor, the size of memory required to store the contents of
    * data is calculated as follows:
@@ -158,7 +191,7 @@
   /*! \brief The device of the tensor */
   DLDevice device;
   /*! \brief Number of dimensions */
-  int ndim;
+  int32_t ndim;
   /*! \brief The data type of the pointer*/
   DLDataType dtype;
   /*! \brief The shape of the tensor */

Reply via email to