This is an automated email from the ASF dual-hosted git repository.
guanmingchiu pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/mahout.git
The following commit(s) were added to refs/heads/main by this push:
new 194903bf0 [Examples] [QDP] re-org files & add simple example for qdp
(#1085)
194903bf0 is described below
commit 194903bf00ee06f631a551647160980910f56d5b
Author: Ryan Huang <[email protected]>
AuthorDate: Tue Feb 24 00:13:25 2026 +0800
[Examples] [QDP] re-org files & add simple example for qdp (#1085)
* re-org files & add simple example for qdp
* Update simple.ipynb
* qumat only
---
.github/workflows/notebook-testing.yml | 2 +-
examples/qdp/simple.ipynb | 154 ++++++++++++++++++++++++
examples/{ => qumat}/Optimization_Example.ipynb | 0
examples/{ => qumat}/Simple_Example.ipynb | 0
examples/{ => qumat}/quantum_teleportation.py | 0
examples/{ => qumat}/simple_example.py | 0
6 files changed, 155 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/notebook-testing.yml
b/.github/workflows/notebook-testing.yml
index 2c99dc71d..87982610a 100644
--- a/.github/workflows/notebook-testing.yml
+++ b/.github/workflows/notebook-testing.yml
@@ -38,7 +38,7 @@ jobs:
- name: Run Jupyter Notebooks
run: |
- for nb in $(find . -name '*.ipynb'); do
+ for nb in $(find ./examples/qumat -name '*.ipynb'); do
echo "Executing $nb"
uv run jupyter execute "$nb" --inplace
done
diff --git a/examples/qdp/simple.ipynb b/examples/qdp/simple.ipynb
new file mode 100644
index 000000000..fa9146d42
--- /dev/null
+++ b/examples/qdp/simple.ipynb
@@ -0,0 +1,154 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "collapsed": true,
+ "id": "y5xLkFQ4sLOV",
+ "outputId": "b3b21a29-a232-4cf0-ef94-4b2be060f48b"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install qumat[qdp]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ZvmpEUJFscx-",
+ "outputId": "0c70d8eb-c7b4-4a87-914f-95c01d3fb26c"
+ },
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ "QDP + QML: Full GPU Pipeline (float64)\n",
+ "CPU → GPU (QDP batch encode) → GPU (real projection) → GPU (QML
training)\n",
+ "\"\"\"\n",
+ "\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "from qumat.qdp import QdpEngine\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 1. Setup\n",
+ "# ─────────────────────────────────────────────\n",
+ "DEVICE_ID = 0\n",
+ "TORCH_DEVICE = torch.device(\"cuda\", DEVICE_ID)\n",
+ "NUM_QUBITS = 2\n",
+ "EPOCHS = 60\n",
+ "LR = 0.01\n",
+ "\n",
+ "engine = QdpEngine(DEVICE_ID)\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 2. Raw Data on CPU — float64\n",
+ "# ─────────────────────────────────────────────\n",
+ "raw = torch.tensor([\n",
+ " [0.5, 0.5, 0.5, 0.5],\n",
+ " [0.7, 0.1, 0.5, 0.3],\n",
+ " [0.1, 0.8, 0.4, 0.4],\n",
+ " [0.6, 0.2, 0.6, 0.4],\n",
+ "], dtype=torch.float64) # ← float64\n",
+ "\n",
+ "labels = torch.tensor([0, 1, 0, 1], dtype=torch.float64,
device=TORCH_DEVICE)\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 3. CPU → GPU: QDP Batch Encode\n",
+ "# ─────────────────────────────────────────────\n",
+ "print(\"CPU → GPU: Batch encoding with QDP...\")\n",
+ "cuda_batch = raw.cuda()\n",
+ "\n",
+ "qtensor = engine.encode(cuda_batch, num_qubits=NUM_QUBITS,
encoding_method=\"amplitude\")\n",
+ "\n",
+ "# DLPack → complex128 CUDA tensor (two float64s per element)\n",
+ "X_complex = torch.from_dlpack(qtensor)\n",
+ "print(f\"Raw encoded: shape={X_complex.shape},
dtype={X_complex.dtype}, device={X_complex.device}\")\n",
+ "\n",
+ "# Concatenate real + imag → float64 [N, 8], stays on GPU\n",
+ "X_quantum = torch.cat([X_complex.real, X_complex.imag],
dim=-1).double()\n",
+ "print(f\"Real features: shape={X_quantum.shape},
dtype={X_quantum.dtype}, device={X_quantum.device}\")\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 4. QML Model on GPU — double precision\n",
+ "# ─────────────────────────────────────────────\n",
+ "class VariationalLayer(nn.Module):\n",
+ " def __init__(self, dim):\n",
+ " super().__init__()\n",
+ " self.theta = nn.Parameter(torch.randn(dim,
dtype=torch.float64))\n",
+ "\n",
+ " def forward(self, x):\n",
+ " return x * torch.cos(self.theta) + torch.roll(x, 1, dims=-1)
* torch.sin(self.theta)\n",
+ "\n",
+ "class QMLClassifier(nn.Module):\n",
+ " def __init__(self, num_qubits):\n",
+ " super().__init__()\n",
+ " dim = 2 * (2 ** num_qubits) # real + imag\n",
+ " self.layer1 = VariationalLayer(dim)\n",
+ " self.layer2 = VariationalLayer(dim)\n",
+ " self.readout = nn.Linear(dim, 1, dtype=torch.float64)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " x = torch.tanh(self.layer1(x))\n",
+ " x = self.layer2(x)\n",
+ " return torch.sigmoid(self.readout(x)).squeeze(-1)\n",
+ "\n",
+ "model = QMLClassifier(NUM_QUBITS).to(TORCH_DEVICE)\n",
+ "optimizer = optim.Adam(model.parameters(), lr=LR)\n",
+ "loss_fn = nn.BCELoss()\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 5. GPU Training\n",
+ "# ─────────────────────────────────────────────\n",
+ "print(\"\\nGPU → Training QML model...\")\n",
+ "for epoch in range(1, EPOCHS + 1):\n",
+ " model.train()\n",
+ " optimizer.zero_grad()\n",
+ " preds = model(X_quantum)\n",
+ " loss = loss_fn(preds, labels)\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ " if epoch % 10 == 0:\n",
+ " with torch.no_grad():\n",
+ " acc = ((preds > 0.5).double() ==
labels).double().mean().item()\n",
+ " print(f\"Epoch {epoch:3d} | Loss: {loss.item():.6f} |
Accuracy: {acc:.2f}\")\n",
+ "\n",
+ "# ─────────────────────────────────────────────\n",
+ "# 6. Inference\n",
+ "# ─────────────────────────────────────────────\n",
+ "model.eval()\n",
+ "with torch.no_grad():\n",
+ " predicted = (model(X_quantum) > 0.5).int()\n",
+ "\n",
+ "print(\"\\n─── Results ───\")\n",
+ "for i, (pred, true) in enumerate(zip(predicted.cpu().tolist(),
labels.int().cpu().tolist())):\n",
+ " print(f\"Sample {i}: Predicted={pred} True={true} {'✓' if pred
== true else '✗'}\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/examples/Optimization_Example.ipynb
b/examples/qumat/Optimization_Example.ipynb
similarity index 100%
rename from examples/Optimization_Example.ipynb
rename to examples/qumat/Optimization_Example.ipynb
diff --git a/examples/Simple_Example.ipynb b/examples/qumat/Simple_Example.ipynb
similarity index 100%
rename from examples/Simple_Example.ipynb
rename to examples/qumat/Simple_Example.ipynb
diff --git a/examples/quantum_teleportation.py
b/examples/qumat/quantum_teleportation.py
similarity index 100%
rename from examples/quantum_teleportation.py
rename to examples/qumat/quantum_teleportation.py
diff --git a/examples/simple_example.py b/examples/qumat/simple_example.py
similarity index 100%
rename from examples/simple_example.py
rename to examples/qumat/simple_example.py