Neuromorphic Torch Layers

The Dynex Neuromorphic Torch Layer integrates Dynex quantum computation directly into PyTorch model architectures. It behaves like a standard PyTorch module, so it can be dropped into existing models alongside classical layers, enabling:
  • Hybrid quantum-classical models — combine classical neural network layers with quantum computation
  • Neuromorphic transfer learning — fine-tune pre-trained models with quantum layers
  • Federated learning — run quantum layers across distributed compute nodes

Installation

pip install dynex torch
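A quick import check confirms that both packages resolve. A __version__ attribute on the dynex package is an assumption here, so the snippet falls back gracefully if it is absent:

import torch
import dynex

print("torch", torch.__version__)
# __version__ on dynex is assumed, not guaranteed; fall back if absent
print("dynex", getattr(dynex, "__version__", "unknown"))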

Basic usage

import torch
import torch.nn as nn
from dynex import DynexConfig, ComputeBackend

# Import the neuromorphic layer
import sys
sys.path.append('examples/utils')
from HybridQRBM.pytorchdnx import DynexTorchLayer

config = DynexConfig(compute_backend=ComputeBackend.GPU)

# Build a hybrid model: classical layers + Dynex quantum layer
class HybridModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
        )
        self.quantum_layer = DynexTorchLayer(
            n_visible=128,
            n_hidden=64,
            config=config,
            num_reads=1000,
            annealing_time=200
        )
        self.classifier = nn.Sequential(
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 10),
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.quantum_layer(x)   # Dynex quantum computation
        x = self.classifier(x)
        return x

model = HybridModel()
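A forward pass with a random batch verifies the wiring. The expected output shape follows from the classifier head above; this assumes DynexTorchLayer emits one activation per hidden unit, which the 64-wide classifier input implies:

# Sanity-check the hybrid model with a dummy batch of 8 samples
x = torch.randn(8, 784)
logits = model(x)
print(logits.shape)  # expected: torch.Size([8, 10])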

Training a hybrid model

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Dummy data
X = torch.randn(1000, 784)
y = torch.randint(0, 10, (1000,))
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

model = HybridModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

for epoch in range(5):
    total_loss = 0
    for batch_X, batch_y in loader:
        optimizer.zero_grad()
        output = model(batch_X)
        loss = criterion(output, batch_y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}: loss={total_loss/len(loader):.4f}")

Federated learning with parallel Dynex layers

import multiprocessing
from dynex import DynexConfig, ComputeBackend

def train_node(queue, node_id, local_data):
    """Train a model partition on a single federated node."""
    config = DynexConfig(compute_backend=ComputeBackend.QPU, qpu_model='apollo_rc1')
    # ... local training with the quantum layer, producing
    # local_model_weights (e.g. the local model's state_dict) ...
    queue.put((node_id, local_model_weights))

# Run N federated nodes in parallel; data_partition is assumed to be a
# list of per-node training sets. On spawn-based platforms (Windows,
# macOS), run this block under an `if __name__ == "__main__":` guard.
nodes = 4
queues = []
processes = []

for i in range(nodes):
    q = multiprocessing.Queue()
    queues.append(q)
    p = multiprocessing.Process(target=train_node, args=(q, i, data_partition[i]))
    processes.append(p)
    p.start()

# Collect results before joining: a child process may not exit until
# its queued payload has been consumed
all_weights = [q.get() for q in queues]

for p in processes:
    p.join()

# Aggregate weights (federated averaging)
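The averaging step itself is sketched below, assuming each node returned its model's state_dict with identical keys and floating-point parameter tensors:

import torch

all_weights.sort(key=lambda item: item[0])  # deterministic node order
state_dicts = [weights for _, weights in all_weights]

# Element-wise mean of each parameter tensor across nodes
avg_state = {
    key: torch.stack([sd[key] for sd in state_dicts]).mean(dim=0)
    for key in state_dicts[0]
}

global_model = HybridModel()
global_model.load_state_dict(avg_state)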

TensorFlow support

Neuromorphic layers are also available for TensorFlow.

Notebooks

Notebook                                   Description
example_neuromorphic_torch_layers.ipynb    QBM with PyTorch
Example_SVM_pytorch.ipynb                  QSVM with PyTorch
example_pytorch.ipynb                      Mode-assisted QRBM