import torch
import numpy as np
import dynex
import dimod
from dynex import DynexConfig, ComputeBackend
class DynexRBM:
    """Restricted Boltzmann Machine whose negative phase is sampled on the
    Dynex platform via a QUBO formulation.

    Visible units are clamped to the batch-averaged data; hidden units are
    sampled by minimizing a QUBO built from the current weights and biases.
    """

    def __init__(self, n_visible, n_hidden, config):
        """
        Args:
            n_visible: number of visible units.
            n_hidden: number of hidden units.
            config: Dynex sampler configuration (forwarded to DynexSampler).
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.config = config
        # Small random weights, zero biases — standard RBM initialization.
        self.W = torch.randn(n_visible, n_hidden) * 0.01
        self.b_v = torch.zeros(n_visible)
        self.b_h = torch.zeros(n_hidden)

    def _build_rbm_qubo(self, v_data):
        """Build a QUBO over the hidden units with visibles clamped to the
        batch-averaged data.

        Diagonal entries encode the negated effective hidden biases
        b_h[j] + sum_i W[i, j] * <v_i>; off-diagonal entries encode the
        hidden-hidden couplings (W^T W)[j1, j2] induced by the shared
        visible layer.

        Args:
            v_data: [batch, n_visible] float tensor of (binary) data.

        Returns:
            dict mapping (j1, j2) index pairs to QUBO coefficients.
            Off-diagonal entries with magnitude <= 1e-6 are omitted.
        """
        # Vectorized: two matmuls instead of O(n_hidden^2 * n_visible)
        # Python loops. detach() guards against inputs that carry grad.
        batch_avg_v = v_data.detach().mean(0)
        # Effective hidden bias under the clamped visible average.
        eff_bias = self.b_h + self.W.t() @ batch_avg_v
        # Hidden-hidden couplings induced through the visible layer.
        couplings = self.W.t() @ self.W
        Q = {}
        for j in range(self.n_hidden):
            Q[(j, j)] = -float(eff_bias[j])
        for j1 in range(self.n_hidden):
            for j2 in range(j1 + 1, self.n_hidden):
                interaction = float(couplings[j1, j2])
                # Drop numerically negligible couplings to keep the QUBO sparse.
                if abs(interaction) > 1e-6:
                    Q[(j1, j2)] = interaction
        return Q

    def sample_hidden(self, v_data, num_reads=1000, annealing_time=200):
        """Sample hidden units given visible data using the Dynex sampler.

        Args:
            v_data: [batch, n_visible] tensor the QUBO is conditioned on.
            num_reads: number of anneal reads requested from Dynex.
            annealing_time: per-read annealing time.

        Returns:
            [n_hidden] float tensor of 0/1 hidden-unit values taken from the
            lowest-energy sample.
        """
        Q = self._build_rbm_qubo(v_data)
        bqm = dimod.BinaryQuadraticModel.from_qubo(Q)
        model = dynex.BQM(bqm)
        sampler = dynex.DynexSampler(model, config=self.config, logging=False)
        sampleset = sampler.sample(num_reads=num_reads, annealing_time=annealing_time)
        h_sample = torch.zeros(self.n_hidden)
        # NOTE(review): assumes sample keys are integer variable indices
        # aligned with hidden-unit positions — confirm against the dynex API.
        for j, val in sampleset.first.sample.items():
            if j < self.n_hidden:
                h_sample[j] = float(val)
        return h_sample

    def train_step(self, v_data, lr=0.01):
        """One contrastive-divergence-style parameter update.

        Positive statistics come from the data; negative statistics come from
        a Dynex-sampled hidden state and its mean-field visible
        reconstruction.

        Args:
            v_data: [batch, n_visible] float tensor of (binary) data.
            lr: learning rate for the gradient step.
        """
        # Positive phase: expected hidden activations under the data.
        h_prob_pos = torch.sigmoid(v_data @ self.W + self.b_h)
        pos_grad = v_data.t() @ h_prob_pos / v_data.shape[0]
        # Negative phase: model statistics via Dynex. A single lowest-energy
        # sample is used, so the negative gradient is one outer product
        # rather than a batch average.
        h_neg = self.sample_hidden(v_data)
        v_neg = torch.sigmoid(h_neg @ self.W.t() + self.b_v)
        neg_grad = v_neg.unsqueeze(1) @ h_neg.unsqueeze(0)
        # Gradient ascent on the CD approximation of the log-likelihood.
        self.W += lr * (pos_grad - neg_grad)
        self.b_h += lr * (h_prob_pos.mean(0) - h_neg)
        self.b_v += lr * (v_data.mean(0) - v_neg)
# --- Example: RBM sized for MNIST-style 28x28 binary images ---
dynex_cfg = DynexConfig(compute_backend=ComputeBackend.GPU)
rbm = DynexRBM(n_visible=784, n_hidden=128, config=dynex_cfg)
# X_train is expected to be an [n_samples, 784] binary tensor, e.g.:
# for batch in DataLoader(X_train, batch_size=32):
#     rbm.train_step(batch)