Parallel Sampling
DynexSampler is thread-safe and can be parallelized using Python’s multiprocessing module. This is especially useful for:
- Federated learning — computing multiple network layers simultaneously
- Ensemble methods — collecting diverse solutions from independent runs
- Hyperparameter search — testing multiple configurations in parallel
- Multi-model pipelines — running different models on the same problem concurrently
Basic parallel example
import dynex
import dimod
import multiprocessing
from multiprocessing import Queue
from dynex import DynexConfig, ComputeBackend, QPUModel, DynexSampler, BQM
def run_sampler(queue, job_id, model):
    """Run one independent Dynex sampling job and publish its sampleset.

    Args:
        queue: multiprocessing.Queue the resulting sampleset is put on.
        job_id: integer identifying this worker in logs and the job description.
        model: shared Dynex model (built once in the parent) to sample from.
    """
    print(f"Sampler {job_id} started")
    # Each process builds its own config and sampler instance; per-process
    # objects avoid any shared-state concerns between workers.
    config = DynexConfig(
        compute_backend=ComputeBackend.QPU,
        qpu_model=QPUModel.APOLLO_RC1,
    )
    sampler = DynexSampler(
        model,
        config=config,
        logging=False,  # avoid interleaved log output across processes
        description=f"Parallel job {job_id}",
    )
    # QPU limits: num_reads 1-100, annealing_time 10-1000
    sampleset = sampler.sample(num_reads=50, annealing_time=200)
    print(f"Sampler {job_id} finished")
    queue.put(sampleset)
if __name__ == "__main__":
    # Build the model once, share across workers
    bqm = dimod.BinaryQuadraticModel(
        {i: float(i % 3 - 1) for i in range(15)},
        {(i, i + 1): 0.5 for i in range(14)},
        0.0,
        'BINARY',
    )
    config = DynexConfig(compute_backend=ComputeBackend.QPU, qpu_model='apollo_rc1')
    model = BQM(bqm, config=config)

    PARALLEL_INSTANCES = 8
    jobs = []
    result_queues = []

    # Start all parallel samplers
    for i in range(PARALLEL_INSTANCES):
        q = Queue()
        result_queues.append(q)
        p = multiprocessing.Process(target=run_sampler, args=(q, i, model))
        jobs.append(p)
        p.start()

    # Drain each queue BEFORE joining the workers: a child process flushes
    # Queue data through a pipe and will not exit while the pipe buffer is
    # full, so calling join() before get() can deadlock on large samplesets.
    results = []
    for q in result_queues:
        sampleset = q.get()
        results.append(sampleset)
        print(f"Best energy: {sampleset.first.energy:.4f}")

    # All results are consumed, so every worker can now exit cleanly.
    for job in jobs:
        job.join()
Federated learning pattern
In federated learning, each parallel job typically handles a different model or data partition:
import multiprocessing
from multiprocessing import Queue
import dynex
from dynex import DynexConfig, ComputeBackend, DynexSampler, BQM
def train_layer(queue, layer_id, layer_bqm):
    """Train a single layer of a quantum neural network.

    Args:
        queue: multiprocessing.Queue to publish the (layer_id, sample) pair on.
        layer_id: index of the layer, used to reorder results in the parent.
        layer_bqm: the BQM describing this layer's optimization problem.
    """
    config = DynexConfig(compute_backend=ComputeBackend.QPU, qpu_model='apollo_rc1')
    model = BQM(layer_bqm)
    sampler = DynexSampler(model, config=config, logging=False)
    # QPU limits: num_reads 1-100, annealing_time 10-1000
    sampleset = sampler.sample(num_reads=50, annealing_time=200)
    # Tag the result with its layer id so the parent can restore layer order.
    queue.put((layer_id, sampleset.first.sample))
def train_parallel(layer_bqms):
    """Sample every layer BQM in its own process; return weights in layer order.

    Args:
        layer_bqms: sequence of BQMs, one per network layer.

    Returns:
        List of best samples, index-aligned with ``layer_bqms``.
    """
    jobs = []
    queues = []
    for i, bqm in enumerate(layer_bqms):
        q = Queue()
        queues.append(q)
        p = multiprocessing.Process(target=train_layer, args=(q, i, bqm))
        jobs.append(p)
        p.start()

    # Collect results BEFORE join(): a child feeding a multiprocessing.Queue
    # blocks on the underlying pipe until its data is consumed, so joining
    # first can deadlock when the payload exceeds the pipe buffer.
    weights = {}
    for q in queues:
        layer_id, sample = q.get()
        weights[layer_id] = sample

    for job in jobs:
        job.join()

    return [weights[i] for i in range(len(layer_bqms))]
Thread pool for I/O-bound workflows
For I/O-bound workloads — where threads spend most of their time waiting on network responses and the GIL is released during the wait — ThreadPoolExecutor can be used:
from concurrent.futures import ThreadPoolExecutor, as_completed
import dynex
from dynex import DynexConfig, ComputeBackend, DynexSampler, BQM
def run_job(args):
    """Sample one BQM on the GPU backend and return its best energy.

    Args:
        args: (job_id, bqm) tuple — packed so the function works with
              ``executor.submit``/``executor.map`` single-argument calls.

    Returns:
        (job_id, best_energy) tuple.
    """
    job_id, bqm = args
    config = DynexConfig(compute_backend=ComputeBackend.GPU)
    model = BQM(bqm)
    sampler = DynexSampler(model, config=config, logging=False)
    sampleset = sampler.sample(num_reads=500, annealing_time=100)
    return job_id, sampleset.first.energy
bqms = [build_bqm(i) for i in range(4)]  # Your model-building function

with ThreadPoolExecutor(max_workers=4) as executor:
    # Map each future back to its job id; as_completed yields results in
    # finish order, not submission order, so report them as they arrive.
    futures = {executor.submit(run_job, (i, bqm)): i for i, bqm in enumerate(bqms)}
    for future in as_completed(futures):
        job_id, energy = future.result()
        print(f"Job {job_id} best energy: {energy:.4f}")
Use multiprocessing.Process (not threads) for CPU-intensive sampling. Python’s GIL prevents true parallelism with threads for compute-heavy workloads.
- All parallel jobs are submitted to the Dynex network simultaneously — they compete for the same worker pool
- For QPU backends, each parallel job consumes QPU resources independently
- Set logging=False in parallel workers to avoid interleaved output
- Use description to tag jobs for identification in the Dynex dashboard