Speed up ExactSampler
Created by: PhilipVinc
ExactSampler does not really generate Markov chains: all samples are independent, so there is little point in generating them one step at a time.
Since the sampler has to call back into Python to convert integer state labels into actual bit strings, doing this conversion once for the whole batch is more efficient, especially for small networks, where it becomes the bottleneck.
As a bonus, the performance of ExactSampler becomes independent of the number of chains.
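To illustrate the idea, here is a rough sketch (not the actual code in this PR), assuming an indexable Hilbert space and a generic callable log_psi(params, states): every sample is drawn in a single batched categorical draw from the exact distribution, and the integer labels are converted to bit strings with one call into Python instead of one per chain step.

import jax
import jax.numpy as jnp
import numpy as np

# minimal sketch of batched exact sampling (illustrative only)
def exact_sample_batch(log_psi, params, hilbert, n_samples, key):
    # enumerate the full (indexable) Hilbert space once
    all_states = hilbert.all_states()  # shape (n_states, N)
    # exact log-probabilities: log p(x) = 2 Re log psi(x), then normalize
    logp = 2.0 * jnp.real(log_psi(params, all_states))
    logp = logp - jax.scipy.special.logsumexp(logp)
    # draw all sample indices in one batched categorical draw
    idx = jax.random.categorical(key, logp, shape=(n_samples,))
    # convert the integer labels to bit strings with a single Python callback
    return hilbert.numbers_to_states(np.asarray(idx))

Since all samples come from one batched draw, the cost no longer depends on how they are split across chains, which is why the throughput below is the same for 1 and 16 chains. The benchmark below compares the sampling throughput of this branch against master: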
import netket as nk
from tqdm import tqdm

ma = nk.models.RBM(alpha=1, use_visible_bias=True, dtype=float)

def benchmark(L, n_chains, n_samples, n_iters=1000):
    # 1D spin-1/2 chain of length L with periodic boundary conditions
    g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
    hi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes)
    sa = nk.sampler.ExactSampler(hi, n_chains=n_chains)
    vs = nk.variational.MCState(sa, ma, n_samples=n_samples, n_discard=0, seed=66)
    # warm-up call so compilation time is excluded from the timing
    vs.sample()
    for i in tqdm(range(n_iters)):
        vs.sample()
# L = 10
benchmark(10, 16, 1000)
# this pr
# 1000/1000 [00:31<00:00, 32.02it/s]
# master
# 1000/1000 [02:16<00:00, 7.30it/s]
benchmark(10, 1, 1000)
# this pr
# 1000/1000 [00:30<00:00, 34.23it/s]
# master
# 200/200 [01:52<00:00, 1.80it/s]
benchmark(15, 16, 1000)
# this pr
# 1000/1000 [00:48<00:00, 20.58it/s]
# master
# 1000/1000 [03:16<00:00, 5.10it/s]
benchmark(18, 16, 1000, n_iters=100)
# this pr
# 100/100 [00:19<00:00, 5.07it/s]
# master
# 100/100 [00:37<00:00, 2.68it/s]
benchmark(20, 16, 1000, n_iters=100)
# this pr
# 100/100 [01:15<00:00, 1.32it/s]
# master
# 100/100 [02:16<00:00, 1.36s/it]
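To summarize the numbers above: at L = 10 with 16 chains this branch runs at 32.02 it/s versus 7.30 it/s on master (about 4.4x), at L = 15 at 20.58 versus 5.10 it/s (about 4x), at L = 18 at 5.07 versus 2.68 it/s (about 1.9x), and at L = 20 at 1.32 it/s versus 1.36 s/it, i.e. roughly 0.74 it/s (about 1.8x). The single-chain run at L = 10 (34.23 it/s) matches the 16-chain one, consistent with the performance being independent of the number of chains.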