main.py (70 changes: 35 additions & 35 deletions)
@@ -9,26 +9,26 @@
import json

def main():

# Initialize the command line parser
parser = argparse.ArgumentParser()

# Read command line arguments
    parser.add_argument('command',type=str,help='command to execute')
    parser.add_argument('-nV',type=int,default=4,help='number of visible nodes')
    parser.add_argument('-nH',type=int,default=4,help='number of hidden nodes')
    parser.add_argument('-steps',type=int,default=1000000,help='training steps')
    parser.add_argument('-lr',type=float,default=1e-3,help='learning rate')
    parser.add_argument('-bs',type=int,default=100,help='batch size')
    parser.add_argument('-CD',type=int,default=10,help='steps of contrastive divergence')
    parser.add_argument('-nC',type=int,default=10,help='number of chains in PCD')

# Parse the arguments
args = parser.parse_args()

if args.command == 'train':
train(args)

    elif args.command == 'sample':
sample(args)
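
For reference, a hypothetical invocation of the CLI defined above (the flag names come from the parser; the values are illustrative only, not defaults from the PR):

python main.py train -nV 10 -nH 10 -steps 100000 -lr 1e-3 -bs 100 -CD 10
python main.py sample -nV 10 -nH 10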

@@ -43,7 +43,7 @@ class Ops(object):
pass

def train(args):

# Simulation parameters
num_visible = args.nV # number of visible nodes
num_hidden = args.nH # number of hidden nodes
@@ -57,32 +57,32 @@ def train(args):
hidden_bias=None # hidden bias
bcount=0 # counter
epochs_done=1 # epochs counter

# *************************************************************
# INSERT HERE THE PATH TO THE TRAINING AND TESTING DATASETS
trainName = '*******************'
testName = '*******************'

# Loading the data
xtrain = np.loadtxt(trainName)
xtest = np.loadtxt(testName)

ept=np.random.permutation(xtrain) # random permutation of training data
epv=np.random.permutation(xtest) # random permutation of test data
    iterations_per_epoch = xtrain.shape[0] // bsize # gradient iterations per epoch (integer division, used with % below)

# Initialize RBM class
    rbm = RBM(num_hidden=num_hidden, num_visible=num_visible, weights=weights, visible_bias=visible_bias,hidden_bias=hidden_bias, num_samples=num_samples)

# Initialize operations and placeholders classes
ops = Ops()
placeholders = Placeholders()

placeholders.visible_samples = tf.placeholder(tf.float32, shape=(None, num_visible), name='v') # placeholder for training data

    total_iterations = 0 # starts at zero
ops.global_step = tf.Variable(total_iterations, name='global_step_count', trainable=False)

# Decaying learning rate
learning_rate = tf.train.exponential_decay(
learning_rate_b,
@@ -98,10 +98,10 @@ def train(args):
ops.lr=learning_rate
ops.train = optimizer.minimize(cost, global_step=ops.global_step)
ops.init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())

with tf.Session() as sess:
sess.run(ops.init)

for ii in range(nsteps):
if bcount*bsize+ bsize>=xtrain.shape[0]:
bcount=0
@@ -110,7 +110,7 @@ def train(args):
batch=ept[ bcount*bsize: bcount*bsize+ bsize,:]
bcount=bcount+1
feed_dict = {placeholders.visible_samples: batch}

_, num_steps = sess.run([ops.train, ops.global_step], feed_dict=feed_dict)

if num_steps % iterations_per_epoch == 0:
@@ -119,51 +119,51 @@
epochs_done += 1

def sample(args):

num_visible = args.nV # number of visible nodes
num_hidden = args.nH # number of hidden nodes

# *************************************************************
# INSERT HERE THE PATH TO THE PARAMETERS FILE
path_to_params = '*******************'

    # Load the RBM parameters
params = np.load(path_to_params)
weights = params['weights']
visible_bias = params['visible_bias']
hidden_bias = params['hidden_bias']
hidden_bias=np.reshape(hidden_bias,(hidden_bias.shape[0],1))
visible_bias=np.reshape(visible_bias,(visible_bias.shape[0],1))

# Sampling parameters
num_samples=1000 # how many independent chains will be sampled
gibb_updates=100 # how many gibbs updates per call to the gibbs sampler
    nbins=1000 # number of calls to the RBM sampler

# Initialize RBM class
rbm = RBM(num_hidden=num_hidden, num_visible=num_visible, weights=weights, visible_bias=visible_bias,hidden_bias=hidden_bias, num_samples=num_samples)
hsamples,vsamples=rbm.stochastic_maximum_likelihood(gibb_updates)

# Initialize tensorflow
init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())

with tf.Session() as sess:
sess.run(init)

for i in range(nbins):
print ('bin %d\t' %i)

# Gibbs sampling
_,samples=sess.run([hsamples,vsamples])


def save_parameters(sess,rbm,epochs):
weights, visible_bias, hidden_bias = sess.run([rbm.weights, rbm.visible_bias, rbm.hidden_bias])

# *************************************************************
# INSERT HERE THE PATH TO THE PARAMETERS FILE
parameter_file_path = '*******************'

np.savez_compressed(parameter_file_path, weights=weights, visible_bias=visible_bias, hidden_bias=hidden_bias,
epochs=epochs)

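A minimal sketch of the parameter round trip implied by save_parameters() and sample() above; the file name is hypothetical and the arrays are stand-ins for trained values:

import numpy as np

# Write a parameter file with the same keys save_parameters() uses
params_path = 'rbm_params' # hypothetical name; np.savez_compressed appends .npz
weights = np.random.randn(4, 4) # stand-in for the trained (nV x nH) weights
visible_bias = np.zeros((4, 1))
hidden_bias = np.zeros((4, 1))
np.savez_compressed(params_path, weights=weights, visible_bias=visible_bias,
                    hidden_bias=hidden_bias, epochs=10)

# Read it back the way sample() does
params = np.load(params_path + '.npz')
print(params['weights'].shape, int(params['epochs']))
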
rbm.py (12 changes: 6 additions & 6 deletions)
@@ -3,9 +3,9 @@
import numpy as np

class RBM(object):

''' Restricted Boltzmann Machine '''

def __init__(self, num_hidden, num_visible, num_samples=128, weights=None, visible_bias=None, hidden_bias=None):
''' Constructor '''
# number of hidden units
@@ -101,7 +101,7 @@ class variables.
self.hidden_samples = self.hidden_samples.assign(h_samples)
self.p_of_v = p_of_v
return self.hidden_samples, v_samples

def energy(self, hidden_samples, visible_samples):
# type: (tf.Tensor, tf.Tensor) -> tf.Tensor
"""Compute the energy:
@@ -123,8 +123,8 @@ def free_energy(self, visible_samples):
free_energy = (tf.matmul(visible_samples, self.visible_bias)
+ tf.reduce_sum(tf.nn.softplus(tf.matmul(visible_samples, self.weights)
+ tf.transpose(self.hidden_bias)), 1, keep_dims=True))
        return free_energy

def neg_log_likelihood_grad(self, visible_samples, model_samples=None, num_gibbs=2):
# type: (tf.Tensor, tf.Tensor, int) -> tf.Tensor

@@ -142,7 +142,7 @@ def neg_log_likelihood(self, visible_samples, log_Z):
+ tf.reduce_sum(tf.nn.softplus(tf.matmul(visible_samples, self.weights)
+ tf.transpose(self.hidden_bias)), 1))
return -tf.reduce_mean(free_energy - log_Z)

def exact_log_partition_function(self):
''' Evaluate the partition function by exact enumerations '''
with tf.name_scope('exact_log_Z'):
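Written out, the identities implemented by free_energy, neg_log_likelihood, and exact_log_partition_function for a binary RBM (W the weight matrix, b_v and b_h the visible and hidden biases; this is the standard form, stated here for reference):

\mathcal{F}(v) = v^{\top} b_v + \sum_j \mathrm{softplus}\big((v^{\top} W + b_h^{\top})_j\big) = \log \sum_h e^{-E(v,h)}, \qquad \mathrm{softplus}(x) = \log(1 + e^x)

\mathrm{NLL} = -\frac{1}{N} \sum_{v \in \mathcal{D}} \big(\mathcal{F}(v) - \log Z\big)

so p(v) = e^{\mathcal{F}(v)}/Z, and exact_log_partition_function recovers \log Z = \log \sum_v e^{\mathcal{F}(v)} by enumerating all 2^{num_visible} visible configurations.
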
tutorial/ed_tfim1d.py (60 changes: 30 additions & 30 deletions)
@@ -6,7 +6,7 @@
Sx = np.array([[0,1.],[1,0]])
Sz = np.array([[1,0.],[0,-1]])

# Pauli X operator
def sigmaX(L,i):
''' Return the many-body operator
I x I x .. x Sx x I x .. x I
@@ -34,11 +34,11 @@ def sigmaZ(L,i):

return reduce(np.kron,OpList)

# Magnetic interaction term
def buildMagneticInteraction(i,B,L):
return B*sigmaX(L,i)

# Ising interaction term
def buildIsingInteraction(i,j,J,L):
''' Return the Ising interaction term
I x .. x Sz x Sz x .. x I
@@ -56,37 +56,37 @@ def buildIsingInteraction(i,j,J,L):

# Build transverse-field Ising model
def build1dIsingModel(L,J,B,PBC):
    ''' Return the full Hamiltonian '''
    D = 1<<L # dimension of Hilbert space
    Ham = np.zeros((D,D))
    for i in range(L-1):
        Ham = Ham - buildIsingInteraction(i,i+1,J,L)
        Ham = Ham - buildMagneticInteraction(i,B,L)
    Ham = Ham - buildMagneticInteraction(L-1,B,L)

    # Periodic boundary conditions: add the bond closing the chain
    if PBC:
        Ham = Ham - buildIsingInteraction(0,L-1,J,L)

    return Ham

# Generate the training dataset
def buildDataset(L,B,psi,Nsamples):

D = 1<<L
config = np.zeros((D,L))
    psi2 = np.zeros((D))

# Build all spin states and psi^2
for i in range(D):
state = (bin(i)[2:].zfill(L)).split()
for j in range(L):
config[i,j] = int(state[0][j])
        psi2[i] = psi[i]**2

config_index = range(D)

# Generate the trainset
index_samples = np.random.choice(config_index,
Nsamples,
@@ -99,7 +99,7 @@ def buildDataset(L,B,psi,Nsamples):
dataFile.write("%d " % config[index_samples[i]][j])
dataFile.write("\n")
dataFile.close()

# Generate the testset
index_samples = np.random.choice(config_index,
Nsamples//10,
@@ -114,16 +114,16 @@ def buildDataset(L,B,psi,Nsamples):

# MAIN
def main(pars):

print ('\n--------------------------------\n')
print ('EXACT DIAGONALIZATION OF THE 1d-TFIM\n')

# Parameters
    L = pars.L # number of spins
    B = pars.B # magnetic field
    J = pars.J # Ising interaction strength
Nsamples = 10000 # train dataset size

print ('Number of spins L = %d' % L)
print ('Ising interaction J = %.1f' % J)
print ('\n\n')
@@ -133,38 +133,38 @@ def main(pars):
Bmax = 2.0
Bsteps = 10
deltaB = (Bmax-Bmin)/float(Bsteps)

# Observables file
obs_Name = '../data/tfim1d/observables/ed_tfim1d_L' + str(L) + '_observables.txt'
obs_File = open(obs_Name,'w')
obs_File.write('# B E ') # write header
obs_File.write('<|Sz|> <Sx>\n') # write header

# Spin-spin correlation file
corr_Name = '../data/tfim1d/observables/ed_tfim1d_L' + str(L) + '_correlations.txt'
corr_File = open(corr_Name,'w')

# Loop over magnetic field values
for b in range(1,Bsteps+1):

B = Bmin + b*deltaB
        print('Magnetic field B = %.2f' % B)

# Wavefunction file
psiName = '../data/tfim1d/wavefunctions/wavefunction_tfim1d_L' + str(L)
psiName += '_B' + str(B) + '.txt'

# Diagonalize the Hamiltonian
print('diagonalizing...')
H = build1dIsingModel(L,J,B,True)
(e,psi) = np.linalg.eigh(H)
psi0 = np.abs(psi[:,0])
e0 = e[0]

# Save energy and wavefunction
obs_File.write('%.1f %.10f ' % (B,e0/float(L)))
np.savetxt(psiName,psi0)

# Magnetic observables
print('computing observables...')
# Compute <|Sz|>
@@ -184,21 +184,21 @@ def main(pars):
for j in range(L):
for k in range(1<<L):
config = (bin(k)[2:].zfill(L)).split()
                SzSz[i,j] += (psi0[k]**2)*(2*float(config[0][i])-1)*(2*float(config[0][j])-1)
corr_File.write('%.10f ' % SzSz[i,j])
corr_File.write('\n')

# Generate the training datasets
print('generating the dataset...')
        buildDataset(L,B,psi0,Nsamples)
print('\n')


#---------------------------------------------------------------


if __name__ == "__main__":

# Read arguments from command line
parser = argparse.ArgumentParser()
parser.add_argument('-L',type=int,default=10)
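A quick sanity check of build1dIsingModel (a sketch, with values chosen for illustration): for L = 2 and the periodic term disabled, H = -J Sz(x)Sz - B (Sx(x)I + I(x)Sx), whose ground-state energy is -sqrt(J^2 + 4B^2). Note that on Python 3 the script also needs from functools import reduce for the np.kron reductions in sigmaX and sigmaZ.

import numpy as np

Sx = np.array([[0, 1.], [1, 0]])
Sz = np.array([[1, 0.], [0, -1]])
I2 = np.eye(2)

# Two-site open chain: H = -J Sz(x)Sz - B (Sx(x)I + I(x)Sx)
J, B = 1.0, 1.0
H = -J * np.kron(Sz, Sz) - B * (np.kron(Sx, I2) + np.kron(I2, Sx))
e0 = np.linalg.eigvalsh(H)[0]
print(e0, -np.sqrt(J**2 + 4*B**2)) # both approximately -2.2360679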