From a735a3eb295bca0e20229db557f9de9eadde2b40 Mon Sep 17 00:00:00 2001
From: jake
Date: Tue, 19 Sep 2017 14:03:11 -0400
Subject: [PATCH] style: delete trailing spaces

---
 main.py                   | 70 ++++++++++++++---------
 rbm.py                    | 12 ++---
 tutorial/ed_tfim1d.py     | 60 ++++++++++++-------------
 tutorial/ising2d.py       | 74 +++++++++++++++----------------
 tutorial/main.py          |  4 +-
 tutorial/plot_observer.py | 70 ++++++++++++++---------
 tutorial/plot_results.py  | 30 ++++++-------
 tutorial/plot_sampler.py  | 52 +++++++++++-----------
 tutorial/rbm.py           | 16 +++----
 tutorial/sampler.py       | 84 +++++++++++++++++------------------
 tutorial/tfim1d.py        | 92 +++++++++++++++++++--------------------
 11 files changed, 282 insertions(+), 282 deletions(-)

diff --git a/main.py b/main.py
index 6e04593..6b0b6c8 100644
--- a/main.py
+++ b/main.py
@@ -9,26 +9,26 @@ import json
 
 def main()
-    
+
     # Initialize the command line parser
     parser = argparse.ArgumentParser()
-    
+
     # Read command line arguments
-    parser.add_argument('command',type=str,help='command to execute')
-    parser.add_argument('-nV',type=int,default=4,help='number of visible nodes')
-    parser.add_argument('-nH',type=int,default=4,help='number of hidden nodes')
-    parser.add_argument('-steps',type=int,default=1000000,help='training steps')
-    parser.add_argument('-lr',type=float,default=1e-3,help='learning rate')
-    parser.add_argument('-bs',type=int,default=100,help='batch size')
-    parser.add_argument('-CD',type=int,default=10,help='steps of contrastive divergence')
-    parser.add_argument('-nC',type=float,default=10,help='number of chains in PCD')
-    
+    parser.add_argument('command',type=str,help='command to execute')
+    parser.add_argument('-nV',type=int,default=4,help='number of visible nodes')
+    parser.add_argument('-nH',type=int,default=4,help='number of hidden nodes')
+    parser.add_argument('-steps',type=int,default=1000000,help='training steps')
+    parser.add_argument('-lr',type=float,default=1e-3,help='learning rate')
+    parser.add_argument('-bs',type=int,default=100,help='batch size')
+    parser.add_argument('-CD',type=int,default=10,help='steps of contrastive divergence')
+    parser.add_argument('-nC',type=float,default=10,help='number of chains in PCD')
+
     # Parse the arguments
     args = parser.parse_args()
-    
+
     if args.command == 'train':
         train(args)
-    
+
     if args.command == 'sample':
         sample(args)
 
@@ -43,7 +43,7 @@ class Ops(object):
     pass
 
 def train(args):
-    
+
     # Simulation parameters
     num_visible = args.nV # number of visible nodes
     num_hidden = args.nH # number of hidden nodes
@@ -57,32 +57,32 @@ def train(args):
     hidden_bias=None # hidden bias
     bcount=0 # counter
     epochs_done=1 # epochs counter
-    
+
     # *************************************************************
     # INSERT HERE THE PATH TO THE TRAINING AND TESTING DATASETS
     trainName = '*******************'
     testName = '*******************'
-    
+
     # Loading the data
     xtrain = np.loadtxt(trainName)
    xtest = np.loadtxt(testName)
-    
+
     ept=np.random.permutation(xtrain) # random permutation of training data
     epv=np.random.permutation(xtest) # random permutation of test data
     iterations_per_epoch = xtrain.shape[0] / bsize # gradient iteration per epoch
 
     # Initialize RBM class
-    rbm = RBM(num_hidden=num_hidden, num_visible=num_visible, weights=weights, visible_bias=visible_bias,hidden_bias=hidden_bias, num_samples=num_samples) 
-    
+    rbm = RBM(num_hidden=num_hidden, num_visible=num_visible, weights=weights, visible_bias=visible_bias,hidden_bias=hidden_bias, num_samples=num_samples)
+
     # Initialize operations and placeholders classes
     ops = Ops()
     placeholders = Placeholders()
     placeholders.visible_samples = tf.placeholder(tf.float32, shape=(None, num_visible), name='v') # placeholder for training data
-    total_iterations = 0 # starts at zero 
+    total_iterations = 0 # starts at zero
     ops.global_step = tf.Variable(total_iterations, name='global_step_count', trainable=False)
-    
+
     # Decaying learning rate
     learning_rate = tf.train.exponential_decay(
         learning_rate_b,
@@ -98,10 +98,10 @@ def train(args):
     ops.lr=learning_rate
     ops.train = optimizer.minimize(cost, global_step=ops.global_step)
     ops.init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
-    
+
     with tf.Session() as sess:
         sess.run(ops.init)
-        
+
         for ii in range(nsteps):
             if bcount*bsize+ bsize>=xtrain.shape[0]:
                 bcount=0
@@ -110,7 +110,7 @@ def train(args):
             batch=ept[ bcount*bsize: bcount*bsize+ bsize,:]
             bcount=bcount+1
             feed_dict = {placeholders.visible_samples: batch}
-            
+
             _, num_steps = sess.run([ops.train, ops.global_step], feed_dict=feed_dict)
 
             if num_steps % iterations_per_epoch == 0:
@@ -119,26 +119,26 @@ def train(args):
                 epochs_done += 1
 
 def sample(args):
-    
+
     num_visible = args.nV # number of visible nodes
     num_hidden = args.nH # number of hidden nodes
-    
+
     # *************************************************************
     # INSERT HERE THE PATH TO THE PARAMETERS FILE
     path_to_params = '*******************'
-    
-    # Load the RBM parameters 
+
+    # Load the RBM parameters
     params = np.load(path_to_params)
     weights = params['weights']
     visible_bias = params['visible_bias']
     hidden_bias = params['hidden_bias']
     hidden_bias=np.reshape(hidden_bias,(hidden_bias.shape[0],1))
     visible_bias=np.reshape(visible_bias,(visible_bias.shape[0],1))
-    
+
     # Sampling parameters
     num_samples=1000 # how many independent chains will be sampled
     gibb_updates=100 # how many gibbs updates per call to the gibbs sampler
-    nbins=1000 # number of calls to the RBM sampler 
+    nbins=1000 # number of calls to the RBM sampler
 
     # Initialize RBM class
     rbm = RBM(num_hidden=num_hidden, num_visible=num_visible, weights=weights, visible_bias=visible_bias,hidden_bias=hidden_bias, num_samples=num_samples)
@@ -146,24 +146,24 @@ def sample(args):
 
     # Initialize tensorflow
     init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
-    
+
     with tf.Session() as sess:
         sess.run(init)
-        
+
         for i in range(nbins):
             print ('bin %d\t' %i)
-            
+
             # Gibbs sampling
             _,samples=sess.run([hsamples,vsamples])
 
 def save_parameters(sess,rbm,epochs):
     weights, visible_bias, hidden_bias = sess.run([rbm.weights, rbm.visible_bias, rbm.hidden_bias])
-    
+
     # *************************************************************
     # INSERT HERE THE PATH TO THE PARAMETERS FILE
     parameter_file_path = '*******************'
-    
+
     np.savez_compressed(parameter_file_path, weights=weights, visible_bias=visible_bias, hidden_bias=hidden_bias, epochs=epochs)

diff --git a/rbm.py b/rbm.py
index 4ed04ac..58c102e 100644
--- a/rbm.py
+++ b/rbm.py
@@ -3,9 +3,9 @@
 import numpy as np
 
 class RBM(object):
-    
+
     ''' Restricted Boltzmann Machine '''
-    
+
     def __init__(self, num_hidden, num_visible, num_samples=128, weights=None, visible_bias=None, hidden_bias=None):
         ''' Constructor '''
         # number of hidden units
@@ -101,7 +101,7 @@ class variables.
         self.hidden_samples = self.hidden_samples.assign(h_samples)
         self.p_of_v = p_of_v
         return self.hidden_samples, v_samples
-    
+
     def energy(self, hidden_samples, visible_samples):
         # type: (tf.Tensor, tf.Tensor) -> tf.Tensor
         """Compute the energy:
@@ -123,8 +123,8 @@ def free_energy(self, visible_samples):
         free_energy = (tf.matmul(visible_samples, self.visible_bias)
                        + tf.reduce_sum(tf.nn.softplus(tf.matmul(visible_samples, self.weights)
                                                       + tf.transpose(self.hidden_bias)), 1, keep_dims=True))
-        return free_energy 
-    
+        return free_energy
+
     def neg_log_likelihood_grad(self, visible_samples, model_samples=None, num_gibbs=2):
         # type: (tf.Tensor, tf.Tensor, int) -> tf.Tensor
@@ -142,7 +142,7 @@ def neg_log_likelihood(self, visible_samples, log_Z):
                        + tf.reduce_sum(tf.nn.softplus(tf.matmul(visible_samples, self.weights)
                                                       + tf.transpose(self.hidden_bias)), 1))
         return -tf.reduce_mean(free_energy - log_Z)
-    
+
     def exact_log_partition_function(self):
         ''' Evaluate the partition function by exact enumerations '''
         with tf.name_scope('exact_log_Z'):

diff --git a/tutorial/ed_tfim1d.py b/tutorial/ed_tfim1d.py
index a3bc1cd..600009f 100755
--- a/tutorial/ed_tfim1d.py
+++ b/tutorial/ed_tfim1d.py
@@ -6,7 +6,7 @@
 Sx = np.array([[0,1.],[1,0]])
 Sz = np.array([[1,0.],[0,-1]])
 
-# Pauli X operator 
+# Pauli X operator
 def sigmaX(L,i):
     ''' Return the many-body operator I x I x .. x Sx x I x .. x I
@@ -34,11 +34,11 @@ def sigmaZ(L,i):
     return reduce(np.kron,OpList)
 
-# Magnetic interaction term 
+# Magnetic interaction term
 def buildMagneticInteraction(i,B,L):
     return B*sigmaX(L,i)
 
-# Ising interaction term 
+# Ising interaction term
 def buildIsingInteraction(i,j,J,L):
     ''' Return the Ising interaction term I x .. x Sz x Sz x .. x I
@@ -56,19 +56,19 @@ def buildIsingInteraction(i,j,J,L):
 
 # Build transverse-field Ising model
 def build1dIsingModel(L,J,B,OBC):
-    
+
     D = 1<<L
[...]
-    
+
     # Spin-spin correlation file
     corr_Name = '../data/tfim1d/observables/ed_tfim1d_L' + str(L) + '_correlations.txt'
     corr_File = open(corr_Name,'w')
-    
+
     # Loop over magnetic field values
     for b in range(1,Bsteps+1):
-        
+
         B = Bmin + b*deltaB
-        print('Magnetic field B = %.2f' % B) 
+        print('Magnetic field B = %.2f' % B)
 
         # Wavefunction file
         psiName = '../data/tfim1d/wavefunctions/wavefunction_tfim1d_L' + str(L)
         psiName += '_B' + str(B) + '.txt'
-        
+
         # Diagonalize the Hamiltonian
         print('diagonalizing...')
         H = build1dIsingModel(L,J,B,True)
         (e,psi) = np.linalg.eigh(H)
         psi0 = np.abs(psi[:,0])
         e0 = e[0]
-        
+
         # Save energy and wavefunction
         obs_File.write('%.1f %.10f ' % (B,e0/float(L)))
         np.savetxt(psiName,psi0)
-        
+
         # Magnetic observables
         print('computing observables...')
         # Compute <|Sz|>
@@ -184,13 +184,13 @@ def main(pars):
         for j in range(L):
             for k in range(1<<L):
[...]
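Note (not part of the patch): every hunk above makes the same mechanical edit, deleting trailing spaces, so a cleanup like this is usually generated by a script rather than by hand, and `git diff --check` will flag changed lines that still carry trailing whitespace. The sketch below is a minimal, hypothetical helper; the script name strip_trailing.py and the example invocation are assumptions, not something taken from this repository:

#!/usr/bin/env python
"""strip_trailing.py -- remove trailing whitespace (hypothetical helper).

A minimal sketch of the transformation every hunk in this patch performs;
it rewrites a file only when at least one line actually changes.
"""
import sys

def strip_trailing(path):
    with open(path) as f:
        lines = f.readlines()
    # rstrip() drops the newline together with any trailing blanks, so the
    # newline is added back explicitly (this also normalizes a missing
    # final newline).
    cleaned = [line.rstrip() + '\n' for line in lines]
    if cleaned != lines:
        with open(path, 'w') as f:
            f.writelines(cleaned)
        return True
    return False

if __name__ == '__main__':
    # e.g.: python strip_trailing.py main.py rbm.py tutorial/*.py
    # (relies on the shell to expand the glob)
    for path in sys.argv[1:]:
        if strip_trailing(path):
            print('cleaned %s' % path)

Run over the eleven files in the diffstat, a helper like this should reproduce a whitespace-only diff of the same shape as the patch above: every deletion paired with an insertion, 282 of each.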