22
class BinaryBinaryRBM:
    """Restricted Boltzmann Machine with binary visible and binary hidden units."""

    def __init__(self, n_visible, n_hidden, learning_rate=0.1):
        """Set up model dimensions, step size, and initial parameters.

        Args:
            n_visible: number of binary visible units.
            n_hidden: number of binary hidden units.
            learning_rate: step size used by the gradient updates.
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.learning_rate = learning_rate

        # Small zero-mean Gaussian init for the weight matrix; both bias
        # vectors start at zero, as is conventional for RBMs.
        self.weights = np.random.normal(0, 0.01, (n_visible, n_hidden))
        self.visible_bias = np.zeros(n_visible)
        self.hidden_bias = np.zeros(n_hidden)
1714 def sigmoid (self , x ):
18- """
19- Sigmoid activation function.
20- """
15+ # Sigmoid activation function.
2116 return 1 / (1 + np .exp (- x ))
2217
2318 def sample_hidden (self , v ):
24- """
25- Sample hidden units given visible units.
26- """
19+ # Sample hidden units given visible units.
2720 activation = np .dot (v , self .weights ) + self .hidden_bias
2821 prob_h = self .sigmoid (activation )
2922 return np .random .binomial (n = 1 , p = prob_h ), prob_h
3023
3124 def sample_visible (self , h ):
32- """
33- Sample visible units given hidden units.
34- """
25+ # Sample visible units given hidden units.
3526 activation = np .dot (h , self .weights .T ) + self .visible_bias
3627 prob_v = self .sigmoid (activation )
3728 return np .random .binomial (n = 1 , p = prob_v ), prob_v
3829
3930 def contrastive_divergence (self , v0 , k = 1 ):
40- """
41- Contrastive Divergence (CD-k) algorithm for training the RBM.
42- """
31+ # Contrastive Divergence (CD-k) algorithm for training the RBM, Gibbs sampling
4332 # Positive phase
4433 h0 , prob_h0 = self .sample_hidden (v0 )
4534 pos_associations = np .outer (v0 , prob_h0 )
46-
4735 # Gibbs Sampling (k steps)
4836 v_k = v0
4937 for _ in range (k ):
5038 h_k , _ = self .sample_hidden (v_k )
5139 v_k , _ = self .sample_visible (h_k )
52-
5340 # Negative phase
5441 h_k , prob_h_k = self .sample_hidden (v_k )
5542 neg_associations = np .outer (v_k , prob_h_k )
56-
5743 # Update weights and biases
5844 self .weights += self .learning_rate * (pos_associations - neg_associations )
5945 self .visible_bias += self .learning_rate * (v0 - v_k )
6046 self .hidden_bias += self .learning_rate * (prob_h0 - prob_h_k )
6147
6248 def train (self , data , epochs = 1000 , batch_size = 10 ):
63- """
64- Train the RBM using mini-batch gradient descent.
65- """
49+ # Train the RBM using mini-batch gradient descent.
6650 n_samples = data .shape [0 ]
6751 for epoch in range (epochs ):
6852 np .random .shuffle (data )
@@ -75,9 +59,7 @@ def train(self, data, epochs=1000, batch_size=10):
7559 print (f"Epoch { epoch + 1 } /{ epochs } - Reconstruction Error: { error :.4f} " )
7660
7761 def reconstruction_error (self , data ):
78- """
79- Compute reconstruction error for the dataset.
80- """
62+ # Compute reconstruction error for the dataset.
8163 error = 0
8264 for v in data :
8365 _ , prob_h = self .sample_hidden (v )
@@ -86,9 +68,7 @@ def reconstruction_error(self, data):
8668 return error / len (data )
8769
8870 def reconstruct (self , v ):
89- """
90- Reconstruct a visible vector after one pass through hidden units.
91- """
71+ # Reconstruct a visible vector after one pass through hidden units.
9272 _ , prob_h = self .sample_hidden (v )
9373 _ , prob_v = self .sample_visible (prob_h )
9474 return prob_v
0 commit comments