Keep partial end batch #88

Open · wants to merge 1 commit into master
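The pattern is the same in every file below: the old code computed the number of minibatches with Python 2 integer division, which floors and therefore silently skips any samples left over after the last full batch; the new code rounds up with numpy.ceil so the partial end batch is visited too. A minimal sketch of the difference (the 1,000-sample count is hypothetical; batch_size = 600 matches logistic_cg.py):

    import numpy

    n_samples = 1000   # hypothetical dataset size
    batch_size = 600   # the value logistic_cg.py uses

    # Old behaviour: floor division (what "/" does for ints under Python 2)
    # counts only full batches, so the trailing 400 samples are never visited.
    n_batches_old = n_samples // batch_size                          # == 1

    # New behaviour: ceil division also counts the partial end batch.
    n_batches_new = int(numpy.ceil(n_samples / float(batch_size)))   # == 2

An integer-only equivalent, -(-n_samples // batch_size), would avoid the float round-trip, but the diffs below follow the tutorials' existing numpy idiom.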
7 changes: 4 additions & 3 deletions code/DBN.py
@@ -158,6 +158,7 @@ def pretraining_functions(self, train_set_x, batch_size, k):

         # number of batches
-        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+        n_batches = train_set_x.get_value(borrow=True).shape[0]
+        n_batches = int(numpy.ceil(n_batches / float(batch_size)))
         # begining of a batch, given `index`
         batch_begin = index * batch_size
         # ending of a batch given `index`
@@ -211,9 +212,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+        n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

         index = T.lscalar('index')  # index to a [mini]batch

6 changes: 3 additions & 3 deletions code/SdA.py
@@ -254,9 +254,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+        n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

         index = T.lscalar('index')  # index to a [mini]batch

@@ -357,7 +357,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     # compute number of minibatches for training, validation and testing
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

     # numpy random generator
     # start-snippet-3
3 changes: 2 additions & 1 deletion code/cA.py
@@ -246,7 +246,8 @@ def test_cA(learning_rate=0.01, training_epochs=20,
     train_set_x, train_set_y = datasets[0]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

     # allocate symbolic variables for the data
     index = T.lscalar()    # index to a [mini]batch
6 changes: 3 additions & 3 deletions code/convolutional_mlp.py
@@ -142,9 +142,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
     n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
     n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
-    n_valid_batches /= batch_size
-    n_test_batches /= batch_size
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

     # allocate symbolic variables for the data
     index = T.lscalar()  # index to a [mini]batch
3 changes: 2 additions & 1 deletion code/dA.py
@@ -280,7 +280,8 @@ def test_dA(learning_rate=0.1, training_epochs=15,
     train_set_x, train_set_y = datasets[0]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

     # start-snippet-2
     # allocate symbolic variables for the data
9 changes: 6 additions & 3 deletions code/logistic_cg.py
@@ -162,9 +162,12 @@ def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):

     batch_size = 600    # size of the minibatch

-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

     n_in = 28 * 28    # number of input units
     n_out = 10        # number of output units
9 changes: 6 additions & 3 deletions code/logistic_sgd.py
@@ -273,9 +273,12 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
     test_set_x, test_set_y = datasets[2]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

     ######################
     # BUILD ACTUAL MODEL #
9 changes: 6 additions & 3 deletions code/mlp.py
@@ -228,9 +228,12 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
     test_set_x, test_set_y = datasets[2]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

     ######################
     # BUILD ACTUAL MODEL #
3 changes: 2 additions & 1 deletion code/rbm.py
@@ -384,7 +384,8 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
     test_set_x, test_set_y = datasets[2]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

     # allocate symbolic variables for the data
     index = T.lscalar()    # index to a [mini]batch
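Keeping the extra batch index is safe with the slicing these scripts use, x[index * batch_size: (index + 1) * batch_size], because Python and NumPy clamp a slice end that runs past the array, so the final batch simply comes back smaller rather than raising an error. A minimal sketch with a hypothetical 10-sample array:

    import numpy

    data = numpy.arange(10)   # hypothetical 10-sample dataset
    batch_size = 4
    n_batches = int(numpy.ceil(len(data) / float(batch_size)))   # == 3

    for index in range(n_batches):
        batch = data[index * batch_size: (index + 1) * batch_size]
        print(index, len(batch))   # batch sizes: 4, 4, 2

One side effect to be aware of: loops that average per-batch scores, as the tutorials' early-stopping code does with numpy.mean over per-batch errors, now give the smaller final batch the same weight as a full one. The bias is usually negligible at these batch sizes, but weighting each batch by its size would remove it.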