Merge pull request #84 from CStephenson970/decorator_fix
Change dnnseparate models to run on CPU by default
karllab41 authored Jun 21, 2017
2 parents 69cffd1 + b2e76f2 commit 255eb69
Showing 4 changed files with 50 additions and 44 deletions.
34 changes: 18 additions & 16 deletions src/dnnseparate/DANmodel.py
@@ -6,7 +6,8 @@
 class DANModel:
     def __init__(self, F=257, num_speakers=251,
                  layer_size=600, embedding_size=40,
-                 nonlinearity='logistic',normalize=False):
+                 nonlinearity='logistic',normalize=False,
+                 device='/cpu:0'):
         """
         Initializes a Deep Attractor Network[i]. Default architecture is the
         same as for the Lab41 model and the deep clustering model.
@@ -23,6 +24,7 @@ def __init__(self, F=257, num_speakers=251,
         nonlinearity: Nonlinearity to use in BLSTM layers (default logistic)
         normalize: Do you normalize vectors coming into the final layer?
                    (default False)
+        device: Which device to run the model on
         """
 
         self.F = F
@@ -34,28 +36,28 @@ def __init__(self, F=257, num_speakers=251,
         self.graph = tf.Graph()
         with self.graph.as_default():
+            with tf.device(device):
+                # Placeholder tensor for the magnitude spectrogram
+                self.S = tf.placeholder("float", [None, None, self.F])
 
-            # Placeholder tensor for the magnitude spectrogram
-            self.S = tf.placeholder("float", [None, None, self.F])
+                # Placeholder tensor for the input data
+                self.X = tf.placeholder("float", [None, None, self.F])
 
-            # Placeholder tensor for the input data
-            self.X = tf.placeholder("float", [None, None, self.F])
+                # Placeholder tensor for the labels/targets
+                self.y = tf.placeholder("float", [None, None, self.F, None])
 
-            # Placeholder tensor for the labels/targets
-            self.y = tf.placeholder("float", [None, None, self.F, None])
+                # Placeholder for the speaker indicies
+                self.I = tf.placeholder(tf.int32, [None,None])
 
-            # Placeholder for the speaker indicies
-            self.I = tf.placeholder(tf.int32, [None,None])
-
-            # Define the speaker vectors to use during training
-            self.speaker_vectors = tf_utils.weight_variable(
+                # Define the speaker vectors to use during training
+                self.speaker_vectors = tf_utils.weight_variable(
                                    [self.num_speakers,self.embedding_size],
                                    tf.sqrt(2/self.embedding_size))
 
-            # Model methods
-            self.network
-            self.cost
-            self.optimizer
+                # Model methods
+                self.network
+                self.cost
+                self.optimizer
 
             # Saver
             self.saver = tf.train.Saver()
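What this buys you in practice: the model constructor now takes a device string, defaulting to '/cpu:0', instead of the placement being hard-wired in the decorator. A minimal usage sketch, assuming TensorFlow 1.x; the import path is an assumption:

from dnnseparate.DANmodel import DANModel  # import path assumed

# Default now builds the whole graph on the CPU
cpu_model = DANModel()

# Pass a device string to build on a GPU instead, assuming one is visible
gpu_model = DANModel(device='/gpu:0')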
33 changes: 18 additions & 15 deletions src/dnnseparate/L41model.py
@@ -6,7 +6,8 @@
 class L41Model:
     def __init__(self, F=257, num_speakers=251,
                  layer_size=600, embedding_size=40,
-                 nonlinearity='logistic',normalize=False):
+                 nonlinearity='logistic',normalize=False,
+                 device='/cpu:0'):
         """
         Initializes Lab41's clustering model. Default architecture comes from
         the parameters used by the best performing model in the paper[1].
@@ -21,8 +22,9 @@ def __init__(self, F=257, num_speakers=251,
         layer_size: Size of BLSTM layers
         embedding_size: Dimension of embedding vector
         nonlinearity: Nonlinearity to use in BLSTM layers (default logistic)
-        normalize: Do you normalize vectors coming into the final layer?
+        normalize: Do you normalize vectors coming into the final layer?
                    (default False)
+        device: Which device to run the model on
         """
@@ -33,26 +35,27 @@ def __init__(self, F=257, num_speakers=251,
         self.normalize = normalize
 
         self.graph = tf.Graph()
-        with self.graph.as_default():
-
-            # Placeholder tensor for the input data
-            self.X = tf.placeholder("float", [None, None, self.F])
+        with self.graph.as_default():
+            with tf.device(device):
+                # Placeholder tensor for the input data
+                self.X = tf.placeholder("float", [None, None, self.F])
 
-            # Placeholder tensor for the labels/targets
-            self.y = tf.placeholder("float", [None, None, self.F, None])
+                # Placeholder tensor for the labels/targets
+                self.y = tf.placeholder("float", [None, None, self.F, None])
 
-            # Placeholder for the speaker indicies
-            self.I = tf.placeholder(tf.int32, [None,None])
+                # Placeholder for the speaker indicies
+                self.I = tf.placeholder(tf.int32, [None,None])
 
-            # Define the speaker vectors to use during training
-            self.speaker_vectors = tf_utils.weight_variable(
+                # Define the speaker vectors to use during training
+                self.speaker_vectors = tf_utils.weight_variable(
                                    [self.num_speakers,self.embedding_size],
                                    tf.sqrt(2/self.embedding_size))
 
-            # Model methods
-            self.network
-            self.cost
-            self.optimizer
+                # Model methods
+                self.network
+                self.cost
+                self.optimizer
 
             # Saver
             self.saver = tf.train.Saver()
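One caveat worth noting: in TensorFlow 1.x, requesting '/gpu:0' on a CPU-only machine raises a placement error when the session runs. A sketch of the usual workaround, assuming you create your own session over the model's graph (allow_soft_placement and log_device_placement are standard tf.ConfigProto fields):

import tensorflow as tf
from dnnseparate.L41model import L41Model  # import path assumed

model = L41Model(device='/gpu:0')

# Fall back to the CPU if the requested device is unavailable,
# and log where each op actually lands
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
sess = tf.Session(graph=model.graph, config=config)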
22 changes: 12 additions & 10 deletions src/dnnseparate/deep_clustering_model.py
@@ -6,7 +6,8 @@
 class DeepClusteringModel:
     def __init__(self, F=257,
                  layer_size=600, embedding_size=40,
-                 nonlinearity='logistic'):
+                 nonlinearity='logistic',
+                 device='/cpu:0'):
         """
         Initializes the deep clustering model from [1]. Defaults correspond to
         the parameters used by the best performing model in the paper.
@@ -21,6 +22,7 @@ def __init__(self, F=257,
         layer_size: Size of BLSTM layers
         embedding_size: Dimension of embedding vector
         nonlinearity: Nonlinearity to use in BLSTM layers
+        device: Which device to run the model on
         """
 
         self.F = F
@@ -30,17 +32,17 @@ def __init__(self, F=257,
         self.graph = tf.Graph()
         with self.graph.as_default():
+            with tf.device(device):
+                # Placeholder tensor for the input data
+                self.X = tf.placeholder("float", [None, None, self.F])
 
-            # Placeholder tensor for the input data
-            self.X = tf.placeholder("float", [None, None, self.F])
+                # Placeholder tensor for the labels/targets
+                self.y = tf.placeholder("float", [None, None, self.F, None])
 
-            # Placeholder tensor for the labels/targets
-            self.y = tf.placeholder("float", [None, None, self.F, None])
-
-            # Model methods
-            self.network
-            self.cost
-            self.optimizer
+                # Model methods
+                self.network
+                self.cost
+                self.optimizer
 
             # Saver
             self.saver = tf.train.Saver()
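All three models now share the same pattern: graph construction wrapped in a with tf.device(device): block. A standalone sketch (not from this repo) showing that ops created inside the context inherit its placement, which you can verify via the Tensor's device attribute:

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    with tf.device('/cpu:0'):
        # Placeholders, like any op, are pinned to the enclosing device
        x = tf.placeholder("float", [None, 257])

print(x.device)  # '/device:CPU:0'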
5 changes: 2 additions & 3 deletions src/utils/tf_utils.py
@@ -17,9 +17,8 @@ def scope_decorator(function):
     @functools.wraps(function)
     def decorator(self):
         if not hasattr(self,attribute):
-            with tf.device("/cpu:0"):
-                with tf.variable_scope(name):
-                    setattr(self,attribute,function(self))
+            with tf.variable_scope(name):
+                setattr(self,attribute,function(self))
         return getattr(self,attribute)
 
     return decorator
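For context: network, cost, and optimizer on the models above are built lazily through this decorator, and the old hard-coded tf.device("/cpu:0") forced their ops onto the CPU no matter what the caller asked for. Removing it lets the ops inherit the device context active at call time, i.e. the models' with tf.device(device): block. A sketch of the full decorator after this change; the lines around the diff hunk, including how attribute and name are derived, are reconstructed as an assumption since the diff shows only the inner body:

import functools
import tensorflow as tf

def scope_decorator(function):
    # Cache attribute and scope name derived from the method name (assumed)
    attribute = '_cache_' + function.__name__
    name = function.__name__

    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            # No hard-coded tf.device here any more: ops inherit whatever
            # device context the caller established, e.g. the model's
            # `with tf.device(device):` block
            with tf.variable_scope(name):
                setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return decorator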
