diff --git a/official/vision/evaluation/analyze_model.py b/official/vision/evaluation/analyze_model.py
new file mode 100644
index 00000000..34fdaca0
--- /dev/null
+++ b/official/vision/evaluation/analyze_model.py
@@ -0,0 +1,47 @@
+# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metrics describing a model, such as parameter and activation counts."""
+
+import tensorflow as tf
+
+
+class ModelAnalysis:
+  """Computes the parameter count and activation count of a model."""
+
+  def __init__(self, model):
+    """Initializes the analysis.
+
+    Args:
+      model: A reference to a tf.keras model.
+    """
+    self.model = model
+
+  def get_parameters(self):
+    """Returns the combined count of trainable and non-trainable parameters."""
+    return self.model.count_params()
+
+  def get_num_activations(self):
+    """Returns the total number of activations across the model's layers.
+
+    Only layers with 4-D outputs (batch, height, width, channels) are
+    counted; flattened and dense outputs are skipped.
+    """
+    total_activations = 0
+    for layer in self.model.layers:
+      # Only count layers that produce 4-D feature maps.
+      if len(layer.output_shape) == 4:
+        total_activations += (layer.output_shape[1] * layer.output_shape[2] *
+                              layer.output_shape[3])
+    return total_activations
diff --git a/official/vision/evaluation/analyze_params_test.py b/official/vision/evaluation/analyze_params_test.py
new file mode 100644
index 00000000..34d768fc
--- /dev/null
+++ b/official/vision/evaluation/analyze_params_test.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+import tensorflow.keras as ks
+
+from official.vision.evaluation import analyze_model
+
+
+class AnalyzeParamsTest(tf.test.TestCase):
+
+  def test_model1(self):
+    # TensorFlow model under test.
+    input_shape = (28, 28, 1)
+    tf_model = ks.Sequential()
+    tf_model.add(
+        ks.layers.Conv2D(28, kernel_size=(3, 3), input_shape=input_shape))
+    tf_model.add(ks.layers.MaxPooling2D(pool_size=(2, 2)))
+    # Flattens the 2-D feature maps for the fully connected layers.
+    tf_model.add(ks.layers.Flatten())
+    tf_model.add(ks.layers.Dense(128, activation=tf.nn.relu))
+    tf_model.add(ks.layers.Dropout(0.2))
+    tf_model.add(ks.layers.Dense(10, activation=tf.nn.softmax))
+    ma = analyze_model.ModelAnalysis(tf_model)
+    result = ma.get_parameters()
+    # Conv2D: 28 * (3 * 3 * 1) + 28 = 280 parameters.
+    # Dense(128): (28 * 13 * 13) * 128 + 128 = 605,824 parameters.
+    # Dense(10): 128 * 10 + 10 = 1,290 parameters.
+    expected_result = 607394
+    self.assertEqual(expected_result, result)
+    # Equivalent PyTorch model the TensorFlow model is checked against:
+    # class NeuralNet(nn.Module):
+    #   def __init__(self):
+    #     super(NeuralNet, self).__init__()
+    #     self.conv = nn.Conv2d(1, 28, kernel_size=3)
+    #     self.pool = nn.MaxPool2d(2)
+    #     self.hidden = nn.Linear(28 * 13 * 13, 128)
+    #     self.drop = nn.Dropout(0.2)
+    #     self.out = nn.Linear(128, 10)
+    #     self.act = nn.ReLU()
+    #
+    #   def forward(self, x):
+    #     x = self.act(self.conv(x))    # [batch_size, 28, 26, 26]
+    #     x = self.pool(x)              # [batch_size, 28, 13, 13]
+    #     x = x.view(x.size(0), -1)     # [batch_size, 28 * 13 * 13 = 4732]
+    #     x = self.act(self.hidden(x))  # [batch_size, 128]
+    #     x = self.drop(x)
+    #     x = self.out(x)               # [batch_size, 10]
+    #     return x
+    # pytorch_model = NeuralNet()
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/official/vision/evaluation/iou.py b/official/vision/evaluation/iou.py
index 1dabd4af..b0616b31 100644
--- a/official/vision/evaluation/iou.py
+++ b/official/vision/evaluation/iou.py
@@ -119,6 +119,10 @@ def result(self):
 
     return tf.math.divide_no_nan(true_positives, denominator)
 
+  def get_miou(self):
+    """Computes the mean intersection-over-union over all classes."""
+    return self.result().numpy().mean()
+
   def reset_states(self):
     tf.keras.backend.set_value(
         self.total_cm, np.zeros((self.num_classes, self.num_classes)))
diff --git a/official/vision/evaluation/panoptic_quality_evaluator.py b/official/vision/evaluation/panoptic_quality_evaluator.py
index ebaaa6ce..c95e860e 100644
--- a/official/vision/evaluation/panoptic_quality_evaluator.py
+++ b/official/vision/evaluation/panoptic_quality_evaluator.py
@@ -93,13 +93,27 @@ def reset_states(self):
     self._pq_metric_module.reset()
 
   def result(self):
-    """Evaluates detection results, and reset_states."""
+    """Evaluates detection results without resetting the accumulated state."""
     results = self._pq_metric_module.result(self._is_thing)
-    self.reset_states()
     return results
 
+  def overall_pq(self):
+    """Returns the overall panoptic quality (PQ) across all categories."""
+    results = self._pq_metric_module.result(self._is_thing)
+    return results["All_pq"]
+
+  def overall_sq(self):
+    """Returns the overall segmentation quality (SQ) across all categories."""
+    results = self._pq_metric_module.result(self._is_thing)
+    return results["All_sq"]
+
+  def overall_rq(self):
+    """Returns the overall recognition quality (RQ) across all categories."""
+    results = self._pq_metric_module.result(self._is_thing)
+    return results["All_rq"]
+
   def _convert_to_numpy(self, groundtruths, predictions):
-    """Converts tesnors to numpy arrays."""
+    """Converts tensors to numpy arrays."""
     if groundtruths:
       labels = tf.nest.map_structure(lambda x: x.numpy(), groundtruths)
       numpy_groundtruths = {}
diff --git a/official/vision/evaluation/panoptic_quality_evaluator_test.py b/official/vision/evaluation/panoptic_quality_evaluator_test.py
index b9d1454d..a836b162 100644
--- a/official/vision/evaluation/panoptic_quality_evaluator_test.py
+++ b/official/vision/evaluation/panoptic_quality_evaluator_test.py
@@ -89,6 +89,9 @@ def test_multiple_batches(self):
     self.assertAlmostEqual(results['All_pq'], 0.63177083)
     self.assertAlmostEqual(results['All_rq'], 0.75)
     self.assertAlmostEqual(results['All_sq'], 0.84236111)
+    self.assertAlmostEqual(pq_evaluator.overall_pq(), 0.63177083)
+    self.assertAlmostEqual(pq_evaluator.overall_rq(), 0.75)
+    self.assertAlmostEqual(pq_evaluator.overall_sq(), 0.84236111)
     self.assertEqual(results['All_num_categories'], 1)
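
For reviewers, a minimal usage sketch of the APIs this patch adds. It is not part of the diff: the toy model, the sample labels, and the assumption that the metric class in iou.py is named PerClassIoU are illustrative only, and the snippet assumes TF2 eager execution (get_miou() calls .numpy()).

import tensorflow as tf

from official.vision.evaluation import analyze_model
from official.vision.evaluation import iou

# Small stand-in Keras model (hypothetical, for illustration only).
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),
])

analysis = analyze_model.ModelAnalysis(model)
print(analysis.get_parameters())       # trainable + non-trainable parameters
print(analysis.get_num_activations())  # counts only 4-D feature-map outputs

# get_miou() averages the per-class IoU vector returned by result().
metric = iou.PerClassIoU(num_classes=2)
metric.update_state([0, 1, 1], [0, 1, 0])
print(metric.get_miou())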