
New metric for percentage of CPU usage #128

Merged: 4 commits, Feb 14, 2024

This PR adds CLIENT_AVERAGE_CPU_CORES and SERVER_AVERAGE_CPU_CORES, which carry what the old cpu value measured (the average number of cores in use), and redefines CLIENT_AVERAGE_CPU and SERVER_AVERAGE_CPU as usage relative to the whole machine, obtained by dividing the per-core figure by the number of per_cpu_usage entries reported by cAdvisor.
api/src/enums/metric.py (4 additions, 2 deletions)
@@ -1,10 +1,12 @@
 from enum import Enum
 
 class Metric(Enum):
-    SERVER_AVERAGE_CPU = 'server_avg_cpu'
+    SERVER_AVERAGE_CPU_CORES = 'server_avg_cpu_cores'
     SERVER_AVERAGE_MEMORY = 'server_avg_memory'
-    CLIENT_AVERAGE_CPU = 'client_avg_cpu'
+    SERVER_AVERAGE_CPU = 'server_avg_cpu'
+    CLIENT_AVERAGE_CPU_CORES = 'client_avg_cpu_cores'
     CLIENT_AVERAGE_MEMORY = 'client_avg_memory'
+    CLIENT_AVERAGE_CPU = 'client_avg_cpu'
     ERROR_RATE = 'error_rate'
     BYTES_THROUGHPUT_PER_SECOND = 'bytes_throughput_per_sec'
     MESSAGES_THROUGHPUT_PER_SECOND = 'msg_throughput_per_sec'
api/src/services/cadvisor_service.py (3 additions, 2 deletions)
@@ -56,9 +56,10 @@ def get_metrics(metrics_url):
         cur = stats[i]
         prev = stats[i - 1]
         interval_ns = __get_interval(cur['timestamp'], prev['timestamp'])
-        cpu_val = (cur['cpu']['usage']['total'] - prev['cpu']['usage']['total']) / interval_ns
+        cpu_cores_val = (cur['cpu']['usage']['total'] - prev['cpu']['usage']['total']) / interval_ns
+        cpu_val = cpu_cores_val / len(cur['cpu']['usage']['per_cpu_usage'])
         memory_val = cur['memory']['usage'] / ONE_MEGABYTE
-        data[cur['timestamp']] = {"cpu": cpu_val, "memory": memory_val}
+        data[cur['timestamp']] = {"cpu_cores": cpu_cores_val, "cpu": cpu_val, "memory": memory_val}
     return data


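For context: cAdvisor's cpu.usage.total is cumulative CPU time in nanoseconds, so its delta over the sampling interval (also in nanoseconds) is the average number of cores in use, and dividing by the length of per_cpu_usage turns that into usage relative to the whole machine. A minimal standalone sketch, using sample totals from the test fixtures further down; cpu_usage is a hypothetical helper, not project code:

```python
# Hypothetical helper mirroring the calculation above; not part of the project code.
def cpu_usage(cur, prev, interval_ns):
    # Delta of cumulative CPU-nanoseconds over wall-clock nanoseconds = average cores in use.
    cpu_cores = (cur['cpu']['usage']['total'] - prev['cpu']['usage']['total']) / interval_ns
    # Divide by the reported core count to get usage as a fraction of the whole machine.
    return cpu_cores, cpu_cores / len(cur['cpu']['usage']['per_cpu_usage'])

prev = {'cpu': {'usage': {'total': 275599403000, 'per_cpu_usage': [1] * 6}}}
cur = {'cpu': {'usage': {'total': 275599566000, 'per_cpu_usage': [1] * 6}}}
print(cpu_usage(cur, prev, interval_ns=40_018_308_600))  # ~40 s between samples
# -> (~4.073e-06, ~6.789e-07), matching the expected fixtures in the tests below
```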
api/src/services/metrics_service.py (12 additions, 12 deletions)
@@ -1,5 +1,3 @@
-import logging
-
 from flask import current_app
 from src.models.test_run_metric import TestRunMetric
 from src.enums.metric import Metric
@@ -8,16 +6,17 @@


 def create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time):
-    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
-    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
+    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU_CORES, Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
+    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU_CORES, Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
     __save_throughput_metrics(Metric.MESSAGES_THROUGHPUT_PER_SECOND, Metric.BYTES_THROUGHPUT_PER_SECOND, start_time,
                               end_time, requests_size, test_run)
 
 
-def __save_resources_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
-    cpu, memory = __calculate_average(metrics, test_run.start_time)
-    __save_metric_to_db(test_run, cpu_metric_name, cpu)
+def __save_resources_metrics(cpu_cores_metric_name, cpu_metric_name, memory_metric_name, metrics, test_run):
+    cpu_cores, cpu, memory = __calculate_average_usage(metrics, test_run.start_time)
+    __save_metric_to_db(test_run, cpu_cores_metric_name, cpu_cores)
     __save_metric_to_db(test_run, memory_metric_name, memory)
+    __save_metric_to_db(test_run, cpu_metric_name, cpu)
 
 
 def __save_throughput_metrics(requests_metric_name, bytes_metric_name, start_time, end_time, requests_size, test_run):
@@ -35,23 +34,24 @@ def __save_metric_to_db(test_run, metric_name, metric_value):
     current_app.database_manager.create(test_run_metric)
 
 
-def __calculate_average(metrics, start_time):
-    cpu, memory = 0, 0
+def __calculate_average_usage(metrics, start_time):
+    cpu_cores, cpu, memory = 0, 0, 0
     counter = 0
     for ts, value in metrics.items():
         if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
+            cpu_cores += value["cpu_cores"]
             cpu += value["cpu"]
             memory += value["memory"]
             counter += 1
 
     if counter == 0:
-        return 0, 0
-    return round(cpu/counter, 2), round(memory/counter, 0)
+        return 0, 0, 0
+    return round(cpu_cores/counter, 2), round(cpu/counter, 2), round(memory/counter, 0)
 
 
 def __calculate_throughput(iterations, start_time, end_time, requests_size):
     seconds = (end_time - start_time).total_seconds()
     request_throughput = 0 if seconds == 0 else iterations / seconds
     bytes_throughput = 0 if seconds == 0 or requests_size is None else int(requests_size) / seconds
-    return round(request_throughput, 0), round(bytes_throughput, 0)
+    return round(request_throughput, 2), round(bytes_throughput, 0)

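As a sanity check on the new averaging, here is a self-contained sketch of what __calculate_average_usage returns for two samples that both pass the start_time filter (sample values borrowed from the test fixtures below):

```python
# Recompute the three averages for two in-window samples, mirroring
# __calculate_average_usage without the timestamp filtering.
samples = [
    {"cpu_cores": 3.6, "cpu": 10, "memory": 254},
    {"cpu_cores": 3.8, "cpu": 11, "memory": 234},
]
n = len(samples)
print(round(sum(s["cpu_cores"] for s in samples) / n, 2))  # 3.7
print(round(sum(s["cpu"] for s in samples) / n, 2))        # 10.5
print(round(sum(s["memory"] for s in samples) / n, 0))     # 244.0
```

The throughput tweak is independent of the new metric: request throughput is now rounded to two decimals, so a fractional rate such as 8.33 requests/s is no longer flattened to 8.0.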
api/src/utils/test_suite_serializer.py (9 additions, 6 deletions)
@@ -30,14 +30,15 @@ def __get_test_runs_metrics(test_runs):
     test_runs_list = []
     for test_run in test_runs:
         metrics = test_run.test_run_metrics
-        cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
+        cpu_cores_avg, cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
         request_throughput, bytes_throughput = get_throughput_metrics(metrics)
         results = {
             "id": test_run.id,
             "algorithm": test_run.algorithm,
             "iterations": test_run.iterations,
             "message_size": test_run.message_size,
             "results": {
+                "average_cpu_cores": round(cpu_cores_avg, 2),
                 "average_cpu": round(cpu_avg, 2),
                 "average_memory": int(memory_avg),
                 "request_throughput": int(request_throughput),
@@ -49,20 +50,22 @@


 def __calculate_cpu_memory_avg(test_run_metrics):
-    cpu_avg, memory_avg = 0.00, 0
+    cpu_cores_avg, cpu_avg, memory_avg = 0.00, 0.00, 0
 
     for metric in test_run_metrics:
-        if metric.metric_name in (Metric.CLIENT_AVERAGE_CPU, Metric.SERVER_AVERAGE_CPU):
-            cpu_avg += metric.value
+        if metric.metric_name in (Metric.CLIENT_AVERAGE_CPU_CORES, Metric.SERVER_AVERAGE_CPU_CORES):
+            cpu_cores_avg += metric.value
         elif metric.metric_name in (Metric.CLIENT_AVERAGE_MEMORY, Metric.SERVER_AVERAGE_MEMORY):
             memory_avg += metric.value
+        elif metric.metric_name in (Metric.CLIENT_AVERAGE_CPU, Metric.SERVER_AVERAGE_CPU):
+            cpu_avg += metric.value
 
-    return cpu_avg, memory_avg
+    return cpu_cores_avg, cpu_avg, memory_avg
 
 
 def get_throughput_metrics(test_run_metrics):
     return __find_metric(test_run_metrics, Metric.MESSAGES_THROUGHPUT_PER_SECOND), __find_metric(test_run_metrics, Metric.BYTES_THROUGHPUT_PER_SECOND)
 
 
 def __find_metric(test_run_metrics, metric_name):
-    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
\ No newline at end of file
+    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
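With the serializer change, each test run's results block gains average_cpu_cores alongside average_cpu. An illustrative entry, trimmed to the fields visible in the diff (all values hypothetical):

```python
# Hypothetical serialized test run; values are made up for illustration only.
example_entry = {
    "id": 1,
    "algorithm": "kyber512",        # hypothetical algorithm name
    "iterations": 1000,
    "message_size": 1024,
    "results": {
        "average_cpu_cores": 3.7,   # new: average number of cores in use
        "average_cpu": 10.5,        # rounded to 2 decimals, as before
        "average_memory": 244,
        "request_throughput": 8,
    },
}
```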
api/tests/test_analyze_api.py (20 additions, 16 deletions)
@@ -19,8 +19,8 @@
 GET_REQUEST = 'requests.get'
 INVALID_DATA_PROVIDED = "Invalid data provided"
 
-client_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z": {"cpu": 3.6, "memory": 254}, str(datetime.now() + timedelta(seconds=36)) + "123Z": {"cpu": 3.8, "memory": 234}}
-server_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z": {"cpu": 2.3, "memory": 154}, str(datetime.now() + timedelta(seconds=36)) + "123Z": {"cpu": 2.7, "memory": 156}}
+client_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z": {"cpu_cores": 3.6, "cpu": 10, "memory": 254}, str(datetime.now() + timedelta(seconds=36)) + "123Z": {"cpu_cores": 3.8, "cpu": 11, "memory": 234}}
+server_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z": {"cpu_cores": 2.3, "cpu": 7.5, "memory": 154}, str(datetime.now() + timedelta(seconds=36)) + "123Z": {"cpu_cores": 2.7, "cpu": 8.5, "memory": 156}}
 metrics = [client_metrics, server_metrics]


@@ -65,24 +65,28 @@ def test_analyze(self, mock_parser, mock_start_collecting, mock_stop_collecting,
                                     content_type=CONTENT_TYPE)
 
 
-        self.assertEqual(self.app.database_manager.create.call_count, 15)  # 1 for the test suite, 2 for the test runs, and 6*2 (12) for the test run metrics
+        self.assertEqual(self.app.database_manager.create.call_count, 19)  # 1 for the test suite, 2 for the test runs, and 8*2 (16) for the test run metrics
         db_call = self.app.database_manager.create.call_args_list
-        self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU)
+        self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU_CORES)
         self.assertEqual(db_call[2].args[0].value, 3.7)
         self.assertEqual(db_call[3].args[0].metric_name, Metric.CLIENT_AVERAGE_MEMORY)
         self.assertEqual(db_call[3].args[0].value, 244.0)
-        self.assertEqual(db_call[4].args[0].metric_name, Metric.SERVER_AVERAGE_CPU)
-        self.assertEqual(db_call[4].args[0].value, 2.5)
-        self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY)
-        self.assertEqual(db_call[5].args[0].value, 155.0)
-        self.assertEqual(db_call[6].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
-        self.assertEqual(db_call[6].args[0].value, 8.0)
-        self.assertEqual(db_call[7].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
-        self.assertEqual(db_call[7].args[0].value, 83.0)
-        self.assertEqual(db_call[13].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
-        self.assertEqual(db_call[13].args[0].value, 33.0)
-        self.assertEqual(db_call[14].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
-        self.assertEqual(db_call[14].args[0].value, 167.0)
+        self.assertEqual(db_call[4].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU)
+        self.assertEqual(db_call[4].args[0].value, 10.5)
+        self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_CPU_CORES)
+        self.assertEqual(db_call[5].args[0].value, 2.5)
+        self.assertEqual(db_call[6].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY)
+        self.assertEqual(db_call[6].args[0].value, 155.0)
+        self.assertEqual(db_call[7].args[0].metric_name, Metric.SERVER_AVERAGE_CPU)
+        self.assertEqual(db_call[7].args[0].value, 8.0)
+        self.assertEqual(db_call[8].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
+        self.assertEqual(db_call[8].args[0].value, 8.0)
+        self.assertEqual(db_call[9].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
+        self.assertEqual(db_call[9].args[0].value, 83.0)
+        self.assertEqual(db_call[17].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
+        self.assertEqual(db_call[17].args[0].value, 33.0)
+        self.assertEqual(db_call[18].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
+        self.assertEqual(db_call[18].args[0].value, 167.0)

         self.assertEqual(response.status_code, 200)
         # Check the response content
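The new call count of 19 is 1 test-suite row + 2 test-run rows + 8 metrics per run x 2 runs: each run now stores three resource metrics for the client and three for the server (cpu_cores, memory, cpu) plus two throughput metrics, up from six metrics per run before. The asserted values follow directly from the two-sample fixtures at the top of the file:

```python
# Recompute the asserted averages from the client/server fixtures above.
client = [{"cpu_cores": 3.6, "cpu": 10, "memory": 254},
          {"cpu_cores": 3.8, "cpu": 11, "memory": 234}]
server = [{"cpu_cores": 2.3, "cpu": 7.5, "memory": 154},
          {"cpu_cores": 2.7, "cpu": 8.5, "memory": 156}]

def avg(rows, key):
    return round(sum(row[key] for row in rows) / len(rows), 2)

print(avg(client, "cpu_cores"), avg(client, "cpu"), avg(client, "memory"))  # 3.7 10.5 244.0
print(avg(server, "cpu_cores"), avg(server, "cpu"), avg(server, "memory"))  # 2.5 8.0 155.0
```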
api/tests/test_metrics_collection_manager.py (4 additions, 7 deletions)
@@ -17,10 +17,10 @@
 POST_REQUEST = 'requests.post'
 
 
-docker_response = { "docker": { "id": "id", "name": "/docker", "aliases": [ "docker", "docker" ], "namespace": "docker", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }}
-k8s_response = { "id": "id", "name": "/kubepods.slice/xxx", "aliases": [ "a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67", "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9422c26d_d44c_4f7c_9901_b05c2d0d908d.slice/cri-containerd-a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67.scope" ], "namespace": "containerd", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }
-expected_curl_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu': 4.073167074816332e-06, 'memory': 36.6484375}}
-expected_nginx_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu': 4.073167074816332e-06, 'memory': 36.6484375}}
+docker_response = { "docker": { "id": "id", "name": "/docker", "aliases": [ "docker", "docker" ], "namespace": "docker", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }}
+k8s_response = { "id": "id", "name": "/kubepods.slice/xxx", "aliases": [ "a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67", "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9422c26d_d44c_4f7c_9901_b05c2d0d908d.slice/cri-containerd-a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67.scope" ], "namespace": "containerd", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }
+expected_curl_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu_cores': 0.0, 'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu_cores': 4.073167074816332e-06, 'cpu': 6.788611791360554e-07, 'memory': 36.6484375}}
+expected_nginx_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu_cores': 0.0, 'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu_cores': 4.073167074816332e-06, 'cpu': 6.788611791360554e-07, 'memory': 36.6484375}}
 
 class TestMetricsService(unittest.TestCase):
 
@@ -29,7 +29,6 @@ def setUp(self):
         self.client = self.app.test_client()
         load_config(self.app)
         self.app.database_manager = Mock(spec=DatabaseManager)
-
 
 
     def test_collecting_docker(self):
@@ -286,8 +285,6 @@ def test_collecting_k8s_when_cadvisor_pod_not_found(self, mock_log):
         self.assertEqual(mock_post.call_count, 0)
         self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
         self.assertEqual(str(mock_log.call_args_list[1]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
-
-
 
 
 if __name__ == '__main__':
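Two details of the updated fixtures are easy to verify by hand: memory is converted to MiB (assuming ONE_MEGABYTE is 2**20, which the numbers match), and the expected dicts only contain the second and third timestamps because each data point is a delta from its predecessor, with the second sample's delta being zero:

```python
# Standalone recheck of the expected fixture values; not the project code.
ONE_MEGABYTE = 2 ** 20              # assumption: MiB, consistent with 36.6484375
print(38428672 / ONE_MEGABYTE)      # 36.6484375 -> the expected 'memory' value

# Sample 2 repeats total=275599403000, so its delta (and both CPU values) is 0.0;
# sample 1 never appears because a delta needs a preceding sample.
print(275599403000 - 275599403000)  # 0 -> cpu_cores/cpu of 0.0 at 21:07:18
print(275599566000 - 275599403000)  # 163000 ns of CPU time over the ~40 s interval
```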