diff --git a/api/src/enums/metric.py b/api/src/enums/metric.py
index 1779afaf..ea1626e6 100644
--- a/api/src/enums/metric.py
+++ b/api/src/enums/metric.py
@@ -1,10 +1,12 @@
 from enum import Enum
 
 class Metric(Enum):
-    SERVER_AVERAGE_CPU = 'server_avg_cpu'
+    SERVER_AVERAGE_CPU_CORES = 'server_avg_cpu_cores'
     SERVER_AVERAGE_MEMORY = 'server_avg_memory'
-    CLIENT_AVERAGE_CPU = 'client_avg_cpu'
+    SERVER_AVERAGE_CPU = 'server_avg_cpu'
+    CLIENT_AVERAGE_CPU_CORES = 'client_avg_cpu_cores'
     CLIENT_AVERAGE_MEMORY = 'client_avg_memory'
+    CLIENT_AVERAGE_CPU = 'client_avg_cpu'
     ERROR_RATE = 'error_rate'
     BYTES_THROUGHPUT_PER_SECOND = 'bytes_throughput_per_sec'
     MESSAGES_THROUGHPUT_PER_SECOND = 'msg_throughput_per_sec'
diff --git a/api/src/services/cadvisor_service.py b/api/src/services/cadvisor_service.py
index 77383386..99016a7a 100644
--- a/api/src/services/cadvisor_service.py
+++ b/api/src/services/cadvisor_service.py
@@ -56,9 +56,10 @@ def get_metrics(metrics_url):
         cur = stats[i]
         prev = stats[i - 1]
         interval_ns = __get_interval(cur['timestamp'], prev['timestamp'])
-        cpu_val = (cur['cpu']['usage']['total'] - prev['cpu']['usage']['total']) / interval_ns
+        cpu_cores_val = (cur['cpu']['usage']['total'] - prev['cpu']['usage']['total']) / interval_ns
+        cpu_val = cpu_cores_val / len(cur['cpu']['usage']['per_cpu_usage'])
         memory_val = cur['memory']['usage'] / ONE_MEGABYTE
-        data[cur['timestamp']] = {"cpu": cpu_val, "memory": memory_val}
+        data[cur['timestamp']] = {"cpu_cores": cpu_cores_val, "cpu": cpu_val, "memory": memory_val}
     return data
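In `get_metrics`, the new `cpu_cores` value is the raw rate of CPU time consumed (in cores: CPU-nanoseconds divided by wall-clock nanoseconds), while `cpu` normalizes that by the number of entries in `per_cpu_usage`. A minimal standalone sketch of the arithmetic, using the two fixture samples from `test_metrics_collection_manager.py` further down (variable names here are illustrative, not the service's own):

```python
# Reproduces the new cadvisor_service calculation on the fixture samples
# taken at 21:07:18 and 21:07:58 in the test data below.
prev_total_ns = 275599403000       # cumulative CPU time at the earlier sample, ns
cur_total_ns = 275599566000        # cumulative CPU time at the later sample, ns
interval_ns = 40018308600          # ~40.02 s between the two samples, in ns
per_cpu_usage = [1, 1, 1, 1, 1, 1] # six visible CPUs in the fixture

cpu_cores = (cur_total_ns - prev_total_ns) / interval_ns  # ~4.0732e-06 cores
cpu = cpu_cores / len(per_cpu_usage)                      # ~6.7886e-07, normalized by core count
print(cpu_cores, cpu)
```

These two numbers are exactly the `cpu_cores` and `cpu` values in the `expected_*_metrics_collector_data` fixtures updated below.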
diff --git a/api/src/services/metrics_service.py b/api/src/services/metrics_service.py
index 7ca167bf..a7501a4a 100644
--- a/api/src/services/metrics_service.py
+++ b/api/src/services/metrics_service.py
@@ -1,5 +1,3 @@
-import logging
-
 from flask import current_app
 from src.models.test_run_metric import TestRunMetric
 from src.enums.metric import Metric
@@ -8,16 +6,17 @@
 def create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time):
-    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
-    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
+    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU_CORES, Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
+    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU_CORES, Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
     __save_throughput_metrics(Metric.MESSAGES_THROUGHPUT_PER_SECOND, Metric.BYTES_THROUGHPUT_PER_SECOND,
                               start_time, end_time, requests_size, test_run)
 
 
-def __save_resources_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
-    cpu, memory = __calculate_average(metrics, test_run.start_time)
-    __save_metric_to_db(test_run, cpu_metric_name, cpu)
+def __save_resources_metrics(cpu_cores_metric_name, cpu_metric_name, memory_metric_name, metrics, test_run):
+    cpu_cores, cpu, memory = __calculate_average_usage(metrics, test_run.start_time)
+    __save_metric_to_db(test_run, cpu_cores_metric_name, cpu_cores)
     __save_metric_to_db(test_run, memory_metric_name, memory)
+    __save_metric_to_db(test_run, cpu_metric_name, cpu)
 
 
 def __save_throughput_metrics(requests_metric_name, bytes_metric_name, start_time, end_time, requests_size, test_run):
@@ -35,23 +34,24 @@ def __save_metric_to_db(test_run, metric_name, metric_value):
     current_app.database_manager.create(test_run_metric)
 
 
-def __calculate_average(metrics, start_time):
-    cpu, memory = 0, 0
+def __calculate_average_usage(metrics, start_time):
+    cpu_cores, cpu, memory = 0, 0, 0
     counter = 0
     for ts, value in metrics.items():
         if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
+            cpu_cores += value["cpu_cores"]
             cpu += value["cpu"]
             memory += value["memory"]
             counter += 1
     if counter == 0:
-        return 0, 0
-    return round(cpu/counter, 2), round(memory/counter, 0)
+        return 0, 0, 0
+    return round(cpu_cores/counter, 2), round(cpu/counter, 2), round(memory/counter, 0)
 
 
 def __calculate_throughput(iterations, start_time, end_time, requests_size):
     seconds = (end_time - start_time).total_seconds()
     request_throughput = 0 if seconds == 0 else iterations / seconds
     bytes_throughput = 0 if seconds == 0 or requests_size is None else int(requests_size) / seconds
-    return round(request_throughput, 0), round(bytes_throughput, 0)
+    return round(request_throughput, 2), round(bytes_throughput, 0)
diff --git a/api/src/utils/test_suite_serializer.py b/api/src/utils/test_suite_serializer.py
index 7168049a..4d5d40fb 100644
--- a/api/src/utils/test_suite_serializer.py
+++ b/api/src/utils/test_suite_serializer.py
@@ -30,7 +30,7 @@ def __get_test_runs_metrics(test_runs):
     test_runs_list = []
     for test_run in test_runs:
         metrics = test_run.test_run_metrics
-        cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
+        cpu_cores_avg, cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
         request_throughput, bytes_throughput = get_throughput_metrics(metrics)
         results = {
             "id": test_run.id,
@@ -38,6 +38,7 @@ def __get_test_runs_metrics(test_runs):
             "iterations": test_run.iterations,
             "message_size": test_run.message_size,
             "results": {
+                "average_cpu_cores": round(cpu_cores_avg, 2),
                 "average_cpu": round(cpu_avg, 2),
                 "average_memory": int(memory_avg),
                 "request_throughput": int(request_throughput),
@@ -49,15 +50,17 @@
 def __calculate_cpu_memory_avg(test_run_metrics):
-    cpu_avg, memory_avg = 0.00, 0
+    cpu_cores_avg, cpu_avg, memory_avg = 0.00, 0.00, 0
     for metric in test_run_metrics:
-        if metric.metric_name in (Metric.CLIENT_AVERAGE_CPU, Metric.SERVER_AVERAGE_CPU):
-            cpu_avg += metric.value
+        if metric.metric_name in (Metric.CLIENT_AVERAGE_CPU_CORES, Metric.SERVER_AVERAGE_CPU_CORES):
+            cpu_cores_avg += metric.value
         elif metric.metric_name in (Metric.CLIENT_AVERAGE_MEMORY, Metric.SERVER_AVERAGE_MEMORY):
             memory_avg += metric.value
+        elif metric.metric_name in (Metric.CLIENT_AVERAGE_CPU, Metric.SERVER_AVERAGE_CPU):
+            cpu_avg += metric.value
 
-    return cpu_avg, memory_avg
+    return cpu_cores_avg, cpu_avg, memory_avg
 
 
@@ -65,4 +68,4 @@ def get_throughput_metrics(test_run_metrics):
 
 
 def __find_metric(test_run_metrics, metric_name):
-    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
\ No newline at end of file
+    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
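The reshaped `__calculate_average_usage` now averages three series, rounding cores and CPU to two decimals and memory to whole megabytes. A minimal sketch of that rounding against the client fixture values used in `test_analyze_api` below (the service itself also filters samples by `start_time`, omitted here):

```python
# Mirrors __calculate_average_usage's averaging and rounding on the
# client fixture values from test_analyze_api below.
samples = [
    {"cpu_cores": 3.6, "cpu": 10, "memory": 254},
    {"cpu_cores": 3.8, "cpu": 11, "memory": 234},
]
n = len(samples)
cpu_cores = round(sum(s["cpu_cores"] for s in samples) / n, 2)  # 3.7
cpu = round(sum(s["cpu"] for s in samples) / n, 2)              # 10.5
memory = round(sum(s["memory"] for s in samples) / n, 0)        # 244.0
assert (cpu_cores, cpu, memory) == (3.7, 10.5, 244.0)
```

These are exactly the values asserted against `db_call[2]`, `db_call[3]`, and `db_call[4]` in the updated test.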
"123Z":{"cpu":2.7, "memory":156}} +client_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z":{"cpu_cores":3.6, "cpu":10, "memory":254}, str(datetime.now() + timedelta(seconds=36))+ "123Z":{"cpu_cores":3.8, "cpu":11, "memory":234}} +server_metrics = {str(datetime.now() + timedelta(seconds=30))+ "123Z":{"cpu_cores":2.3, "cpu":7.5, "memory":154}, str(datetime.now() + timedelta(seconds=36))+ "123Z":{"cpu_cores":2.7, "cpu":8.5, "memory":156}} metrics = [client_metrics, server_metrics] @@ -65,24 +65,28 @@ def test_analyze(self, mock_parser, mock_start_collecting, mock_stop_collecting, content_type=CONTENT_TYPE) - self.assertEqual(self.app.database_manager.create.call_count, 15)# 1 for the test suite, and 2 for test runs and 6*2(12) for test run metrics + self.assertEqual(self.app.database_manager.create.call_count, 19)# 1 for the test suite, and 2 for test runs and 8*2(16) for test run metrics db_call = self.app.database_manager.create.call_args_list - self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU) + self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU_CORES) self.assertEqual(db_call[2].args[0].value, 3.7) self.assertEqual(db_call[3].args[0].metric_name, Metric.CLIENT_AVERAGE_MEMORY) self.assertEqual(db_call[3].args[0].value, 244.0) - self.assertEqual(db_call[4].args[0].metric_name, Metric.SERVER_AVERAGE_CPU) - self.assertEqual(db_call[4].args[0].value, 2.5) - self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY) - self.assertEqual(db_call[5].args[0].value, 155.0) - self.assertEqual(db_call[6].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND) - self.assertEqual(db_call[6].args[0].value, 8.0) - self.assertEqual(db_call[7].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND) - self.assertEqual(db_call[7].args[0].value, 83.0) - self.assertEqual(db_call[13].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND) - self.assertEqual(db_call[13].args[0].value, 33.0) - self.assertEqual(db_call[14].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND) - self.assertEqual(db_call[14].args[0].value, 167.0) + self.assertEqual(db_call[4].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU) + self.assertEqual(db_call[4].args[0].value, 10.5) + self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_CPU_CORES) + self.assertEqual(db_call[5].args[0].value, 2.5) + self.assertEqual(db_call[6].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY) + self.assertEqual(db_call[6].args[0].value, 155.0) + self.assertEqual(db_call[7].args[0].metric_name, Metric.SERVER_AVERAGE_CPU) + self.assertEqual(db_call[7].args[0].value, 8.0) + self.assertEqual(db_call[8].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND) + self.assertEqual(db_call[8].args[0].value, 8.0) + self.assertEqual(db_call[9].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND) + self.assertEqual(db_call[9].args[0].value, 83.0) + self.assertEqual(db_call[17].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND) + self.assertEqual(db_call[17].args[0].value, 33.0) + self.assertEqual(db_call[18].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND) + self.assertEqual(db_call[18].args[0].value, 167.0) self.assertEqual(response.status_code, 200) # Check the response content diff --git a/api/tests/test_metrics_collection_manager.py b/api/tests/test_metrics_collection_manager.py index bf18297c..f1ede1be 100644 --- a/api/tests/test_metrics_collection_manager.py +++ b/api/tests/test_metrics_collection_manager.py @@ -17,10 +17,10 @@ 
diff --git a/api/tests/test_metrics_collection_manager.py b/api/tests/test_metrics_collection_manager.py
index bf18297c..f1ede1be 100644
--- a/api/tests/test_metrics_collection_manager.py
+++ b/api/tests/test_metrics_collection_manager.py
@@ -17,10 +17,10 @@
 POST_REQUEST = 'requests.post'
 
-docker_response = { "docker":{ "id": "id", "name": "/docker", "aliases": [ "docker", "docker" ], "namespace": "docker", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }}
-k8s_response = { "id": "id", "name": "/kubepods.slice/xxx", "aliases": [ "a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67", "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9422c26d_d44c_4f7c_9901_b05c2d0d908d.slice/cri-containerd-a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67.scope" ], "namespace": "containerd", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }
-expected_curl_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu': 4.073167074816332e-06, 'memory': 36.6484375}}
-expected_nginx_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu': 4.073167074816332e-06, 'memory': 36.6484375}}
+docker_response = { "docker": { "id": "id", "name": "/docker", "aliases": [ "docker", "docker" ], "namespace": "docker", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }}
+k8s_response = { "id": "id", "name": "/kubepods.slice/xxx", "aliases": [ "a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67", "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9422c26d_d44c_4f7c_9901_b05c2d0d908d.slice/cri-containerd-a57b1eb676f6d93426d58ed45e063f76f67d23d1f42bf543d8e851af952d5a67.scope" ], "namespace": "containerd", "spec": { "creation_time": "2023-12-15T01:08:18.235177452Z", "labels": { "io.cri-containerd.kind": "container", "io.kubernetes.container.name": "curl", "io.kubernetes.pod.name": "qujata-curl-5565f95dbc-wf4dt", "io.kubernetes.pod.namespace": "qujata", "io.kubernetes.pod.uid": "9422c26d-d44c-4f7c-9901-b05c2d0d908d" }, "has_cpu": True, "cpu": { "limit": 2, "max_limit": 0, "mask": "0-3", "period": 100000 }, "has_memory": True, "memory": { "limit": 18446744073709551615, "swap_limit": 18446744073709551615 }, "has_hugetlb": False, "has_network": False, "has_processes": True, "processes": { "limit": 19178 }, "has_filesystem": False, "has_diskio": True, "has_custom_metrics": False, "image": "docker.io/qujata/curl:1.0.0" }, "stats": [ { "timestamp": "2023-12-25T21:07:14.471924967Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:18.546007469Z", "cpu": { "usage": { "total": 275599403000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932396000, "system": 81667006000 } }, "memory": { "usage": 38428672 } }, { "timestamp": "2023-12-25T21:07:58.564316069Z", "cpu": { "usage": { "total": 275599566000, "per_cpu_usage": [1, 1, 1, 1, 1, 1], "user": 193932511000, "system": 81667054000 }, "load_average": 0 }, "memory": { "usage": 38428672 } } ] }
+expected_curl_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu_cores': 0.0, 'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu_cores': 4.073167074816332e-06, 'cpu': 6.788611791360554e-07, 'memory': 36.6484375}}
+expected_nginx_metrics_collector_data = {'2023-12-25T21:07:18.546007469Z': {'cpu_cores': 0.0, 'cpu': 0.0, 'memory': 36.6484375}, '2023-12-25T21:07:58.564316069Z': {'cpu_cores': 4.073167074816332e-06, 'cpu': 6.788611791360554e-07, 'memory': 36.6484375}}
 
 
 class TestMetricsService(unittest.TestCase):
@@ -29,7 +29,6 @@ def setUp(self):
         self.client = self.app.test_client()
         load_config(self.app)
         self.app.database_manager = Mock(spec=DatabaseManager)
-
 
 
     def test_collecting_docker(self):
@@ -286,8 +285,6 @@ def test_collecting_k8s_when_cadvisor_pod_not_found(self, mock_log):
         self.assertEqual(mock_post.call_count, 0)
         self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
         self.assertEqual(str(mock_log.call_args_list[1]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
-
-
 
 
 if __name__ == '__main__':
diff --git a/api/tests/test_tests_api.py b/api/tests/test_tests_api.py
index 3b11103d..eff34df8 100644
--- a/api/tests/test_tests_api.py
+++ b/api/tests/test_tests_api.py
@@ -57,9 +57,11 @@ def __test_run(self):
         )
 
     def __test_run_metrics(self):
-        return [TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_CPU, value=3),
+        return [TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_CPU_CORES, value=3),
+                TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_CPU, value=0.3),
                 TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_MEMORY, value=5),
-                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_CPU, value=6),
+                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_CPU_CORES, value=6),
+                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_CPU, value=0.5),
                 TestRunMetric(metric_name=Metric.SERVER_AVERAGE_MEMORY, value=9),
                 TestRunMetric(metric_name=Metric.MESSAGES_THROUGHPUT_PER_SECOND, value=50),
                 TestRunMetric(metric_name=Metric.BYTES_THROUGHPUT_PER_SECOND, value=4500)]
@@ -75,7 +77,7 @@ def test_get_test_suite(self):
         self.app.database_manager.get_by_id.return_value = test_suite
         response = self.client.get(TEST_SUITES_GET_URL)
         result = json.loads(response.data)
-        expected = {'code_release': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpu_architecture': None, 'cpu_clock_speed': None, 'cpu_cores': None, 'node_size': None, 'operating_system': None, 'resource_name': None}, 'id': None, 'name': 'name', 'start_time': None, 'test_runs': [{'algorithm': None, 'id': 1, 'iterations': None, 'message_size': None, 'results': {'average_cpu': 9.0, 'average_memory': 14, 'request_throughput': 50, 'bytes_throughput': 4500}}]}
+        expected = {'code_release': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpu_architecture': None, 'cpu_clock_speed': None, 'cpu_cores': None, 'node_size': None, 'operating_system': None, 'resource_name': None}, 'id': None, 'name': 'name', 'start_time': None, 'test_runs': [{'algorithm': None, 'id': 1, 'iterations': None, 'message_size': None, 'results': {'average_cpu': 0.8, 'average_cpu_cores': 9.0, 'average_memory': 14, 'request_throughput': 50, 'bytes_throughput': 4500}}]}
         self.assertEqual(result, expected)
 
     def test_get_test_suite_return_not_found(self):
diff --git a/mysql/scripts/init-db.sql b/mysql/scripts/init-db.sql
index 6ed5656e..5d3562c6 100644
--- a/mysql/scripts/init-db.sql
+++ b/mysql/scripts/init-db.sql
@@ -49,7 +49,7 @@ CREATE TABLE IF NOT EXISTS test_runs (
 
 CREATE TABLE IF NOT EXISTS test_run_metrics (
     test_run_id INT,
-    metric_name ENUM('server_avg_cpu', 'server_avg_memory', 'client_avg_cpu', 'client_avg_memory', 'error_rate', 'bytes_throughput_per_sec', 'msg_throughput_per_sec', 'avg_tls_handshake_time'),
+    metric_name ENUM('server_avg_cpu_cores', 'server_avg_memory', 'server_avg_cpu', 'client_avg_cpu_cores', 'client_avg_memory', 'client_avg_cpu', 'error_rate', 'bytes_throughput_per_sec', 'msg_throughput_per_sec', 'avg_tls_handshake_time'),
    value DOUBLE,
    PRIMARY KEY (test_run_id, metric_name),
    FOREIGN KEY (test_run_id) REFERENCES test_runs(id) ON DELETE CASCADE
diff --git a/run/docker/docker-compose.yml b/run/docker/docker-compose.yml
index b45e8d3d..55af9134 100644
--- a/run/docker/docker-compose.yml
+++ b/run/docker/docker-compose.yml
@@ -6,7 +6,7 @@ services:
       - "9091:9091"
 
   mysql:
-    image: qujata/mysql:1.1.0
+    image: qujata/mysql:1.2.0
     container_name: qujata-mysql
     environment:
       MYSQL_ROOT_PASSWORD: qujata
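Since this change encodes the same metric list twice, once in the Python `Metric` enum and once in the `metric_name` ENUM in init-db.sql, a small guard test along these lines could catch future drift between the two (hypothetical test, not part of this change):

```python
# Hypothetical guard: every Metric value must appear in the SQL ENUM
# definition (set copied from mysql/scripts/init-db.sql above).
from src.enums.metric import Metric

SQL_ENUM_VALUES = {
    'server_avg_cpu_cores', 'server_avg_memory', 'server_avg_cpu',
    'client_avg_cpu_cores', 'client_avg_memory', 'client_avg_cpu',
    'error_rate', 'bytes_throughput_per_sec', 'msg_throughput_per_sec',
    'avg_tls_handshake_time',
}

def test_metric_enum_matches_db_schema():
    assert {m.value for m in Metric} <= SQL_ENUM_VALUES
```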