Skip to content
This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

Commit

Permalink
Merge branch 'main' of https://github.com/att/qujata into api_prefix
Browse files Browse the repository at this point in the history
# Conflicts:
#	portal/mock-server/src/router.ts
#	portal/src/setupProxy.js
  • Loading branch information
iadibar committed Feb 14, 2024
2 parents 1ca6b0c + c6c470e commit 3ca0ddf
Show file tree
Hide file tree
Showing 158 changed files with 2,810 additions and 674 deletions.
40 changes: 40 additions & 0 deletions .github/workflows/docker-image.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
name: Build and Deploy to GitHub Pages

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Build Docker image
        run: docker build ./portal --file ./portal/Dockerfile --tag my-website-image

      - name: Run Docker container
        run: |
          docker run --name my-website-container -d my-website-image
          # Wait a few seconds to ensure the web server inside the container is fully up and running
          sleep 10

      - name: Copy static content from Docker container
        run: |
          mkdir -p static-content
          docker cp my-website-container:/usr/share/nginx/html/qujata ./static-content

      - name: Stop and remove Docker container
        run: |
          docker stop my-website-container
          docker rm my-website-container

      - name: Deploy to GitHub Pages
        # Publish only from pushes to main. PR runs still build/copy as a smoke
        # test, but must not deploy — and fork PRs' GITHUB_TOKEN cannot push anyway.
        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./static-content/qujata
1 change: 1 addition & 0 deletions api/.env
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ PROTOCOL=TLS 1.3
DEFAULT_GROUPS=prime256v1:secp384r1:frodo640aes:frodo640shake:frodo976aes:frodo976shake:frodo1344aes:frodo1344shake:kyber512:p256_kyber512:kyber768:p384_kyber768:x25519_kyber768:kyber1024:bikel1:bikel3:bikel5:hqc128:hqc192:hqc256
REQUEST_TIMEOUT=900
ITERATIONS_OPTIONS=100:500:1000:2000:5000:10000:50000
MESSAGE_SIZES_OPTIONS=0:1:2:100:1024:102400:204800:1048576:2097152:10485760
ENVIRONMENT=docker # value must be docker or kubernetes
LOG_LEVEL=INFO # value must be one of DEBUG, INFO, WARNING, ERROR, CRITICAL

Expand Down
5 changes: 4 additions & 1 deletion api/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,11 @@ python3 -m src.main
curl --location 'http://localhost:3020/qujata-api/analyze' \
--header 'Content-Type: application/json' \
--data '{
"experimentName": "name",
"description" : "test description",
"algorithms": ["kyber512"],
"iterationsCount": 5
"iterationsCount": [5],
"messageSizes": [10]
}'
```

Expand Down
5 changes: 4 additions & 1 deletion api/config/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ def load_config(app):
'request_timeout': os.environ.get('REQUEST_TIMEOUT', 3600),
'code_release': os.environ.get('CODE_RELEASE'),
'protocol': os.environ.get('PROTOCOL'),
'iterations_options': list(map(int, os.environ.get('ITERATIONS_OPTIONS', "100:500:1000:2000:5000:10000:50000").split(':')))
'iterations_options': list(map(int, os.environ.get('ITERATIONS_OPTIONS', "100:500:1000:2000:5000:10000:50000").split(':'))),
'message_sizes_options': list(map(int, os.environ.get('MESSAGE_SIZES_OPTIONS', "0:1:2:100:1024:102400:204800:1048576:2097152:10485760").split(':')))
})


Expand All @@ -31,6 +32,8 @@ def __init__(self, config_dict):
self.code_release = config_dict.get('code_release')
self.protocol = config_dict.get('protocol')
self.iterations_options = config_dict.get('iterations_options')
self.message_sizes_options = config_dict.get('message_sizes_options')


def __validate_environment(self, environment):
valid_environments = [e.value for e in Environment]
Expand Down
4 changes: 4 additions & 0 deletions api/src/api/analyze_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,10 @@ def __validate(data):
for iterations in data['iterationsCount']:
if iterations <= 0:
raise ApiException('The number of iterations should be greater than 0', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
if 'messageSizes' in data:
for message_size in data['messageSizes']:
if message_size < 0:
raise ApiException('The message size should be greater than -1', INVALID_DATA_MESSAGE, HTTP_STATUS_BAD_REQUEST)
if process_is_running:
raise ApiException('The previous test is still running. Please try again in few minutes', 'Current test is still running', HTTP_STATUS_LOCKED)
for algorithm in data['algorithms']:
Expand Down
6 changes: 6 additions & 0 deletions api/src/api/configurations_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,9 @@ def get_algorithms():
def get_iterations_list():
    """Return the configured iteration-count options as JSON: {"iterations": [...]}."""
    options = current_app.configurations.iterations_options
    return {"iterations": options}


@api.route('/message_sizes', methods=['GET'])
@cross_origin(origins=['*'], supports_credentials=True)
def get_message_sizes_list():
    """Return the configured message-size options as JSON: {"message_sizes": [...]}."""
    sizes = current_app.configurations.message_sizes_options
    return {"message_sizes": sizes}

18 changes: 17 additions & 1 deletion api/src/api/tests_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,23 @@ def get_test_run(test_suite_id, test_run_id):
else:
return jsonify({'error': 'Not Found', 'message':'Test run with id: ' + str(test_run_id) +' and test suite id: '+ str(test_suite_id) +' not found'}), HTTP_STATUS_NOT_FOUND

@api.route('/test_suites/delete', methods=['POST'])
@cross_origin(origins=['*'], supports_credentials=True)
def delete_test_suites():
    """Bulk-delete test suites. Expects a JSON body {"ids": [...]}; 204 on success."""
    try:
        payload = request.get_json()
        __validate_delete_test_suites(payload)
        test_suites_service.delete_test_suites(payload['ids'])
    except ApiException as e:
        # Validation (or service-level) failures are reported with their own status code.
        return jsonify({'error': e.error, 'message': e.message}), e.status_code
    return jsonify(), HTTP_STATUS_NO_CONTENT


def __validate_update_test_suite(data):
    """Validate an update-test-suite payload.

    Raises:
        ApiException: 400 when the body is missing or lacks the required
            'name' / 'description' properties.
    """
    # NOTE(review): the original contained this raise twice in a row; the
    # second copy was unreachable dead code and has been removed.
    if not data or 'name' not in data or 'description' not in data:
        raise ApiException('Missing properties, required properties: name, description', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)


def __validate_delete_test_suites(data):
    """Validate a bulk-delete payload; raises ApiException (400) unless it has 'ids'."""
    payload_ok = bool(data) and 'ids' in data
    if not payload_ok:
        raise ApiException('Missing properties, required property: ids', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)
48 changes: 22 additions & 26 deletions api/src/services/analyze_service.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
import os
import uuid
import time
import requests
import logging
Expand All @@ -8,14 +6,8 @@
from datetime import datetime, timedelta
from flask import jsonify, current_app
import src.services.test_suites_service as test_suites_service
import src.services.metrics_service as metrics_service
from src.models.env_info import EnvInfo
from src.models.test_suite import TestSuite
from src.models.test_run import TestRun
import src.utils.metrics_collection_manager as metrics_collection_manager
from src.enums.status import Status
from src.exceptions.exceptions import ApiException



# constants
WAIT_MS = 15
Expand All @@ -26,14 +18,16 @@ def analyze(data):
start_time = int(datetime.timestamp(datetime.now() - timedelta(seconds=60)) * 1000)
iterations_count = data['iterationsCount']
algorithms = data['algorithms']
message_sizes = data['messageSizes'] if 'messageSizes' in data else [0]
first_run = True
for algorithm in algorithms:
for iterations in iterations_count:
if not first_run:
time.sleep(WAIT_MS)
else:
first_run = False
__create_test_run(algorithm, iterations, test_suite.id)
for message_size in message_sizes:
if not first_run:
time.sleep(WAIT_MS)
else:
first_run = False
__create_test_run(algorithm, iterations, message_size, test_suite.id)

# end time is now + 90 sec, to show the graph after the test for sure finished running
end_time = int(datetime.timestamp(datetime.now() + timedelta(seconds=90)) * 1000)
Expand All @@ -45,29 +39,31 @@ def analyze(data):
return jsonify({'test_suite_id': test_suite.id})


def __create_test_run(algorithm, iterations, test_suite_id):
def __create_test_run(algorithm, iterations, message_size, test_suite_id):
start_time = datetime.now()
metrics_service.start_collecting()
status, status_message = __run(algorithm, iterations)
metrics_service.stop_collecting()
metrics_collection_manager.start_collecting()
status, status_message, requests_size = __run(algorithm, iterations, message_size)
metrics_collection_manager.stop_collecting()
end_time = datetime.now()
test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, test_suite_id, status, status_message, *metrics_service.get_metrics())

test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, *metrics_collection_manager.get_metrics())

def __run(algorithm, iterations, message_size):
    """Run one test via the curl service.

    Posts the algorithm/iterations/message-size payload to <curl_url>/curl and
    returns the (status, status_message, requests_size) tuple produced by
    __validate_response.
    """
    # NOTE(review): this span was a diff rendering with both the pre- and
    # post-merge versions interleaved (duplicate def/payload lines); this body
    # keeps the coherent post-merge version.
    logging.debug('Running test for algorithm: %s ', algorithm)
    payload = {
        'algorithm': algorithm,
        'iterationsCount': iterations,
        'messageSize': message_size
    }
    headers = { 'Content-Type': 'application/json' }
    response = requests.post(current_app.configurations.curl_url + "/curl", headers=headers, json=payload, timeout=int(current_app.configurations.request_timeout))

    return __validate_response(response)


def __validate_response(response):
    """Map the curl-service HTTP response to (status, message, requests_size).

    Non-2xx -> (Status.FAILED, serialized response body, 0).
    2xx     -> (Status.SUCCESS, "", body's 'totalRequestSize' — None when absent).
    """
    # NOTE(review): this span was a diff rendering with old and new lines
    # interleaved (duplicate if/return); this body keeps the post-merge version.
    data = response.json()
    if response.status_code < 200 or response.status_code > 299:
        return Status.FAILED, json.dumps(data), 0
    else:
        return Status.SUCCESS, "", data.get('totalRequestSize')
3 changes: 1 addition & 2 deletions api/src/services/cadvisor_service.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import src.services.k8s_service as k8s_service
import requests
import pandas as pd
import logging
from src.enums.environment import Environment

DOCKER_METRICS_URL = "{}/api/v1.3/docker/{}"
Expand All @@ -28,7 +27,7 @@ def get_metrics_url(service_name):
elif __environment == Environment.KUBERNETES.value:
return __build_k8s_metrics_url(service_name)
else:
raise RuntimeError("Invalid Environemnt: " + __environment)
raise RuntimeError("Invalid Environment: " + __environment)


def __build_docker_metrics_url(service_name):
Expand Down
75 changes: 54 additions & 21 deletions api/src/services/metrics_service.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,57 @@
import logging

from flask import current_app
from src.models.test_run_metric import TestRunMetric
from src.utils.metrics_collector import MetricsCollector
import logging
from src.enums.metric import Metric
import pytz
from dateutil import parser


def create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time):
    """Persist all derived metrics for a finished test run: average CPU/memory
    for the client and server sides, plus messages/bytes throughput over the
    start_time..end_time window."""
    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
    __save_throughput_metrics(Metric.MESSAGES_THROUGHPUT_PER_SECOND, Metric.BYTES_THROUGHPUT_PER_SECOND, start_time,
                              end_time, requests_size, test_run)


def __save_resources_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
    # Average the sampled cpu/memory values for this run's window, then store
    # one DB row per metric name.
    cpu, memory = __calculate_average(metrics, test_run.start_time)
    __save_metric_to_db(test_run, cpu_metric_name, cpu)
    __save_metric_to_db(test_run, memory_metric_name, memory)


def __save_throughput_metrics(requests_metric_name, bytes_metric_name, start_time, end_time, requests_size, test_run):
    # Derive requests/sec and bytes/sec from the run window, then store one
    # DB row per metric name.
    requests_throughput, bytes_throughput = __calculate_throughput(test_run.iterations, start_time, end_time, requests_size)
    __save_metric_to_db(test_run, requests_metric_name, requests_throughput)
    __save_metric_to_db(test_run, bytes_metric_name, bytes_throughput)


def __save_metric_to_db(test_run, metric_name, metric_value):
    # Persist a single (test_run, metric_name, value) row via the app's
    # database manager.
    test_run_metric = TestRunMetric(
        test_run_id=test_run.id,
        metric_name=metric_name,
        value=metric_value
    )
    current_app.database_manager.create(test_run_metric)


def __calculate_average(metrics, start_time):
    """Average the 'cpu' and 'memory' samples taken at or after start_time.

    metrics maps timestamp strings to {"cpu": ..., "memory": ...} dicts;
    samples earlier than the run's start are discarded. Returns
    (cpu avg rounded to 2 decimals, memory avg rounded to whole units),
    or (0, 0) when no sample qualifies.
    """
    cpu, memory = 0, 0
    counter = 0
    for ts, value in metrics.items():
        # Compare in UTC; assumes the sample timestamps parse to aware (or
        # UTC-interpretable) datetimes — TODO confirm against the collector.
        if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
            cpu += value["cpu"]
            memory += value["memory"]
            counter += 1

    if counter == 0:
        return 0, 0
    return round(cpu/counter, 2), round(memory/counter, 0)


def __calculate_throughput(iterations, start_time, end_time, requests_size):
    """Return (requests/sec, bytes/sec) over the run window, each rounded to a
    whole number; zero when the window is empty or the size is unknown."""
    elapsed = (end_time - start_time).total_seconds()
    if elapsed == 0:
        request_rate = 0
    else:
        request_rate = iterations / elapsed
    if elapsed == 0 or requests_size is None:
        byte_rate = 0
    else:
        byte_rate = int(requests_size) / elapsed
    return round(request_rate, 0), round(byte_rate, 0)

# Module-level collectors sampling the curl (client) and nginx (server) containers.
client_collector = MetricsCollector("qujata-curl")
server_collector = MetricsCollector("qujata-nginx")

# TODO: add lock validation
def start_collecting():
    # Begin sampling on both collectors for the duration of a test run.
    client_collector.start()
    server_collector.start()

def stop_collecting():
    # Stop sampling on both collectors.
    client_collector.stop()
    server_collector.stop()
    # print collectors results
    logging.info(client_collector.to_pretty_table())
    logging.info(server_collector.to_pretty_table())

def get_metrics():
    # Return the collected samples as a (client_data, server_data) pair.
    client_data = client_collector.get_data()
    server_data = server_collector.get_data()
    return client_data, server_data
53 changes: 8 additions & 45 deletions api/src/services/test_suites_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,10 @@
from src.models.env_info import EnvInfo
from src.models.test_suite import TestSuite
from src.models.test_run import TestRun
from src.models.test_run_metric import TestRunMetric
from src.enums.metric import Metric
from src.exceptions.exceptions import ApiException, NotFoundException
import src.utils.test_suite_serializer as test_suite_serializer
import pytz
from dateutil import parser
import logging

import src.services.metrics_service as metrics_service
from src.enums.status import Status

# constants
HTTP_STATUS_UNPROCESSABLE_ENTITY = 422
Expand All @@ -36,19 +32,20 @@ def create_test_suite(data):
current_app.database_manager.create(test_suite)
return test_suite

def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, client_metrics, server_metrics):
    """Persist a TestRun row and, for successful runs, its derived metrics.

    Returns the created TestRun.
    """
    # NOTE(review): this span was a diff rendering with both the pre- and
    # post-merge signatures interleaved (including a commented-out
    # `# message_size=1024,`); this body keeps the coherent post-merge version.
    test_run = TestRun(
        start_time=start_time,
        end_time=end_time,
        algorithm=algorithm,
        iterations=iterations,
        status=status,
        status_message=status_message,
        message_size=message_size,
        test_suite_id=test_suite_id
    )
    current_app.database_manager.create(test_run)
    # Metrics are only computed/stored for successful runs; failed runs keep
    # just the TestRun row with its status message.
    if status == Status.SUCCESS:
        metrics_service.create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time)
    return test_run

def update_test_suite(test_suite):
Expand Down Expand Up @@ -87,39 +84,5 @@ def delete_test_suite(test_suite_id):
raise NotFoundException('Test suite with id: ' + str(test_suite_id) +' not found', 'Not Found')
current_app.database_manager.delete(test_suite)


def __create_test_run_metrics(test_run, client_metrics, server_metrics):
__save_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
__save_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)


def __save_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
cpu, memory = __calculate_average(metrics, test_run.start_time)
__save_metric_to_db(test_run, cpu_metric_name, cpu, TYPE_CPU)
__save_metric_to_db(test_run, memory_metric_name, memory, TYPE_MEMORY)


def __calculate_average(metrics, start_time):
cpu, memory = 0, 0
counter = 0
for ts, value in metrics.items():
if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
cpu += value["cpu"]
memory += value["memory"]
counter += 1

if counter == 0:
return 0, 0
return cpu/counter, memory/counter

def __save_metric_to_db(test_run, metric_name, metric_value, metric_type):
if metric_type == TYPE_CPU:
metric_value = round(metric_value, 2)
elif metric_type == TYPE_MEMORY:
metric_value = round(metric_value, 0)
test_run_metric = TestRunMetric(
test_run_id=test_run.id,
metric_name=metric_name,
value=metric_value
)
current_app.database_manager.create(test_run_metric)
def delete_test_suites(test_suite_ids):
    # Bulk-delete TestSuite rows by primary key. Cleanup of dependent rows is
    # presumably handled by the database layer / model cascades — confirm.
    current_app.database_manager.delete_by_ids(TestSuite, test_suite_ids)
Loading

0 comments on commit 3ca0ddf

Please sign in to comment.