Skip to content

Commit

Permalink
Max_failures option
Browse files Browse the repository at this point in the history
Signed-off-by: Raul Sevilla <[email protected]>
  • Loading branch information
rsevilla87 committed May 5, 2022
1 parent 87b446d commit 582f172
Show file tree
Hide file tree
Showing 6 changed files with 46 additions and 15 deletions.
10 changes: 6 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,8 @@ git clone https://github.com/cloud-bulldozer/touchstone
cd touchstone
python setup.py develop
touchstone_compare -h
usage: touchstone_compare [-h] [--version] [--database {elasticsearch}] [--identifier-key IDENTIFIER] -u UUID [UUID ...] [-o {json,yaml,csv}] --config CONFIG [--output-file OUTPUT_FILE] [--tolerancy-rules TOLERANCY_RULES] -url CONN_URL
[CONN_URL ...] [-v] [-vv]

usage: touchstone_compare [-h] [--version] [--database {elasticsearch}] [--identifier-key IDENTIFIER] -u UUID [UUID ...] [-a ALIASES [ALIASES ...]] [-o {json,yaml,csv}] --config CONFIG [--output-file OUTPUT_FILE]
[--tolerancy-rules TOLERANCY_RULES] [--rc RC] -url CONN_URL [CONN_URL ...] [-v] [-vv]
compare results from benchmarks

optional arguments:
Expand Down Expand Up @@ -218,12 +217,14 @@ This feature can be enabled with the flag `--tolerancy-rules` which points to a
```yaml
- json_path: ["test_type", "stream", "protocol", "*", "message_size", "*", "num_threads", "*", "avg(norm_byte)"]
tolerancy: -15
max_failures: 25
- json_path: ["test_type", "rr", "protocol", "*", "message_size", "*", "num_threads", "*", "99.0percentiles(norm_ltcy)"]
tolerancy: 15
max_failures: 25
```
This YAML file contains a list of dictionaries, where the `json_path` key is a list that indicates the path that will allow `touchstone` to find the metric values from a comparison.
Wildcards can be used to match several keys at a certain level, and `tolerancy` defines the accepted tolerance percentage by the metrics matched by `json_path`. i.e a 10 would mean any metric 10% higher than the baseline metric will be considered an error, and -10 would mean the opposite, any metric at least 10% below the baseline value will be considered an error.
Wildcards can be used to match several keys at a certain level, and `tolerancy` defines the accepted tolerance percentage for the metrics matched by `json_path`. i.e. a 10 would mean any metric 10% higher than the baseline metric will be considered an error, and -10 would mean the opposite, any metric at least 10% below the baseline value will be considered an error. The optional parameter `max_failures` indicates the allowed percentage of failures permitted to consider a benchmark comparison as passed; it defaults to 0, meaning that a single failure will set the comparison as failed.
By default `touchstone` takes the first UUID passed as baseline. When `touchstone` finds a metric not meeting a configured tolerancy threshold, it returns 1.
Expand Down Expand Up @@ -260,6 +261,7 @@ $ touchstone_compare -url https://my-es.instance.com -u 975fa650-aeb2-5042-8517-
+-----------+--------+----------------------+-----------+--------------------------+--------+-----------+--------------+---------+
$ echo $?
1
# If, for example, max_failures is 50, the benchmark will still fail, because the second block (test_type: http) has more than 50% failures.
```
### Querying for raw data
Expand Down
28 changes: 21 additions & 7 deletions src/touchstone/compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@ def parse_args(args):
"""
parser = argparse.ArgumentParser(description="compare results from benchmarks")
parser.add_argument(
"--version", action="version", version="touchstone {ver}".format(ver=__version__),
"--version",
action="version",
version="touchstone {ver}".format(ver=__version__),
)
parser.add_argument(
"--database",
Expand All @@ -53,7 +55,11 @@ def parse_args(args):
nargs="+",
)
parser.add_argument(
"-a", "--aliases", help="id aliases", type=str, nargs="+",
"-a",
"--aliases",
help="id aliases",
type=str,
nargs="+",
)
parser.add_argument(
"-o",
Expand All @@ -80,7 +86,11 @@ def parse_args(args):
type=argparse.FileType("r", encoding="utf-8"),
)
parser.add_argument(
"--rc", help="Return code if tolerancy check fails", required=False, type=int, default=1,
"--rc",
help="Return code if tolerancy check fails",
required=False,
type=int,
default=1,
)
parser.add_argument(
"-url",
Expand Down Expand Up @@ -186,7 +196,11 @@ def main(args):
if "aggregations" in compute:
alias = args.aliases[uuid_index] if args.aliases else None
result = database_instance.emit_compute_dict(
uuid=uuid, compute_map=compute, index=index, identifier=args.identifier, alias=alias,
uuid=uuid,
compute_map=compute,
index=index,
identifier=args.identifier,
alias=alias,
)
if not result:
logger.error(
Expand Down Expand Up @@ -230,7 +244,8 @@ def main(args):
baseline_uuid = args.aliases[0] if args.aliases else args.uuid[0]
identifiers = args.aliases if args.aliases else args.uuid
compute_header = extract_headers(compute) + ["result", "deviation"] + identifiers
rc = decision_maker.run(baseline_uuid, index_json, compute_header, output_file, args)
if decision_maker.run(baseline_uuid, index_json, compute_header, output_file, args):
rc = args.rc
if metadata_dict:
main_json["metadata"] = metadata_dict
if args.output == "json":
Expand All @@ -241,8 +256,7 @@ def main(args):


def render():
"""Entry point for console_scripts
"""
"""Entry point for console_scripts"""
main(sys.argv[1:])


Expand Down
7 changes: 5 additions & 2 deletions src/touchstone/databases/elasticsearch.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,15 @@
import elasticsearch
import json
import ssl
import urllib3
from elasticsearch_dsl import Search, A


from . import DatabaseBaseClass


logger = logging.getLogger("touchstone")
urllib3.disable_warnings()


class Elasticsearch(DatabaseBaseClass):
Expand All @@ -17,8 +19,9 @@ def _create_conn_object(self):
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
es = elasticsearch.Elasticsearch([self._conn_url], send_get_body_as='POST',
ssl_context=ssl_ctx, use_ssl=True)
es = elasticsearch.Elasticsearch(
[self._conn_url], send_get_body_as="POST", ssl_context=ssl_ctx, use_ssl=True
)
return es

def __init__(self, conn_url=None):
Expand Down
13 changes: 11 additions & 2 deletions src/touchstone/decision_maker/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@


class Compare:

comparisons = 0
fails = 0

def __init__(self, baseline_uuid, json_data):
self.json_data = json_data
self.baseline_uuid = baseline_uuid
Expand All @@ -18,6 +22,7 @@ def __init__(self, baseline_uuid, json_data):
self.compare_dict = {}

def _compare(self, input_dict, compare_dict):
self.comparisons += 1
if self.baseline_uuid not in input_dict:
logger.error(f"Missing UUID in input dict: {input_dict}")
return
Expand All @@ -33,6 +38,7 @@ def _compare(self, input_dict, compare_dict):
if (self.tolerancy >= 0 and v > base_val) or (self.tolerancy < 0 and v < base_val):
result = "Fail"
self.passed = False
self.fails += 1
else:
result = "Pass"
if result not in compare_dict:
Expand Down Expand Up @@ -89,14 +95,15 @@ def run(baseline_uuid, results_data, compute_header, output_file, args):
:param compute_header headers to use in CSV and tabulate outputs
:param args benchmark-comparison arguments
"""
rc = 0
passed = True
try:
args.tolerancy_rules.seek(0)
json_paths = yaml.load(args.tolerancy_rules, Loader=yaml.FullLoader)
except Exception as err:
logger.error(f"Error loading tolerations rules: {err}")
c = Compare(baseline_uuid, results_data)
for json_path in json_paths:
c = Compare(baseline_uuid, results_data)
passed = c.compare(json_path["json_path"], json_path["tolerancy"])
if args.output == "yaml":
print(yaml.dump({"tolerations": c.compare_dict}, indent=1), file=output_file)
Expand All @@ -111,4 +118,6 @@ def run(baseline_uuid, results_data, compute_header, output_file, args):
row_list = []
flatten_and_discard(c.compare_dict, compute_header, row_list)
print(tabulate(row_list, headers=compute_header, tablefmt="pretty"), file=output_file)
return 0 if passed else args.rc
if not passed and c.fails * 100 / c.comparisons > json_path.get("max_failures", 0):
rc = args.rc
return rc
1 change: 1 addition & 0 deletions tolerancy-configs/mb.yaml
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
- json_path: ["test_type", "*", "routes", "*", "conn_per_targetroute", "*", "keepalive", "*", "avg(requests_per_second)"]
tolerancy: -15
max_failures: 0
2 changes: 2 additions & 0 deletions tolerancy-configs/uperf.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
- json_path: ["test_type", "stream", "protocol", "*", "message_size", "*", "num_threads", "*", "avg(norm_byte)"]
tolerancy: -15
max_failures: 0
- json_path: ["test_type", "rr", "protocol", "*", "message_size", "*", "num_threads", "*", "99.0percentiles(norm_ltcy)"]
tolerancy: 15
max_failures: 0

0 comments on commit 582f172

Please sign in to comment.