diff --git a/tests/interop/README.md b/tests/interop/README.md
index 4d996a9f..40d248f2 100644
--- a/tests/interop/README.md
+++ b/tests/interop/README.md
@@ -1,14 +1,20 @@
 # Running tests
 
+## Prerequisites
+
+* Openshift clusters with retail pattern installed
+  * factory cluster is managed via rhacm
+* kubeconfig files for Openshift clusters
+* oc client installed at ~/oc_client/oc
+
 ## Steps
 
-* install oc client at ~/oc_client/oc
-* create python3 venv, clone patterns repository
+* create python3 venv, clone retail repository
 * export KUBECONFIG=\
 * export KUBECONFIG_EDGE=\
 * export INFRA_PROVIDER=\
 * (optional) export WORKSPACE=\ (defaults to /tmp)
-* cd \/tests/interop
+* cd retail/tests/interop
 * pip install -r requirements.txt
 * ./run_tests.sh
 
diff --git a/tests/interop/test_subscription_status_edge.py b/tests/interop/test_subscription_status_edge.py
index 03a56a5c..a9022297 100644
--- a/tests/interop/test_subscription_status_edge.py
+++ b/tests/interop/test_subscription_status_edge.py
@@ -17,22 +17,10 @@ def test_subscription_status_edge(openshift_dyn_client):
         "crunchy-postgres-operator": ["openshift-operators"],
     }
 
-    (
-        operator_versions,
-        missing_subs,
-        unhealthy_subs,
-        missing_installplans,
-        upgrades_pending,
-    ) = subscription.subscription_status(openshift_dyn_client, expected_subs)
-
-    for line in operator_versions:
-        logger.info(line)
-
-    cluster_version = subscription.openshift_version(openshift_dyn_client)
-    logger.info(f"Openshift version:\n{cluster_version.instance.status.history}")
-
-    if missing_subs or unhealthy_subs or missing_installplans or upgrades_pending:
-        err_msg = "Subscription status check failed"
+    err_msg = subscription.subscription_status(
+        openshift_dyn_client, expected_subs, diff=False
+    )
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
diff --git a/tests/interop/test_subscription_status_hub.py b/tests/interop/test_subscription_status_hub.py
index 521415fc..55a0459b 100644
--- a/tests/interop/test_subscription_status_hub.py
+++ b/tests/interop/test_subscription_status_hub.py
@@ -1,8 +1,4 @@
-import difflib
 import logging
-import os
-import re
-import subprocess
 
 import pytest
 from validatedpatterns_tests.interop import subscription
@@ -24,100 +20,11 @@ def test_subscription_status_hub(openshift_dyn_client):
         "crunchy-postgres-operator": ["openshift-operators"],
     }
 
-    (
-        operator_versions,
-        missing_subs,
-        unhealthy_subs,
-        missing_installplans,
-        upgrades_pending,
-    ) = subscription.subscription_status(openshift_dyn_client, expected_subs)
-
-    if missing_subs:
-        logger.error(f"FAIL: The following subscriptions are missing: {missing_subs}")
-    if unhealthy_subs:
-        logger.error(
-            f"FAIL: The following subscriptions are unhealthy: {unhealthy_subs}"
-        )
-    if missing_installplans:
-        logger.error(
-            f"FAIL: The install plan for the following subscriptions is missing: {missing_installplans}"
-        )
-    if upgrades_pending:
-        logger.error(
-            f"FAIL: The following subscriptions are in UpgradePending state: {upgrades_pending}"
-        )
-
-    cluster_version = subscription.openshift_version(openshift_dyn_client)
-    logger.info(f"Openshift version:\n{cluster_version.instance.status.history}")
-
-    if os.getenv("EXTERNAL_TEST") != "true":
-        shortversion = re.sub("(.[0-9]+$)", "", os.getenv("OPENSHIFT_VER"))
-        currentfile = os.getcwd() + "/operators_hub_current"
-        sourceFile = open(currentfile, "w")
-        for line in operator_versions:
-            logger.info(line)
-            print(line, file=sourceFile)
-        sourceFile.close()
-
-        logger.info("Clone operator-versions repo")
-
-        try:
-            operator_versions_repo = (
-                "git@gitlab.cee.redhat.com:mpqe/mps/vp/operator-versions.git"
-            )
-            clone = subprocess.run(
-                ["git", "clone", operator_versions_repo], capture_output=True, text=True
-            )
-            logger.info(clone.stdout)
-            logger.info(clone.stderr)
-        except Exception:
-            pass
-
-        previouspath = os.getcwd() + f"/operator-versions/retail_hub_{shortversion}"
-        previousfile = f"retail_hub_{shortversion}"
-
-        logger.info("Ensure previous file exists")
-        checkpath = os.path.exists(previouspath)
-        logger.info(checkpath)
-
-        if checkpath is True:
-            logger.info("Diff current operator list with previous file")
-            diff = opdiff(open(previouspath).readlines(), open(currentfile).readlines())
-            diffstring = "".join(diff)
-            logger.info(diffstring)
-
-            logger.info("Write diff to file")
-            sourceFile = open("operator_diffs_hub.log", "w")
-            print(diffstring, file=sourceFile)
-            sourceFile.close()
-        else:
-            logger.info("Skipping operator diff - previous file not found")
-
-    if missing_subs or unhealthy_subs or missing_installplans or upgrades_pending:
-        err_msg = "Subscription status check failed"
+    err_msg = subscription.subscription_status(
+        openshift_dyn_client, expected_subs, diff=False
+    )
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
-        # Only push the new operarator list if the test passed
-        # and we are not testing a pre-release operator nor
-        # running externally
-        if os.getenv("EXTERNAL_TEST") != "true":
-            if checkpath is True and not os.environ["INDEX_IMAGE"]:
-                os.remove(previouspath)
-                os.rename(currentfile, previouspath)
-
-                cwd = os.getcwd() + "/operator-versions"
-                logger.info(f"CWD: {cwd}")
-
-                logger.info("Push new operator list")
-                subprocess.run(["git", "add", previousfile], cwd=cwd)
-                subprocess.run(
-                    ["git", "commit", "-m", "Update operator versions list"],
-                    cwd=cwd,
-                )
-                subprocess.run(["git", "push"], cwd=cwd)
-
         logger.info("PASS: Subscription status check passed")
-
-
-def opdiff(*args):
-    return filter(lambda x: not x.startswith(" "), difflib.ndiff(*args))
diff --git a/tests/interop/test_validate_edge_site_components.py b/tests/interop/test_validate_edge_site_components.py
index 36d9e736..807b121f 100644
--- a/tests/interop/test_validate_edge_site_components.py
+++ b/tests/interop/test_validate_edge_site_components.py
@@ -2,13 +2,7 @@
 import os
 
 import pytest
-from ocp_resources.route import Route
-from validatedpatterns_tests.interop import components
-from validatedpatterns_tests.interop.crd import ArgoCD
-from validatedpatterns_tests.interop.edge_util import (
-    get_long_live_bearer_token,
-    get_site_response,
-)
+from validatedpatterns_tests.interop import application, components
 
 from . import __loggername__
 
@@ -16,12 +10,6 @@
 oc = os.environ["HOME"] + "/oc_client/oc"
 
-"""
-Validate following retail on edge site (line server):
-
-1) applications health (Applications deployed through argocd)
-"""
-
 
 @pytest.mark.test_validate_edge_site_components
 def test_validate_edge_site_components():
@@ -33,29 +21,8 @@ def test_validate_edge_site_components():
 @pytest.mark.validate_edge_site_reachable
 def test_validate_edge_site_reachable(kube_config, openshift_dyn_client):
     logger.info("Check if edge site API end point is reachable")
-    edge_api_url = kube_config.host
-    if not edge_api_url:
-        err_msg = "Edge site url is missing in kubeconfig file"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-    else:
-        logger.info(f"EDGE api url : {edge_api_url}")
-
-    bearer_token = get_long_live_bearer_token(
-        dyn_client=openshift_dyn_client,
-        namespace="openshift-gitops",
-        sub_string="argocd-dex-server-token",
-    )
-
-    if not bearer_token:
-        assert False, "Bearer token is missing for argocd-dex-server"
-
-    edge_api_response = get_site_response(
-        site_url=edge_api_url, bearer_token=bearer_token
-    )
-
-    if edge_api_response.status_code != 200:
-        err_msg = "Edge site is not reachable. Please check the deployment."
+    err_msg = components.validate_site_reachable(kube_config, openshift_dyn_client)
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
@@ -65,8 +32,6 @@ def test_validate_edge_site_reachable(kube_config, openshift_dyn_client):
 @pytest.mark.check_pod_status_edge
 def test_check_pod_status(openshift_dyn_client):
     logger.info("Checking pod status")
-
-    err_msg = []
     projects = [
         "openshift-operators",
         "open-cluster-management-agent",
@@ -74,25 +39,7 @@ def test_check_pod_status(openshift_dyn_client):
         "open-cluster-management-agent-addon",
         "openshift-gitops",
     ]
-    missing_projects = components.check_project_absense(openshift_dyn_client, projects)
-    missing_pods = []
-    failed_pods = []
-
-    for project in projects:
-        missing_pods += components.check_pod_absence(openshift_dyn_client, project)
-        failed_pods += components.check_pod_status(openshift_dyn_client, project)
-
-    if missing_projects:
-        err_msg.append(f"The following namespaces are missing: {missing_projects}")
-
-    if missing_pods:
-        err_msg.append(
-            f"The following namespaces have no pods deployed: {missing_pods}"
-        )
-
-    if failed_pods:
-        err_msg.append(f"The following pods are failed: {failed_pods}")
-
+    err_msg = components.check_pod_status(openshift_dyn_client, projects)
     if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
@@ -102,49 +49,9 @@
 @pytest.mark.validate_argocd_reachable_edge_site
 def test_validate_argocd_reachable_edge_site(openshift_dyn_client):
-    namespace = "openshift-gitops"
-
-    try:
-        for route in Route.get(
-            dyn_client=openshift_dyn_client,
-            namespace=namespace,
-            name="openshift-gitops-server",
-        ):
-            argocd_route_url = route.instance.spec.host
-    except StopIteration:
-        err_msg = f"Argocd url/route is missing in {namespace} namespace"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-
-    logger.info("Check if argocd route/url on hub site is reachable")
-    if not argocd_route_url:
-        err_msg = f"Argocd url/route is missing in {namespace} namespace"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-    else:
-        final_argocd_url = f"{'https://'}{argocd_route_url}"
-        logger.info(f"Argocd route/url : {final_argocd_url}")
-
-    bearer_token = get_long_live_bearer_token(
-        dyn_client=openshift_dyn_client,
-        namespace=namespace,
-        sub_string="argocd-dex-server-token",
-    )
-    if not bearer_token:
-        err_msg = "Bearer token is missing for argocd-dex-server"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-    else:
-        logger.debug(f"Argocd bearer token : {bearer_token}")
-
-    argocd_route_response = get_site_response(
-        site_url=final_argocd_url, bearer_token=bearer_token
-    )
-
-    logger.info(f"Argocd route response : {argocd_route_response}")
-
-    if argocd_route_response.status_code != 200:
-        err_msg = "Argocd is not reachable. Please check the deployment."
+    logger.info("Check if argocd route/url on edge site is reachable")
+    err_msg = components.validate_argocd_reachable(openshift_dyn_client)
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
@@ -153,29 +60,11 @@ def test_validate_argocd_reachable_edge_site(openshift_dyn_client):
 
 @pytest.mark.validate_argocd_applications_health_edge_site
 def test_validate_argocd_applications_health_edge_site(openshift_dyn_client):
-    unhealthy_apps = []
     logger.info("Get all applications deployed by argocd on edge site")
     projects = ["openshift-gitops"]
-    for project in projects:
-        for app in ArgoCD.get(dyn_client=openshift_dyn_client, namespace=project):
-            app_name = app.instance.metadata.name
-            app_health = app.instance.status.health.status
-            app_sync = app.instance.status.sync.status
-
-            logger.info(f"Status for {app_name} : {app_health} : {app_sync}")
-
-            if "Healthy" != app_health or "Synced" != app_sync:
-                logger.info(f"Dumping failed resources for app: {app_name}")
-                unhealthy_apps.append(app_name)
-                try:
-                    for res in app.instance.status.resources:
-                        if (
-                            res.health and res.health.status != "Healthy"
-                        ) or res.status != "Synced":
-                            logger.info(f"\n{res}")
-                except TypeError:
-                    logger.info(f"No resources found for app: {app_name}")
-
+    unhealthy_apps = application.get_argocd_application_status(
+        openshift_dyn_client, projects
+    )
     if unhealthy_apps:
         err_msg = "Some or all applications deployed on edge site are unhealthy"
         logger.error(f"FAIL: {err_msg}:\n{unhealthy_apps}")
diff --git a/tests/interop/test_validate_hub_site_components.py b/tests/interop/test_validate_hub_site_components.py
index 2807708a..aef6bf8a 100644
--- a/tests/interop/test_validate_hub_site_components.py
+++ b/tests/interop/test_validate_hub_site_components.py
@@ -2,10 +2,8 @@
 import os
 
 import pytest
-import yaml
 from ocp_resources.storage_class import StorageClass
 from validatedpatterns_tests.interop import application, components
-from validatedpatterns_tests.interop.crd import ManagedCluster
 
 from . import __loggername__
 
@@ -13,12 +11,6 @@
 oc = os.environ["HOME"] + "/oc_client/oc"
 
-"""
-Validate following retail components on hub site (central server):
-
-1) applications health (Applications deployed through argocd)
-"""
-
 
 @pytest.mark.test_validate_hub_site_components
 def test_validate_hub_site_components(openshift_dyn_client):
@@ -37,19 +29,8 @@ def test_validate_hub_site_components(openshift_dyn_client):
 @pytest.mark.validate_hub_site_reachable
 def test_validate_hub_site_reachable(kube_config, openshift_dyn_client):
     logger.info("Check if hub site API end point is reachable")
-    namespace = "openshift-gitops"
-    sub_string = "argocd-dex-server-token"
-    try:
-        hub_api_url = application.get_site_api_url(kube_config)
-        hub_api_response = application.get_site_api_response(
-            openshift_dyn_client, hub_api_url, namespace, sub_string
-        )
-    except AssertionError as e:
-        logger.error(f"FAIL: {e}")
-        assert False, e
-
-    if hub_api_response.status_code != 200:
-        err_msg = "Hub site is not reachable. Please check the deployment."
+    err_msg = components.validate_site_reachable(kube_config, openshift_dyn_client)
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
@@ -59,8 +40,6 @@ def test_validate_hub_site_reachable(kube_config, openshift_dyn_client):
 @pytest.mark.check_pod_status_hub
 def test_check_pod_status(openshift_dyn_client):
     logger.info("Checking pod status")
-
-    err_msg = []
     projects = [
         "openshift-operators",
         "open-cluster-management",
@@ -69,26 +48,7 @@ def test_check_pod_status(openshift_dyn_client):
         "open-cluster-management-hub",
         "openshift-gitops",
         "vault",
     ]
-    missing_projects = components.check_project_absense(openshift_dyn_client, projects)
-    missing_pods = []
-    failed_pods = []
-
-    for project in projects:
-        logger.info(f"Checking pods in namespace '{project}'")
-        missing_pods += components.check_pod_absence(openshift_dyn_client, project)
-        failed_pods += components.check_pod_status(openshift_dyn_client, projects)
-
-    if missing_projects:
-        err_msg.append(f"The following namespaces are missing: {missing_projects}")
-
-    if missing_pods:
-        err_msg.append(
-            f"The following namespaces have no pods deployed: {missing_pods}"
-        )
-
-    if failed_pods:
-        err_msg.append(f"The following pods are failed: {failed_pods}")
-
+    err_msg = components.check_pod_status(openshift_dyn_client, projects)
     if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
@@ -99,59 +59,22 @@
 @pytest.mark.validate_acm_self_registration_managed_clusters
 def test_validate_acm_self_registration_managed_clusters(openshift_dyn_client):
     logger.info("Check ACM self registration for edge site")
-
-    kubefile = os.getenv("KUBECONFIG_EDGE")
-    kubefile_exp = os.path.expandvars(kubefile)
-    with open(kubefile_exp) as stream:
-        try:
-            out = yaml.safe_load(stream)
-            site_name = out["clusters"][0]["name"]
-        except yaml.YAMLError:
-            err_msg = "Failed to load kubeconfig file"
-            logger.error(f"FAIL: {err_msg}")
-            assert False, err_msg
-
-    clusters = ManagedCluster.get(dyn_client=openshift_dyn_client, name=site_name)
-    cluster = next(clusters)
-    is_managed_cluster_joined, managed_cluster_status = cluster.self_registered
-
-    logger.info(f"Cluster Managed : {is_managed_cluster_joined}")
-    logger.info(f"Managed Cluster Status : {managed_cluster_status}")
-
-    if not is_managed_cluster_joined:
-        err_msg = f"{site_name} is not self registered"
+    kubefiles = [os.getenv("KUBECONFIG_EDGE")]
+    err_msg = components.validate_acm_self_registration_managed_clusters(
+        openshift_dyn_client, kubefiles
+    )
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
-        logger.info(f"PASS: {site_name} is self registered")
+        logger.info("PASS: Edge site is self registered")
 
 
 @pytest.mark.validate_argocd_reachable_hub_site
 def test_validate_argocd_reachable_hub_site(openshift_dyn_client):
-    namespace = "openshift-gitops"
-    name = "openshift-gitops-server"
-    sub_string = "argocd-dex-server-token"
     logger.info("Check if argocd route/url on hub site is reachable")
-    try:
-        argocd_route_url = application.get_argocd_route_url(
-            openshift_dyn_client, namespace, name
-        )
-        argocd_route_response = application.get_site_api_response(
-            openshift_dyn_client, argocd_route_url, namespace, sub_string
-        )
-    except StopIteration:
-        err_msg = "Argocd url/route is missing in open-cluster-management namespace"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-    except AssertionError:
-        err_msg = "Bearer token is missing for argocd-dex-server"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-
-    logger.info(f"Argocd route response : {argocd_route_response}")
-
-    if argocd_route_response.status_code != 200:
-        err_msg = "Argocd is not reachable. Please check the deployment"
+    err_msg = components.validate_argocd_reachable(openshift_dyn_client)
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
     else:
@@ -160,13 +83,11 @@ def test_validate_argocd_reachable_hub_site(openshift_dyn_client):
 
 @pytest.mark.validate_argocd_applications_health_hub_site
 def test_validate_argocd_applications_health_hub_site(openshift_dyn_client):
-    unhealthy_apps = []
     logger.info("Get all applications deployed by argocd on hub site")
     projects = ["openshift-gitops", "retail-hub"]
-    for project in projects:
-        unhealthy_apps += application.get_argocd_application_status(
-            openshift_dyn_client, project
-        )
+    unhealthy_apps = application.get_argocd_application_status(
+        openshift_dyn_client, projects
+    )
     if unhealthy_apps:
         err_msg = "Some or all applications deployed on hub site are unhealthy"
         logger.error(f"FAIL: {err_msg}:\n{unhealthy_apps}")
diff --git a/tests/interop/test_validate_pipelineruns.py b/tests/interop/test_validate_pipelineruns.py
index bfa314c8..7044735d 100644
--- a/tests/interop/test_validate_pipelineruns.py
+++ b/tests/interop/test_validate_pipelineruns.py
@@ -1,13 +1,8 @@
 import logging
 import os
-import re
-import subprocess
-import time
 
 import pytest
-from ocp_resources.pipeline import Pipeline
-from ocp_resources.pipelineruns import PipelineRun
-from ocp_resources.task_run import TaskRun
+from validatedpatterns_tests.interop import components
 
 from . import __loggername__
 
@@ -18,6 +13,7 @@
 
 @pytest.mark.test_validate_pipelineruns
 def test_validate_pipelineruns(openshift_dyn_client):
+    logger.info("Checking Openshift pipelines")
     project = "quarkuscoffeeshop-cicd"
 
     expected_pipelines = [
@@ -40,158 +36,11 @@ def test_validate_pipelineruns(openshift_dyn_client):
         "quarkuscoffeeshop-web",
     ]
 
-    found_pipelines = []
-    found_pipelineruns = []
-    passed_pipelineruns = []
-    failed_pipelineruns = []
-
-    logger.info("Checking Openshift pipelines")
-
-    # FAIL here if no pipelines are found
-    try:
-        pipelines = Pipeline.get(dyn_client=openshift_dyn_client, namespace=project)
-        next(pipelines)
-    except StopIteration:
-        err_msg = "No pipelines were found"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-
-    for pipeline in Pipeline.get(dyn_client=openshift_dyn_client, namespace=project):
-        for expected_pipeline in expected_pipelines:
-            match = expected_pipeline + "$"
-            if re.match(match, pipeline.instance.metadata.name):
-                if pipeline.instance.metadata.name not in found_pipelines:
-                    logger.info(f"found pipeline: {pipeline.instance.metadata.name}")
-                    found_pipelines.append(pipeline.instance.metadata.name)
-                    break
-
-    if len(expected_pipelines) == len(found_pipelines):
-        logger.info("Found all expected pipelines")
-    else:
-        err_msg = "Some or all pipelines are missing"
-        logger.error(
-            f"FAIL: {err_msg}:\nExpected: {expected_pipelines}\nFound: {found_pipelines}"
-        )
-        assert False, err_msg
-
-    logger.info("Checking Openshift pipeline runs")
-    timeout = time.time() + 3600
-
-    # FAIL here if no pipelineruns are found
-    try:
-        pipelineruns = PipelineRun.get(
-            dyn_client=openshift_dyn_client, namespace=project
-        )
-        next(pipelineruns)
-    except StopIteration:
-        err_msg = "No pipeline runs were found"
+    err_msg = components.validate_pipelineruns(
+        openshift_dyn_client, project, expected_pipelines, expected_pipelineruns
+    )
+    if err_msg:
         logger.error(f"FAIL: {err_msg}")
         assert False, err_msg
-
-    while time.time() < timeout:
-        for pipelinerun in PipelineRun.get(
-            dyn_client=openshift_dyn_client, namespace=project
-        ):
-            for expected_pipelinerun in expected_pipelineruns:
-                if re.search(expected_pipelinerun, pipelinerun.instance.metadata.name):
-                    if pipelinerun.instance.metadata.name not in found_pipelineruns:
-                        logger.info(
-                            f"found pipelinerun: {pipelinerun.instance.metadata.name}"
-                        )
-                        found_pipelineruns.append(pipelinerun.instance.metadata.name)
-                        break
-
-        if len(expected_pipelineruns) == len(found_pipelineruns):
-            break
-        else:
-            time.sleep(60)
-            continue
-
-    if len(expected_pipelineruns) == len(found_pipelineruns):
-        logger.info("Found all expected pipeline runs")
-    else:
-        err_msg = "Some pipeline runs are missing"
-        logger.error(
-            f"FAIL: {err_msg}:\nExpected: {expected_pipelineruns}\nFound: {found_pipelineruns}"
-        )
-        assert False, err_msg
-
-    logger.info("Checking Openshift pipeline run status")
-    timeout = time.time() + 3600
-
-    while time.time() < timeout:
-        for pipelinerun in PipelineRun.get(
-            dyn_client=openshift_dyn_client, namespace=project
-        ):
-            if pipelinerun.instance.status.conditions[0].reason == "Succeeded":
-                if pipelinerun.instance.metadata.name not in passed_pipelineruns:
-                    logger.info(
-                        f"Pipeline run succeeded: {pipelinerun.instance.metadata.name}"
-                    )
-                    passed_pipelineruns.append(pipelinerun.instance.metadata.name)
-            elif pipelinerun.instance.status.conditions[0].reason == "Running":
-                logger.info(
-                    f"Pipeline {pipelinerun.instance.metadata.name} is still running"
-                )
-            else:
-                reason = pipelinerun.instance.status.conditions[0].reason
-                logger.info(
-                    f"Pipeline run FAILED: {pipelinerun.instance.metadata.name} Reason: {reason}"
-                )
-                if pipelinerun.instance.metadata.name not in failed_pipelineruns:
-                    failed_pipelineruns.append(pipelinerun.instance.metadata.name)
-
-        logger.info(f"Failed pipelineruns: {failed_pipelineruns}")
-        logger.info(f"Passed pipelineruns: {passed_pipelineruns}")
-
-        if (len(failed_pipelineruns) + len(passed_pipelineruns)) == len(
-            expected_pipelines
-        ):
-            break
-        else:
-            time.sleep(60)
-            continue
-
-    if ((len(failed_pipelineruns)) > 0) or (
-        len(passed_pipelineruns) < len(expected_pipelineruns)
-    ):
-        logger.info("Checking Openshift task runs")
-
-        # FAIL here if no task runs are found
-        try:
-            taskruns = TaskRun.get(dyn_client=openshift_dyn_client, namespace=project)
-            next(taskruns)
-        except StopIteration:
-            err_msg = "No task runs were found"
-            logger.error(f"FAIL: {err_msg}")
-            assert False, err_msg
-
-        for taskrun in TaskRun.get(dyn_client=openshift_dyn_client, namespace=project):
-            if taskrun.instance.status.conditions[0].status == "False":
-                reason = taskrun.instance.status.conditions[0].reason
-                logger.info(
-                    f"Task FAILED: {taskrun.instance.metadata.name} Reason: {reason}"
-                )
-
-                message = taskrun.instance.status.conditions[0].message
-                logger.info(f"message: {message}")
-
-                try:
-                    cmdstring = re.search("for logs run: kubectl(.*)$", message).group(
-                        1
-                    )
-                    cmd = str(oc + cmdstring)
-                    logger.info(f"CMD: {cmd}")
-                    cmd_out = subprocess.run(cmd, shell=True, capture_output=True)
-
-                    logger.info(cmd_out.stdout.decode("utf-8"))
-                    logger.info(cmd_out.stderr.decode("utf-8"))
-                except AttributeError:
-                    logger.error("No logs to collect")
-
-        err_msg = "Some or all tasks have failed"
-        logger.error(f"FAIL: {err_msg}")
-        assert False, err_msg
-
     else:
         logger.info("PASS: Pipeline verification succeeded.")