
Commit 9115cd8

[Test] Add integration tests to validate support for GB200.
1 parent fff826e commit 9115cd8

11 files changed: +777 -1 lines changed

tests/integration-tests/clusters_factory.py

Lines changed: 46 additions & 0 deletions
@@ -19,6 +19,7 @@
 import boto3
 import yaml
 from framework.credential_providers import run_pcluster_command
+from remote_command_executor import RemoteCommandExecutor
 from retrying import retry
 from time_utils import minutes, seconds
 from utils import (
@@ -34,6 +35,15 @@
     retry_if_subprocess_error,
 )

+from tests.common.utils import read_remote_file
+
+TAG_CLUSTER_NAME = "parallelcluster:cluster-name"
+TAG_NODE_TYPE = "parallelcluster:node-type"
+TAG_QUEUE_NAME = "parallelcluster:queue-name"
+TAG_QCOMPUTE_RESOURCE_NAME = "parallelcluster:compute-resource-name"
+
+LAUNCH_TEMPLATES_CONFIG_FILE = "/opt/parallelcluster/shared/launch-templates-config.json"
+

 def suppress_and_log_exception(func):
     @functools.wraps(func)
@@ -253,6 +263,42 @@ def describe_cluster_instances(self, node_type=None, queue_name=None):
             logging.error("Failed when getting cluster instances with error:\n%s\nand output:\n%s", e.stderr, e.stdout)
             raise

+    def get_compute_nodes(self, queue_name: str = None, compute_resource_name: str = None, status: str = "running"):
+        """Return the EC2 instance details for compute nodes matching the provided criteria."""
+        ec2 = boto3.client("ec2", region_name=self.region)
+        filters = [
+            {"Name": f"tag:{TAG_CLUSTER_NAME}", "Values": [self.cfn_name]},
+            {"Name": f"tag:{TAG_NODE_TYPE}", "Values": ["Compute"]},
+            {"Name": "instance-state-name", "Values": [status]},
+        ]
+
+        if queue_name:
+            filters.append({"Name": f"tag:{TAG_QUEUE_NAME}", "Values": [queue_name]})
+        if compute_resource_name:
+            filters.append({"Name": f"tag:{TAG_QCOMPUTE_RESOURCE_NAME}", "Values": [compute_resource_name]})
+
+        return ec2.describe_instances(Filters=filters).get("Reservations")[0].get("Instances")
+
+    def get_compute_nodes_private_ip(
+        self, queue_name: str = None, compute_resource_name: str = None, status: str = "running"
+    ):
+        """Return the private IP address of compute nodes matching the provided criteria."""
+        return [i.get("PrivateIpAddress") for i in self.get_compute_nodes(queue_name, compute_resource_name, status)]
+
+    def get_compute_nodes_launch_template_logical_id(self, queue_name: str, compute_resource_name: str):
+        """Return the launch template logical id of compute nodes matching the provided criteria."""
+        launch_templates_config = json.loads(
+            read_remote_file(RemoteCommandExecutor(self), LAUNCH_TEMPLATES_CONFIG_FILE)
+        )
+        return (
+            launch_templates_config.get("Queues", {})
+            .get(queue_name, {})
+            .get("ComputeResources", {})
+            .get(compute_resource_name, {})
+            .get("LaunchTemplate", {})
+            .get("LogicalId")
+        )
+
     def get_cluster_instance_ids(self, node_type=None, queue_name=None):
         """Run pcluster describe-cluster-instances and collect instance ids."""
         instances = self.describe_cluster_instances(node_type=node_type, queue_name=queue_name)
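For reference, the EC2 call built by the new get_compute_nodes() helper boils down to a tag-filtered describe_instances. A minimal standalone sketch, with an illustrative cluster name, region and queue (not taken from this commit):

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
response = ec2.describe_instances(
    Filters=[
        {"Name": "tag:parallelcluster:cluster-name", "Values": ["my-cluster"]},
        {"Name": "tag:parallelcluster:node-type", "Values": ["Compute"]},
        {"Name": "tag:parallelcluster:queue-name", "Values": ["q1"]},
        {"Name": "instance-state-name", "Values": ["running"]},
    ]
)
# Matching instances can be spread across several reservations, so flatten them before use.
instances = [i for r in response["Reservations"] for i in r["Instances"]]
print([i.get("PrivateIpAddress") for i in instances])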

tests/integration-tests/configs/develop.yaml

Lines changed: 7 additions & 0 deletions
@@ -280,6 +280,13 @@ test-suites:
           instances: [{{ common.instance("instance_type_1") }}]
           oss: [{{ OS_X86_6 }}]
           schedulers: [ "slurm" ]
+  gb200:
+    test_gb200.py::test_gb200:
+      dimensions:
+        - regions: [ "us-east-1" ]
+          instances: [ "g4dn.2xlarge" ]
+          oss: [ "alinux2023" ]
+          schedulers: [ "slurm" ]
   health_checks:
     test_gpu_health_checks.py::test_cluster_with_gpu_health_checks:
       dimensions:

tests/integration-tests/conftest.py

Lines changed: 26 additions & 0 deletions
@@ -579,6 +579,32 @@ def test_datadir(request, datadir):
     return datadir / "{0}/{1}".format(class_name, function_name)


+@pytest.fixture()
+def file_reader(test_datadir, request, vpc_stack):
+    """
+    Define a fixture to render file templates associated to the running test.
+
+    The template file for a given test is a generic file stored in the configs_datadir folder.
+    The template can be written by using Jinja2 template engine.
+
+    :return: a _file_renderer(**kwargs) function which gets as input a dictionary of values to replace in the template
+    """
+
+    def _file_renderer(input_file: str = "script.sh", output_file: str = "script_rendered.sh", **kwargs):
+        input_file_path = test_datadir / input_file
+        if not os.path.isfile(input_file_path):
+            raise FileNotFoundError(f"Input file not found in the expected dir {input_file_path}")
+        output_file_path = test_datadir / output_file if output_file else input_file_path
+        default_values = _get_default_template_values(vpc_stack, request)
+        file_loader = FileSystemLoader(str(test_datadir))
+        env = SandboxedEnvironment(loader=file_loader)
+        rendered_template = env.get_template(input_file).render(**{**default_values, **kwargs})
+        output_file_path.write_text(rendered_template)
+        return output_file_path
+
+    return _file_renderer
+
+
 @pytest.fixture()
 def pcluster_config_reader(test_datadir, vpc_stack, request, region, instance, architecture):
     """

tests/integration-tests/tests/common/assertions.py

Lines changed: 9 additions & 1 deletion
@@ -9,12 +9,14 @@
 # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+import re
 import time
 from typing import List, Union

 import boto3
 import pytest
 from assertpy import assert_that, soft_assertions
+from clusters_factory import Cluster
 from constants import NodeType
 from remote_command_executor import RemoteCommandExecutor
 from retrying import RetryError, retry
@@ -28,7 +30,7 @@
 )

 from tests.common.scaling_common import get_compute_nodes_allocation
-from tests.common.utils import get_ddb_item
+from tests.common.utils import get_ddb_item, read_remote_file


 @retry(wait_fixed=seconds(20), stop_max_delay=minutes(6))
@@ -422,3 +424,9 @@ def _assert_build_image_stack_deleted(stack_name, region, timeout_seconds=600, p
         time.sleep(poll_interval)

     pytest.fail(f"Timed-out waiting for stack {stack_name} deletion (last status: {last_status})")
+
+
+def assert_regex_in_file(cluster: Cluster, compute_node_ip: str, file_name: str, pattern: str):
+    rce = RemoteCommandExecutor(cluster, compute_node_ip)
+    file_content = read_remote_file(rce, file_name)
+    assert_that(bool(re.search(pattern, file_content, re.IGNORECASE))).is_true()

tests/integration-tests/tests/common/schedulers_common.py

Lines changed: 7 additions & 0 deletions
@@ -440,6 +440,13 @@ def get_unique_static_nodes(self):
         logging.info("All running nodes: %s", result.stdout)
         return result.stdout.splitlines()

+    def get_nodename_from_ip(self, ip: str):
+        """Get the nodename from IP address"""
+        command = f"scontrol show nodes --json | jq -r --arg ip \"{ip}\" '.nodes[] | select(.address == $ip) | .hostname'"  # noqa: W605
+        result = self._remote_command_executor.run_remote_command(command)
+        logging.info(f"Nodename for {ip} is: {result.stdout}")
+        return result.stdout
+
     @retry(retry_on_result=lambda result: "drain" not in result, wait_fixed=seconds(3), stop_max_delay=minutes(5))
     def wait_for_locked_node(self):  # noqa: D102
         return self._remote_command_executor.run_remote_command("sinfo -h -o '%t'").stdout
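The jq filter above picks the Slurm node whose address matches the given IP out of the JSON printed by `scontrol show nodes --json`. The same selection expressed in plain Python, run against a trimmed, illustrative payload:

import json

# Trimmed, illustrative sample of `scontrol show nodes --json` output.
scontrol_output = """
{"nodes": [
  {"hostname": "q1-st-cr1-1", "address": "192.168.103.159"},
  {"hostname": "q1-st-cr1-2", "address": "192.168.107.187"}
]}
"""
nodes = json.loads(scontrol_output)["nodes"]
ip = "192.168.107.187"
print(next((n["hostname"] for n in nodes if n["address"] == ip), None))  # q1-st-cr1-2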

tests/integration-tests/tests/common/utils.py

Lines changed: 9 additions & 0 deletions
@@ -536,3 +536,12 @@ def write_file(dirname, filename, content):
         f.write(content)
     logging.info(f"File written: {filepath}")
     return filepath
+
+
+def terminate_nodes_manually(instance_ids, region):
+    ec2_client = boto3.client("ec2", region_name=region)
+    for instance_id in instance_ids:
+        instance_states = ec2_client.terminate_instances(InstanceIds=[instance_id]).get("TerminatingInstances")[0]
+        assert_that(instance_states.get("InstanceId")).is_equal_to(instance_id)
+        assert_that(instance_states.get("CurrentState").get("Name")).is_in("shutting-down", "terminated")
+    logging.info("Terminated nodes: {}".format(instance_ids))
test_gb200.py (new file)

Lines changed: 220 additions & 0 deletions

@@ -0,0 +1,220 @@
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License").
+# You may not use this file except in compliance with the License.
+# A copy of the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "LICENSE.txt" file accompanying this file.
+# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
+# See the License for the specific language governing permissions and limitations under the License.
+import json
+import logging
+
+import boto3
+import pytest
+from assertpy import assert_that
+from clusters_factory import Cluster
+from remote_command_executor import RemoteCommandExecutor
+
+from tests.common.assertions import assert_regex_in_file
+from tests.common.schedulers_common import SlurmCommands
+from tests.common.utils import read_remote_file, terminate_nodes_manually
+
+
+def submit_job_imex_status(rce: RemoteCommandExecutor, launch_template_id: str, queue_name: str, max_nodes: int = 1):
+    logging.info("Submitting job to check IMEX status")
+    slurm = SlurmCommands(rce)
+    job_id = slurm.submit_command_and_assert_job_accepted(
+        submit_command_args={
+            "command": f"srun /usr/bin/nvidia-imex-ctl -N -j -c /opt/parallelcluster/shared/nvidia-imex/config_{launch_template_id}.cfg",
+            "partition": queue_name,
+            "nodes": max_nodes,
+            "other_options": "--output slurm-%j-%N.out --error slurm-%j-%N.err",
+        }
+    )
+    slurm.wait_job_completed(job_id)
+    slurm.assert_job_succeeded(job_id)
+    return job_id
+
+def assert_imex_nodes_config_is_correct(rce: RemoteCommandExecutor, launch_template_id: str, expected_ips: list):
+    logging.info(f"Checking IMEX nodes config contains the expected nodes: {expected_ips}")
+    imex_nodes_config_file = f"/opt/parallelcluster/shared/nvidia-imex/nodes_config_{launch_template_id}.cfg"
+    imex_config_content = read_remote_file(rce, imex_nodes_config_file)
+    actual_ips = [ip.strip() for ip in imex_config_content.strip().split("\n")]
+    assert_that(actual_ips).contains_only(*expected_ips)
+    logging.info(f"IMEX nodes config contains the expected nodes: {expected_ips}")
+
+def assert_imex_status_ok(rce: RemoteCommandExecutor, job_id: str, ips: list):
+    """
+    Assert that the output returned by the nvidia-imex-ctl command represents a healthy status for IMEX.
+    IMEX is considered healthy if every node of the domain reports a healthy status, i.e.:
+      * every node is READY
+      * every node is CONNECTED to every other node
+
+    Example of healthy IMEX status:
+    {
+      "nodes": {
+        "0": {
+          "status": "READY",
+          "host": "192.168.103.159",
+          "connections": {
+            "1": {
+              "host": "192.168.107.187",
+              "status": "CONNECTED",
+              "changed": true
+            },
+            "0": {
+              "host": "192.168.103.159",
+              "status": "CONNECTED",
+              "changed": true
+            }
+          },
+          "changed": true,
+          "version": "570.172.08"
+        },
+        "1": {
+          "status": "READY",
+          "host": "192.168.107.187",
+          "connections": {
+            "0": {
+              "host": "192.168.103.159",
+              "status": "CONNECTED",
+              "changed": true
+            },
+            "1": {
+              "host": "192.168.107.187",
+              "status": "CONNECTED",
+              "changed": true
+            }
+          },
+          "changed": true,
+          "version": "570.172.08"
+        }
+      },
+      "timestamp": "8/8/2025 17:38:02.641",
+      "status": "UP"
+    }
+    """
+    slurm = SlurmCommands(rce)
+
+    for reporting_node_ip in ips:
+        reporting_node_name = slurm.get_nodename_from_ip(reporting_node_ip)
+        logging.info(f"Checking IMEX status reported by node {reporting_node_ip} with hostname {reporting_node_name}")
+        job_stdout = rce.run_remote_command(f"cat slurm-{job_id}-{reporting_node_name}.out").stdout
+        logging.info(f"IMEX status is: {job_stdout}")
+        logging.info(f"Checking that IMEX sees the expected nodes: {ips}")
+        imex_status = json.loads(job_stdout)
+        assert_that(imex_status["status"]).is_equal_to("UP")
+        for ip_source in ips:
+            node_item = next(filter(lambda i: i['host'] == ip_source, imex_status['nodes'].values()), None)
+            assert_that(node_item).is_not_none()
+            assert_that(node_item['status']).is_equal_to("READY")
+            for ip_destination in ips:
+                connection_item = next(filter(lambda i: i['host'] == ip_destination, node_item['connections'].values()), None)
+                assert_that(connection_item).is_not_none()
+                assert_that(connection_item['status']).is_equal_to("CONNECTED")
+
+def assert_imex_healthy(cluster: Cluster, queue_name: str, compute_resource_name: str, max_nodes: int = 1):
+    rce = RemoteCommandExecutor(cluster)
+
+    launch_template_id = cluster.get_compute_nodes_launch_template_logical_id(queue_name, compute_resource_name)
+    logging.info(
+        f"Launch template for compute nodes in queue {queue_name} and compute resource {compute_resource_name}: {launch_template_id}"
+    )
+
+    ips = cluster.get_compute_nodes_private_ip(queue_name, compute_resource_name)
+    logging.info(
+        f"Private IP addresses for compute nodes in queue {queue_name} and compute resource {compute_resource_name}: {ips}"
+    )
+
+    job_id = submit_job_imex_status(rce, launch_template_id, queue_name, max_nodes)
+
+    assert_imex_nodes_config_is_correct(rce, launch_template_id, ips)
+    assert_imex_status_ok(rce, job_id, ips)
+
+    for compute_node_ip in cluster.get_compute_nodes_private_ip(queue_name, compute_resource_name):
+        for file_name in ["/var/log/nvidia-imex-verbose.log", "/var/log/parallelcluster/nvidia-imex-prolog.log"]:
+            logging.info(f"Checking file {file_name} log does not contain any error")
+            assert_regex_in_file(cluster, compute_node_ip, file_name, r"^(?!.*(?:err|warn|fail)).*$")
+
+def assert_imex_not_configured(cluster: Cluster, queue_name: str, compute_resource_name: str, max_nodes: int = 1):
+    rce = RemoteCommandExecutor(cluster)
+
+    launch_template_id = cluster.get_compute_nodes_launch_template_logical_id(queue_name, compute_resource_name)
+    logging.info(
+        f"Launch template for compute nodes in queue {queue_name} and compute resource {compute_resource_name}: {launch_template_id}"
+    )
+
+    submit_job_imex_status(rce, launch_template_id, queue_name, max_nodes)
+
+    assert_imex_nodes_config_is_correct(rce, launch_template_id, ["0.0.0.0", "0.0.0.0"])
+
+
+@pytest.mark.usefixtures("region", "os", "instance", "scheduler")
+def test_gb200(pcluster_config_reader, file_reader, clusters_factory, test_datadir, s3_bucket_factory, region):
+    """
+    Test automated configuration of Nvidia IMEX.
+
+    This test creates a cluster with the necessary custom actions to configure NVIDIA IMEX and verifies the following:
+      1. On the compute resource supporting IMEX (q1-cr1), the IMEX nodes file is configured by the prolog,
+         the IMEX service is healthy and no errors are reported in IMEX's or the prolog's logs.
+         Also, IMEX gets reconfigured when nodes belonging to the same compute resource get replaced.
+      2. On the compute resource not supporting IMEX (q1-cr2), the IMEX nodes file is not configured by the prolog,
+         keeping the default values, and IMEX is not started.
+
+    The test prints the full IMEX status in the test log to facilitate troubleshooting.
+    The test uses instance type g4dn to simulate a p6e-gb200 instance.
+    This is a reasonable approximation because the focus of the test is on IMEX configuration,
+    which can be exercised on g4dn as well.
+    """
+    max_queue_size = 2
+
+    # Create an S3 bucket for custom action scripts
+    bucket_name = s3_bucket_factory()
+    bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
+
+    # Upload files to test bucket
+    headnode_start_filename = "head_node_start.sh"
+    prolog_filename = "nvidia-imex.prolog.sh"
+    bucket.upload_file(str(test_datadir / prolog_filename), prolog_filename)
+    head_node_start_script_rendered = file_reader(
+        input_file=headnode_start_filename,
+        output_file=f"{headnode_start_filename}.rendered",
+        bucket_name=bucket_name,
+        prolog_key=prolog_filename,
+    )
+    bucket.upload_file(head_node_start_script_rendered, headnode_start_filename)
+
+    # TODO: Remove after testing: BEGIN: added compute custom action to force the configuration of IMEX
+    compute_configured_filename = "compute_node_configured.sh"
+    bucket.upload_file(str(test_datadir / compute_configured_filename), compute_configured_filename)
+    # TODO: Remove after testing: END
+
+    queue_name = "q1"
+    compute_resource_with_imex = "cr1"
+    compute_resource_without_imex = "cr2"
+
+    cluster_config = pcluster_config_reader(
+        bucket_name=bucket_name,
+        head_node_start_script=headnode_start_filename,
+        compute_node_configured_script=compute_configured_filename,
+        max_queue_size=max_queue_size,
+        queue_name=queue_name,
+        compute_resource_with_imex=compute_resource_with_imex,
+        compute_resource_without_imex=compute_resource_without_imex,
+    )
+    cluster = clusters_factory(cluster_config)
+
+    assert_imex_healthy(cluster, queue_name, compute_resource_with_imex, max_queue_size)
+
+    # IMEX is not configured on compute resources that do not support it
+    assert_imex_not_configured(cluster, queue_name, compute_resource_without_imex)
+
+    # Forcefully terminate a compute node in the compute resource supporting IMEX
+    # to simulate an outage that forces the replacement of the node and consequently the IMEX reconfiguration.
+    terminate_nodes_manually(
+        [cluster.get_compute_nodes(queue_name, compute_resource_with_imex)[0].get("InstanceId")], region
+    )
+    assert_imex_healthy(cluster, queue_name, compute_resource_with_imex, max_queue_size)
