|
@@ -18,6 +18,7 @@
 import select
 import shutil
 import signal
+import subprocess
 import time
 import uuid
 from collections import namedtuple
@@ -299,9 +300,9 @@ def kill(self):
             backend.kill()
         self.disks_vhost_user.clear()
 
-        assert (
-            "Shutting down VM after intercepting signal" not in self.log_data
-        ), self.log_data
+        assert "Shutting down VM after intercepting signal" not in self.log_data, (
+            self.log_data
+        )
 
         try:
             if self.firecracker_pid:
@@ -330,9 +331,9 @@ def kill(self):
                 f"ps aux | grep {self.jailer.jailer_id}"
             )
             # make sure firecracker was killed
-            assert (
-                stderr == "" and "firecracker" not in stdout
-            ), f"Firecracker reported its pid {self.firecracker_pid}, which was killed, but there still exist processes using the supposedly dead Firecracker's jailer_id: {stdout}"
+            assert stderr == "" and "firecracker" not in stdout, (
+                f"Firecracker reported its pid {self.firecracker_pid}, which was killed, but there still exist processes using the supposedly dead Firecracker's jailer_id: {stdout}"
+            )
 
         # Mark the microVM as not spawned, so we avoid trying to kill twice.
         self._spawned = False
@@ -391,9 +392,9 @@ def _validate_api_response_times(self):
             if current_call.url != "/snapshot/create":
                 exec_time = float(match.group("execution_time")) / 1000.0
 
-                assert (
-                    exec_time <= MAX_API_CALL_DURATION_MS
-                ), f"{current_call.method} {current_call.url} API call exceeded maximum duration: {exec_time} ms. Body: {current_call.body}"
+                assert exec_time <= MAX_API_CALL_DURATION_MS, (
+                    f"{current_call.method} {current_call.url} API call exceeded maximum duration: {exec_time} ms. Body: {current_call.body}"
+                )
 
             current_call = None
 
@@ -560,18 +561,18 @@ def pin_threads(self, first_cpu):
         Return next "free" cpu core.
         """
         for vcpu, pcpu in enumerate(range(first_cpu, first_cpu + self.vcpus_count)):
-            assert self.pin_vcpu(
-                vcpu, pcpu
-            ), f"Failed to pin fc_vcpu {vcpu} thread to core {pcpu}."
+            assert self.pin_vcpu(vcpu, pcpu), (
+                f"Failed to pin fc_vcpu {vcpu} thread to core {pcpu}."
+            )
         # The cores first_cpu,...,first_cpu + self.vcpus_count - 1 are assigned to the individual vCPU threads,
         # So the remaining two threads (VMM and API) get first_cpu + self.vcpus_count
         # and first_cpu + self.vcpus_count + 1
-        assert self.pin_vmm(
-            first_cpu + self.vcpus_count
-        ), "Failed to pin firecracker thread."
-        assert self.pin_api(
-            first_cpu + self.vcpus_count + 1
-        ), "Failed to pin fc_api thread."
+        assert self.pin_vmm(first_cpu + self.vcpus_count), (
+            "Failed to pin firecracker thread."
+        )
+        assert self.pin_api(first_cpu + self.vcpus_count + 1), (
+            "Failed to pin fc_api thread."
+        )
 
         return first_cpu + self.vcpus_count + 2
 
@@ -683,9 +684,9 @@ def _wait_create(self):
     @retry(wait=wait_fixed(0.2), stop=stop_after_attempt(5), reraise=True)
     def check_log_message(self, message):
         """Wait until `message` appears in logging output."""
-        assert (
-            message in self.log_data
-        ), f'Message ("{message}") not found in log data ("{self.log_data}").'
+        assert message in self.log_data, (
+            f'Message ("{message}") not found in log data ("{self.log_data}").'
+        )
 
     @retry(wait=wait_fixed(0.2), stop=stop_after_attempt(5), reraise=True)
     def get_exit_code(self):
@@ -1115,13 +1116,37 @@ def build_from_snapshot(self, snapshot: Snapshot):
         vm.restore_from_snapshot(snapshot, resume=True)
         return vm
 
+    def unmount(self, path: str):
+        """Unmount the given mount point, reporting failures without raising."""
+        try:
+            subprocess.run(["umount", path], check=True)
+        except subprocess.CalledProcessError:
+            print(f"Failed to unmount {path}")
+
+    def get_mounts_at_path(self, path: str) -> list:
+        """Return all mount points at or below `path`, per /proc/mounts."""
+        try:
+            with open("/proc/mounts", "r") as f:
+                return [
+                    line.split()[1]
+                    for line in f
+                    if line.split()[1].startswith(os.path.abspath(path))
+                ]
+        except FileNotFoundError:
+            return []  # /proc/mounts may not exist on some systems
+
     def kill(self):
         """Clean up all built VMs"""
         for vm in self.vms:
             vm.kill()
             vm.jailer.cleanup()
             chroot_base_with_id = vm.jailer.chroot_base_with_id()
             if len(vm.jailer.jailer_id) > 0 and chroot_base_with_id.exists():
+                # Unmount anything still mounted inside the chroot (deepest
+                # paths first, so nested mounts release cleanly) before rmtree.
+                mounts = self.get_mounts_at_path(chroot_base_with_id)
+                for mounted_path in sorted(mounts, key=len, reverse=True):
+                    self.unmount(mounted_path)
                 shutil.rmtree(chroot_base_with_id)
             vm.netns.cleanup()
 
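
The cleanup added in the last hunk follows a common pattern: enumerate everything mounted under a directory via `/proc/mounts`, unmount it, and only then remove the tree. Below is a minimal standalone sketch of that pattern, independent of the test framework; the function names and `check=False` choice are illustrative, not part of the PR:

```python
import os
import shutil
import subprocess


def mounts_under(root: str) -> list:
    """Mount points at or below `root`, per /proc/mounts (fields: device, mount point, fstype, ...)."""
    root = os.path.abspath(root)
    with open("/proc/mounts") as f:
        return [line.split()[1] for line in f if line.split()[1].startswith(root)]


def remove_tree_with_mounts(root: str):
    """Unmount deepest-first, then delete the directory tree."""
    for mount_point in sorted(mounts_under(root), key=len, reverse=True):
        # check=False: a failed umount should not abort the rest of the cleanup
        subprocess.run(["umount", mount_point], check=False)
    shutil.rmtree(root)
```

Unmounting deepest-first matters when mounts are nested: `umount` on a parent mount fails with EBUSY while a child is still mounted inside it.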
|
|