diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index e638ac7874..382ee5617f 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -8,6 +8,32 @@ jobs: name: "🧪 Unit Tests" runs-on: ubuntu-latest steps: + - name: 🔧 Disable IPv6 + run: | + sudo sysctl -w net.ipv6.conf.all.disable_ipv6=1 + sudo sysctl -w net.ipv6.conf.default.disable_ipv6=1 + sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=1 + + - name: 🔧 Configure docker address pool + run: | + CONFIG='{ + "default-address-pools" : [ + { + "base" : "172.17.0.0/12", + "size" : 20 + }, + { + "base" : "192.168.0.0/16", + "size" : 24 + } + ] + }' + mkdir -p /etc/docker + echo "$CONFIG" | sudo tee /etc/docker/daemon.json + + - name: 🔧 Restart docker daemon + run: sudo systemctl restart docker + - name: ⬇️ Checkout repo uses: actions/checkout@v4 with: diff --git a/apps/webapp/tsconfig.check.json b/apps/webapp/tsconfig.check.json index 8839d20eb4..091b4ddb36 100644 --- a/apps/webapp/tsconfig.check.json +++ b/apps/webapp/tsconfig.check.json @@ -1,6 +1,8 @@ { "extends": "./tsconfig.json", "compilerOptions": { + "lib": ["DOM", "DOM.Iterable", "DOM.AsyncIterable", "ES2022"], + "target": "ES2022", "noEmit": true, "paths": { "~/*": ["./app/*"], diff --git a/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts b/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts index 7381a6d908..7bc5122f7e 100644 --- a/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts +++ b/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts @@ -155,7 +155,7 @@ describe("RunEngine attempt failures", () => { expect(executionData4.run.attemptNumber).toBe(2); expect(executionData4.run.status).toBe("COMPLETED_SUCCESSFULLY"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -266,7 +266,7 @@ describe("RunEngine attempt failures", () => { expect(executionData3.run.attemptNumber).toBe(1); expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -375,7 +375,7 @@ describe("RunEngine attempt failures", () => { expect(executionData3.run.attemptNumber).toBe(1); expect(executionData3.run.status).toBe("CRASHED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -482,7 +482,7 @@ describe("RunEngine attempt failures", () => { expect(executionData.run.attemptNumber).toBe(1); expect(executionData.run.status).toBe("CRASHED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -639,7 +639,7 @@ describe("RunEngine attempt failures", () => { expect(executionData4.run.attemptNumber).toBe(2); expect(executionData4.run.status).toBe("COMPLETED_SUCCESSFULLY"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -803,7 +803,7 @@ describe("RunEngine attempt failures", () => { expect(finalExecutionData.run.attemptNumber).toBe(2); expect(finalExecutionData.run.status).toBe("CRASHED"); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index bcaa3a59f6..7b6626bcf9 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -177,7 +177,7 @@ describe("RunEngine batchTrigger", () => { }); expect(batchAfter2?.status).toBe("COMPLETED"); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git 
a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts index 36deab4698..58ea7244ab 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -352,7 +352,7 @@ describe("RunEngine batchTriggerAndWait", () => { }); expect(batchAfter?.status === "COMPLETED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -570,7 +570,7 @@ describe("RunEngine batchTriggerAndWait", () => { ); expect(parentAfterTriggerAndWait.batch).toBeUndefined(); } finally { - engine.quit(); + await engine.quit(); } } ); diff --git a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts index d4792d0b0c..91702faba7 100644 --- a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts +++ b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts @@ -220,7 +220,7 @@ describe("RunEngine cancelling", () => { ); expect(envConcurrencyCompleted).toBe(0); } finally { - engine.quit(); + await engine.quit(); } } ); @@ -321,7 +321,7 @@ describe("RunEngine cancelling", () => { ); expect(envConcurrencyCompleted).toBe(0); } finally { - engine.quit(); + await engine.quit(); } }); diff --git a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts index 88d116b0b2..d9fcd5da8c 100644 --- a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts @@ -1375,7 +1375,7 @@ describe("RunEngine checkpoints", () => { }); expect(batchAfter?.status === "COMPLETED"); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git a/internal-packages/run-engine/src/engine/tests/delays.test.ts b/internal-packages/run-engine/src/engine/tests/delays.test.ts index 7b48859b55..cf131f55ad 100644 --- a/internal-packages/run-engine/src/engine/tests/delays.test.ts +++ b/internal-packages/run-engine/src/engine/tests/delays.test.ts @@ -86,7 +86,7 @@ describe("RunEngine delays", () => { assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -183,7 +183,7 @@ describe("RunEngine delays", () => { assertNonNullable(executionData3); expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -287,7 +287,7 @@ describe("RunEngine delays", () => { expect(run3.status).toBe("EXPIRED"); } finally { - engine.quit(); + await engine.quit(); } }); @@ -398,7 +398,7 @@ describe("RunEngine delays", () => { expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); expect(executionData4.run.status).toBe("CANCELED"); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts index 6d2f79053f..c0d269017f 100644 --- a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts +++ b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts @@ -77,7 +77,7 @@ describe("RunEngine dequeuing", () => { expect(dequeued.length).toBe(5); } finally { - engine.quit(); + await engine.quit(); } }); @@ -169,7 +169,7 @@ describe("RunEngine dequeuing", () => { const queueLength3 = await 
engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); expect(queueLength3).toBe(12); } finally { - engine.quit(); + await engine.quit(); } } ); diff --git a/internal-packages/run-engine/src/engine/tests/pendingVersion.test.ts b/internal-packages/run-engine/src/engine/tests/pendingVersion.test.ts index 2164920cfc..c3fa33eba1 100644 --- a/internal-packages/run-engine/src/engine/tests/pendingVersion.test.ts +++ b/internal-packages/run-engine/src/engine/tests/pendingVersion.test.ts @@ -158,7 +158,7 @@ describe("RunEngine pending version", () => { ); expect(queueLength2).toBe(2); } finally { - engine.quit(); + await engine.quit(); } } ); @@ -319,7 +319,7 @@ describe("RunEngine pending version", () => { ); expect(queueLength3).toBe(1); } finally { - engine.quit(); + await engine.quit(); } } ); diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts b/internal-packages/run-engine/src/engine/tests/priority.test.ts index c5bb40788e..6f31f9df7d 100644 --- a/internal-packages/run-engine/src/engine/tests/priority.test.ts +++ b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -103,7 +103,7 @@ describe("RunEngine priority", () => { expect(dequeue2.length).toBe(1); expect(dequeue2[0].run.friendlyId).toBe(runs[2].friendlyId); } finally { - engine.quit(); + await engine.quit(); } } ); @@ -197,7 +197,7 @@ describe("RunEngine priority", () => { expect(dequeue[3].run.friendlyId).toBe(runs[4].friendlyId); expect(dequeue[4].run.friendlyId).toBe(runs[0].friendlyId); } finally { - engine.quit(); + await engine.quit(); } } ); diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts index 2736b27b0a..2716cf3df1 100644 --- a/internal-packages/run-engine/src/engine/tests/trigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -198,7 +198,7 @@ describe("RunEngine trigger()", () => { expect(runWaitpointAfter[0].type).toBe("RUN"); expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); } finally { - engine.quit(); + await engine.quit(); } }); @@ -325,7 +325,7 @@ describe("RunEngine trigger()", () => { expect(output.type).toBe(error.type); expect(runWaitpointAfter[0].outputIsError).toBe(true); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 911b141622..fe806168bb 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -189,7 +189,7 @@ describe("RunEngine triggerAndWait", () => { ); expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); } finally { - engine.quit(); + await engine.quit(); } }); @@ -445,7 +445,7 @@ describe("RunEngine triggerAndWait", () => { ); expect(parent2ExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); } finally { - engine.quit(); + await engine.quit(); } } ); diff --git a/internal-packages/run-engine/src/engine/tests/ttl.test.ts b/internal-packages/run-engine/src/engine/tests/ttl.test.ts index 2643f5cae7..0ede60fbfd 100644 --- a/internal-packages/run-engine/src/engine/tests/ttl.test.ts +++ b/internal-packages/run-engine/src/engine/tests/ttl.test.ts @@ -102,7 +102,7 @@ describe("RunEngine ttl", () => { ); expect(envConcurrencyCompleted).toBe(0); } finally { - engine.quit(); + await engine.quit(); } }); }); diff --git 
a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts
index 3a8446edea..3e4ae20afa 100644
--- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts
+++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts
@@ -121,7 +121,7 @@ describe("RunEngine Waitpoints", () => {
       const executionDataAfter = await engine.getRunExecutionData({ runId: run.id });
       expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING");
     } finally {
-      engine.quit();
+      await engine.quit();
     }
   });
@@ -261,7 +261,7 @@ describe("RunEngine Waitpoints", () => {
       });
       expect(runWaitpoint).toBeNull();
     } finally {
-      engine.quit();
+      await engine.quit();
     }
   });
@@ -400,7 +400,7 @@ describe("RunEngine Waitpoints", () => {
        });
        expect(runWaitpoint).toBeNull();
      } finally {
-       engine.quit();
+       await engine.quit();
      }
    }
  );
@@ -516,7 +516,7 @@ describe("RunEngine Waitpoints", () => {
       });
       expect(runWaitpoint).toBeNull();
     } finally {
-      engine.quit();
+      await engine.quit();
     }
   });
@@ -664,7 +664,7 @@ describe("RunEngine Waitpoints", () => {
         expect(runWaitpoints.length).toBe(0);
        }
      } finally {
-       engine.quit();
+       await engine.quit();
      }
    }
  );
@@ -814,7 +814,7 @@ describe("RunEngine Waitpoints", () => {
       const isTimeout = isWaitpointOutputTimeout(waitpoint2.output);
       expect(isTimeout).toBe(true);
      } finally {
-       engine.quit();
+       await engine.quit();
      }
    }
  );
@@ -966,7 +966,7 @@ describe("RunEngine Waitpoints", () => {
       expect(waitpoint2.status).toBe("COMPLETED");
       expect(waitpoint2.outputIsError).toBe(false);
     } finally {
-      engine.quit();
+      await engine.quit();
     }
   });
@@ -1126,7 +1126,7 @@ describe("RunEngine Waitpoints", () => {
       expect(waitpoint2.status).toBe("COMPLETED");
       expect(waitpoint2.outputIsError).toBe(false);
     } finally {
-      engine.quit();
+      await engine.quit();
     }
   });
diff --git a/internal-packages/testcontainers/package.json b/internal-packages/testcontainers/package.json
index 33a4e43870..ab41c7c4a3 100644
--- a/internal-packages/testcontainers/package.json
+++ b/internal-packages/testcontainers/package.json
@@ -10,10 +10,11 @@
     "ioredis": "^5.3.2"
   },
   "devDependencies": {
-    "@testcontainers/postgresql": "^10.13.1",
-    "@testcontainers/redis": "^10.13.1",
+    "@testcontainers/postgresql": "^10.25.0",
+    "@testcontainers/redis": "^10.25.0",
     "@trigger.dev/core": "workspace:*",
-    "testcontainers": "^10.13.1",
+    "std-env": "^3.9.0",
+    "testcontainers": "^10.25.0",
     "tinyexec": "^0.3.0",
     "vitest": "^1.4.0"
   },
diff --git a/internal-packages/testcontainers/src/docker.ts b/internal-packages/testcontainers/src/docker.ts
new file mode 100644
index 0000000000..45cacb98aa
--- /dev/null
+++ b/internal-packages/testcontainers/src/docker.ts
@@ -0,0 +1,160 @@
+import { x } from "tinyexec";
+
+function stringToLines(str: string): string[] {
+  return str.split("\n").filter(Boolean);
+}
+
+function lineToWords(line: string): string[] {
+  return line.trim().split(/\s+/);
+}
+
+async function getDockerNetworks(): Promise<string[]> {
+  try {
+    const result = await x("docker", ["network", "ls" /* , "--no-trunc" */]);
+    return stringToLines(result.stdout);
+  } catch (error) {
+    console.error(error);
+    return ["error: check additional logs for more details"];
+  }
+}
+
+async function getDockerContainers(): Promise<string[]> {
+  try {
+    const result = await x("docker", ["ps", "-a" /* , "--no-trunc" */]);
+    return stringToLines(result.stdout);
+  } catch (error) {
+    console.error(error);
+    return ["error: check additional logs for more details"];
+  }
+}
+
+type DockerResource = { id: string; name: string };
+
+type DockerNetworkAttachment = DockerResource & {
+  containers: string[];
+};
+
+export async function getDockerNetworkAttachments(): Promise<DockerNetworkAttachment[]> {
+  let attachments: DockerNetworkAttachment[] = [];
+  let networks: DockerResource[] = [];
+
+  try {
+    const result = await x("docker", [
+      "network",
+      "ls",
+      "--format",
+      '{{.ID | printf "%.12s"}} {{.Name}}',
+    ]);
+
+    const lines = stringToLines(result.stdout);
+
+    for (const line of lines) {
+      const [id, name] = lineToWords(line);
+
+      if (!id || !name) {
+        continue;
+      }
+
+      networks.push({ id, name });
+    }
+  } catch (err) {
+    console.error("Failed to list docker networks:", err);
+  }
+
+  for (const { id, name } of networks) {
+    try {
+      // Get containers, one per line: id name\n
+      const containersResult = await x("docker", [
+        "network",
+        "inspect",
+        "--format",
+        '{{range $k, $v := .Containers}}{{$k | printf "%.12s"}} {{$v.Name}}\n{{end}}',
+        id,
+      ]);
+
+      const containers = stringToLines(containersResult.stdout);
+
+      attachments.push({ id, name, containers });
+    } catch (err) {
+      console.error(`Failed to inspect network ${id}:`, err);
+      attachments.push({ id, name, containers: [] });
+    }
+  }
+
+  return attachments;
+}
+
+type DockerContainerNetwork = DockerResource & {
+  networks: string[];
+};
+
+export async function getDockerContainerNetworks(): Promise<DockerContainerNetwork[]> {
+  let results: DockerContainerNetwork[] = [];
+  let containers: DockerResource[] = [];
+
+  try {
+    const result = await x("docker", [
+      "ps",
+      "-a",
+      "--format",
+      '{{.ID | printf "%.12s"}} {{.Names}}',
+    ]);
+
+    const lines = stringToLines(result.stdout);
+
+    for (const line of lines) {
+      const [id, name] = lineToWords(line);
+
+      if (!id || !name) {
+        continue;
+      }
+
+      containers.push({ id, name });
+    }
+  } catch (err) {
+    console.error("Failed to list docker containers:", err);
+  }
+
+  for (const { id, name } of containers) {
+    try {
+      const inspectResult = await x("docker", [
+        "inspect",
+        "--format",
+        '{{ range $k, $v := .NetworkSettings.Networks }}{{ $k | printf "%.12s" }} {{ $v.Name }}\n{{ end }}',
+        id,
+      ]);
+
+      const networks = stringToLines(inspectResult.stdout);
+
+      results.push({ id, name, networks });
+    } catch (err) {
+      console.error(`Failed to inspect container ${id}:`, err);
+      results.push({ id, name: String(err), networks: [] });
+    }
+  }
+
+  return results;
+}
+
+export type DockerDiagnostics = {
+  containers?: string[];
+  networks?: string[];
+  containerNetworks?: DockerContainerNetwork[];
+  networkAttachments?: DockerNetworkAttachment[];
+};
+
+export async function getDockerDiagnostics(): Promise<DockerDiagnostics> {
+  const [containers, networks, networkAttachments, containerNetworks] = await Promise.all([
+    getDockerContainers(),
+    getDockerNetworks(),
+    getDockerNetworkAttachments(),
+    getDockerContainerNetworks(),
+  ]);
+
+  return {
+    containers,
+    networks,
+    containerNetworks,
+    networkAttachments,
+  };
+}
diff --git a/internal-packages/testcontainers/src/index.ts b/internal-packages/testcontainers/src/index.ts
index ae5dcc76d6..ef36de754c 100644
--- a/internal-packages/testcontainers/src/index.ts
+++ b/internal-packages/testcontainers/src/index.ts
@@ -3,8 +3,15 @@ import { StartedRedisContainer } from "@testcontainers/redis";
 import { PrismaClient } from "@trigger.dev/database";
 import { RedisOptions } from "ioredis";
 import { Network, type StartedNetwork } from "testcontainers";
-import { test } from "vitest";
-import { createElectricContainer, createPostgresContainer, createRedisContainer } from "./utils";
+import { TaskContext, test } from "vitest";
+import { + createElectricContainer, + createPostgresContainer, + createRedisContainer, + useContainer, + withContainerSetup, +} from "./utils"; +import { getTaskMetadata, logCleanup, logSetup } from "./logs"; export { assertNonNullable } from "./utils"; export { StartedRedisContainer }; @@ -31,38 +38,50 @@ type ContainerWithElectricContext = NetworkContext & PostgresContext & ElectricC type Use = (value: T) => Promise; -const network = async ({}, use: Use) => { +const network = async ({ task }: TaskContext, use: Use) => { + const testName = task.name; + + logSetup("network: starting", { testName }); + + const start = Date.now(); const network = await new Network().start(); + const startDurationMs = Date.now() - start; + + const metadata = { + ...getTaskMetadata(task), + networkId: network.getId().slice(0, 12), + networkName: network.getName(), + startDurationMs, + }; + + logSetup("network: started", metadata); + try { await use(network); } finally { - try { - await network.stop(); - } catch (error) { - console.warn("Network stop error (ignored):", error); - } // Make sure to stop the network after use + await logCleanup("network", network.stop(), metadata); } }; const postgresContainer = async ( - { network }: { network: StartedNetwork }, + { network, task }: { network: StartedNetwork } & TaskContext, use: Use ) => { - const { container } = await createPostgresContainer(network); - try { - await use(container); - } finally { - // WARNING: Testcontainers by default will not wait until the container has stopped. It will simply issue the stop command and return immediately. - // If you need to wait for the container to be stopped, you can provide a timeout. The unit of timeout option here is second - await container.stop({ timeout: 10 }); - } + const { container, metadata } = await withContainerSetup({ + name: "postgresContainer", + task, + setup: createPostgresContainer(network), + }); + + await useContainer("postgresContainer", { container, task, use: () => use(container) }); }; const prisma = async ( - { postgresContainer }: { postgresContainer: StartedPostgreSqlContainer }, + { postgresContainer, task }: { postgresContainer: StartedPostgreSqlContainer } & TaskContext, use: Use ) => { + const testName = task.name; const url = postgresContainer.getConnectionUri(); console.log("Initializing Prisma with URL:", url); @@ -77,27 +96,26 @@ const prisma = async ( try { await use(prisma); } finally { - await prisma.$disconnect(); + await logCleanup("prisma", prisma.$disconnect(), { testName }); } }; export const postgresTest = test.extend({ network, postgresContainer, prisma }); const redisContainer = async ( - { network }: { network: StartedNetwork }, + { network, task }: { network: StartedNetwork } & TaskContext, use: Use ) => { - const { container } = await createRedisContainer({ - port: 6379, - network, + const { container, metadata } = await withContainerSetup({ + name: "redisContainer", + task, + setup: createRedisContainer({ + port: 6379, + network, + }), }); - try { - await use(container); - } finally { - // WARNING: Testcontainers by default will not wait until the container has stopped. It will simply issue the stop command and return immediately. - // If you need to wait for the container to be stopped, you can provide a timeout. 
The unit of timeout option here is second - await container.stop({ timeout: 10 }); - } + + await useContainer("redisContainer", { container, task, use: () => use(container) }); }; const redisOptions = async ( @@ -139,17 +157,17 @@ const electricOrigin = async ( { postgresContainer, network, - }: { postgresContainer: StartedPostgreSqlContainer; network: StartedNetwork }, + task, + }: { postgresContainer: StartedPostgreSqlContainer; network: StartedNetwork } & TaskContext, use: Use ) => { - const { origin, container } = await createElectricContainer(postgresContainer, network); - try { - await use(origin); - } finally { - // WARNING: Testcontainers by default will not wait until the container has stopped. It will simply issue the stop command and return immediately. - // If you need to wait for the container to be stopped, you can provide a timeout. The unit of timeout option here is second - await container.stop({ timeout: 10 }); - } + const { origin, container, metadata } = await withContainerSetup({ + name: "electricContainer", + task, + setup: createElectricContainer(postgresContainer, network), + }); + + await useContainer("electricContainer", { container, task, use: () => use(origin) }); }; export const containerTest = test.extend({ diff --git a/internal-packages/testcontainers/src/logs.ts b/internal-packages/testcontainers/src/logs.ts new file mode 100644 index 0000000000..1a844c3df9 --- /dev/null +++ b/internal-packages/testcontainers/src/logs.ts @@ -0,0 +1,101 @@ +import { env, isCI } from "std-env"; +import { TaskContext } from "vitest"; +import { DockerDiagnostics, getDockerDiagnostics } from "./docker"; +import { StartedTestContainer } from "testcontainers"; + +let setupOrder = 0; + +export function logSetup(resource: string, metadata: Record) { + const order = setupOrder++; + + if (!isCI) { + return; + } + + console.log( + JSON.stringify({ + type: "setup", + order, + resource, + timestamp: new Date().toISOString(), + ...metadata, + }) + ); +} + +export function getContainerMetadata(container: StartedTestContainer) { + return { + containerName: container.getName(), + containerId: container.getId().slice(0, 12), + containerNetworkNames: container.getNetworkNames(), + }; +} + +export function getTaskMetadata(task: TaskContext["task"]) { + return { + testName: task.name, + }; +} + +let cleanupOrder = 0; +let activeCleanups = 0; + +/** + * Logs the cleanup of a resource. + * @param resource - The resource that is being cleaned up. + * @param promise - The cleanup promise to await.. + */ +export async function logCleanup( + resource: string, + promise: Promise, + metadata: Record = {} +) { + const start = new Date(); + const order = cleanupOrder++; + const activeAtStart = ++activeCleanups; + + let error: unknown = null; + + try { + await promise; + } catch (err) { + error = err instanceof Error ? 
err.message : String(err); + } + + const end = new Date(); + const durationMs = end.getTime() - start.getTime(); + const activeAtEnd = --activeCleanups; + const parallel = activeAtStart > 1 || activeAtEnd > 0; + + if (!isCI) { + return; + } + + let dockerDiagnostics: DockerDiagnostics = {}; + + // Only run docker diagnostics if there was an error or cleanup took longer than 5s + if (error || durationMs > 5000 || env.DOCKER_DIAGNOSTICS) { + try { + dockerDiagnostics = await getDockerDiagnostics(); + } catch (diagnosticErr) { + console.error("Failed to get docker diagnostics:", diagnosticErr); + } + } + + console.log( + JSON.stringify({ + type: "cleanup", + order, + resource, + durationMs, + start: start.toISOString(), + end: end.toISOString(), + parallel, + error, + activeAtStart, + activeAtEnd, + ...metadata, + ...dockerDiagnostics, + }) + ); +} diff --git a/internal-packages/testcontainers/src/utils.ts b/internal-packages/testcontainers/src/utils.ts index b770c7fcee..dec2093539 100644 --- a/internal-packages/testcontainers/src/utils.ts +++ b/internal-packages/testcontainers/src/utils.ts @@ -1,10 +1,14 @@ import { PostgreSqlContainer, StartedPostgreSqlContainer } from "@testcontainers/postgresql"; import { RedisContainer, StartedRedisContainer } from "@testcontainers/redis"; +import { tryCatch } from "@trigger.dev/core"; import Redis from "ioredis"; import path from "path"; -import { GenericContainer, StartedNetwork, Wait } from "testcontainers"; +import { isDebug } from "std-env"; +import { GenericContainer, StartedNetwork, StartedTestContainer, Wait } from "testcontainers"; import { x } from "tinyexec"; -import { expect } from "vitest"; +import { expect, TaskContext } from "vitest"; +import { getContainerMetadata, getTaskMetadata, logCleanup } from "./logs"; +import { logSetup } from "./logs"; export async function createPostgresContainer(network: StartedNetwork) { const container = await new PostgreSqlContainer("docker.io/postgres:14") @@ -67,7 +71,12 @@ export async function createRedisContainer({ .start(); // Add a verification step - await verifyRedisConnection(startedContainer); + const [error] = await tryCatch(verifyRedisConnection(startedContainer)); + + if (error) { + await startedContainer.stop({ timeout: 30 }); + throw new Error("verifyRedisConnection error", { cause: error }); + } return { container: startedContainer, @@ -87,12 +96,28 @@ async function verifyRedisConnection(container: StartedRedisContainer) { }, }); + const containerMetadata = { + containerId: container.getId().slice(0, 12), + containerName: container.getName(), + containerNetworkNames: container.getNetworkNames(), + }; + redis.on("error", (error) => { - // swallow the error + if (isDebug) { + console.log("verifyRedisConnection: client error", error, containerMetadata); + } + + // Don't throw here, we'll do that below if the ping fails }); try { await redis.ping(); + } catch (error) { + if (isDebug) { + console.log("verifyRedisConnection: ping error", error, containerMetadata); + } + + throw new Error("verifyRedisConnection: ping error", { cause: error }); } finally { await redis.quit(); } @@ -126,3 +151,56 @@ export function assertNonNullable(value: T): asserts value is NonNullable expect(value).toBeDefined(); expect(value).not.toBeNull(); } + +export async function withContainerSetup({ + name, + task, + setup, +}: { + name: string; + task: TaskContext["task"]; + setup: Promise; +}): Promise }> { + const testName = task.name; + logSetup(`${name}: starting`, { testName }); + + const start = Date.now(); + const 
result = await setup; + const startDurationMs = Date.now() - start; + + const metadata = { + ...getTaskMetadata(task), + ...getContainerMetadata(result.container), + startDurationMs, + }; + + logSetup(`${name}: started`, metadata); + + return { ...result, metadata }; +} + +export async function useContainer( + name: string, + { + container, + task, + use, + }: { container: TContainer; task: TaskContext["task"]; use: () => Promise } +) { + const metadata = { + ...getTaskMetadata(task), + ...getContainerMetadata(container), + useDurationMs: 0, + }; + + try { + const start = Date.now(); + await use(); + const useDurationMs = Date.now() - start; + metadata.useDurationMs = useDurationMs; + } finally { + // WARNING: Testcontainers by default will not wait until the container has stopped. It will simply issue the stop command and return immediately. + // If you need to wait for the container to be stopped, you can provide a timeout. The unit of timeout option here is second + await logCleanup(name, container.stop({ timeout: 10 }), metadata); + } +} diff --git a/internal-packages/testcontainers/tsconfig.json b/internal-packages/testcontainers/tsconfig.json index e5cea6ed2d..15d4754cb6 100644 --- a/internal-packages/testcontainers/tsconfig.json +++ b/internal-packages/testcontainers/tsconfig.json @@ -13,6 +13,7 @@ "skipLibCheck": true, "noEmit": true, "strict": true, + "noUncheckedIndexedAccess": true, "paths": { "@trigger.dev/core": ["../../packages/core/src/index"], "@trigger.dev/core/*": ["../../packages/core/src/*"], diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4705459353..1a42f0a2b9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1006,17 +1006,20 @@ importers: version: 5.3.2 devDependencies: '@testcontainers/postgresql': - specifier: ^10.13.1 - version: 10.13.1 + specifier: ^10.25.0 + version: 10.25.0 '@testcontainers/redis': - specifier: ^10.13.1 - version: 10.13.1 + specifier: ^10.25.0 + version: 10.25.0 '@trigger.dev/core': specifier: workspace:* version: link:../../packages/core + std-env: + specifier: ^3.9.0 + version: 3.9.0 testcontainers: - specifier: ^10.13.1 - version: 10.13.1 + specifier: ^10.25.0 + version: 10.25.0 tinyexec: specifier: ^0.3.0 version: 0.3.0 @@ -8075,7 +8078,6 @@ packages: dependencies: '@grpc/proto-loader': 0.7.13 '@js-sdsl/ordered-map': 4.4.2 - dev: false /@grpc/grpc-js@1.8.17: resolution: {integrity: sha512-DGuSbtMFbaRsyffMf+VEkVu8HkSXEUfO3UyGJNtqxW9ABdtTIA+2UXAJpwbJS+xfQxuwqLUeELmL6FuZkOqPxw==} @@ -8093,7 +8095,6 @@ packages: long: 5.2.3 protobufjs: 7.3.2 yargs: 17.7.2 - dev: false /@grpc/proto-loader@0.7.7: resolution: {integrity: sha512-1TIeXOi8TuSCQprPItwoMymZXxWT0CPxUhkrkeCUH+D8U7QDwQ6b7SUz2MaLuWM2llT+J/TVFLmQI5KtML3BhQ==} @@ -8697,7 +8698,6 @@ packages: /@js-sdsl/ordered-map@4.4.2: resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} - dev: false /@jsep-plugin/assignment@1.3.0(jsep@1.4.0): resolution: {integrity: sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==} @@ -17796,19 +17796,21 @@ packages: zod: 3.23.8 dev: false - /@testcontainers/postgresql@10.13.1: - resolution: {integrity: sha512-HAh/3uLAzAhOmzXsOE6hVxkvetczPnX/Zoyt+SgK7QotW98Npr1MDx8OKiaLGTJ8XkIvVvS4Ch6bl+frt4pnkQ==} + /@testcontainers/postgresql@10.25.0: + resolution: {integrity: sha512-VkpqpX9YZ8aq4wfk6sJRopGTmlBdE1kErzAFWJ/1pY/XrEZ7nxdfFBG+En2icQnbv3BIFQYysEKxEFMNB+hQVw==} dependencies: - testcontainers: 10.13.1 + testcontainers: 10.25.0 transitivePeerDependencies: + - 
bare-buffer - supports-color dev: true - /@testcontainers/redis@10.13.1: - resolution: {integrity: sha512-pXg15o4oTRaEyb5xryQZUdePtoRId/+3TeU7vnUgDpqOmRacF8/7zL7jqs13uPh1uea6M7a8MDgHQM8j8kXZUg==} + /@testcontainers/redis@10.25.0: + resolution: {integrity: sha512-ALNrrnYnB59kV5c/EjiUkzn0roCtcnOu2KfHHF8xBi3vq3dYSqzADL8rL2BExeoFhyaEtlUT9P4ZecRB60O+/Q==} dependencies: - testcontainers: 10.13.1 + testcontainers: 10.25.0 transitivePeerDependencies: + - bare-buffer - supports-color dev: true @@ -20359,6 +20361,12 @@ packages: /bare-events@2.4.2: resolution: {integrity: sha512-qMKFd2qG/36aA4GwvKq8MxnPgCQAmBWmSyLWsJcbn8v03wvIPQ/hG1Ms8bPzndZxMDoHpxez5VOS+gC9Yi24/Q==} requiresBuild: true + dev: false + optional: true + + /bare-events@2.5.4: + resolution: {integrity: sha512-+gFfDkR8pj4/TrWCGUGWmJIkBwuxPS5F+a5yWjOHQt2hHvNZd5YLzadjmDUtFmMM4y429bnKLa8bYBMHcYdnQA==} + requiresBuild: true optional: true /bare-fs@2.3.5: @@ -20368,11 +20376,36 @@ packages: bare-events: 2.4.2 bare-path: 2.1.3 bare-stream: 2.3.0 + dev: false + optional: true + + /bare-fs@4.1.4: + resolution: {integrity: sha512-r8+26Voz8dGX3AYpJdFb1ZPaUSM8XOLCZvy+YGpRTmwPHIxA7Z3Jov/oMPtV7hfRQbOnH8qGlLTzQAbgtdNN0Q==} + engines: {bare: '>=1.16.0'} + requiresBuild: true + peerDependencies: + bare-buffer: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + dependencies: + bare-events: 2.5.4 + bare-path: 3.0.0 + bare-stream: 2.6.5(bare-events@2.5.4) + dev: true optional: true /bare-os@2.4.4: resolution: {integrity: sha512-z3UiI2yi1mK0sXeRdc4O1Kk8aOa/e+FNWZcTiPB/dfTWyLypuE99LibgRaQki914Jq//yAWylcAt+mknKdixRQ==} requiresBuild: true + dev: false + optional: true + + /bare-os@3.6.1: + resolution: {integrity: sha512-uaIjxokhFidJP+bmmvKSgiMzj2sV5GPHaZVAIktcxcpCyBFFWO+YlikVAdhmUo2vYFvFhOXIAlldqV29L8126g==} + engines: {bare: '>=1.14.0'} + requiresBuild: true + dev: true optional: true /bare-path@2.1.3: @@ -20380,6 +20413,15 @@ packages: requiresBuild: true dependencies: bare-os: 2.4.4 + dev: false + optional: true + + /bare-path@3.0.0: + resolution: {integrity: sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==} + requiresBuild: true + dependencies: + bare-os: 3.6.1 + dev: true optional: true /bare-stream@2.3.0: @@ -20388,6 +20430,24 @@ packages: dependencies: b4a: 1.6.6 streamx: 2.20.1 + dev: false + optional: true + + /bare-stream@2.6.5(bare-events@2.5.4): + resolution: {integrity: sha512-jSmxKJNJmHySi6hC42zlZnq00rga4jjxcgNZjY9N5WlOe/iOoGRtdwGsHzQv2RlH2KOYMwGUXhf2zXd32BA9RA==} + requiresBuild: true + peerDependencies: + bare-buffer: '*' + bare-events: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + bare-events: + optional: true + dependencies: + bare-events: 2.5.4 + streamx: 2.22.0 + dev: true optional: true /base64-js@1.5.1: @@ -22192,19 +22252,7 @@ packages: resolution: {integrity: sha512-plizRs/Vf15H+GCVxq2EUvyPK7ei9b/cVesHvjnX4xaXjM9spHe2Ytq0BitndFgvTJ3E3NljPNUEl7BAN43iZw==} engines: {node: '>= 6.0.0'} dependencies: - yaml: 2.3.1 - dev: true - - /docker-modem@3.0.8: - resolution: {integrity: sha512-f0ReSURdM3pcKPNS30mxOHSbaFLcknGmQjwSfmbcdOw1XWKXVhukM3NJHhr7NpY9BIyyWQb0EBo3KQvvuU5egQ==} - engines: {node: '>= 8.0'} - dependencies: - debug: 4.4.0(supports-color@10.0.0) - readable-stream: 3.6.0 - split-ca: 1.0.1 - ssh2: 1.16.0 - transitivePeerDependencies: - - supports-color + yaml: 2.7.1 dev: true /docker-modem@5.0.6: @@ -22217,21 +22265,24 @@ packages: ssh2: 1.16.0 transitivePeerDependencies: - supports-color - dev: false - /dockerode@3.3.5: - resolution: {integrity: 
sha512-/0YNa3ZDNeLr/tSckmD69+Gq+qVNhvKfAHNeZJBnp7EOP6RGKV8ORrJHkUn20So5wU+xxT7+1n5u8PjHbfjbSA==} + /dockerode@4.0.4: + resolution: {integrity: sha512-6GYP/EdzEY50HaOxTVTJ2p+mB5xDHTMJhS+UoGrVyS6VC+iQRh7kZ4FRpUYq6nziby7hPqWhOrFFUFTMUZJJ5w==} engines: {node: '>= 8.0'} dependencies: '@balena/dockerignore': 1.0.2 - docker-modem: 3.0.8 + '@grpc/grpc-js': 1.12.6 + '@grpc/proto-loader': 0.7.13 + docker-modem: 5.0.6 + protobufjs: 7.3.2 tar-fs: 2.0.1 + uuid: 10.0.0 transitivePeerDependencies: - supports-color - dev: true + dev: false - /dockerode@4.0.4: - resolution: {integrity: sha512-6GYP/EdzEY50HaOxTVTJ2p+mB5xDHTMJhS+UoGrVyS6VC+iQRh7kZ4FRpUYq6nziby7hPqWhOrFFUFTMUZJJ5w==} + /dockerode@4.0.6: + resolution: {integrity: sha512-FbVf3Z8fY/kALB9s+P9epCpWhfi/r0N2DgYYcYpsAUlaTxPjdsitsFobnltb+lyCgAIvf9C+4PSWlTnHlJMf1w==} engines: {node: '>= 8.0'} dependencies: '@balena/dockerignore': 1.0.2 @@ -22239,11 +22290,11 @@ packages: '@grpc/proto-loader': 0.7.13 docker-modem: 5.0.6 protobufjs: 7.3.2 - tar-fs: 2.0.1 + tar-fs: 2.1.2 uuid: 10.0.0 transitivePeerDependencies: - supports-color - dev: false + dev: true /doctrine@2.1.0: resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==} @@ -24603,6 +24654,11 @@ packages: resolution: {integrity: sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==} engines: {node: '>=8'} + /get-port@7.1.0: + resolution: {integrity: sha512-QB9NKEeDg3xxVwCCwJQ9+xycaz6pBB6iQ76wiWMl1927n0Kir6alPiP+yuiICLLU4jpMe08dXfpebuQppFA2zw==} + engines: {node: '>=16'} + dev: true + /get-proto@1.0.1: resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} engines: {node: '>= 0.4'} @@ -32570,6 +32626,10 @@ packages: /std-env@3.8.1: resolution: {integrity: sha512-vj5lIj3Mwf9D79hBkltk5qmkFI+biIKWS2IBxEyEU3AX1tUf7AoL8nSazCOiiqQsGKIq01SClsKEzweu34uwvA==} + /std-env@3.9.0: + resolution: {integrity: sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==} + dev: true + /stoppable@1.1.0: resolution: {integrity: sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==} engines: {node: '>=4', npm: '>=6'} @@ -32605,7 +32665,18 @@ packages: queue-tick: 1.0.1 text-decoder: 1.2.0 optionalDependencies: - bare-events: 2.4.2 + bare-events: 2.5.4 + + /streamx@2.22.0: + resolution: {integrity: sha512-sLh1evHOzBy/iWRiR6d1zRcLao4gGZr3C1kzNz4fopCOKJb6xD9ub8Mpi9Mr1R6id5o43S+d93fI48UC5uM9aw==} + requiresBuild: true + dependencies: + fast-fifo: 1.3.2 + text-decoder: 1.2.0 + optionalDependencies: + bare-events: 2.5.4 + dev: true + optional: true /strict-event-emitter@0.5.1: resolution: {integrity: sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==} @@ -33203,6 +33274,7 @@ packages: mkdirp-classic: 0.5.3 pump: 3.0.0 tar-stream: 2.2.0 + dev: false /tar-fs@2.1.1: resolution: {integrity: sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==} @@ -33213,6 +33285,15 @@ packages: tar-stream: 2.2.0 dev: true + /tar-fs@2.1.2: + resolution: {integrity: sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==} + dependencies: + chownr: 1.1.4 + mkdirp-classic: 0.5.3 + pump: 3.0.0 + tar-stream: 2.2.0 + dev: true + /tar-fs@3.0.6: resolution: {integrity: sha512-iokBDQQkUyeXhgPYaZxmczGPhnhXZ0CmrqI+MOb/WFGS9DW5wnfrLgtjUJBvz50vQ3qfRwJ62QVoCFu8mPVu5w==} dependencies: @@ 
-33221,6 +33302,19 @@ packages: optionalDependencies: bare-fs: 2.3.5 bare-path: 2.1.3 + dev: false + + /tar-fs@3.0.8: + resolution: {integrity: sha512-ZoROL70jptorGAlgAYiLoBLItEKw/fUxg9BSYK/dF/GAGYFJOJJJMvjPAKDJraCXFwadD456FCuvLWgfhMsPwg==} + dependencies: + pump: 3.0.0 + tar-stream: 3.1.7 + optionalDependencies: + bare-fs: 4.1.4 + bare-path: 3.0.0 + transitivePeerDependencies: + - bare-buffer + dev: true /tar-stream@2.2.0: resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==} @@ -33380,25 +33474,26 @@ packages: minimatch: 9.0.5 dev: true - /testcontainers@10.13.1: - resolution: {integrity: sha512-JBbOhxmygj/ouH/47GnoVNt+c55Telh/45IjVxEbDoswsLchVmJiuKiw/eF6lE5i7LN+/99xsrSCttI3YRtirg==} + /testcontainers@10.25.0: + resolution: {integrity: sha512-X3x6cjorEMgei1vVx3M7dnTMzWoWOTi4krpUf3C2iOvOcwsaMUHbca9J4yzpN65ieiWhcK2dA5dxpZyUonwC2Q==} dependencies: '@balena/dockerignore': 1.0.2 '@types/dockerode': 3.3.35 archiver: 7.0.1 async-lock: 1.4.1 byline: 5.0.0 - debug: 4.3.7(supports-color@10.0.0) + debug: 4.4.0(supports-color@10.0.0) docker-compose: 0.24.8 - dockerode: 3.3.5 - get-port: 5.1.1 + dockerode: 4.0.6 + get-port: 7.1.0 proper-lockfile: 4.1.2 properties-reader: 2.3.0 ssh-remote-port-forward: 1.0.4 - tar-fs: 3.0.6 + tar-fs: 3.0.8 tmp: 0.2.3 - undici: 5.28.4 + undici: 5.29.0 transitivePeerDependencies: + - bare-buffer - supports-color dev: true @@ -34341,6 +34436,14 @@ packages: engines: {node: '>=14.0'} dependencies: '@fastify/busboy': 2.0.0 + dev: false + + /undici@5.29.0: + resolution: {integrity: sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.0.0 + dev: true /unenv-nightly@1.10.0-1717606461.a117952: resolution: {integrity: sha512-u3TfBX02WzbHTpaEfWEKwDijDSFAHcgXkayUZ+MVDrjhLFvgAJzFGTSTmwlEhwWi2exyRQey23ah9wELMM6etg==} @@ -34758,7 +34861,6 @@ packages: /uuid@10.0.0: resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} hasBin: true - dev: false /uuid@3.4.0: resolution: {integrity: sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==} @@ -35274,7 +35376,7 @@ packages: magic-string: 0.30.17 pathe: 1.1.2 picocolors: 1.1.1 - std-env: 3.8.1 + std-env: 3.9.0 strip-literal: 2.1.0 tinybench: 2.9.0 tinypool: 0.8.3
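
A note on consuming the new structured logs (not part of the diff above): logSetup and logCleanup emit one JSON object per line, only in CI, with fields such as type, resource, testName, durationMs, and error. A minimal sketch of a helper that filters a CI log for failed or slow container cleanups follows; the file name, the 5s threshold, and the tsx invocation are assumptions for illustration, not anything this PR adds.

// find-slow-cleanups.ts — hypothetical helper, not included in this PR.
// Reads a CI log on stdin and prints "cleanup" entries that errored or exceeded a threshold.
import { createInterface } from "node:readline";

const SLOW_MS = 5_000; // assumption: mirrors the 5s docker-diagnostics threshold in logCleanup

const rl = createInterface({ input: process.stdin });

rl.on("line", (line) => {
  let entry: any;
  try {
    entry = JSON.parse(line);
  } catch {
    return; // not one of the structured JSON lines, skip it
  }

  if (entry?.type !== "cleanup") return;

  if (entry.error || entry.durationMs > SLOW_MS) {
    console.log(
      `${entry.resource} (${entry.testName ?? "unknown test"}): ${entry.durationMs}ms` +
        (entry.error ? ` error=${entry.error}` : "")
    );
  }
});

// Example usage (assumed): cat unit-tests.log | npx tsx find-slow-cleanups.ts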