diff --git a/.github/workflows/test-integration.yaml b/.github/workflows/test-integration.yaml index f41eed6b47..ae28c85f07 100644 --- a/.github/workflows/test-integration.yaml +++ b/.github/workflows/test-integration.yaml @@ -297,6 +297,7 @@ jobs: - TestTagsAuthKeyWithoutUserInheritsTags - TestTagsAuthKeyWithoutUserRejectsAdvertisedTags - TestTagsAuthKeyConvertToUserViaCLIRegister + - TestTailscaleRustAxum uses: ./.github/workflows/integration-test-template.yml secrets: inherit with: diff --git a/Dockerfile.tailscale-rs b/Dockerfile.tailscale-rs new file mode 100644 index 0000000000..c604092387 --- /dev/null +++ b/Dockerfile.tailscale-rs @@ -0,0 +1,26 @@ +FROM rust:1.94-bookworm AS builder + +ARG TAILSCALE_RS_REPO=https://github.com/tailscale/tailscale-rs.git +ARG TAILSCALE_RS_REF=main + +WORKDIR /app +RUN git clone --depth 1 --branch "$TAILSCALE_RS_REF" "$TAILSCALE_RS_REPO" . + +# Re-export ts_control's insecure-keyfetch feature through the tailscale +# crate so the axum example can fetch the headscale control key over +# plain HTTP. The integration harness serves the control plane without +# TLS, and upstream only allows plain-HTTP key fetches when this Cargo +# feature is compiled in. 
+RUN sed -i '/^axum = \["dep:axum"\]/a insecure-keyfetch = ["ts_control/insecure-keyfetch"]' Cargo.toml + +RUN cargo build --release --features axum,insecure-keyfetch --example axum + +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + ca-certificates \ + iproute2 \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /app/target/release/examples/axum /usr/local/bin/axum diff --git a/integration/tsric/tsric.go b/integration/tsric/tsric.go new file mode 100644 index 0000000000..b7f19c7246 --- /dev/null +++ b/integration/tsric/tsric.go @@ -0,0 +1,334 @@ +// Package tsric provides a TailscaleRustInContainer (tsric) implementation +// that runs the tailscale-rs axum example inside a Docker container for +// integration testing with headscale. +// +// Unlike tsic (which runs the official Tailscale client), tsric runs a Rust +// implementation of a Tailscale node. It does not have the `tailscale` CLI, +// so verification is done externally via headscale API and peer connectivity. +package tsric + +import ( + "errors" + "fmt" + "io" + "log" + "os" + "strings" + + "github.com/juanfont/headscale/hscontrol/util" + "github.com/juanfont/headscale/integration/dockertestutil" + "github.com/juanfont/headscale/integration/integrationutil" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" +) + +const ( + tsricHashLength = 6 + caCertRoot = "/usr/local/share/ca-certificates" + + dockerfileName = "Dockerfile.tailscale-rs" + dockerContextPath = "../." + + buildArgRepo = "TAILSCALE_RS_REPO" + buildArgRef = "TAILSCALE_RS_REF" +) + +// getPrebuiltImage returns the pre-built tailscale-rs Docker image name if set. +func getPrebuiltImage() string { + return os.Getenv("HEADSCALE_INTEGRATION_TAILSCALE_RS_IMAGE") +} + +// TailscaleRustInContainer runs the tailscale-rs axum example as an +// integration test peer. 
+type TailscaleRustInContainer struct { + hostname string + + pool *dockertest.Pool + container *dockertest.Resource + network *dockertest.Network + + caCerts [][]byte + headscaleURL string + authKey string + extraHosts []string + repo string + ref string +} + +// Option represents optional settings for a TailscaleRustInContainer instance. +type Option = func(c *TailscaleRustInContainer) + +// WithCACert adds a CA certificate to the trusted certificates of the container. +func WithCACert(cert []byte) Option { + return func(t *TailscaleRustInContainer) { + t.caCerts = append(t.caCerts, cert) + } +} + +// WithNetwork sets the Docker container network. +func WithNetwork(network *dockertest.Network) Option { + return func(t *TailscaleRustInContainer) { + t.network = network + } +} + +// WithHeadscaleURL sets the headscale control server URL. +func WithHeadscaleURL(url string) Option { + return func(t *TailscaleRustInContainer) { + t.headscaleURL = url + } +} + +// WithAuthKey sets the pre-authentication key for joining the tailnet. +func WithAuthKey(key string) Option { + return func(t *TailscaleRustInContainer) { + t.authKey = key + } +} + +// WithExtraHosts adds extra /etc/hosts entries to the container. +func WithExtraHosts(hosts []string) Option { + return func(t *TailscaleRustInContainer) { + t.extraHosts = append(t.extraHosts, hosts...) + } +} + +// WithRepo overrides the tailscale-rs git repository URL used by the +// Dockerfile. Defaults to the public github.com/tailscale/tailscale-rs. +func WithRepo(url string) Option { + return func(t *TailscaleRustInContainer) { + t.repo = url + } +} + +// WithRef overrides the tailscale-rs git ref (branch, tag, commit) used +// by the Dockerfile. Defaults to "main". +func WithRef(ref string) Option { + return func(t *TailscaleRustInContainer) { + t.ref = ref + } +} + +// buildEntrypoint constructs the container entrypoint command. 
+// +// The axum example reads the control URL from TS_CONTROL_URL, the +// hostname from -H, and the auth key from -k. The key file (-c) is +// created on first run. +func (t *TailscaleRustInContainer) buildEntrypoint() []string { + var commands []string + + commands = append(commands, + "while ! ip route show default >/dev/null 2>&1; do sleep 0.1; done") + + // CA certs are written by New after the container starts, so the + // entrypoint races with that write. Block until the first cert lands. + if len(t.caCerts) > 0 { + commands = append(commands, + fmt.Sprintf("while [ ! -f %s/user-0.crt ]; do sleep 0.1; done", caCertRoot)) + } + + commands = append(commands, "update-ca-certificates 2>/dev/null || true") + + commands = append(commands, + fmt.Sprintf(`export TS_CONTROL_URL=%q`, t.headscaleURL), + // The tailscale crate refuses to run without this env gate; + // see lib.rs in tailscale-rs. + "export TS_RS_EXPERIMENT=this_is_unstable_software", + ) + + axumCmd := "/usr/local/bin/axum -c /tmp/tsrs-keys.json -H " + t.hostname + if t.authKey != "" { + axumCmd += " -k " + t.authKey + } + + commands = append(commands, "exec "+axumCmd) + + return []string{"/bin/sh", "-c", strings.Join(commands, " ; ")} +} + +// New creates and starts a new TailscaleRustInContainer instance. 
+func New(
+	pool *dockertest.Pool,
+	opts ...Option,
+) (*TailscaleRustInContainer, error) {
+	hash, err := util.GenerateRandomStringDNSSafe(tsricHashLength)
+	if err != nil {
+		return nil, err
+	}
+
+	runID := dockertestutil.GetIntegrationRunID()
+
+	var hostname string
+
+	if runID != "" {
+		runIDShort := runID[max(0, len(runID)-6):]
+		hostname = fmt.Sprintf("tsrs-%s-%s", runIDShort, hash)
+	} else {
+		hostname = "tsrs-" + hash
+	}
+
+	t := &TailscaleRustInContainer{
+		hostname: hostname,
+		pool:     pool,
+	}
+
+	for _, opt := range opts {
+		opt(t)
+	}
+
+	if t.network == nil {
+		return nil, errors.New("tsric: no network set") //nolint:err113
+	}
+
+	if t.headscaleURL == "" {
+		return nil, errors.New("tsric: no headscale URL set") //nolint:err113
+	}
+
+	if t.authKey == "" {
+		return nil, errors.New("tsric: no auth key set") //nolint:err113
+	}
+
+	entrypoint := t.buildEntrypoint()
+
+	runOptions := &dockertest.RunOptions{
+		Name:       hostname,
+		Networks:   []*dockertest.Network{t.network},
+		Entrypoint: entrypoint,
+		ExtraHosts: append(t.extraHosts, "host.docker.internal:host-gateway"),
+		Env:        []string{},
+	}
+
+	dockertestutil.DockerAddIntegrationLabels(runOptions, "tailscale-rs")
+
+	err = pool.RemoveContainerByName(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	var container *dockertest.Resource
+
+	if prebuiltImage := getPrebuiltImage(); prebuiltImage != "" {
+		log.Printf("Using pre-built tailscale-rs image: %s", prebuiltImage)
+
+		repo, tag, ok := strings.Cut(prebuiltImage, ":")
+		if !ok {
+			return nil, fmt.Errorf("tsric: invalid image format %q, expected repository:tag", prebuiltImage) //nolint:err113
+		}
+
+		runOptions.Repository = repo
+		runOptions.Tag = tag
+
+		container, err = pool.RunWithOptions(
+			runOptions,
+			dockertestutil.DockerRestartPolicy,
+			dockertestutil.DockerAllowLocalIPv6,
+			dockertestutil.DockerMemoryLimit,
+		)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"tsric: could not start pre-built tailscale-rs container %s: %w",
+				hostname, err,
+			)
+		}
+ } else { + // Build from the Dockerfile so callers don't need a local + // tailscale-rs checkout; the Dockerfile clones at build time. + var buildArgs []docker.BuildArg + + if t.repo != "" { + buildArgs = append(buildArgs, docker.BuildArg{Name: buildArgRepo, Value: t.repo}) + } + + if t.ref != "" { + buildArgs = append(buildArgs, docker.BuildArg{Name: buildArgRef, Value: t.ref}) + } + + buildOptions := &dockertest.BuildOptions{ + Dockerfile: dockerfileName, + ContextDir: dockerContextPath, + BuildArgs: buildArgs, + } + + log.Printf("Building tailscale-rs container %s from upstream (this may take a while for the first build)...", hostname) + + container, err = pool.BuildAndRunWithBuildOptions( + buildOptions, + runOptions, + dockertestutil.DockerRestartPolicy, + dockertestutil.DockerAllowLocalIPv6, + dockertestutil.DockerMemoryLimit, + ) + if err != nil { + return nil, fmt.Errorf( + "tsric: could not build and start tailscale-rs container %s: %w", + hostname, err, + ) + } + } + + log.Printf("Created tailscale-rs container %s", hostname) + + t.container = container + + for i, cert := range t.caCerts { + err = t.WriteFile(fmt.Sprintf("%s/user-%d.crt", caCertRoot, i), cert) + if err != nil { + return nil, fmt.Errorf("writing TLS certificate to container: %w", err) + } + } + + return t, nil +} + +// Hostname returns the hostname of the TailscaleRustInContainer instance. +func (t *TailscaleRustInContainer) Hostname() string { + return t.hostname +} + +// ContainerID returns the Docker container ID. +func (t *TailscaleRustInContainer) ContainerID() string { + return t.container.Container.ID +} + +// Shutdown stops and cleans up the container. 
+func (t *TailscaleRustInContainer) Shutdown() (string, string, error) {
+	stdoutPath, stderrPath, err := t.SaveLog("/tmp/control")
+	if err != nil {
+		log.Printf(
+			"saving log from %s: %s",
+			t.hostname,
+			err,
+		)
+	}
+
+	return stdoutPath, stderrPath, t.pool.Purge(t.container)
+}
+
+// SaveLog saves the current container logs to the given path.
+func (t *TailscaleRustInContainer) SaveLog(path string) (string, string, error) {
+	return dockertestutil.SaveLog(t.pool, t.container, path)
+}
+
+// WriteLogs writes the current stdout/stderr log of the container to
+// the given io.Writers.
+func (t *TailscaleRustInContainer) WriteLogs(stdout, stderr io.Writer) error {
+	return dockertestutil.WriteLog(t.pool, t.container, stdout, stderr)
+}
+
+// Execute runs a command inside the container.
+func (t *TailscaleRustInContainer) Execute(
+	command []string,
+	options ...dockertestutil.ExecuteCommandOption,
+) (string, string, error) {
+	return dockertestutil.ExecuteCommand(
+		t.container,
+		command,
+		[]string{},
+		options...,
+	)
+}
+
+// WriteFile writes a file into the container.
+func (t *TailscaleRustInContainer) WriteFile(path string, data []byte) error {
+	return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
+}
diff --git a/integration/tsric_test.go b/integration/tsric_test.go
new file mode 100644
index 0000000000..525106155e
--- /dev/null
+++ b/integration/tsric_test.go
@@ -0,0 +1,271 @@
+package integration
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/juanfont/headscale/integration/hsic"
+	"github.com/juanfont/headscale/integration/tsic"
+	"github.com/juanfont/headscale/integration/tsric"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestTailscaleRustAxum tests that the tailscale-rs axum example can join a
+// headscale network and serve HTTP to other peers on the tailnet.
+// +// Architecture: +// +// headscale (control) <--- tsic (probe client) --curl--> tsric (axum server) +// +// The test: +// 1. Creates a headscale environment with one regular Tailscale client (tsic) +// 2. Creates a tailscale-rs container running the axum example (tsric) +// 3. Verifies the tsric node registers with headscale +// 4. Uses the tsic client to curl the axum web server through the tailnet +func TestTailscaleRustAxum(t *testing.T) { + IntegrationSkip(t) + + // Set up a scenario with one user and one regular Tailscale client. + // The regular client acts as a "probe" to verify the tsric node + // is reachable on the tailnet. + spec := ScenarioSpec{ + NodesPerUser: 1, + Users: []string{"user1"}, //nolint:goconst // consistent with other integration tests + } + + scenario, err := NewScenario(spec) + + require.NoError(t, err) + defer scenario.ShutdownAssertNoPanics(t) + + err = scenario.CreateHeadscaleEnv( + []tsic.Option{}, + hsic.WithTestName("tailscalers"), + // The embedded DERP server uses a self-signed cert that + // tailscale-rs cannot validate without a custom CA bundle, so + // we route DERP through Tailscale's public relays. + hsic.WithPublicDERP(), + // TODO: drop WithoutTLS once tailscale-rs lets us inject the + // headscale CA into its trust chain; until then the control + // plane has to be plain HTTP for the Rust client to register. 
+ hsic.WithoutTLS(), + ) + requireNoErrHeadscaleEnv(t, err) + + // Get the headscale instance and probe client + headscale, err := scenario.Headscale() + require.NoError(t, err) + + allClients, err := scenario.ListTailscaleClients() + requireNoErrListClients(t, err) + require.Len(t, allClients, 1, "expected exactly 1 probe client") + + probeClient := allClients[0] + + // Create auth key for the tailscale-rs node + users, err := headscale.ListUsers() + require.NoError(t, err) + require.NotEmpty(t, users, "expected at least one user") + + var userID uint64 + + for _, u := range users { + if u.GetName() == "user1" { //nolint:goconst + userID = u.GetId() + + break + } + } + + require.NotZero(t, userID, "user1 not found") + + pak, err := headscale.CreateAuthKey(userID, false, true) + require.NoError(t, err) + + // Determine the network and headscale connection details + networks := scenario.Networks() + require.NotEmpty(t, networks) + + network := networks[0] + headscaleIP := headscale.GetIPInNetwork(network) + headscaleHostname := headscale.GetHostname() + headscaleEndpoint := headscale.GetEndpoint() + + t.Logf("Headscale endpoint: %s (hostname: %s, IP: %s)", + headscaleEndpoint, headscaleHostname, headscaleIP) + + // Create the tailscale-rs container + tsrsOpts := []tsric.Option{ + tsric.WithNetwork(network), + tsric.WithHeadscaleURL(headscaleEndpoint), + tsric.WithAuthKey(pak.GetKey()), + tsric.WithExtraHosts([]string{headscaleHostname + ":" + headscaleIP}), + } + + cert := headscale.GetCert() + if len(cert) > 0 { + tsrsOpts = append(tsrsOpts, tsric.WithCACert(cert)) + } + + t.Log("Creating tailscale-rs container (first build may take several minutes)...") + + tsrs, err := tsric.New(scenario.Pool(), tsrsOpts...) 
+ require.NoError(t, err, "failed to create tailscale-rs container") + + defer func() { + _, _, err := tsrs.Shutdown() + if err != nil { + t.Logf("error shutting down tailscale-rs container: %s", err) + } + }() + + // Wait for the tailscale-rs node to appear in headscale's node list. + // Verify it gets both IPv4 and IPv6 addresses and has the expected hostname. + var ( + rustNodeIPv4 string + rustNodeIPv6 string + rustNodeName string + ) + + t.Log("Waiting for tailscale-rs node to register with headscale...") + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + nodes, err := headscale.ListNodes() + assert.NoError(c, err) + + // Expect 2 nodes: 1 tsic probe + 1 tsric + assert.GreaterOrEqual(c, len(nodes), 2, + "expected at least 2 nodes (1 probe + 1 tailscale-rs)") + + // Find the tailscale-rs node by hostname prefix + for _, n := range nodes { + if strings.HasPrefix(n.GetGivenName(), "tsrs-") { + addrs := n.GetIpAddresses() + if len(addrs) > 0 { + rustNodeIPv4 = addrs[0] + } + + if len(addrs) > 1 { + rustNodeIPv6 = addrs[1] + } + + rustNodeName = n.GetGivenName() + } + } + + assert.NotEmpty(c, rustNodeIPv4, "tailscale-rs node should have an IPv4 address") + }, 120*time.Second, 2*time.Second, "tailscale-rs node should register with headscale") + + require.NotEmpty(t, rustNodeIPv4, "failed to find tailscale-rs node IP") + + t.Logf("tailscale-rs node %q registered with IPv4=%s IPv6=%s", + rustNodeName, rustNodeIPv4, rustNodeIPv6) + + // Verify IPv6 was allocated. The axum example only listens on IPv4, + // so we can't curl via IPv6, but headscale should still assign both. 
+ assert.NotEmpty(t, rustNodeIPv6, + "headscale should assign both IPv4 and IPv6 to the tailscale-rs node") + + // Verify the hostname propagated correctly from the config + assert.True(t, strings.HasPrefix(rustNodeName, "tsrs-"), + "tailscale-rs node name should start with tsrs- prefix") + + // Verify the probe client sees the tailscale-rs node as a peer + t.Log("Verifying probe client sees tailscale-rs as a peer...") + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + status, err := probeClient.Status() + assert.NoError(c, err) + + found := false + + for _, peerKey := range status.Peers() { + peer := status.Peer[peerKey] + if strings.HasPrefix(peer.HostName, "tsrs-") { + found = true + } + } + + assert.True(c, found, "probe client should see tsrs node as a peer") + }, 30*time.Second, 2*time.Second, "probe should see tailscale-rs peer in status") + + // Test 1: GET /index.html — verify the axum web server serves content + axumURL := fmt.Sprintf("http://%s/index.html", rustNodeIPv4) + + t.Logf("Verifying axum web server is reachable at %s via probe client...", axumURL) + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + result, err := probeClient.Curl(axumURL) + assert.NoError(c, err, "curl to axum server failed") + assert.Contains(c, result, "tailscale-rs", + "expected index.html to contain 'tailscale-rs'") + }, 120*time.Second, 2*time.Second, "axum /index.html should be reachable from probe client") + + t.Log("axum web server is serving content through the tailnet") + + // Test 2: GET /assets/index.css — verify static asset serving works + cssURL := fmt.Sprintf("http://%s/assets/index.css", rustNodeIPv4) + + t.Logf("Verifying static asset at %s...", cssURL) + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + result, err := probeClient.Curl(cssURL) + assert.NoError(c, err, "curl to CSS asset failed") + assert.Contains(c, result, "font-family", + "expected CSS file to contain 'font-family'") + }, 10*time.Second, 1*time.Second, "axum should 
serve static CSS assets") + + // Test 3: Sequential POST /count — verify the counter increments correctly. + // This exercises multiple TCP connections and proves the netstack maintains + // state across requests. + countURL := fmt.Sprintf("http://%s/count", rustNodeIPv4) + + t.Logf("Verifying /count POST endpoint increments at %s...", countURL) + + // First POST establishes connectivity and gets the initial counter value + assert.EventuallyWithT(t, func(c *assert.CollectT) { + stdout, _, err := probeClient.Execute([]string{ + "curl", "--silent", + "--connect-timeout", "3", + "--max-time", "5", + "-X", "POST", + countURL, + }) + assert.NoError(c, err, "curl POST to /count failed") + assert.Contains(c, stdout, `"count"`, + "expected /count response to contain 'count'") + }, 30*time.Second, 2*time.Second, "axum /count POST should work") + + // Fire several more POSTs and verify the counter advances. + // The axum handler returns {"count": N} where N is the pre-increment value. + // After the initial EventuallyWithT loop we don't know the exact counter, + // but two back-to-back POSTs should return consecutive values. + t.Log("Verifying counter increments across multiple requests...") + + var firstCount, secondCount string + + stdout1, _, err := probeClient.Execute([]string{ + "curl", "--silent", "--max-time", "5", "-X", "POST", countURL, + }) + require.NoError(t, err, "first sequential POST failed") + + firstCount = stdout1 + + stdout2, _, err := probeClient.Execute([]string{ + "curl", "--silent", "--max-time", "5", "-X", "POST", countURL, + }) + require.NoError(t, err, "second sequential POST failed") + + secondCount = stdout2 + + t.Logf("Counter responses: first=%s second=%s", firstCount, secondCount) + + // Verify they're different (counter is incrementing) + require.NotEqual(t, firstCount, secondCount, + "counter should increment between sequential POST requests") + + t.Log("TestTailscaleRustAxum: all checks passed") +}