diff --git a/Makefile b/Makefile
index 5371f42..6c68e65 100644
--- a/Makefile
+++ b/Makefile
@@ -1,19 +1,20 @@
 # Copyright 2025 Christopher O'Connell
 # All rights reserved
 
-.PHONY: build install docker signing-image clean test help release release-snapshot license-check
+.PHONY: build install install-completion docker signing-image clean test help release release-snapshot license-check
 
 # Default target
 help:
 	@echo "MCL Build Targets:"
-	@echo "  make build         - Build the maestro binary"
-	@echo "  make docker        - Build the Docker image locally"
-	@echo "  make signing-image - Build the code signing Docker image"
-	@echo "  make install       - Install maestro to /usr/local/bin (requires sudo)"
-	@echo "  make test          - Run tests"
-	@echo "  make clean         - Remove built binaries"
-	@echo "  make all           - Build everything (binary + docker)"
-	@echo "  make license-check - Check/add Apache 2.0 headers to source files"
+	@echo "  make build              - Build the maestro binary"
+	@echo "  make docker             - Build the Docker image locally"
+	@echo "  make signing-image      - Build the code signing Docker image"
+	@echo "  make install            - Install maestro to /usr/local/bin (requires sudo)"
+	@echo "  make install-completion - Install shell completion for current shell"
+	@echo "  make test               - Run tests"
+	@echo "  make clean              - Remove built binaries"
+	@echo "  make all                - Build everything (binary + docker)"
+	@echo "  make license-check      - Check/add Apache 2.0 headers to source files"
 	@echo ""
 	@echo "Release Targets:"
 	@echo "  make release-preflight - Check release prerequisites"
@@ -55,6 +56,43 @@ signing-image:
 # Install to system PATH (run 'make build' first, then 'sudo make install')
 install:
 	cp bin/maestro /usr/local/bin/
+	@echo ""
+	@echo "Run 'make install-completion' to enable shell autocompletion"
+
+# Install shell completion for current shell
+install-completion:
+	@if [ ! -f bin/maestro ]; then \
+		echo "Error: bin/maestro not found. Run 'make build' first."; \
+		exit 1; \
+	fi
+	@SHELL_NAME=$$(basename "$$SHELL"); \
+	case "$$SHELL_NAME" in \
+		bash) \
+			echo "Installing bash completion..."; \
+			mkdir -p ~/.local/share/bash-completion/completions; \
+			bin/maestro completion bash > ~/.local/share/bash-completion/completions/maestro; \
+			echo "Installed to ~/.local/share/bash-completion/completions/maestro"; \
+			echo "Run 'source ~/.local/share/bash-completion/completions/maestro' or restart your shell"; \
+			;; \
+		zsh) \
+			echo "Installing zsh completion..."; \
+			mkdir -p ~/.zsh/completions; \
+			bin/maestro completion zsh > ~/.zsh/completions/_maestro; \
+			echo "Installed to ~/.zsh/completions/_maestro"; \
+			echo "Add 'fpath=(~/.zsh/completions \$$fpath)' to ~/.zshrc if not already present"; \
+			echo "Then run 'autoload -U compinit && compinit' or restart your shell"; \
+			;; \
+		fish) \
+			echo "Installing fish completion..."; \
+			mkdir -p ~/.config/fish/completions; \
+			bin/maestro completion fish > ~/.config/fish/completions/maestro.fish; \
+			echo "Installed to ~/.config/fish/completions/maestro.fish"; \
+			;; \
+		*) \
+			echo "Unknown shell: $$SHELL_NAME"; \
+			echo "Run 'maestro completion --help' for manual installation instructions"; \
+			;; \
+	esac
 
 # Run tests
 test:
diff --git a/README.md b/README.md
index c410835..01976f0 100644
--- a/README.md
+++ b/README.md
@@ -58,7 +58,7 @@ This stores credentials in `~/.maestro/` and shares them (read-only) with all co
 
 ### 2. Configure (Optional)
 
-Edit `~/.maestro/config.yml` to add additional folders and network domains:
+Edit `~/.maestro/config.yml` to customize your setup:
 
 ```yaml
 firewall:
@@ -66,15 +66,130 @@ firewall:
     - github.com
     - api.anthropic.com
     # Add your domains here
+  # For corporate networks with internal DNS (Zscaler, VPN, etc.)
+  internal_dns: "10.0.0.1"
+  internal_domains:
+    - "internal.company.com"
 
 sync:
   additional_folders:
     - ~/Documents/Code/mcp-servers
     - ~/Documents/Code/helpers
+  # Compression: gzip the tar stream when copying files to containers
+  #   - true (default): smaller transfer, good for remote Docker or slow I/O
+  #   - false: faster for large local projects (8GB+), skips compression overhead
+  compress: false
+
+# Git user for commits inside containers
+git:
+  user_name: "Your Name"
+  user_email: "you@example.com"
+
+# SSH agent forwarding for git authentication (keys stay on host)
+ssh:
+  enabled: true
+  known_hosts_path: "~/.ssh/known_hosts"  # mount host's known_hosts to avoid prompts
+
+# GitHub CLI integration (for PRs, issues, etc.)
+github:
+  enabled: true
+  hostname: "github.mycompany.com"  # For GitHub Enterprise (omit for github.com)
+
+# AWS Bedrock support (alternative to Anthropic API)
+aws:
+  enabled: true
+  profile: "your-aws-profile"
+  region: "us-east-1"
+
+bedrock:
+  enabled: true
+  model: "anthropic.claude-sonnet-4-20250514-v1:0"
+
+# SSL certificates for corporate HTTPS inspection
+ssl:
+  certificates_path: "~/.maestro/certificates"
+
+# Android SDK for mobile development
+android:
+  sdk_path: "~/Android/Sdk"
+
+# Container defaults
+containers:
+  default_return_to_tui: true  # Auto-check "Return to TUI" when creating containers
+
+# Daemon and notification settings
+daemon:
+  check_interval: "10s"  # How often to check containers (default: 30m)
+  notifications:
+    enabled: true
+    attention_threshold: "5s"  # Notify after this duration of waiting
+    notify_on:
+      - attention_needed  # When Claude waits for input
+      - token_expiring    # When auth token is expiring
 ```
 
 You can also set firewall rules from the text UI using the `f` shortcut.
 
+#### AWS Bedrock Setup
+
+To use Claude via AWS Bedrock instead of the Anthropic API:
+
+1. Configure your AWS profile with Bedrock access
+2. Enable bedrock in config (see above)
+3. Run `maestro auth` to set up AWS SSO login
+4. Containers will automatically use Bedrock for Claude
+
+#### Corporate Network / VPN Setup
+
+If you're behind a corporate proxy (Zscaler, etc.) or need to access internal resources:
+
+1. Set `firewall.internal_dns` to your internal DNS server
+2. Add internal domains to `firewall.internal_domains`
+3. Host SSL certificates are automatically mounted for HTTPS inspection
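+
+For reference, with the `internal_dns` and `internal_domains` values from the example config above, the firewall script inside each container generates dnsmasq rules along these lines (an exact-domain entry plus a wildcard for subdomains, both resolved through the internal DNS server):
+
+```text
+ipset=/internal.company.com/allowed-domains
+server=/internal.company.com/10.0.0.1
+ipset=/.internal.company.com/allowed-domains
+server=/.internal.company.com/10.0.0.1
+```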
+
+#### SSL Certificates
+
+For corporate environments with HTTPS inspection (Zscaler, etc.), place your CA certificates in the configured path:
+
+1. Create the certificates directory: `mkdir -p ~/.maestro/certificates`
+2. Copy your corporate CA certificates (`.crt`, `.pem` files) to this directory
+3. Certificates are automatically imported into both the system trust store and Java keystore inside containers
+
+#### Android SDK
+
+For Android/mobile development, mount your host Android SDK into containers:
+
+1. Set `android.sdk_path` to your SDK location (e.g., `~/Android/Sdk`)
+2. The SDK will be mounted read-only at `/home/node/Android/Sdk` inside containers
+3. The `ANDROID_HOME` environment variable is set automatically and the SDK's platform tools are added to the container's `PATH`
+
+#### Project-Level Exclusions (.maestroignore)
+
+Create a `.maestroignore` file in your project root to exclude files/directories when copying to containers. This is useful for large projects with build artifacts that shouldn't be transferred.
+
+```bash
+# .maestroignore - exclude patterns (like .gitignore)
+# Comments start with #
+
+# Android/Gradle build artifacts
+build
+.gradle
+.idea
+.cxx
+.kotlin
+
+# Other common exclusions
+dist
+target
+__pycache__
+*.log
+```
+
+**Notes:**
+- `node_modules` and `.git` are always excluded by default
+- Each line is passed to `tar --exclude=`
+- Empty lines and lines starting with `#` are ignored
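+
+Under the hood, each pattern becomes an extra `--exclude` flag on the `tar` stream that maestro pipes into the container, so with the example `.maestroignore` above the copy step is roughly equivalent to (`<container>` stands for the target container name):
+
+```bash
+tar -czf - --exclude=node_modules --exclude=.git \
+    --exclude=build --exclude=.gradle --exclude=.idea --exclude=.cxx --exclude=.kotlin \
+    --exclude=dist --exclude=target --exclude=__pycache__ --exclude='*.log' . \
+  | docker exec -i <container> tar -xzf - -C /workspace
+```
+
+(With `sync.compress: false` the same command runs without gzip, i.e. `tar -cf -` / `tar -xf -`.)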
+
 ### 3. Create Your First Container
 
 ```bash
@@ -123,22 +238,27 @@ When connected via `maestro connect`:
 
 _Note: Not tested on Windows._
 
-Start the daemon to monitor containers and get desktop notifications:
+The daemon monitors containers and sends desktop notifications when Claude needs your attention. It **auto-starts** when you launch the TUI (`maestro`), but you can also manage it manually:
 
 ```bash
-maestro daemon start
-
-# Check status
-maestro daemon status
-
-# View logs
-maestro daemon logs
+maestro daemon start    # Start manually
+maestro daemon stop     # Stop the daemon
+maestro daemon status   # Check status
+maestro daemon logs     # View logs
 ```
 
 The daemon monitors:
-- Token expiration (warns when < 1 hour remaining)
-- Container attention needs (bell indicators)
-- Automatic health checks every 30 minutes
+- **Attention needs** - Notifies when Claude is waiting for input (configurable delay)
+- **Token expiration** - Warns when auth token is expiring soon
+- **Container health** - Periodic checks based on `check_interval`
+
+Configure notification speed in `~/.maestro/config.yml`:
+```yaml
+daemon:
+  check_interval: "10s"  # Check every 10 seconds (default: 30m)
+  notifications:
+    attention_threshold: "5s"  # Notify after 5s of waiting
+```
 
 ## Container Status
 
diff --git a/assets/init-firewall.sh b/assets/init-firewall.sh
index 110621e..3d15a30 100644
--- a/assets/init-firewall.sh
+++ b/assets/init-firewall.sh
@@ -115,6 +115,40 @@ echo "server=/.githubusercontent.com/8.8.8.8" >> "$DNSMASQ_CONF"
 echo "ipset=/.anthropic.com/allowed-domains" >> "$DNSMASQ_CONF"
 echo "server=/.anthropic.com/8.8.8.8" >> "$DNSMASQ_CONF"
 
+# Add wildcard entries for AWS (only if AWS/Bedrock is enabled)
+# This is controlled by /etc/aws-enabled.txt which is written by maestro when aws.enabled or bedrock.enabled is true
+AWS_ENABLED_FILE="/etc/aws-enabled.txt"
+if [ -f "$AWS_ENABLED_FILE" ]; then
+    echo "AWS/Bedrock enabled - adding AWS domain rules"
+    echo "ipset=/.amazonaws.com/allowed-domains" >> "$DNSMASQ_CONF"
+    echo "server=/.amazonaws.com/8.8.8.8" >> "$DNSMASQ_CONF"
+    echo "ipset=/.awsapps.com/allowed-domains" >> "$DNSMASQ_CONF"
+    echo "server=/.awsapps.com/8.8.8.8" >> "$DNSMASQ_CONF"
+else
+    echo "AWS/Bedrock not enabled - skipping AWS domain rules"
+fi
+
+# Configure internal DNS for corporate networks (Zscaler, VPN, etc.)
+INTERNAL_DNS_FILE="/etc/internal-dns.txt"
+INTERNAL_DOMAINS_FILE="/etc/internal-domains.txt"
+if [ -f "$INTERNAL_DNS_FILE" ] && [ -f "$INTERNAL_DOMAINS_FILE" ]; then
+    INTERNAL_DNS=$(cat "$INTERNAL_DNS_FILE")
+    if [ -n "$INTERNAL_DNS" ]; then
+        echo "Configuring internal DNS server: $INTERNAL_DNS"
+        while read -r domain; do
+            [ -z "$domain" ] && continue
+            echo " Routing $domain via internal DNS"
+            echo "ipset=/$domain/allowed-domains" >> "$DNSMASQ_CONF"
+            echo "server=/$domain/$INTERNAL_DNS" >> "$DNSMASQ_CONF"
+            # Also add wildcard for subdomains
+            echo "ipset=/.$domain/allowed-domains" >> "$DNSMASQ_CONF"
+            echo "server=/.$domain/$INTERNAL_DNS" >> "$DNSMASQ_CONF"
+        done < "$INTERNAL_DOMAINS_FILE"
+    fi
+elif [ -f "$INTERNAL_DNS_FILE" ]; then
+    echo "Warning: Internal DNS configured but no internal domains specified"
+fi
+
 # Start dnsmasq
 echo "Starting dnsmasq..."
 dnsmasq --conf-file="$DNSMASQ_CONF"
@@ -125,7 +159,7 @@ echo "nameserver 127.0.0.1" | tee /etc/resolv.conf > /dev/null
 # Process GitHub API ranges and add them directly to ipset
 # We do this because GitHub has many IPs and we want to ensure we catch them all
 echo "Fetching GitHub IP ranges..."
-gh_ranges=$(curl -s https://api.github.com/meta)
+gh_ranges=$(curl -s --connect-timeout 5 --max-time 10 https://api.github.com/meta)
 if [ -z "$gh_ranges" ]; then
     echo "WARNING: Failed to fetch GitHub IP ranges - GitHub access may be limited"
 else
@@ -193,24 +227,36 @@ iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited
 
 echo "Firewall configuration complete"
 
-# Verify firewall rules
+# Verify firewall rules (run all checks in parallel)
 echo "Verifying firewall rules..."
-if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then
+
+# Create temp files for results
+VERIFY_DIR=$(mktemp -d)
+trap "rm -rf $VERIFY_DIR" EXIT
+
+# Run all verification tests in parallel
+(curl --connect-timeout 3 --max-time 5 https://example.com >/dev/null 2>&1 && echo "fail" || echo "pass") > "$VERIFY_DIR/block" &
+(curl --connect-timeout 3 --max-time 5 https://api.github.com/zen >/dev/null 2>&1 && echo "pass" || echo "fail") > "$VERIFY_DIR/github" &
+(curl --connect-timeout 3 --max-time 5 https://api.anthropic.com >/dev/null 2>&1 && echo "pass" || echo "fail") > "$VERIFY_DIR/anthropic" &
+
+# Wait for all background jobs
+wait
+
+# Check results
+if [ "$(cat "$VERIFY_DIR/block")" = "fail" ]; then
     echo "ERROR: Firewall verification failed - was able to reach https://example.com"
     exit 1
 else
     echo "✓ Firewall blocking works - unable to reach https://example.com"
 fi
 
-# Verify allowed access
-echo "Testing DNS resolution and access to whitelisted domains..."
-if ! curl --connect-timeout 10 https://api.github.com/zen >/dev/null 2>&1; then
+if [ "$(cat "$VERIFY_DIR/github")" = "fail" ]; then
     echo "WARNING: Unable to reach https://api.github.com - GitHub access may be limited"
 else
     echo "✓ GitHub API access works"
 fi
 
-if !
curl --connect-timeout 10 https://api.anthropic.com >/dev/null 2>&1; then +if [ "$(cat "$VERIFY_DIR/anthropic")" = "fail" ]; then echo "WARNING: Unable to reach https://api.anthropic.com - Anthropic access may be limited" else echo "✓ Anthropic API access works" diff --git a/cmd/auth.go b/cmd/auth.go index e5f8933..4b1a0cd 100644 --- a/cmd/auth.go +++ b/cmd/auth.go @@ -53,7 +53,125 @@ func init() { authCmd.Flags().BoolVar(&noSync, "no-sync", false, "Skip syncing credentials to running containers") } +// runBedrockAuth handles authentication for AWS Bedrock users +func runBedrockAuth() error { + fmt.Println("Bedrock mode enabled - using AWS authentication") + + // Copy Claude config from ~/.claude to maestro's auth directory + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("failed to get home directory: %w", err) + } + + sourceClaudeDir := filepath.Join(homeDir, ".claude") + destAuthPath := expandPath(config.Claude.AuthPath) + + // Ensure destination directory exists + if err := os.MkdirAll(destAuthPath, 0755); err != nil { + return fmt.Errorf("failed to create auth directory: %w", err) + } + + // Check if source ~/.claude exists + if _, err := os.Stat(sourceClaudeDir); os.IsNotExist(err) { + fmt.Printf("Warning: ~/.claude directory not found\n") + fmt.Println("You may need to run 'claude' once on the host to create initial config") + } else { + // Copy .credentials.json if exists + srcCreds := filepath.Join(sourceClaudeDir, ".credentials.json") + if _, err := os.Stat(srcCreds); err == nil { + destCreds := filepath.Join(destAuthPath, ".credentials.json") + if err := copyFile(srcCreds, destCreds); err != nil { + fmt.Printf("Warning: Failed to copy credentials: %v\n", err) + } else { + fmt.Printf("✓ Copied credentials from %s\n", srcCreds) + } + } + + // Copy settings.json if exists (Claude Code settings) + srcSettings := filepath.Join(sourceClaudeDir, "settings.json") + if _, err := os.Stat(srcSettings); err == nil { + destSettings := filepath.Join(destAuthPath, "settings.json") + if err := copyFile(srcSettings, destSettings); err != nil { + fmt.Printf("Warning: Failed to copy settings: %v\n", err) + } else { + fmt.Printf("✓ Copied settings from %s\n", srcSettings) + } + } + } + + // Copy .claude.json from home directory if exists + srcClaudeJson := filepath.Join(homeDir, ".claude.json") + if _, err := os.Stat(srcClaudeJson); err == nil { + destClaudeJson := filepath.Join(destAuthPath, ".claude.json") + if err := copyFile(srcClaudeJson, destClaudeJson); err != nil { + fmt.Printf("Warning: Failed to copy .claude.json: %v\n", err) + } else { + fmt.Printf("✓ Copied config from %s\n", srcClaudeJson) + } + } + + // Run AWS SSO login if profile is configured + if config.AWS.Profile != "" { + fmt.Printf("\nRunning AWS SSO login for profile: %s\n", config.AWS.Profile) + fmt.Println("This will open a browser window for authentication...") + + ssoCmd := exec.Command("aws", "sso", "login", "--profile", config.AWS.Profile) + ssoCmd.Stdin = os.Stdin + ssoCmd.Stdout = os.Stdout + ssoCmd.Stderr = os.Stderr + + if err := ssoCmd.Run(); err != nil { + return fmt.Errorf("AWS SSO login failed: %w", err) + } + fmt.Println("✓ AWS SSO login successful") + } + + fmt.Println("\n✅ Bedrock authentication setup complete!") + fmt.Printf("AWS Profile: %s\n", config.AWS.Profile) + fmt.Printf("AWS Region: %s\n", config.AWS.Region) + fmt.Printf("Bedrock Model: %s\n", config.Bedrock.Model) + + // Ask user if they want to set up GitHub CLI (same as non-Bedrock flow) + 
fmt.Println("\n========================================================================") + hostname := config.GitHub.Hostname + if hostname == "" { + hostname = "github.com" + } + fmt.Printf("\nWould you like to set up GitHub CLI (gh) authentication for %s? (y/N): ", hostname) + var response string + fmt.Scanln(&response) + + if response == "y" || response == "Y" || response == "yes" || response == "Yes" { + if err := setupGitHubAuth(); err != nil { + fmt.Printf("\n⚠️ GitHub CLI setup failed: %v\n", err) + fmt.Println("You can skip this and run 'gh auth login' manually later.") + } + } else { + fmt.Println("\nSkipping GitHub CLI setup.") + fmt.Println("You can set it up later by running 'gh auth login' in a container,") + fmt.Println("or enable github.enabled in your config file and authenticate on the host.") + } + + fmt.Println("\nYou can now create containers with: maestro new ") + + return nil +} + +// copyFile copies a file from src to dst +func copyFile(src, dst string) error { + input, err := os.ReadFile(src) + if err != nil { + return err + } + return os.WriteFile(dst, input, 0644) +} + func runAuth(cmd *cobra.Command, cmdArgs []string) error { + // If Bedrock is enabled, use different auth flow + if config.Bedrock.Enabled { + return runBedrockAuth() + } + // Ensure MCL Claude directory exists authPath := expandPath(config.Claude.AuthPath) if err := os.MkdirAll(authPath, 0755); err != nil { @@ -107,11 +225,26 @@ func runAuth(cmd *cobra.Command, cmdArgs []string) error { "--name", authContainerName, "-v", fmt.Sprintf("%s:/home/node/.claude", authPath), "-w", "/workspace", - config.Containers.Image, - "claude", - "--dangerously-skip-permissions", } + // Mount host SSL certificates for corporate proxies (Zscaler, etc.) + // This allows the container to use the same CA trust store as the host + if _, err := os.Stat("/etc/ssl/certs/ca-certificates.crt"); err == nil { + args = append(args, + "-v", "/etc/ssl/certs:/etc/ssl/certs:ro", + "-e", "NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt", + "-e", "NODE_OPTIONS=--use-openssl-ca", + "-e", "SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt", + "-e", "CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt", + "-e", "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt", + ) + } + + args = append(args, + config.Containers.Image, + "claude", "--dangerously-skip-permissions", + ) + authCmd := exec.Command("docker", args...) authCmd.Stdin = os.Stdin authCmd.Stdout = os.Stdout @@ -203,7 +336,14 @@ func setupGitHubAuth() error { return fmt.Errorf("failed to create MCL gh directory: %w", err) } + // Determine hostname (default to github.com) + hostname := config.GitHub.Hostname + if hostname == "" { + hostname = "github.com" + } + fmt.Printf("\nGitHub CLI directory: %s\n", mclGhPath) + fmt.Printf("GitHub hostname: %s\n", hostname) // Clear existing GitHub auth data fmt.Println("Clearing existing GitHub authentication data...") @@ -241,10 +381,22 @@ func setupGitHubAuth() error { "--name", ghAuthContainerName, "-v", fmt.Sprintf("%s:/home/node/.config/gh", mclGhPath), "-w", "/workspace", - config.Containers.Image, - "gh", "auth", "login", } + // Mount host SSL certificates for corporate proxies (Zscaler, etc.) 
+ if _, err := os.Stat("/etc/ssl/certs/ca-certificates.crt"); err == nil { + args = append(args, + "-v", "/etc/ssl/certs:/etc/ssl/certs:ro", + "-e", "SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt", + "-e", "CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt", + ) + } + + // Build gh auth login command with hostname + ghAuthArgs := []string{"gh", "auth", "login", "--hostname", hostname} + args = append(args, config.Containers.Image) + args = append(args, ghAuthArgs...) + ghAuthCmd := exec.Command("docker", args...) ghAuthCmd.Stdin = os.Stdin ghAuthCmd.Stdout = os.Stdout @@ -264,6 +416,7 @@ func setupGitHubAuth() error { hostsPath := filepath.Join(mclGhPath, "hosts.yml") if _, err := os.Stat(hostsPath); err == nil { fmt.Println("\n✅ GitHub CLI authentication successful!") + fmt.Printf("Hostname: %s\n", hostname) fmt.Printf("Configuration saved to: %s\n", mclGhPath) fmt.Println("\nGitHub CLI will be available in all MCL containers when github.enabled is true.") } else { diff --git a/cmd/batch.go b/cmd/batch.go new file mode 100644 index 0000000..660e5d6 --- /dev/null +++ b/cmd/batch.go @@ -0,0 +1,441 @@ +// Copyright 2025 Nandor Kis +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/spf13/cobra" +) + +var ( + batchFile string + extraCommand string +) + +// Task represents a single task extracted from the markdown file +type Task struct { + Number int `json:"number"` + Title string `json:"title"` + Description string `json:"description"` +} + +var batchCmd = &cobra.Command{ + Use: "batch", + Short: "Create multiple containers from a task file", + Long: `Analyze a markdown file with multiple tasks and create separate containers for each. + +Uses AI to identify distinct tasks in the file, then lets you select which ones +to start as separate Maestro containers. + +The --extra-command flag allows you to add an instruction that will be sent to Claude +in every container after the main task is complete. This is useful for common follow-up +actions like committing, pushing, and creating PRs. 
+ +Examples: + maestro batch --file tasks.md + maestro batch -f sprint-backlog.md + maestro batch -f tasks.md -e "When done, commit your changes, push to origin, and open a PR against main"`, + RunE: runBatch, +} + +func init() { + rootCmd.AddCommand(batchCmd) + batchCmd.Flags().StringVarP(&batchFile, "file", "f", "", "Markdown file containing tasks (required)") + batchCmd.Flags().StringVarP(&extraCommand, "extra-command", "e", "", "Extra command to send to Claude in all containers after the main task") + batchCmd.MarkFlagRequired("file") +} + +func runBatch(cmd *cobra.Command, args []string) error { + // Read the markdown file + content, err := os.ReadFile(batchFile) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + fmt.Println("Analyzing tasks...") + + // Use LLM to analyze and extract tasks + tasks, err := analyzeTasks(string(content)) + if err != nil { + return fmt.Errorf("failed to analyze tasks: %w", err) + } + + if len(tasks) == 0 { + fmt.Println("No distinct tasks found in the file.") + return nil + } + + // Display found tasks + fmt.Printf("\nFound %d task(s):\n", len(tasks)) + for _, task := range tasks { + fmt.Printf(" %d. %s\n", task.Number, task.Title) + } + + // Prompt for selection + selectedTasks, err := promptTaskSelection(tasks) + if err != nil { + return err + } + + if len(selectedTasks) == 0 { + fmt.Println("No tasks selected. Exiting.") + return nil + } + + fmt.Printf("\nStarting %d container(s)...\n\n", len(selectedTasks)) + + // Create containers in parallel, passing full markdown as reference and extra command + if err := createContainersInParallel(selectedTasks, string(content), extraCommand); err != nil { + return err + } + + fmt.Println("\nDone! Use 'maestro list' to see container status.") + return nil +} + +// analyzeTasks uses Claude to analyze the markdown and extract tasks +func analyzeTasks(content string) ([]Task, error) { + prompt := fmt.Sprintf(`Analyze this document and identify tasks that can be worked on IN PARALLEL by different developers. + +Document: +--- +%s +--- + +Instructions: +1. Identify LOGICALLY INDEPENDENT work units that could be assigned to different people +2. CRITICAL: Do NOT split sequential/dependent steps into separate tasks + - If steps must be done in order (e.g., "create file A, then modify file A"), keep them as ONE task + - If a section has numbered sub-steps, that's usually ONE task, not multiple + - Phases, milestones, or features are typically good task boundaries +3. Look for natural parallelization boundaries: + - Different phases (Phase 1, Phase 2, etc.) are usually separate tasks + - Different features or components that don't depend on each other + - Different bug fixes that affect unrelated code +4. Extract a short title (max 60 chars) and include ALL related steps in the description +5. 
Number them starting from 1 + +Examples of WRONG splitting: +- "Create UserService class" and "Add methods to UserService" → Should be ONE task +- "Add config entry" and "Use config entry in code" → Should be ONE task + +Examples of CORRECT splitting: +- "Phase 3: Stopwords" and "Phase 4: Splitting" → TWO separate tasks (independent phases) +- "Fix login bug" and "Add export feature" → TWO separate tasks (unrelated work) + +Respond ONLY with valid JSON in this exact format (no markdown, no explanation): +{"tasks": [{"number": 1, "title": "Short task title", "description": "Full task description with all sub-steps..."}, ...]} + +If no distinct tasks are found, respond with: {"tasks": []}`, content) + + // Call Claude CLI to analyze tasks (--print is read-only, no permissions needed) + cmd := exec.Command("claude", "--print") + cmd.Stdin = strings.NewReader(prompt) + + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to call Claude: %w\nOutput: %s", err, string(output)) + } + + // Parse JSON response + outputStr := strings.TrimSpace(string(output)) + + // Try to extract JSON if wrapped in markdown code blocks + if strings.Contains(outputStr, "```") { + re := regexp.MustCompile("```(?:json)?\\s*([\\s\\S]*?)```") + if matches := re.FindStringSubmatch(outputStr); len(matches) > 1 { + outputStr = strings.TrimSpace(matches[1]) + } + } + + var result struct { + Tasks []Task `json:"tasks"` + } + + if err := json.Unmarshal([]byte(outputStr), &result); err != nil { + // Try to find JSON object in the output + start := strings.Index(outputStr, "{") + end := strings.LastIndex(outputStr, "}") + if start >= 0 && end > start { + outputStr = outputStr[start : end+1] + if err := json.Unmarshal([]byte(outputStr), &result); err != nil { + return nil, fmt.Errorf("failed to parse response: %w\nOutput: %s", err, string(output)) + } + } else { + return nil, fmt.Errorf("failed to parse response: %w\nOutput: %s", err, string(output)) + } + } + + return result.Tasks, nil +} + +// promptTaskSelection prompts the user to select which tasks to start +func promptTaskSelection(tasks []Task) ([]Task, error) { + fmt.Printf("\nWhich tasks to start? 
") + fmt.Printf("[1-%d, 'all', or comma-separated like '1,3,5'] (default: all): ", len(tasks)) + + reader := bufio.NewReader(os.Stdin) + input, err := reader.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("failed to read input: %w", err) + } + + input = strings.TrimSpace(strings.ToLower(input)) + + // Default to all tasks if user just presses Enter + if input == "" { + return tasks, nil + } + + // Handle 'all' + if input == "all" || input == "a" { + return tasks, nil + } + + // Handle range like '1-3' + if strings.Contains(input, "-") && !strings.Contains(input, ",") { + parts := strings.Split(input, "-") + if len(parts) == 2 { + start, err1 := strconv.Atoi(strings.TrimSpace(parts[0])) + end, err2 := strconv.Atoi(strings.TrimSpace(parts[1])) + if err1 == nil && err2 == nil && start >= 1 && end <= len(tasks) && start <= end { + var selected []Task + for i := start - 1; i < end; i++ { + selected = append(selected, tasks[i]) + } + return selected, nil + } + } + } + + // Handle comma-separated list like '1,3,5' + var selected []Task + for _, part := range strings.Split(input, ",") { + num, err := strconv.Atoi(strings.TrimSpace(part)) + if err != nil { + fmt.Printf("Warning: '%s' is not a valid number, skipping\n", part) + continue + } + if num < 1 || num > len(tasks) { + fmt.Printf("Warning: %d is out of range, skipping\n", num) + continue + } + selected = append(selected, tasks[num-1]) + } + + return selected, nil +} + +// ContainerResult holds the result of creating a container +type ContainerResult struct { + TaskNumber int + TaskTitle string + Success bool + Message string +} + +// createContainersInParallel creates containers for selected tasks concurrently +func createContainersInParallel(tasks []Task, fullMarkdown string, extraCmd string) error { + var wg sync.WaitGroup + results := make(chan ContainerResult, len(tasks)) + + // Track created containers for summary + var createdContainers []string + var mu sync.Mutex + + // Initialize multi-progress display for copy operations + mp := InitMultiProgress() + + // Pre-generate container names so we can add them to progress display + type taskInfo struct { + task Task + containerName string + branchName string + fullPrompt string + } + var taskInfos []taskInfo + + fmt.Println("Preparing containers...") + for _, task := range tasks { + taskDescription := task.Description + if taskDescription == "" { + taskDescription = task.Title + } + + // Build the full prompt with markdown as reference + fullPrompt := fmt.Sprintf(`You are working on Task %d: %s + +YOUR SPECIFIC TASK: +%s + +FULL DOCUMENT FOR REFERENCE: +--- +%s +--- + +Focus ONLY on your assigned task above. 
The document is provided for context only.`, + task.Number, task.Title, taskDescription, fullMarkdown) + + // Append extra command if provided + if extraCmd != "" { + fullPrompt += fmt.Sprintf(` + +ADDITIONAL INSTRUCTION (execute after completing the task above): +%s`, extraCmd) + } + + // Generate branch name from the specific task + branchName, _, err := generateBranchAndPrompt(taskDescription, false) + if err != nil { + return fmt.Errorf("failed to generate branch for task %d: %w", task.Number, err) + } + + if !isValidBranchName(branchName) { + branchName = generateSimpleBranch(task.Title) + } + + containerName, err := getNextContainerName(branchName) + if err != nil { + return fmt.Errorf("failed to get container name for task %d: %w", task.Number, err) + } + + taskInfos = append(taskInfos, taskInfo{ + task: task, + containerName: containerName, + branchName: branchName, + fullPrompt: fullPrompt, + }) + + // Add to progress display + mp.AddItem(containerName, 0) + } + + // Start progress display + fmt.Println("\nCopying source code to containers:") + mp.Start() + + // Start container creation in parallel + for _, ti := range taskInfos { + wg.Add(1) + go func(info taskInfo) { + defer wg.Done() + + result := ContainerResult{ + TaskNumber: info.task.Number, + TaskTitle: info.task.Title, + } + + // Create the container + if err := createBatchContainer(info.containerName, info.branchName, info.fullPrompt); err != nil { + result.Success = false + result.Message = fmt.Sprintf("failed to create container: %v", err) + results <- result + return + } + + mu.Lock() + createdContainers = append(createdContainers, info.containerName) + mu.Unlock() + + result.Success = true + result.Message = info.containerName + results <- result + }(ti) + } + + // Wait for all to complete + go func() { + wg.Wait() + close(results) + }() + + // Collect results (don't print yet, progress display is active) + var resultsList []ContainerResult + for result := range results { + resultsList = append(resultsList, result) + } + + // Stop progress display + mp.Stop() + + // Print final summary + fmt.Println("\nContainer creation results:") + successCount := 0 + for _, result := range resultsList { + if result.Success { + fmt.Printf(" [%d] ✓ %s\n", result.TaskNumber, result.Message) + successCount++ + } else { + fmt.Printf(" [%d] ✗ %s\n", result.TaskNumber, result.Message) + } + } + + fmt.Printf("\nCreated %d/%d containers successfully.\n", successCount, len(tasks)) + return nil +} + +// createBatchContainer creates a single container without connecting +func createBatchContainer(containerName, branchName, planningPrompt string) error { + // Step 1: Ensure Docker image + if err := ensureDockerImage(); err != nil { + return fmt.Errorf("failed to ensure Docker image: %w", err) + } + + // Step 2: Start container + if err := startContainer(containerName); err != nil { + return fmt.Errorf("failed to start container: %w", err) + } + + // Step 3: Copy project files + if err := copyProjectToContainer(containerName); err != nil { + return fmt.Errorf("failed to copy project: %w", err) + } + + // Step 4: Copy additional folders + if err := copyAdditionalFolders(containerName); err != nil { + return fmt.Errorf("failed to copy additional folders: %w", err) + } + + // Step 5: Initialize git branch + if err := initializeGitBranch(containerName, branchName); err != nil { + return fmt.Errorf("failed to initialize git branch: %w", err) + } + + // Step 6: Configure git user + if err := configureGitUser(containerName); err != nil { + // Just 
warn, don't fail + } + + // Step 7: Setup GitHub remote + if err := setupGitHubRemote(containerName); err != nil { + // Just warn, don't fail + } + + // Step 8: Start tmux session + if err := startTmuxSession(containerName, branchName, planningPrompt, false); err != nil { + return fmt.Errorf("failed to start tmux session: %w", err) + } + + return nil +} diff --git a/cmd/completion.go b/cmd/completion.go new file mode 100644 index 0000000..8ac8485 --- /dev/null +++ b/cmd/completion.go @@ -0,0 +1,81 @@ +// Copyright 2025 Nandor Kis +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "os" + + "github.com/spf13/cobra" +) + +var completionCmd = &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate shell completion scripts", + Long: `Generate shell completion scripts for maestro. + +To load completions: + +Bash: + $ source <(maestro completion bash) + + # To load completions for each session, execute once: + # Linux: + $ maestro completion bash > /etc/bash_completion.d/maestro + # macOS: + $ maestro completion bash > $(brew --prefix)/etc/bash_completion.d/maestro + +Zsh: + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ maestro completion zsh > "${fpath[1]}/_maestro" + + # You may need to start a new shell for this setup to take effect. + +Fish: + $ maestro completion fish | source + + # To load completions for each session, execute once: + $ maestro completion fish > ~/.config/fish/completions/maestro.fish + +PowerShell: + PS> maestro completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> maestro completion powershell > maestro.ps1 + # and source this file from your PowerShell profile. 
+`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + }, +} + +func init() { + rootCmd.AddCommand(completionCmd) +} diff --git a/cmd/daemon.go b/cmd/daemon.go index 6eb84af..08abbbe 100644 --- a/cmd/daemon.go +++ b/cmd/daemon.go @@ -252,6 +252,7 @@ func runDaemonBackground(cmd *cobra.Command, args []string) error { NotifyOn: config.Daemon.Notifications.NotifyOn, QuietHoursStart: config.Daemon.Notifications.QuietHours.Start, QuietHoursEnd: config.Daemon.Notifications.QuietHours.End, + ContainerPrefix: config.Containers.Prefix, } // Create and start daemon with embedded icon @@ -263,6 +264,36 @@ func runDaemonBackground(cmd *cobra.Command, args []string) error { return d.Start() } +// EnsureDaemonRunning starts the daemon if it's not already running. +// This is called automatically when the TUI starts. +func EnsureDaemonRunning() { + authDir := expandPath(config.Claude.AuthPath) + pidFile := filepath.Join(authDir, "daemon.pid") + + // Check if already running + if _, running := isDaemonRunning(pidFile); running { + return // Already running, nothing to do + } + + // Start daemon silently in background + binary, err := os.Executable() + if err != nil { + return // Fail silently + } + + daemonCmd := exec.Command(binary, "daemon", "_run") + daemonCmd.Stdout = nil + daemonCmd.Stderr = nil + daemonCmd.Stdin = nil + + if err := daemonCmd.Start(); err != nil { + return // Fail silently + } + + // Detach from parent + daemonCmd.Process.Release() +} + // Helper functions func isDaemonRunning(pidFile string) (int, bool) { diff --git a/cmd/new.go b/cmd/new.go index 1a9622b..4aeaa7d 100644 --- a/cmd/new.go +++ b/cmd/new.go @@ -17,18 +17,22 @@ package cmd import ( "bufio" "bytes" + "crypto/rand" "fmt" + "io" + "math/big" "os" "os/exec" "path/filepath" "regexp" "strings" + "sync" "time" + "github.com/spf13/cobra" "github.com/uprockcom/maestro/assets" "github.com/uprockcom/maestro/pkg/container" "github.com/uprockcom/maestro/pkg/version" - "github.com/spf13/cobra" ) var ( @@ -133,6 +137,11 @@ func runNew(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to initialize git branch: %w", err) } + // Step 7.1: Configure git user if specified + if err := configureGitUser(containerName); err != nil { + fmt.Printf("Warning: Failed to configure git user: %v\n", err) + } + // Step 7.5: Convert SSH GitHub remotes to HTTPS for gh authentication if err := setupGitHubRemote(containerName); err != nil { // Don't fail container creation, just warn @@ -584,7 +593,13 @@ func startContainer(containerName string) error { configExists = true } - if !credExists || !configExists { + // Skip credential checks when using Bedrock (uses AWS auth instead) + if config.Bedrock.Enabled { + if !configExists { + fmt.Println("⚠️ Warning: Missing .claude.json configuration.") + fmt.Println("Run 'maestro auth' to copy config from ~/.claude") + } + } else if !credExists || !configExists { fmt.Println("⚠️ Warning: Claude authentication/configuration incomplete.") if !credExists { fmt.Println(" - Missing .credentials.json") @@ -633,6 +648,83 @@ func startContainer(containerName string) error { "-v", 
fmt.Sprintf("%s-history:/commandhistory", containerName), ) + // Mount host SSL certificates for corporate proxies (Zscaler, etc.) + // This allows the container to use the same CA trust store as the host + if _, err := os.Stat("/etc/ssl/certs/ca-certificates.crt"); err == nil { + args = append(args, + "-v", "/etc/ssl/certs:/etc/ssl/certs:ro", + "-e", "NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt", + "-e", "NODE_OPTIONS=--use-openssl-ca", + "-e", "SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt", + "-e", "CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt", + "-e", "REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt", + ) + } + + // Mount AWS config and credentials for Bedrock support + if config.AWS.Enabled || config.Bedrock.Enabled { + homeDir, _ := os.UserHomeDir() + awsDir := filepath.Join(homeDir, ".aws") + if _, err := os.Stat(awsDir); err == nil { + // Mount as read-write so SSO token refresh can work + args = append(args, + "-v", fmt.Sprintf("%s:/home/node/.aws", awsDir), + ) + } + + // Set AWS environment variables + if config.AWS.Profile != "" { + args = append(args, "-e", fmt.Sprintf("AWS_PROFILE=%s", config.AWS.Profile)) + } + if config.AWS.Region != "" { + args = append(args, "-e", fmt.Sprintf("AWS_REGION=%s", config.AWS.Region)) + args = append(args, "-e", fmt.Sprintf("AWS_DEFAULT_REGION=%s", config.AWS.Region)) + } + + // Set Bedrock environment variables + if config.Bedrock.Enabled { + args = append(args, "-e", "CLAUDE_CODE_USE_BEDROCK=1") + if config.Bedrock.Model != "" { + args = append(args, "-e", fmt.Sprintf("ANTHROPIC_MODEL=%s", config.Bedrock.Model)) + } + } + } + + // Mount SSH agent socket for git authentication (more secure than mounting keys) + // Only the agent socket is exposed - private keys stay on the host + if config.SSH.Enabled { + sshAuthSock := os.Getenv("SSH_AUTH_SOCK") + if sshAuthSock != "" { + args = append(args, + "-v", fmt.Sprintf("%s:/ssh-agent", sshAuthSock), + "-e", "SSH_AUTH_SOCK=/ssh-agent", + ) + } else { + fmt.Println("Warning: SSH enabled but SSH_AUTH_SOCK not set. 
Run 'ssh-add' first.") + } + + // Mount known_hosts from host to avoid SSH host key verification prompts + if config.SSH.KnownHostsPath != "" { + knownHostsPath := expandPath(config.SSH.KnownHostsPath) + if _, err := os.Stat(knownHostsPath); err == nil { + args = append(args, + "-v", fmt.Sprintf("%s:/home/node/.ssh/known_hosts:ro", knownHostsPath), + ) + } + } + } + + // Mount Android SDK if configured (read-only for safety) + if config.Android.SDKPath != "" { + sdkPath := expandPath(config.Android.SDKPath) + if _, err := os.Stat(sdkPath); err == nil { + args = append(args, + "-v", fmt.Sprintf("%s:/home/node/Android/Sdk:ro", sdkPath), + "-e", "ANDROID_HOME=/home/node/Android/Sdk", + ) + } + } + // Use version-synchronized image (or config override if set) args = append(args, getDockerImage()) @@ -770,6 +862,16 @@ PROMPT_EOF`) } } + // Copy and import SSL certificates for Java + if err := copySSLCertificates(containerName); err != nil { + fmt.Printf("Warning: Failed to install SSL certificates: %v\n", err) + } + + // Setup Android SDK environment (SDK is mounted as volume) + if err := setupAndroidSDK(containerName); err != nil { + fmt.Printf("Warning: Failed to setup Android SDK: %v\n", err) + } + // Initialize firewall fmt.Println("Setting up firewall...") if err := initializeFirewall(containerName); err != nil { @@ -779,40 +881,383 @@ PROMPT_EOF`) return nil } +// MultiProgress manages a multi-line progress display (like docker pull) +type MultiProgress struct { + mu sync.Mutex + items map[string]*ProgressItem + order []string // Track order of items + lineCount int + initialized bool + done chan bool +} + +type ProgressItem struct { + Name string + Status string // "waiting", "copying", "done", "error" + BytesRead int64 + TotalSize int64 + StartTime time.Time + EndTime time.Time +} + +var globalProgress *MultiProgress + +// InitMultiProgress initializes the global multi-progress display +func InitMultiProgress() *MultiProgress { + globalProgress = &MultiProgress{ + items: make(map[string]*ProgressItem), + done: make(chan bool), + } + return globalProgress +} + +// GetMultiProgress returns the global progress display +func GetMultiProgress() *MultiProgress { + return globalProgress +} + +// AddItem adds a new item to track +func (mp *MultiProgress) AddItem(name string, totalSize int64) { + mp.mu.Lock() + defer mp.mu.Unlock() + + mp.items[name] = &ProgressItem{ + Name: name, + Status: "waiting", + TotalSize: totalSize, + } + mp.order = append(mp.order, name) +} + +// StartItem marks an item as started +func (mp *MultiProgress) StartItem(name string) { + mp.mu.Lock() + defer mp.mu.Unlock() + + if item, ok := mp.items[name]; ok { + item.Status = "copying" + item.StartTime = time.Now() + } +} + +// UpdateItem updates bytes read for an item +func (mp *MultiProgress) UpdateItem(name string, bytesRead int64) { + mp.mu.Lock() + defer mp.mu.Unlock() + + if item, ok := mp.items[name]; ok { + item.BytesRead = bytesRead + } +} + +// CompleteItem marks an item as done +func (mp *MultiProgress) CompleteItem(name string) { + mp.mu.Lock() + defer mp.mu.Unlock() + + if item, ok := mp.items[name]; ok { + item.Status = "done" + item.EndTime = time.Now() + } +} + +// ErrorItem marks an item as failed +func (mp *MultiProgress) ErrorItem(name string, err error) { + mp.mu.Lock() + defer mp.mu.Unlock() + + if item, ok := mp.items[name]; ok { + item.Status = "error" + item.EndTime = time.Now() + } +} + +// Start begins the progress display loop +func (mp *MultiProgress) Start() { + go func() { + ticker := 
time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-mp.done: + return + case <-ticker.C: + mp.render() + } + } + }() +} + +// Stop stops the progress display and renders final state +func (mp *MultiProgress) Stop() { + close(mp.done) + mp.renderFinal() +} + +func (mp *MultiProgress) render() { + mp.mu.Lock() + defer mp.mu.Unlock() + + if len(mp.items) == 0 { + return + } + + // Count active items (skip "waiting" - they haven't started yet) + activeCount := 0 + for _, name := range mp.order { + if mp.items[name].Status != "waiting" { + activeCount++ + } + } + + if activeCount == 0 { + return + } + + // Move cursor up if we've already printed lines + if mp.initialized && mp.lineCount > 0 { + fmt.Printf("\033[%dA", mp.lineCount) + } + + mp.lineCount = activeCount + mp.initialized = true + + for _, name := range mp.order { + item := mp.items[name] + if item.Status != "waiting" { + mp.renderLine(item) + } + } +} + +func (mp *MultiProgress) renderFinal() { + mp.mu.Lock() + defer mp.mu.Unlock() + + // Move cursor up to overwrite progress lines + if mp.initialized && mp.lineCount > 0 { + fmt.Printf("\033[%dA", mp.lineCount) + } + + for _, name := range mp.order { + item := mp.items[name] + if item.Status != "waiting" { + mp.renderLine(item) + } + } +} + +func (mp *MultiProgress) renderLine(item *ProgressItem) { + // Truncate name if too long + displayName := item.Name + if len(displayName) > 40 { + displayName = displayName[:37] + "..." + } + + // Clear line + fmt.Print("\033[K") + + switch item.Status { + case "copying": + elapsed := time.Since(item.StartTime).Seconds() + if elapsed < 0.1 { + elapsed = 0.1 + } + speed := float64(item.BytesRead) / elapsed / 1024 / 1024 + + if item.TotalSize > 0 { + pct := float64(item.BytesRead) / float64(item.TotalSize) * 100 + if pct > 100 { + pct = 100 + } + barWidth := 20 + filled := int(pct / 100 * float64(barWidth)) + bar := strings.Repeat("█", filled) + strings.Repeat("░", barWidth-filled) + fmt.Printf("%-40s [%s] %5.1f%% %6.1f MB/s\n", displayName, bar, pct, speed) + } else { + fmt.Printf("%-40s %8s %6.1f MB/s\n", displayName, formatBytes(item.BytesRead), speed) + } + case "done": + duration := item.EndTime.Sub(item.StartTime).Seconds() + speed := float64(item.BytesRead) / duration / 1024 / 1024 + fmt.Printf("%-40s ✓ %s in %.1fs (%.1f MB/s)\n", displayName, formatBytes(item.BytesRead), duration, speed) + case "error": + fmt.Printf("%-40s ✗ Failed\n", displayName) + } +} + +// progressReader wraps an io.Reader to track bytes and report to MultiProgress +type progressReader struct { + reader io.Reader + containerName string + bytesRead int64 + mu sync.Mutex +} + +func (pr *progressReader) Read(p []byte) (int, error) { + n, err := pr.reader.Read(p) + pr.mu.Lock() + pr.bytesRead += int64(n) + bytes := pr.bytesRead + pr.mu.Unlock() + + // Report to global progress if active + if mp := GetMultiProgress(); mp != nil { + mp.UpdateItem(pr.containerName, bytes) + } + return n, err +} + +func (pr *progressReader) getBytesRead() int64 { + pr.mu.Lock() + defer pr.mu.Unlock() + return pr.bytesRead +} + +func formatBytes(bytes int64) string { + const ( + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + ) + switch { + case bytes >= GB: + return fmt.Sprintf("%.1f GB", float64(bytes)/GB) + case bytes >= MB: + return fmt.Sprintf("%.1f MB", float64(bytes)/MB) + case bytes >= KB: + return fmt.Sprintf("%.1f KB", float64(bytes)/KB) + default: + return fmt.Sprintf("%d B", bytes) + } +} + +// readMaestroIgnore reads exclusion patterns from 
.maestroignore file +func readMaestroIgnore(dir string) []string { + ignorePath := filepath.Join(dir, ".maestroignore") + file, err := os.Open(ignorePath) + if err != nil { + return nil // No .maestroignore file, that's fine + } + defer file.Close() + + var patterns []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + // Skip empty lines and comments + if line == "" || strings.HasPrefix(line, "#") { + continue + } + patterns = append(patterns, line) + } + return patterns +} + func copyProjectToContainer(containerName string) error { cwd, err := os.Getwd() if err != nil { return err } - // Create tar of current directory (excluding .git if it's huge) - tarCmd := exec.Command("tar", "-czf", "-", "--exclude=node_modules", "--exclude=.git", ".") - tarCmd.Dir = cwd + // Determine compression setting (default: true for backward compatibility) + useCompression := config.Sync.Compress == nil || *config.Sync.Compress - // Pipe to docker cp - dockerCmd := exec.Command("docker", "exec", "-i", containerName, "tar", "-xzf", "-", "-C", "/workspace") + // Check if we're in batch mode (MultiProgress active) + mp := GetMultiProgress() + isBatchMode := mp != nil - // Connect pipes + // Signal start to MultiProgress + if isBatchMode { + mp.StartItem(containerName) + } else { + fmt.Printf("Copying source code to %s...\n", containerName) + } + + startTime := time.Now() + + // Build exclude arguments (defaults + .maestroignore) + excludeArgs := []string{"--exclude=node_modules", "--exclude=.git"} + for _, pattern := range readMaestroIgnore(cwd) { + excludeArgs = append(excludeArgs, "--exclude="+pattern) + } + + // Create tar of current directory (excluding .git which is copied separately) + var tarCmd *exec.Cmd + var dockerCmd *exec.Cmd + if useCompression { + // Use gzip compression (slower for large projects but smaller transfer) + tarArgs := append([]string{"-czf", "-"}, excludeArgs...) + tarArgs = append(tarArgs, ".") + tarCmd = exec.Command("tar", tarArgs...) + dockerCmd = exec.Command("docker", "exec", "-i", containerName, "tar", "-xzf", "-", "-C", "/workspace") + } else { + // No compression (faster for large projects on local Docker) + tarArgs := append([]string{"-cf", "-"}, excludeArgs...) + tarArgs = append(tarArgs, ".") + tarCmd = exec.Command("tar", tarArgs...) 
+ dockerCmd = exec.Command("docker", "exec", "-i", containerName, "tar", "-xf", "-", "-C", "/workspace") + } + tarCmd.Dir = cwd + + // Connect pipes with progress tracking pipe, err := tarCmd.StdoutPipe() if err != nil { + if isBatchMode { + mp.ErrorItem(containerName, err) + } return err } - dockerCmd.Stdin = pipe + + // Create progress reader + pr := &progressReader{ + reader: pipe, + containerName: containerName, + } + dockerCmd.Stdin = pr // Start both commands if err := tarCmd.Start(); err != nil { + if isBatchMode { + mp.ErrorItem(containerName, err) + } return err } if err := dockerCmd.Start(); err != nil { + if isBatchMode { + mp.ErrorItem(containerName, err) + } return err } // Wait for completion - if err := tarCmd.Wait(); err != nil { - return err + tarErr := tarCmd.Wait() + dockerErr := dockerCmd.Wait() + + bytesRead := pr.getBytesRead() + duration := time.Since(startTime) + + if tarErr != nil { + if isBatchMode { + mp.ErrorItem(containerName, tarErr) + } + return tarErr } - if err := dockerCmd.Wait(); err != nil { - return err + if dockerErr != nil { + if isBatchMode { + mp.ErrorItem(containerName, dockerErr) + } + return dockerErr + } + + // Update final bytes and mark complete + if isBatchMode { + mp.UpdateItem(containerName, bytesRead) + mp.CompleteItem(containerName) + } else { + speed := float64(bytesRead) / duration.Seconds() / 1024 / 1024 + fmt.Printf(" Copied %s in %.1fs (%.1f MB/s)\n", formatBytes(bytesRead), duration.Seconds(), speed) } // Copy .git separately if it exists @@ -874,6 +1319,22 @@ func initializeGitBranch(containerName, branchName string) error { return cmd.Run() } +func configureGitUser(containerName string) error { + if config.Git.UserName != "" { + cmd := exec.Command("docker", "exec", containerName, "git", "config", "--global", "user.name", config.Git.UserName) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to set git user.name: %w", err) + } + } + if config.Git.UserEmail != "" { + cmd := exec.Command("docker", "exec", containerName, "git", "config", "--global", "user.email", config.Git.UserEmail) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to set git user.email: %w", err) + } + } + return nil +} + func setupGitHubRemote(containerName string) error { // Check if origin remote exists getOriginCmd := exec.Command("docker", "exec", containerName, "sh", "-c", @@ -1017,10 +1478,18 @@ Please analyze this task and create a detailed implementation plan. 
Do not start } // Create a background script to send the initial prompt - // .claude.json is deleted before starting Claude to force fresh credential discovery + // First accepts the bypass permissions prompt, then sends the task autoInputScript := fmt.Sprintf(`#!/bin/sh -# Wait for Claude to fully start -sleep 5 +# Wait for Claude to start and show the bypass permissions prompt +sleep 3 + +# Accept the bypass permissions prompt by pressing Down then Enter +tmux send-keys -t main:0 Down 2>/dev/null +sleep 0.3 +tmux send-keys -t main:0 Enter 2>/dev/null + +# Wait for Claude to fully initialize after accepting +sleep 3 # Send the task prompt cat > /tmp/prompt-input.txt << 'PROMPT_EOF' @@ -1113,6 +1582,35 @@ func initializeFirewall(containerName string) error { return fmt.Errorf("failed to write allowed domains: %w", err) } + // Write internal DNS config if configured (for corporate networks) + if config.Firewall.InternalDNS != "" { + writeInternalDNSCmd := exec.Command("docker", "exec", "-u", "root", containerName, "sh", "-c", + fmt.Sprintf("echo '%s' > /etc/internal-dns.txt", config.Firewall.InternalDNS)) + if err := writeInternalDNSCmd.Run(); err != nil { + fmt.Printf("Warning: Failed to write internal DNS config: %v\n", err) + } + } + + // Write internal domains if configured + if len(config.Firewall.InternalDomains) > 0 { + internalDomainsList := strings.Join(config.Firewall.InternalDomains, "\n") + writeInternalDomainsCmd := exec.Command("docker", "exec", "-u", "root", containerName, "sh", "-c", + fmt.Sprintf("echo '%s' > /etc/internal-domains.txt", internalDomainsList)) + if err := writeInternalDomainsCmd.Run(); err != nil { + fmt.Printf("Warning: Failed to write internal domains config: %v\n", err) + } + } + + // Write AWS config flag if Bedrock or AWS is enabled + // This tells the firewall script to add AWS domain rules + if config.AWS.Enabled || config.Bedrock.Enabled { + writeAWSConfigCmd := exec.Command("docker", "exec", "-u", "root", containerName, "sh", "-c", + "echo 'enabled' > /etc/aws-enabled.txt") + if err := writeAWSConfigCmd.Run(); err != nil { + fmt.Printf("Warning: Failed to write AWS config: %v\n", err) + } + } + // Run firewall initialization as root (with timeout in background) // We run it in the background because the verification steps can hang firewallCmd := exec.Command("docker", "exec", "-u", "root", "-d", containerName, "/usr/local/bin/init-firewall.sh") @@ -1133,6 +1631,147 @@ func initializeFirewall(containerName string) error { return nil } +func setupAndroidSDK(containerName string) error { + sdkPath := expandPath(config.Android.SDKPath) + if sdkPath == "" { + return nil // No Android SDK configured + } + + // Check if SDK exists + if _, err := os.Stat(sdkPath); err != nil { + return nil // SDK not found + } + + fmt.Println("Setting up Android SDK...") + + // Set ANDROID_HOME environment variable in .zshrc + envCmd := exec.Command("docker", "exec", containerName, "sh", "-c", + `echo 'export ANDROID_HOME=/home/node/Android/Sdk' >> /home/node/.zshrc && echo 'export PATH=$PATH:$ANDROID_HOME/platform-tools:$ANDROID_HOME/cmdline-tools/latest/bin' >> /home/node/.zshrc`) + if err := envCmd.Run(); err != nil { + fmt.Printf("Warning: Failed to set ANDROID_HOME: %v\n", err) + } + + // Update local.properties in workspace if it exists + updateLocalPropertiesCmd := exec.Command("docker", "exec", containerName, "sh", "-c", + `if [ -f /workspace/local.properties ]; then + sed -i 's|sdk.dir=.*|sdk.dir=/home/node/Android/Sdk|' /workspace/local.properties + echo " ✓ 
Updated local.properties" + fi`) + if err := updateLocalPropertiesCmd.Run(); err != nil { + fmt.Printf("Warning: Failed to update local.properties: %v\n", err) + } + + fmt.Println(" ✓ Android SDK mounted at /home/node/Android/Sdk") + + return nil +} + +func copySSLCertificates(containerName string) error { + certsPath := expandPath(config.SSL.CertificatesPath) + if certsPath == "" { + return nil // No certificates configured + } + + // Check if certificates directory exists + if _, err := os.Stat(certsPath); err != nil { + return nil // No certificates to copy + } + + // List certificate files + entries, err := os.ReadDir(certsPath) + if err != nil { + return fmt.Errorf("failed to read certificates directory: %w", err) + } + + var certFiles []string + for _, entry := range entries { + if !entry.IsDir() && (filepath.Ext(entry.Name()) == ".crt" || filepath.Ext(entry.Name()) == ".pem") { + certFiles = append(certFiles, entry.Name()) + } + } + + if len(certFiles) == 0 { + return nil // No certificate files found + } + + fmt.Printf("Installing %d SSL certificate(s) for Java...\n", len(certFiles)) + + // Create temporary directory in container for certificates + mkdirCmd := exec.Command("docker", "exec", "-u", "root", containerName, "mkdir", "-p", "/tmp/host-certs") + if err := mkdirCmd.Run(); err != nil { + return fmt.Errorf("failed to create temp certs directory: %w", err) + } + + // Copy each certificate and import into Java keystore + for _, certFile := range certFiles { + certPath := filepath.Join(certsPath, certFile) + + // Copy certificate to container + copyCmd := exec.Command("docker", "cp", certPath, fmt.Sprintf("%s:/tmp/host-certs/%s", containerName, certFile)) + if err := copyCmd.Run(); err != nil { + fmt.Printf(" ⚠ Failed to copy %s: %v\n", certFile, err) + continue + } + + // Generate alias from filename (remove extension, replace special chars) + alias := certFile[:len(certFile)-len(filepath.Ext(certFile))] + alias = regexp.MustCompile(`[^a-zA-Z0-9_-]`).ReplaceAllString(alias, "_") + + // Import into Java keystore (using keytool) + // The default cacerts password is 'changeit' + importCmd := exec.Command("docker", "exec", "-u", "root", containerName, "keytool", + "-importcert", + "-noprompt", + "-trustcacerts", + "-alias", alias, + "-file", fmt.Sprintf("/tmp/host-certs/%s", certFile), + "-keystore", "/usr/local/jdk-17.0.2/lib/security/cacerts", + "-storepass", "changeit", + ) + output, err := importCmd.CombinedOutput() + if err != nil { + // Check if it's just a duplicate alias error (certificate already exists) + if !strings.Contains(string(output), "already exists") { + fmt.Printf(" ⚠ Failed to import %s: %v\n", certFile, err) + } + continue + } + fmt.Printf(" ✓ %s\n", certFile) + } + + // Cleanup temp directory + cleanupCmd := exec.Command("docker", "exec", "-u", "root", containerName, "rm", "-rf", "/tmp/host-certs") + cleanupCmd.Run() // Ignore errors on cleanup + + // Change keystore password from default 'changeit' to a random password + // This prevents the default password from being used to tamper with the keystore + newPassword := generateRandomPassword(32) + changePassCmd := exec.Command("docker", "exec", "-u", "root", containerName, "keytool", + "-storepasswd", + "-keystore", "/usr/local/jdk-17.0.2/lib/security/cacerts", + "-storepass", "changeit", + "-new", newPassword, + ) + if err := changePassCmd.Run(); err != nil { + fmt.Printf(" ⚠ Failed to change keystore password: %v\n", err) + } else { + fmt.Println(" ✓ Keystore password randomized") + } + + return nil +} + 
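The workspace-copy hunk earlier in this patch streams the tar output through a `progressReader`, but that type's definition falls outside this diff. As a rough sketch of the shape that hunk relies on — an `io.Reader` wrapper that counts bytes so `getBytesRead()` can report the transfer size — using only the names visible above (`reader`, `containerName`, `getBytesRead`); the real type in the repository may additionally drive the batch progress display (`mp.UpdateItem`):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// progressReader wraps an io.Reader and counts bytes as they flow through,
// matching how the copy hunk above consumes it via getBytesRead().
type progressReader struct {
	reader        io.Reader
	containerName string
	bytesRead     int64
}

func (p *progressReader) Read(buf []byte) (int, error) {
	n, err := p.reader.Read(buf)
	if n > 0 {
		atomic.AddInt64(&p.bytesRead, int64(n))
	}
	return n, err
}

func (p *progressReader) getBytesRead() int64 {
	return atomic.LoadInt64(&p.bytesRead)
}

func main() {
	// Stand-in for the tar stdout pipe that docker exec consumes.
	pr := &progressReader{reader: strings.NewReader("tar stream bytes"), containerName: "maestro-demo"}
	_, _ = io.Copy(io.Discard, pr)
	fmt.Println(pr.getBytesRead(), "bytes read")
}
```

The atomic counter keeps the byte count safe if progress is polled from another goroutine while `docker exec` is still draining the stream.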
+// generateRandomPassword generates a cryptographically random password +func generateRandomPassword(length int) string { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + b[i] = charset[n.Int64()] + } + return string(b) +} + func copyAppsToContainer(containerName string) error { if len(config.Apps) == 0 { return nil // No apps configured @@ -1284,6 +1923,11 @@ func CreateContainerFromTUI(taskDescription, branchNameOverride string, skipConn return fmt.Errorf("failed to initialize git branch: %w", err) } + // Step 7.1: Configure git user if specified + if err := configureGitUser(containerName); err != nil { + fmt.Printf("Warning: Failed to configure git user: %v\n", err) + } + // Step 7.5: Convert SSH GitHub remotes to HTTPS for gh authentication if err := setupGitHubRemote(containerName); err != nil { // Don't fail container creation, just warn diff --git a/cmd/root.go b/cmd/root.go index 4e64b70..118bb35 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -46,6 +46,7 @@ type Config struct { Memory string `mapstructure:"memory"` CPUs string `mapstructure:"cpus"` } `mapstructure:"resources"` + DefaultReturnToTUI bool `mapstructure:"default_return_to_tui"` } `mapstructure:"containers"` Tmux struct { @@ -54,18 +55,51 @@ type Config struct { } `mapstructure:"tmux"` Firewall struct { - AllowedDomains []string `mapstructure:"allowed_domains"` + AllowedDomains []string `mapstructure:"allowed_domains"` + InternalDNS string `mapstructure:"internal_dns"` + InternalDomains []string `mapstructure:"internal_domains"` } `mapstructure:"firewall"` Sync struct { AdditionalFolders []string `mapstructure:"additional_folders"` + Compress *bool `mapstructure:"compress"` // Use gzip compression when copying (default: true) } `mapstructure:"sync"` + SSH struct { + Enabled bool `mapstructure:"enabled"` + KnownHostsPath string `mapstructure:"known_hosts_path"` + } `mapstructure:"ssh"` + + SSL struct { + CertificatesPath string `mapstructure:"certificates_path"` + } `mapstructure:"ssl"` + + Android struct { + SDKPath string `mapstructure:"sdk_path"` + } `mapstructure:"android"` + + Git struct { + UserName string `mapstructure:"user_name"` + UserEmail string `mapstructure:"user_email"` + } `mapstructure:"git"` + GitHub struct { Enabled bool `mapstructure:"enabled"` ConfigPath string `mapstructure:"config_path"` + Hostname string `mapstructure:"hostname"` // For GitHub Enterprise (e.g., git.int.avast.com) } `mapstructure:"github"` + AWS struct { + Enabled bool `mapstructure:"enabled"` + Profile string `mapstructure:"profile"` + Region string `mapstructure:"region"` + } `mapstructure:"aws"` + + Bedrock struct { + Enabled bool `mapstructure:"enabled"` + Model string `mapstructure:"model"` + } `mapstructure:"bedrock"` + Daemon struct { CheckInterval string `mapstructure:"check_interval"` ShowNag bool `mapstructure:"show_nag"` @@ -94,6 +128,9 @@ var rootCmd = &cobra.Command{ for Claude Code development. 
It allows you to run multiple Claude instances in parallel, each in their own isolated environment with proper branch management.`, Run: func(cmd *cobra.Command, args []string) { + // Auto-start daemon if not running + EnsureDaemonRunning() + // Keep running TUI in a loop until user explicitly quits // Maintain cached state for seamless return from containers var cachedState *tui.CachedState @@ -266,6 +303,7 @@ func initConfig() { viper.SetDefault("containers.image", "ghcr.io/uprockcom/maestro:latest") viper.SetDefault("containers.resources.memory", "4g") viper.SetDefault("containers.resources.cpus", "2") + viper.SetDefault("containers.default_return_to_tui", false) viper.SetDefault("tmux.default_session", "main") viper.SetDefault("tmux.prefix", "C-b") viper.SetDefault("firewall.allowed_domains", []string{ @@ -277,9 +315,26 @@ func initConfig() { "sentry.io", "statsig.anthropic.com", "statsig.com", + // AWS Bedrock domains + "sts.amazonaws.com", + "bedrock.amazonaws.com", + "bedrock-runtime.amazonaws.com", }) + viper.SetDefault("firewall.internal_dns", "") + viper.SetDefault("firewall.internal_domains", []string{}) + viper.SetDefault("ssh.enabled", false) + viper.SetDefault("ssh.known_hosts_path", "~/.ssh/known_hosts") + viper.SetDefault("ssl.certificates_path", paths.CertificatesDir()) + viper.SetDefault("android.sdk_path", "") + viper.SetDefault("git.user_name", "") + viper.SetDefault("git.user_email", "") viper.SetDefault("github.enabled", false) viper.SetDefault("github.config_path", paths.GitHubAuthDir()) + viper.SetDefault("aws.enabled", false) + viper.SetDefault("aws.profile", "") + viper.SetDefault("aws.region", "") + viper.SetDefault("bedrock.enabled", false) + viper.SetDefault("bedrock.model", "") viper.SetDefault("daemon.check_interval", "30m") viper.SetDefault("daemon.show_nag", true) viper.SetDefault("daemon.token_refresh.enabled", true) diff --git a/docker/Dockerfile b/docker/Dockerfile index 3dda6f5..2872108 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -37,6 +37,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ wget \ build-essential \ locales \ + ca-certificates \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Set up UTF-8 locale @@ -56,6 +57,27 @@ ENV PATH="/usr/local/go/bin:${PATH}" ENV GOPATH="/home/node/go" ENV PATH="${GOPATH}/bin:${PATH}" +# Install Java JDK 17 (for Android development) +RUN ARCH=$(dpkg --print-architecture) && \ + if [ "$ARCH" = "amd64" ]; then JAVA_ARCH="x64"; elif [ "$ARCH" = "arm64" ]; then JAVA_ARCH="aarch64"; else JAVA_ARCH="$ARCH"; fi && \ + wget "https://download.java.net/java/GA/jdk17.0.2/dfd4a8d0985749f896bed50d7138ee7f/8/GPL/openjdk-17.0.2_linux-${JAVA_ARCH}_bin.tar.gz" && \ + tar -C /usr/local -xzf "openjdk-17.0.2_linux-${JAVA_ARCH}_bin.tar.gz" && \ + rm "openjdk-17.0.2_linux-${JAVA_ARCH}_bin.tar.gz" + +# Set Java environment variables +ENV JAVA_HOME="/usr/local/jdk-17.0.2" +ENV PATH="${JAVA_HOME}/bin:${PATH}" + +# Install Gradle 8.13 (for Android development) +ARG GRADLE_VERSION=8.13 +RUN wget "https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip" && \ + unzip -d /usr/local "gradle-${GRADLE_VERSION}-bin.zip" && \ + rm "gradle-${GRADLE_VERSION}-bin.zip" + +# Set Gradle environment variables +ENV GRADLE_HOME="/usr/local/gradle-${GRADLE_VERSION}" +ENV PATH="${GRADLE_HOME}/bin:${PATH}" + # Install UV (Python package manager) RUN curl -LsSf https://astral.sh/uv/install.sh | sh ENV PATH="/root/.cargo/bin:${PATH}" @@ -127,7 +149,7 @@ RUN sh -c "$(wget -O- 
https://github.com/deluan/zsh-in-docker/releases/download/ -a "source /usr/share/doc/fzf/examples/key-bindings.zsh" \ -a "source /usr/share/doc/fzf/examples/completion.zsh" \ -a "export PROMPT_COMMAND='history -a' && export HISTFILE=/commandhistory/.bash_history" \ - -a "export PATH=/usr/local/go/bin:\$GOPATH/bin:\$PATH" \ + -a "export PATH=/usr/local/go/bin:\$GOPATH/bin:\$JAVA_HOME/bin:\$GRADLE_HOME/bin:\$PATH" \ -a "export MCL_CONTAINER=true" \ -x @@ -150,6 +172,7 @@ RUN chmod +x /usr/local/bin/init-firewall.sh /usr/local/bin/container-startup.sh echo "node ALL=(root) NOPASSWD: /usr/bin/tee /etc/resolv.conf" >> /etc/sudoers.d/node && \ echo "node ALL=(root) NOPASSWD: /bin/chown" >> /etc/sudoers.d/node && \ echo "node ALL=(root) NOPASSWD: /bin/chmod" >> /etc/sudoers.d/node && \ + echo "node ALL=(root) NOPASSWD: /usr/sbin/update-ca-certificates" >> /etc/sudoers.d/node && \ chmod 0440 /etc/sudoers.d/node USER node diff --git a/docker/container-startup.sh b/docker/container-startup.sh index ef0e226..d22b3e0 100644 --- a/docker/container-startup.sh +++ b/docker/container-startup.sh @@ -10,6 +10,13 @@ echo "Maestro container starting..." # Ensure proper ownership of home directory sudo chown -R node:node /home/node 2>/dev/null || true +# Install custom CA certificate if mounted (for corporate proxies like Zscaler) +if [ -f /usr/local/share/ca-certificates/custom-ca.crt ]; then + echo "Installing custom CA certificate..." + sudo update-ca-certificates 2>/dev/null || true + echo "✓ CA certificate installed" +fi + # Set container hostname in prompt export PS1="[maestro] \w $ " diff --git a/docker/init-firewall.sh b/docker/init-firewall.sh index 110621e..3d15a30 100644 --- a/docker/init-firewall.sh +++ b/docker/init-firewall.sh @@ -115,6 +115,40 @@ echo "server=/.githubusercontent.com/8.8.8.8" >> "$DNSMASQ_CONF" echo "ipset=/.anthropic.com/allowed-domains" >> "$DNSMASQ_CONF" echo "server=/.anthropic.com/8.8.8.8" >> "$DNSMASQ_CONF" +# Add wildcard entries for AWS (only if AWS/Bedrock is enabled) +# This is controlled by /etc/aws-enabled.txt which is written by maestro when aws.enabled or bedrock.enabled is true +AWS_ENABLED_FILE="/etc/aws-enabled.txt" +if [ -f "$AWS_ENABLED_FILE" ]; then + echo "AWS/Bedrock enabled - adding AWS domain rules" + echo "ipset=/.amazonaws.com/allowed-domains" >> "$DNSMASQ_CONF" + echo "server=/.amazonaws.com/8.8.8.8" >> "$DNSMASQ_CONF" + echo "ipset=/.awsapps.com/allowed-domains" >> "$DNSMASQ_CONF" + echo "server=/.awsapps.com/8.8.8.8" >> "$DNSMASQ_CONF" +else + echo "AWS/Bedrock not enabled - skipping AWS domain rules" +fi + +# Configure internal DNS for corporate networks (Zscaler, VPN, etc.) +INTERNAL_DNS_FILE="/etc/internal-dns.txt" +INTERNAL_DOMAINS_FILE="/etc/internal-domains.txt" +if [ -f "$INTERNAL_DNS_FILE" ] && [ -f "$INTERNAL_DOMAINS_FILE" ]; then + INTERNAL_DNS=$(cat "$INTERNAL_DNS_FILE") + if [ -n "$INTERNAL_DNS" ]; then + echo "Configuring internal DNS server: $INTERNAL_DNS" + while read -r domain; do + [ -z "$domain" ] && continue + echo " Routing $domain via internal DNS" + echo "ipset=/$domain/allowed-domains" >> "$DNSMASQ_CONF" + echo "server=/$domain/$INTERNAL_DNS" >> "$DNSMASQ_CONF" + # Also add wildcard for subdomains + echo "ipset=/.$domain/allowed-domains" >> "$DNSMASQ_CONF" + echo "server=/.$domain/$INTERNAL_DNS" >> "$DNSMASQ_CONF" + done < "$INTERNAL_DOMAINS_FILE" + fi +elif [ -f "$INTERNAL_DNS_FILE" ]; then + echo "Warning: Internal DNS configured but no internal domains specified" +fi + # Start dnsmasq echo "Starting dnsmasq..." 
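To make the internal-DNS block above concrete, a small standalone sketch follows (sample values only; `10.0.0.1` and `internal.company.com` are placeholders, not defaults) that prints the dnsmasq entries the loop appends for each configured internal domain: an exact-match rule and a wildcard rule, both resolved through the corporate DNS server and added to the `allowed-domains` ipset.

```go
package main

import "fmt"

func main() {
	internalDNS := "10.0.0.1"                           // sample firewall.internal_dns value
	internalDomains := []string{"internal.company.com"} // sample firewall.internal_domains entries

	// Mirrors the while-read loop in init-firewall.sh: each domain gets an
	// exact-match and a ".domain" wildcard entry routed to the internal DNS,
	// plus matching ipset entries so the firewall allows the resolved IPs.
	for _, d := range internalDomains {
		fmt.Printf("ipset=/%s/allowed-domains\n", d)
		fmt.Printf("server=/%s/%s\n", d, internalDNS)
		fmt.Printf("ipset=/.%s/allowed-domains\n", d)
		fmt.Printf("server=/.%s/%s\n", d, internalDNS)
	}
}
```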
dnsmasq --conf-file="$DNSMASQ_CONF" @@ -125,7 +159,7 @@ echo "nameserver 127.0.0.1" | tee /etc/resolv.conf > /dev/null # Process GitHub API ranges and add them directly to ipset # We do this because GitHub has many IPs and we want to ensure we catch them all echo "Fetching GitHub IP ranges..." -gh_ranges=$(curl -s https://api.github.com/meta) +gh_ranges=$(curl -s --connect-timeout 5 --max-time 10 https://api.github.com/meta) if [ -z "$gh_ranges" ]; then echo "WARNING: Failed to fetch GitHub IP ranges - GitHub access may be limited" else @@ -193,24 +227,36 @@ iptables -A OUTPUT -j REJECT --reject-with icmp-admin-prohibited echo "Firewall configuration complete" -# Verify firewall rules +# Verify firewall rules (run all checks in parallel) echo "Verifying firewall rules..." -if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then + +# Create temp files for results +VERIFY_DIR=$(mktemp -d) +trap "rm -rf $VERIFY_DIR" EXIT + +# Run all verification tests in parallel +(curl --connect-timeout 3 --max-time 5 https://example.com >/dev/null 2>&1 && echo "fail" || echo "pass") > "$VERIFY_DIR/block" & +(curl --connect-timeout 3 --max-time 5 https://api.github.com/zen >/dev/null 2>&1 && echo "pass" || echo "fail") > "$VERIFY_DIR/github" & +(curl --connect-timeout 3 --max-time 5 https://api.anthropic.com >/dev/null 2>&1 && echo "pass" || echo "fail") > "$VERIFY_DIR/anthropic" & + +# Wait for all background jobs +wait + +# Check results +if [ "$(cat "$VERIFY_DIR/block")" = "fail" ]; then echo "ERROR: Firewall verification failed - was able to reach https://example.com" exit 1 else echo "✓ Firewall blocking works - unable to reach https://example.com" fi -# Verify allowed access -echo "Testing DNS resolution and access to whitelisted domains..." -if ! curl --connect-timeout 10 https://api.github.com/zen >/dev/null 2>&1; then +if [ "$(cat "$VERIFY_DIR/github")" = "fail" ]; then echo "WARNING: Unable to reach https://api.github.com - GitHub access may be limited" else echo "✓ GitHub API access works" fi -if ! 
curl --connect-timeout 10 https://api.anthropic.com >/dev/null 2>&1; then +if [ "$(cat "$VERIFY_DIR/anthropic")" = "fail" ]; then echo "WARNING: Unable to reach https://api.anthropic.com - Anthropic access may be limited" else echo "✓ Anthropic API access works" diff --git a/docs/GUIDE.md b/docs/GUIDE.md index ecf4bf0..f7195b6 100644 --- a/docs/GUIDE.md +++ b/docs/GUIDE.md @@ -101,6 +101,7 @@ sync: additional_folders: # Folders to copy as siblings - ~/Documents/Code/mcp-servers - ~/Documents/Code/helpers + compress: true # Set to false for faster copying of large projects github: enabled: false # Enable GitHub CLI (gh) integration diff --git a/pkg/container/info.go b/pkg/container/info.go index bafe8c8..c6bcd97 100644 --- a/pkg/container/info.go +++ b/pkg/container/info.go @@ -21,6 +21,7 @@ import ( "os/exec" "strconv" "strings" + "sync" "time" ) @@ -167,7 +168,15 @@ func GetRunningContainers(prefix string) ([]Info, error) { return nil, err } - containers := []Info{} + // Parse basic container info first + type basicInfo struct { + name string + status string + state string + createdAt time.Time + } + var basics []basicInfo + for _, line := range strings.Split(string(output), "\n") { if line == "" { continue @@ -183,40 +192,77 @@ func GetRunningContainers(prefix string) ([]Info, error) { continue } - status := parts[1] - state := parts[2] - createdStr := parts[3] - // Parse creation time - createdAt, err := time.Parse("2006-01-02 15:04:05 -0700 MST", createdStr) + createdAt, err := time.Parse("2006-01-02 15:04:05 -0700 MST", parts[3]) if err != nil { - // Fallback to zero time if parse fails createdAt = time.Time{} } - // Get branch name - branch := GetBranchName(name) - - // Check for attention needed and dormant status - needsAttention := false - isDormant := false - if state == "running" { - needsAttention = CheckBellStatus(name) - isDormant = !IsClaudeRunning(name) - } - - containers = append(containers, Info{ - Name: name, - ShortName: GetShortName(name, prefix), - Status: state, - StatusDetails: status, - Branch: branch, - NeedsAttention: needsAttention, - IsDormant: isDormant, - CreatedAt: createdAt, + basics = append(basics, basicInfo{ + name: name, + status: parts[1], + state: parts[2], + createdAt: createdAt, }) } + // Fetch detailed info for all containers in parallel + containers := make([]Info, len(basics)) + var wg sync.WaitGroup + + for i, b := range basics { + wg.Add(1) + go func(idx int, basic basicInfo) { + defer wg.Done() + + info := Info{ + Name: basic.name, + ShortName: GetShortName(basic.name, prefix), + Status: basic.state, + StatusDetails: basic.status, + CreatedAt: basic.createdAt, + } + + // Fetch details in parallel + var detailWg sync.WaitGroup + var mu sync.Mutex + + // Branch name + detailWg.Add(1) + go func() { + defer detailWg.Done() + branch := GetBranchName(basic.name) + mu.Lock() + info.Branch = branch + mu.Unlock() + }() + + // Bell status + detailWg.Add(1) + go func() { + defer detailWg.Done() + needsAttention := CheckBellStatus(basic.name) + mu.Lock() + info.NeedsAttention = needsAttention + mu.Unlock() + }() + + // Claude running check + detailWg.Add(1) + go func() { + defer detailWg.Done() + isDormant := !IsClaudeRunning(basic.name) + mu.Lock() + info.IsDormant = isDormant + mu.Unlock() + }() + + detailWg.Wait() + containers[idx] = info + }(i, b) + } + + wg.Wait() return containers, nil } @@ -229,7 +275,15 @@ func GetAllContainers(prefix string) ([]Info, error) { return nil, err } - containers := []Info{} + // Parse basic container info first + type 
basicInfo struct { + name string + status string + state string + createdAt time.Time + } + var basics []basicInfo + for _, line := range strings.Split(string(output), "\n") { if line == "" { continue @@ -245,53 +299,115 @@ func GetAllContainers(prefix string) ([]Info, error) { continue } - status := parts[1] - state := parts[2] - createdStr := parts[3] - // Parse creation time - createdAt, err := time.Parse("2006-01-02 15:04:05 -0700 MST", createdStr) + createdAt, err := time.Parse("2006-01-02 15:04:05 -0700 MST", parts[3]) if err != nil { - // Fallback to zero time if parse fails createdAt = time.Time{} } - // Get branch name - branch := GetBranchName(name) - - // Check for attention needed (bell/silence) only for running containers - needsAttention := false - isDormant := false - authStatus := "" - lastActivity := "-" - gitStatus := "-" - if state == "running" { - needsAttention = CheckBellStatus(name) - // Check if Claude is running - if not, container is dormant - isDormant = !IsClaudeRunning(name) - // Check authentication status - authStatus = GetAuthStatus(name) - // Get last activity - lastActivity = GetLastActivity(name) - // Get git status - gitStatus = GetGitStatus(name) - } - - containers = append(containers, Info{ - Name: name, - ShortName: GetShortName(name, prefix), - Status: state, - StatusDetails: status, - Branch: branch, - NeedsAttention: needsAttention, - IsDormant: isDormant, - AuthStatus: authStatus, - LastActivity: lastActivity, - GitStatus: gitStatus, - CreatedAt: createdAt, + basics = append(basics, basicInfo{ + name: name, + status: parts[1], + state: parts[2], + createdAt: createdAt, }) } + // Fetch detailed info for all containers in parallel + containers := make([]Info, len(basics)) + var wg sync.WaitGroup + + for i, b := range basics { + wg.Add(1) + go func(idx int, basic basicInfo) { + defer wg.Done() + + info := Info{ + Name: basic.name, + ShortName: GetShortName(basic.name, prefix), + Status: basic.state, + StatusDetails: basic.status, + CreatedAt: basic.createdAt, + LastActivity: "-", + GitStatus: "-", + } + + // For running containers, fetch detailed info in parallel + if basic.state == "running" { + var detailWg sync.WaitGroup + var mu sync.Mutex + + // Branch name + detailWg.Add(1) + go func() { + defer detailWg.Done() + branch := GetBranchName(basic.name) + mu.Lock() + info.Branch = branch + mu.Unlock() + }() + + // Bell status + detailWg.Add(1) + go func() { + defer detailWg.Done() + needsAttention := CheckBellStatus(basic.name) + mu.Lock() + info.NeedsAttention = needsAttention + mu.Unlock() + }() + + // Claude running check + detailWg.Add(1) + go func() { + defer detailWg.Done() + isDormant := !IsClaudeRunning(basic.name) + mu.Lock() + info.IsDormant = isDormant + mu.Unlock() + }() + + // Auth status + detailWg.Add(1) + go func() { + defer detailWg.Done() + authStatus := GetAuthStatus(basic.name) + mu.Lock() + info.AuthStatus = authStatus + mu.Unlock() + }() + + // Last activity + detailWg.Add(1) + go func() { + defer detailWg.Done() + lastActivity := GetLastActivity(basic.name) + mu.Lock() + info.LastActivity = lastActivity + mu.Unlock() + }() + + // Git status + detailWg.Add(1) + go func() { + defer detailWg.Done() + gitStatus := GetGitStatus(basic.name) + mu.Lock() + info.GitStatus = gitStatus + mu.Unlock() + }() + + detailWg.Wait() + } else { + // For stopped containers, just get branch name + info.Branch = GetBranchName(basic.name) + } + + containers[idx] = info + }(i, b) + } + + wg.Wait() return containers, nil } diff --git 
a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index 70ce573..3331965 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -36,6 +36,7 @@ type Config struct { NotifyOn []string QuietHoursStart string QuietHoursEnd string + ContainerPrefix string } // Daemon manages background monitoring and auto-refresh @@ -118,15 +119,23 @@ func (d *Daemon) Start() error { d.logInfo("Continuing without notifications...") } else { // Log notification configuration - if d.hasTerminalNotifier { - d.logInfo("Using terminal-notifier for notifications") + switch runtime.GOOS { + case "darwin": + if d.hasTerminalNotifier { + d.logInfo("Using terminal-notifier for notifications") + if d.iconPath != "" { + d.logInfo("Custom icon path: %s", d.iconPath) + } else { + d.logInfo("No custom icon configured") + } + } else { + d.logInfo("Using osascript for notifications (no custom icon)") + } + case "linux": + d.logInfo("Using notify-send for notifications") if d.iconPath != "" { d.logInfo("Custom icon path: %s", d.iconPath) - } else { - d.logInfo("No custom icon configured") } - } else { - d.logInfo("Using osascript for notifications (no custom icon)") } // Send welcome notification @@ -225,7 +234,7 @@ func (d *Daemon) checkTokenExpiry(container string, state *ContainerState) { // Send notification if enabled if d.shouldNotify("token_expiring", state) { d.notify("Token Expiring", fmt.Sprintf("Container %s token expires in %.1fh and auto-refresh failed", - getShortName(container), timeLeft.Hours())) + d.getShortName(container), timeLeft.Hours())) state.LastNotified = timeNow() } } else { @@ -244,7 +253,7 @@ func (d *Daemon) checkAttentionStatus(container string, state *ContainerState) { now := time.Now() state.AttentionStarted = &now state.NotificationSent = false - d.logInfo("Container %s needs attention", getShortName(container)) + d.logInfo("Container %s needs attention", d.getShortName(container)) } // Check if we should notify @@ -253,7 +262,7 @@ func (d *Daemon) checkAttentionStatus(container string, state *ContainerState) { if d.shouldNotify("attention_needed", state) { d.notify("Container Needs Attention", fmt.Sprintf("Container %s has needed attention for %s", - getShortName(container), formatDuration(attentionDuration))) + d.getShortName(container), formatDuration(attentionDuration))) state.NotificationSent = true state.LastNotified = timeNow() } @@ -261,7 +270,7 @@ func (d *Daemon) checkAttentionStatus(container string, state *ContainerState) { } else { // Clear attention state if state.AttentionStarted != nil { - d.logInfo("Container %s attention resolved", getShortName(container)) + d.logInfo("Container %s attention resolved", d.getShortName(container)) } state.AttentionStarted = nil state.NotificationSent = false @@ -340,10 +349,12 @@ func (d *Daemon) notify(title, message string) { } case "linux": // Linux notification via notify-send - args := []string{fmt.Sprintf("MCL - %s", title), message} + // Note: --icon must come before title and message + var args []string if d.iconPath != "" { args = append(args, "--icon", d.iconPath) } + args = append(args, fmt.Sprintf("Maestro - %s", title), message) cmd := exec.Command("notify-send", args...) 
if err := cmd.Run(); err != nil { d.logError("Failed to send Linux notification: %v", err) @@ -402,10 +413,15 @@ func (d *Daemon) getRunningContainers() ([]string, error) { return nil, err } + prefix := d.config.ContainerPrefix + if prefix == "" { + prefix = "maestro-" // Default prefix + } + var containers []string for _, line := range strings.Split(string(output), "\n") { name := strings.TrimSpace(line) - if name != "" && strings.HasPrefix(name, "mcl-") { + if name != "" && strings.HasPrefix(name, prefix) { containers = append(containers, name) } } @@ -487,9 +503,13 @@ func readCredentials(path string) (*Credentials, error) { return &creds, nil } -func getShortName(containerName string) string { - if strings.HasPrefix(containerName, "mcl-") { - return containerName[4:] +func (d *Daemon) getShortName(containerName string) string { + prefix := d.config.ContainerPrefix + if prefix == "" { + prefix = "maestro-" + } + if strings.HasPrefix(containerName, prefix) { + return containerName[len(prefix):] } return containerName } diff --git a/pkg/paths/paths.go b/pkg/paths/paths.go index 8143f31..94706f4 100644 --- a/pkg/paths/paths.go +++ b/pkg/paths/paths.go @@ -63,6 +63,13 @@ func GitHubAuthDir() string { return filepath.Join(GetConfigDir(), "gh") } +// CertificatesDir returns the path to the SSL certificates directory. +// Unix/macOS: ~/.maestro/certificates +// Windows: %APPDATA%\maestro\certificates +func CertificatesDir() string { + return filepath.Join(GetConfigDir(), "certificates") +} + // LegacyConfigFile returns the old config file path for migration detection. // Returns empty string on Windows (no legacy path on Windows). func LegacyConfigFile() string { diff --git a/pkg/tui/model.go b/pkg/tui/model.go index d6d9b78..66b5b83 100644 --- a/pkg/tui/model.go +++ b/pkg/tui/model.go @@ -137,6 +137,11 @@ func isFirstRun() bool { return true // No config file = first run } + // Skip credential check if Bedrock is enabled (uses AWS auth instead) + if viper.GetBool("bedrock.enabled") { + return false + } + // Check if credentials exist credPath := viper.GetString("claude.auth_path") if credPath == "" { @@ -343,7 +348,7 @@ func NewWithCache(containerPrefix string, cached *CachedState) *Model { } else { // Normal mode: If we have cached state, initialize with it for instant render if cached != nil && len(cached.Containers) > 0 { - m.homeView = views.NewHomeModel(cached.Containers, false) + m.homeView = views.NewHomeModel(cached.Containers, false, viper.GetBool("bedrock.enabled")) m.ready = true // Skip "Loading..." 
m.cachedCursorPos = cached.CursorPos } else { @@ -720,7 +725,7 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } // Initialize home view with loaded data - m.homeView = views.NewHomeModel(msg.containers, false) + m.homeView = views.NewHomeModel(msg.containers, false, viper.GetBool("bedrock.enabled")) if m.width > 0 && m.height > 0 { // Subtract 9 lines: title banner (6) + help (1) + blank line (1) + statusbar (1) m.homeView.SetSize(m.width, m.height-9) @@ -1493,6 +1498,9 @@ func createContainerCreateModal() *Modal { ti.Cursor.Style = lipgloss.NewStyle().Foreground(style.OceanSurge) // Note: textinput doesn't have BlurredStyle, we'll handle prompt color in the blur/focus methods + // Get default value for "Return to TUI" from config + defaultReturnToTUI := viper.GetBool("containers.default_return_to_tui") + modal := &Modal{ Type: ModalForm, Title: "Create New Container", @@ -1500,8 +1508,8 @@ func createContainerCreateModal() *Modal { Height: 30, textarea: &ta, textinputs: []textinput.Model{ti}, - checkboxes: []bool{false, false}, // [0]=no-connect, [1]=exact - focusedField: 0, // Start with textarea focused + checkboxes: []bool{defaultReturnToTUI, false}, // [0]=no-connect, [1]=exact + focusedField: 0, // Start with textarea focused fieldLabels: []string{ "Task Description:", "Branch Name:", diff --git a/pkg/tui/views/home.go b/pkg/tui/views/home.go index b91bb6f..e9c525b 100644 --- a/pkg/tui/views/home.go +++ b/pkg/tui/views/home.go @@ -30,29 +30,34 @@ type columnConfig struct { minSize int // Minimum width for this column } -var ( - // Maximum table width before centering kicks in - maxTableWidth = 160 +// Maximum table width before centering kicks in +const maxTableWidth = 160 - // Column definitions with base sizes and minimums - columnConfigs = []columnConfig{ +// getColumnConfigs returns column definitions based on whether AWS auth is used +func getColumnConfigs(useAWSAuth bool) []columnConfig { + configs := []columnConfig{ {title: "NAME", baseSize: 25, minSize: 15}, {title: "STATUS", baseSize: 14, minSize: 12}, {title: "BRANCH", baseSize: 25, minSize: 15}, {title: "GIT", baseSize: 10, minSize: 8}, {title: "ACTIVITY", baseSize: 12, minSize: 10}, - {title: "AUTH", baseSize: 12, minSize: 10}, } + // Only show AUTH column when not using AWS/Bedrock auth + if !useAWSAuth { + configs = append(configs, columnConfig{title: "AUTH", baseSize: 12, minSize: 10}) + } + configs = append(configs, columnConfig{title: "CREATED", baseSize: 12, minSize: 10}) + return configs +} - // Total base width for proportion calculations - totalBaseWidth = func() int { - total := 0 - for _, c := range columnConfigs { - total += c.baseSize - } - return total - }() -) +// getTotalBaseWidth calculates total base width for the given column configs +func getTotalBaseWidth(configs []columnConfig) int { + total := 0 + for _, c := range configs { + total += c.baseSize + } + return total +} // HomeModel is the main container list view type HomeModel struct { @@ -62,10 +67,14 @@ type HomeModel struct { animState int containers []container.Info daemonRunning bool + useAWSAuth bool // Whether AWS/Bedrock auth is being used (hides AUTH column) } // calculateColumnWidths returns column widths scaled to fit the given width -func calculateColumnWidths(availableWidth int) []table.Column { +func calculateColumnWidths(availableWidth int, useAWSAuth bool) []table.Column { + columnConfigs := getColumnConfigs(useAWSAuth) + totalBaseWidth := getTotalBaseWidth(columnConfigs) + // Account for table borders and padding 
(roughly 4 chars for borders + spacing) usableWidth := availableWidth - 4 if usableWidth < totalBaseWidth { @@ -105,9 +114,12 @@ func calculateColumnWidths(availableWidth int) []table.Column { } // NewHomeModel creates a new home view -func NewHomeModel(containers []container.Info, daemonRunning bool) *HomeModel { +func NewHomeModel(containers []container.Info, daemonRunning bool, useAWSAuth bool) *HomeModel { + columnConfigs := getColumnConfigs(useAWSAuth) + totalBaseWidth := getTotalBaseWidth(columnConfigs) + // Start with base column widths - columns := calculateColumnWidths(totalBaseWidth) + columns := calculateColumnWidths(totalBaseWidth, useAWSAuth) t := table.New( table.WithColumns(columns), @@ -135,6 +147,7 @@ func NewHomeModel(containers []container.Info, daemonRunning bool) *HomeModel { table: t, containers: containers, daemonRunning: daemonRunning, + useAWSAuth: useAWSAuth, } h.updateTableRows() @@ -239,7 +252,7 @@ func (h *HomeModel) SetSize(width, height int) { } // Update column widths proportionally - columns := calculateColumnWidths(effectiveWidth) + columns := calculateColumnWidths(effectiveWidth, h.useAWSAuth) h.table.SetColumns(columns) // Only set table viewport width if we're filling the space @@ -274,8 +287,12 @@ func (h *HomeModel) updateTableRows() { h.formatBranch(c), h.formatGit(c), h.formatActivity(c), - h.formatAuth(c), } + // Only include AUTH column when not using AWS auth + if !h.useAWSAuth { + row = append(row, h.formatAuth(c)) + } + row = append(row, h.formatCreated(c)) rows = append(rows, row) } @@ -288,12 +305,16 @@ func (h *HomeModel) formatName(c container.Info) string { } // formatStatus returns the status indicator +// Using plain text without colors to avoid ANSI bleeding issues in the table func (h *HomeModel) formatStatus(c container.Info) string { switch c.Status { case "running": + if c.NeedsAttention { + return "⚠ Waiting" + } return "● Running" case "exited": - return "■ Stopped" + return "○ Stopped" default: return "? " + c.Status } @@ -331,6 +352,14 @@ func (h *HomeModel) formatAuth(c container.Info) string { return c.AuthStatus } +// formatCreated returns when the container was created +func (h *HomeModel) formatCreated(c container.Info) string { + if c.CreatedAt.IsZero() { + return "—" + } + return c.CreatedAt.Format("Jan 2 15:04") +} + // GetContainers returns the current container list for caching func (h *HomeModel) GetContainers() []container.Info { return h.containers