Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .github/scripts/doc-generator.sh
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,33 @@ run_doc_generation() {
fi
echo "::endgroup::"

# Regenerate the monitoring docker-compose snippet embedded in the docs.
# On failure we set failed=true and continue, so every generator is
# attempted before the script reports an overall result.
echo "::group::Running node-update-monitoring-docker-compose"
if npm run node-update-monitoring-docker-compose; then
echo "βœ… Updated alloy service in monitoring page"
else
echo "❌ node-update-monitoring-docker-compose failed"
failed=true
fi
echo "::endgroup::"

# Regenerate the Alloy configuration snippet shown on the monitoring page.
echo "::group::Running node-update-monitoring-alloy-config"
if npm run node-update-monitoring-alloy-config; then
echo "βœ… Updated alloy config in monitoring page"
else
echo "❌ node-update-monitoring-alloy-config failed"
failed=true
fi
echo "::endgroup::"

# Regenerate the greybox section of the GenVM configuration docs.
echo "::group::Running node-update-greybox"
if npm run node-update-greybox; then
echo "βœ… Updated greybox section in genvm configuration"
else
echo "❌ node-update-greybox failed"
failed=true
fi
echo "::endgroup::"

echo "::group::Running node-generate-api-docs"
if npm run node-generate-api-docs; then
echo "βœ… Generated API documentation"
Expand Down
3 changes: 3 additions & 0 deletions .github/workflows/check-node-docs-sync.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ jobs:
npm run node-update-setup-guide
npm run node-update-config
npm run node-update-docker-compose
npm run node-update-monitoring-docker-compose
npm run node-update-monitoring-alloy-config
npm run node-update-greybox
npm run node-generate-api-docs

- name: Check for uncommitted changes
Expand Down
30 changes: 28 additions & 2 deletions .github/workflows/sync-docs-from-node.yml
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ jobs:
needs: prepare
strategy:
matrix:
sync_type: [changelog, config, config_asimov, config_bradbury, docker_compose, api_gen, api_debug, api_ops]
sync_type: [changelog, config, config_asimov, config_bradbury, docker_compose, docker_compose_monitoring, alloy_config, greybox_setup, api_gen, api_debug, api_ops]
fail-fast: false
steps:
- name: Checkout documentation repository
Expand All @@ -118,6 +118,8 @@ jobs:
configs/node/asimov.yaml.example
configs/node/bradbury.yaml.example
release/docker-compose.yaml
release/alloy-config.river
release/greybox-setup-guide.md
sparse-checkout-cone-mode: true
path: source-repo
ref: ${{ needs.prepare.outputs.version }}
Expand Down Expand Up @@ -156,6 +158,24 @@ jobs:
echo "target_path=content/validators/docker-compose.yaml" >> $GITHUB_OUTPUT
echo "filter_pattern=.*" >> $GITHUB_OUTPUT
;;
"docker_compose_monitoring")
echo "title=Docker Compose File (Monitoring)" >> $GITHUB_OUTPUT
echo "source_path=source-repo/release/docker-compose.yaml" >> $GITHUB_OUTPUT
echo "target_path=content/validators/docker-compose-monitoring.yaml" >> $GITHUB_OUTPUT
echo "filter_pattern=.*" >> $GITHUB_OUTPUT
;;
"alloy_config")
echo "title=Alloy Config File" >> $GITHUB_OUTPUT
echo "source_path=source-repo/release/alloy-config.river" >> $GITHUB_OUTPUT
echo "target_path=content/validators/alloy-config.river" >> $GITHUB_OUTPUT
echo "filter_pattern=.*" >> $GITHUB_OUTPUT
;;
"greybox_setup")
echo "title=Greybox Setup Guide" >> $GITHUB_OUTPUT
echo "source_path=source-repo/release/greybox-setup-guide.md" >> $GITHUB_OUTPUT
echo "target_path=content/validators/greybox-setup-guide.md" >> $GITHUB_OUTPUT
echo "filter_pattern=.*" >> $GITHUB_OUTPUT
;;
"api_gen")
echo "title=API Gen Methods" >> $GITHUB_OUTPUT
echo "source_path=source-repo/${{ github.event.inputs.api_gen_path || github.event.client_payload.api_gen_path || 'docs/api/rpc' }}" >> $GITHUB_OUTPUT
Expand Down Expand Up @@ -421,6 +441,9 @@ jobs:
- \`npm run node-update-setup-guide\`
- \`npm run node-update-config\`
- \`npm run node-update-docker-compose\`
- \`npm run node-update-monitoring-docker-compose\`
- \`npm run node-update-monitoring-alloy-config\`
- \`npm run node-update-greybox\`
- \`npm run node-generate-api-docs\`

Please review the changes and merge if everything looks correct.
Expand Down Expand Up @@ -488,14 +511,17 @@ jobs:
echo "" >> $GITHUB_STEP_SUMMARY

# Process each sync type report
for sync_type in changelog config config_asimov config_bradbury docker_compose api_gen api_debug api_ops; do
for sync_type in changelog config config_asimov config_bradbury docker_compose docker_compose_monitoring alloy_config greybox_setup api_gen api_debug api_ops; do
# Get proper title
case "$sync_type" in
"changelog") title="πŸ“ Changelog Sync" ;;
"config") title="βš™οΈ Config File Sync" ;;
"config_asimov") title="βš™οΈ Config File Sync (Asimov)" ;;
"config_bradbury") title="βš™οΈ Config File Sync (Bradbury)" ;;
"docker_compose") title="🐳 Docker Compose Sync" ;;
"docker_compose_monitoring") title="🐳 Docker Compose Sync (Monitoring)" ;;
"alloy_config") title="πŸ“Š Alloy Config Sync" ;;
"greybox_setup") title="πŸ”§ Greybox Setup Guide Sync" ;;
"api_gen") title="πŸ”§ API Gen Methods Sync" ;;
"api_debug") title="πŸ› API Debug Methods Sync" ;;
"api_ops") title="πŸ“Š API Ops Methods Sync" ;;
Expand Down
160 changes: 160 additions & 0 deletions content/validators/alloy-config.river
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
// Grafana Alloy Configuration for GenLayer Node Telemetry
// Handles both log collection (Loki) and metrics forwarding (Prometheus).
//
// Every tunable is read from an environment variable via sys.env();
// each coalesce() call supplies the fallback used when the variable is
// unset or empty.

// ==========================================
// Log Collection and Forwarding
// ==========================================

// Discovery component to find log files using local.file_match
// Supports different log file patterns:
//   - Single node: "/var/log/genlayer/node.log"
//   - Multi-node: "/var/log/genlayer/*/logs/node.log" (each node in subdirectory)
//   - Custom pattern via LOG_FILE_PATTERN env var
local.file_match "genlayer_logs" {
  path_targets = [{
    __path__ = coalesce(sys.env("LOG_FILE_PATTERN"), "/var/log/genlayer/node*.log"),
  }]
}

// Relabel to attach static metadata labels to every discovered log target.
// These labels end up on each log line pushed to Loki.
discovery.relabel "add_labels" {
  targets = local.file_match.genlayer_logs.targets

  // Add instance label from environment variable
  rule {
    target_label = "instance"
    replacement = sys.env("NODE_ID")
  }

  // Add validator_name label from environment variable
  rule {
    target_label = "validator_name"
    replacement = sys.env("VALIDATOR_NAME")
  }

  // Add component label (fixed value identifying the shipper)
  rule {
    target_label = "component"
    replacement = "alloy"
  }

  // Add job label (fixed value used for querying in Loki)
  rule {
    target_label = "job"
    replacement = "genlayer-node"
  }
}

// Source component that tails the matched log files and forwards the
// labeled entries to the central Loki writer below.
loki.source.file "genlayer" {
  targets = discovery.relabel.add_labels.output
  forward_to = [loki.write.central.receiver]

  // Tail from end to avoid ingesting entire log history on startup
  tail_from_end = true
}

// Write logs to central Loki instance.
// CENTRAL_LOKI_URL / _USERNAME / _PASSWORD must be provided via env.
loki.write "central" {
  endpoint {
    url = sys.env("CENTRAL_LOKI_URL")

    // HTTP Basic Authentication
    basic_auth {
      username = sys.env("CENTRAL_LOKI_USERNAME")
      password = sys.env("CENTRAL_LOKI_PASSWORD")
    }

    // Enable retry with default exponential backoff
    // Note: Alloy's loki.write doesn't have a retry block; retries are handled automatically
    // with exponential backoff by default when the endpoint is unreachable

    // Configurable batch settings for efficient log sending
    batch_size = coalesce(sys.env("LOKI_BATCH_SIZE"), "1MiB") // Maximum batch size before sending
    batch_wait = coalesce(sys.env("LOKI_BATCH_WAIT"), "60s") // Maximum wait time before sending partial batch
  }
}

// ==========================================
// Prometheus Metrics Collection and Forwarding
// ==========================================

// Scrape metrics from GenLayer node(s)
// Supports both single node and multi-node configurations
//
// Single Node Mode:
//   Set NODE_METRICS_ENDPOINT, NODE_ID, VALIDATOR_NAME
//
// Multi-Node Mode:
//   Set SCRAPE_TARGETS_JSON with JSON array of target objects
//   Example: [{"__address__":"host.docker.internal:9250","instance":"0x...","validator_name":"node-1"}]
//
// Note: The "network" label is emitted by the node itself (auto-detected from consensus address),
// so it does not need to be configured here.
//
// NOTE(review): the multi-node example above uses port 9250 while the
// single-node fallback below defaults to 9153 — confirm both ports are
// intentional for their respective deployment modes.
prometheus.scrape "genlayer_node" {
  // Dynamic targets based on environment variable
  // If SCRAPE_TARGETS_JSON is set, use it (multi-node mode)
  // Otherwise, build single target from individual env vars (single node mode)
  targets = encoding.from_json(coalesce(sys.env("SCRAPE_TARGETS_JSON"), string.format("[{\"__address__\":\"%s\",\"instance\":\"%s\",\"validator_name\":\"%s\"}]", coalesce(sys.env("NODE_METRICS_ENDPOINT"), "host.docker.internal:9153"), coalesce(sys.env("NODE_ID"), "local"), coalesce(sys.env("VALIDATOR_NAME"), "default"))))

  forward_to = [prometheus.relabel.metrics.receiver]

  // Configurable scrape intervals
  scrape_interval = coalesce(sys.env("METRICS_SCRAPE_INTERVAL"), "60s")
  scrape_timeout = coalesce(sys.env("METRICS_SCRAPE_TIMEOUT"), "10s")
}

// Relabel metrics to filter before forwarding
prometheus.relabel "metrics" {
  forward_to = [prometheus.remote_write.central.receiver]

  // Option 1: Forward all metrics (default)
  // Currently forwarding all metrics from the node.

  // Option 2: Only keep genlayer_node_* metrics to reduce bandwidth (recommended)
  // To enable filtering and reduce bandwidth, uncomment the following rule:
  /*
  rule {
    source_labels = ["__name__"]
    regex = "genlayer_node_.*"
    action = "keep"
  }
  */
}

// Remote write configuration for sending metrics to central Prometheus.
// CENTRAL_MONITORING_URL / _USERNAME / _PASSWORD must be provided via env.
prometheus.remote_write "central" {
  endpoint {
    url = sys.env("CENTRAL_MONITORING_URL")

    // HTTP Basic Authentication
    basic_auth {
      username = sys.env("CENTRAL_MONITORING_USERNAME")
      password = sys.env("CENTRAL_MONITORING_PASSWORD")
    }

    // Queue configuration for reliability (buffers samples while the
    // central endpoint is slow or temporarily unreachable)
    queue_config {
      capacity = 10000
      max_shards = 5
      max_samples_per_send = 500
      batch_send_deadline = coalesce(sys.env("METRICS_BATCH_SEND_DEADLINE"), "60s")
    }
  }
}

// ==========================================
// Alloy Self-Monitoring
// ==========================================

// Alloy internal exporter for health monitoring
prometheus.exporter.self "alloy" {}

// Expose Alloy's own metrics on the HTTP server.
// Scraped locally but deliberately not forwarded anywhere.
prometheus.scrape "alloy" {
  targets = prometheus.exporter.self.alloy.targets
  forward_to = [] // Not forwarding Alloy metrics to reduce noise

  // Configurable scrape interval for Alloy's internal health monitoring
  scrape_interval = coalesce(sys.env("ALLOY_SELF_MONITORING_INTERVAL"), "60s")
}
18 changes: 18 additions & 0 deletions content/validators/changelog/v0.5.7.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
## v0.5.7

### New features

- Add LLM greyboxing with configurable chain order via YAML meta

### Bug fixes

- Use PendingAt for idle activator rotation
- Remove unnecessary workaround from view calls
- Restart sync pipeline after RPC failures
- Suppress shutdown errors in watchers
- Reduce FindAcceptanceBlock range to fit RPC limit

### Misc

- Include Alloy healthcheck script in release tarball
- Increase Alloy push intervals to 60s
Loading
Loading