From d77175d9cd9eb052a96361a535d412f643961299 Mon Sep 17 00:00:00 2001 From: Vlad <13818348+walldiss@users.noreply.github.com> Date: Wed, 29 Apr 2026 16:13:26 +0200 Subject: [PATCH] feat(tools/talis): vendor talis deployment tool + Fibre experiment runner MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Brings the celestia-app talis multi-cloud deploy tool into ev-node, plus a long-lived ev-node aggregator runner that wires the existing celestia-node-fiber adapter behind ev-node's DA client interface. Verified end-to-end on AWS — talis up → genesis → deploy → setup-fibre → start-fibre → fibre-bootstrap-evnode reaches 24.57 MB/s @ 99.7 % ok on a 60 s sustained loadgen (3 × c6in.4xlarge validators + c6in.2xlarge bridge + c6in.8xlarge ev-node + c6in.2xlarge load-gen, us-east-1). What this adds: • tools/talis/ — vendored from celestia-app's feat/fibre-payments. Provisions AWS / DO / GCP boxes for validators + bridge + ev-node + load-gen, deploys binaries + init scripts, drives the Fibre setup-fibre + start-fibre flow, and ships a fibre-bootstrap-evnode step that scp's the bridge JWT and Fibre payment keyring onto each ev-node before its init script starts the daemon. • tools/celestia-node-fiber/cmd/evnode-fibre/ — the long-lived aggregator runner. Wires block.NewFiberDAClient on top of the celestia-node-fiber adapter that julien/fiber already ships, plus the in-memory executor + HTTP /tx ingress used by evnode-txsim. Distinct from the existing fiber-bench cmd. • tools/talis/cmd/evnode-txsim/ — small Go load-gen that pumps the runner's HTTP /tx ingress for a fixed duration; deployed to load-gen boxes and prints a single TXSIM: line on completion. Two small ev-node-side helpers the runner calls: • block/public.go: SetMaxBlobSize(n) — overrides the per-blob byte cap so the runner can lift Celestia's 5 MiB default to Fibre's 120 MiB headroom. 
• pkg/config/config.go: Config.ApplyFiberDefaults() — flips the DA config to Fibre-friendly settings (adaptive batching, 1 s DA.BlockTime, 50-deep pending-cache window) when the Fiber profile is enabled, so a runner can opt in with one call. setup-fibre robustness fixes uncovered during the verified run: • bash script for set-host now retries until the validator's host appears in `query valaddr providers`. The previous one-shot call relied on `--yes` returning the txhash before block inclusion; if the chain wasn't ready, the tx silently bounced. The Fibre client cached the partial set on startup and uploads cascaded to "host not found" → "voting power: collected 0". • talis-CLI side polls `query valaddr providers` after the per-validator scripts finish and refuses to return until all validators are registered (5-minute deadline). External dependency (documented in tools/talis/fibre.md): • Sibling clone of celestia-app on a branch with feat/fibre-payments + sysrex/fibre_url_fix cherry-picked. Without the URL-parse fix the Fibre client rejects every host:port registration. Tested: - go build ./... 
— clean - go test ./block/internal/submitting ./pkg/config (the two pre-existing test failures on julien/fiber — TestAddFlags and TestFiberClient_Submit_BlobTooLarge — are not introduced by this PR and reproduce on raw julien/fiber) - End-to-end AWS deploy from this branch — 24.57 MB/s, 99.7 % ok --- block/public.go | 8 + go.work | 6 + go.work.sum | 637 +++++++++ pkg/config/config.go | 27 + .../cmd/evnode-fibre/main.go | 553 ++++++++ tools/talis/.gitignore | 2 + tools/talis/Makefile | 175 +++ tools/talis/README.md | 400 ++++++ tools/talis/add.go | 142 ++ tools/talis/aws.go | 1033 ++++++++++++++ tools/talis/client.go | 232 +++ tools/talis/cmd/evnode-txsim/main.go | 242 ++++ tools/talis/config.go | 469 +++++++ tools/talis/deployment.go | 928 ++++++++++++ tools/talis/digital_ocean.go | 529 +++++++ tools/talis/download.go | 261 ++++ tools/talis/download_monitoring.go | 93 ++ tools/talis/env.go | 137 ++ tools/talis/execution.go | 159 +++ tools/talis/fibre.md | 196 +++ tools/talis/fibre_bootstrap_evnode.go | 212 +++ tools/talis/fibre_setup.go | 161 +++ tools/talis/fibre_throughput.go | 240 ++++ tools/talis/fibre_txsim.go | 178 +++ tools/talis/genesis.go | 779 ++++++++++ tools/talis/go.mod | 289 ++++ tools/talis/go.sum | 1248 +++++++++++++++++ tools/talis/google_cloud.go | 825 +++++++++++ tools/talis/init.go | 314 +++++ tools/talis/kpi_reproduction_steps.md | 210 +++ tools/talis/latency_monitor.go | 382 +++++ tools/talis/main.go | 48 + tools/talis/monitor.go | 98 ++ tools/talis/network.go | 354 +++++ tools/talis/observability_payload.go | 143 ++ tools/talis/observability_targets.go | 108 ++ tools/talis/reset.go | 141 ++ tools/talis/s3.go | 168 +++ tools/talis/scripts/monitor.sh | 215 +++ tools/talis/scripts/promtail.sh | 25 + tools/talis/scripts/upload_traces.sh | 45 + tools/talis/scripts/validator_init.sh | 173 +++ tools/talis/scripts/vars.sh | 2 + tools/talis/start_fibre.go | 91 ++ tools/talis/status.go | 74 + tools/talis/sync_node_cmd.go | 677 +++++++++ 
tools/talis/txsim.go | 140 ++ tools/talis/upload_data.go | 54 + tools/talis/util_test.go | 35 + 49 files changed, 13658 insertions(+) create mode 100644 go.work create mode 100644 go.work.sum create mode 100644 tools/celestia-node-fiber/cmd/evnode-fibre/main.go create mode 100644 tools/talis/.gitignore create mode 100644 tools/talis/Makefile create mode 100644 tools/talis/README.md create mode 100644 tools/talis/add.go create mode 100644 tools/talis/aws.go create mode 100644 tools/talis/client.go create mode 100644 tools/talis/cmd/evnode-txsim/main.go create mode 100644 tools/talis/config.go create mode 100644 tools/talis/deployment.go create mode 100644 tools/talis/digital_ocean.go create mode 100644 tools/talis/download.go create mode 100644 tools/talis/download_monitoring.go create mode 100644 tools/talis/env.go create mode 100644 tools/talis/execution.go create mode 100644 tools/talis/fibre.md create mode 100644 tools/talis/fibre_bootstrap_evnode.go create mode 100644 tools/talis/fibre_setup.go create mode 100644 tools/talis/fibre_throughput.go create mode 100644 tools/talis/fibre_txsim.go create mode 100644 tools/talis/genesis.go create mode 100644 tools/talis/go.mod create mode 100644 tools/talis/go.sum create mode 100644 tools/talis/google_cloud.go create mode 100644 tools/talis/init.go create mode 100644 tools/talis/kpi_reproduction_steps.md create mode 100644 tools/talis/latency_monitor.go create mode 100644 tools/talis/main.go create mode 100644 tools/talis/monitor.go create mode 100644 tools/talis/network.go create mode 100644 tools/talis/observability_payload.go create mode 100644 tools/talis/observability_targets.go create mode 100644 tools/talis/reset.go create mode 100644 tools/talis/s3.go create mode 100644 tools/talis/scripts/monitor.sh create mode 100644 tools/talis/scripts/promtail.sh create mode 100644 tools/talis/scripts/upload_traces.sh create mode 100644 tools/talis/scripts/validator_init.sh create mode 100644 tools/talis/scripts/vars.sh 
create mode 100644 tools/talis/start_fibre.go create mode 100644 tools/talis/status.go create mode 100644 tools/talis/sync_node_cmd.go create mode 100644 tools/talis/txsim.go create mode 100644 tools/talis/upload_data.go create mode 100644 tools/talis/util_test.go diff --git a/block/public.go b/block/public.go index e871bd0b9d..695983a3ad 100644 --- a/block/public.go +++ b/block/public.go @@ -19,6 +19,14 @@ func DefaultBlockOptions() BlockOptions { return common.DefaultBlockOptions() } +// SetMaxBlobSize overrides the per-blob byte cap used by the executor +// and DA submitter when sizing batches and validating individual blobs. +// Intended for one-shot startup wiring (e.g. to lift Celestia's 5 MiB +// default to Fibre's 120 MiB headroom). +func SetMaxBlobSize(n uint64) { + common.DefaultMaxBlobSize = n +} + // Expose Metrics for constructor type Metrics = common.Metrics diff --git a/go.work b/go.work new file mode 100644 index 0000000000..6eea3baeca --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.25.7 + +use ( + . 
+ ./core +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000000..0c2fedefea --- /dev/null +++ b/go.work.sum @@ -0,0 +1,637 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go/accessapproval v1.8.8 h1:gq8OS+rQWgGRo91D2qztN+ion6AZ2T1CxBIu0ifCmVo= +cloud.google.com/go/accessapproval v1.8.8/go.mod h1:RFwPY9JDKseP4gJrX1BlAVsP5O6kI8NdGlTmaeDefmk= +cloud.google.com/go/accesscontextmanager v1.9.7 h1:aKIfg7Jyc73pe8bzx0zypNdS5gfFdSvFvB8YNA9k2kA= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= +cloud.google.com/go/aiplatform v1.120.0 h1:jKWTpEs+xoUhDa1FMdSuhMcEQYyUiMdufGyX3zvtLVQ= +cloud.google.com/go/aiplatform v1.120.0/go.mod h1:6mDthfmy0oS1EQhVFdijoxkVdI2+HIZkpuGTBpedeCg= +cloud.google.com/go/analytics v0.30.1 h1:souLxu9tQHzF+0NDpKoIw4pl2WQ9K2JfkdPPs36BfXw= +cloud.google.com/go/analytics v0.30.1/go.mod h1:V/FnINU5kMOsttZnKPnXfKi6clJUHTEXUKQjHxcNK8A= +cloud.google.com/go/apigateway v1.7.7 h1:ehKUTy+QFsb3n07fEi18S2dpDDjCV4UlRyrbwfZV3Zk= +cloud.google.com/go/apigateway v1.7.7/go.mod h1:j1bCmrUK1BzVHpiIyTApxB7cRyhivKzltqLmp6j6i7U= +cloud.google.com/go/apigeeconnect v1.7.7 h1:S6s2zojwMymx0fyZYKm0eK1TdDxrriIBAlNVvRAOzug= +cloud.google.com/go/apigeeconnect v1.7.7/go.mod h1:ftGK3nca0JePiVLl0A6alaMjKdOc5C+sAkFMyH2RH8U= +cloud.google.com/go/apigeeregistry v0.10.0 h1:QziFVsuPU2lhy40Ht9uWEyciV23SH9GETWiwcu3qzdg= +cloud.google.com/go/apigeeregistry v0.10.0/go.mod h1:SAlF5OhKvyLDuwWAaFAIVJjrEqKRrGTPkJs+TWNnSqg= +cloud.google.com/go/appengine v1.9.7 h1:IxGz6j5xv0nTJX285wu95Vn6KEi2CeV9vbyRgCSEAoU= +cloud.google.com/go/appengine v1.9.7/go.mod h1:y1XpGVeAhbsNzHida79cHbr3pFRsym0ob8xnC8yphbo= +cloud.google.com/go/area120 v0.10.0 h1:8oNFb5jJZLh0/prs0yJiYJpIC5qDcQ8u+Mfhe30pSx0= +cloud.google.com/go/area120 v0.10.0/go.mod h1:Xg3fKl4xU3UVai9wsI1FXwNU8wSCDYT7dFZfwJKViAM= 
+cloud.google.com/go/artifactregistry v1.20.0 h1:j/XQiQfaeTyQeNj3HNk4iDFREVnY/fxkHIjsxpaDs8A= +cloud.google.com/go/artifactregistry v1.20.0/go.mod h1:0G9wdbGyDFkvrYH+2AlQs9MuTJdbY8Vg45M8VjlI8rc= +cloud.google.com/go/asset v1.22.1 h1:wimPPWu5gjBkPY1576vr+YxfoLKVhAK9zM2XrEpdKQ4= +cloud.google.com/go/asset v1.22.1/go.mod h1:NlvWwmca7CX6BIBEdRNxOocH6DowmBghAAHucOHuHng= +cloud.google.com/go/assuredworkloads v1.13.0 h1:NQXyyGLksPmiapE1Oc64a3cMwYIBAoDBg6cWR+B3eaY= +cloud.google.com/go/assuredworkloads v1.13.0/go.mod h1:o/oHEOnUlribR+uJWTKQo8A5RhSl9K9FNeMOew4TJ3M= +cloud.google.com/go/automl v1.15.0 h1:YRwLbsBv4yApX64pkrdyy4emhWE6lHEnljX4b1aTQC4= +cloud.google.com/go/automl v1.15.0/go.mod h1:U9zOtQb8zVrFNGTuW3BfxeqmLyeleLgT9B12EaXfODg= +cloud.google.com/go/baremetalsolution v1.4.0 h1:g67fjVdrNCHZl8jDWdZvo+6zGTTMMuvNWO7HSgG8lnI= +cloud.google.com/go/baremetalsolution v1.4.0/go.mod h1:K6C6g4aS8LW95I0fEHZiBsBlh0UxwDLGf+S/vyfXbvg= +cloud.google.com/go/batch v1.14.0 h1:r5DEMPNXZk1as36Le3DaNQTRhhnR+E95a99SFxwF52o= +cloud.google.com/go/batch v1.14.0/go.mod h1:oeQveyG6NDS/ks2ilOP4LzKRmuIaI7GLe0CkR7WF6pk= +cloud.google.com/go/beyondcorp v1.2.0 h1:mre997ya7QHFWSU+O5cT/FhBKTMy6Riqf1EXFxN46zw= +cloud.google.com/go/beyondcorp v1.2.0/go.mod h1:sszcgxpPPBEfLzbI0aYCTg6tT1tyt3CmKav3NZIUcvI= +cloud.google.com/go/bigquery v1.74.0 h1:Q6bAMv+eyvufOpIrfrYxhM46qq1D3ZQTdgUDQqKS+n8= +cloud.google.com/go/bigquery v1.74.0/go.mod h1:iViO7Cx3A/cRKcHNRsHB3yqGAMInFBswrE9Pxazsc90= +cloud.google.com/go/bigtable v1.42.0 h1:SREvT4jLhJQZXUjsLmFs/1SMQJ+rKEj1cJuPE9liQs8= +cloud.google.com/go/bigtable v1.42.0/go.mod h1:oZ30nofVB6/UYGg7lBwGLWSea7NZUvw/WvBBgLY07xU= +cloud.google.com/go/billing v1.21.0 h1:nbQjTXkpgB/E4XnYZQwcZnR63QFsbFwJ9DGsNg61Ghg= +cloud.google.com/go/billing v1.21.0/go.mod h1:ZGairB3EVnb3i09E2SxFxo50p5unPaMTuo1jh6jW9js= +cloud.google.com/go/binaryauthorization v1.10.0 h1:YYK0BwiZv9uA6z+Ict908AykX4OBfDECMTE476OnS3A= +cloud.google.com/go/binaryauthorization v1.10.0/go.mod 
h1:WOuiaQkI4PU/okwrcREjSAr2AUtjQgVe+PlrXKOmKKw= +cloud.google.com/go/certificatemanager v1.9.6 h1:v5X8X+THKrS9OFZb6k0GRDP1WQxLXTdMko7OInBliw4= +cloud.google.com/go/certificatemanager v1.9.6/go.mod h1:vWogV874jKZkSRDFCMM3r7wqybv8WXs3XhyNff6o/Zo= +cloud.google.com/go/channel v1.21.0 h1:ThoAmHBd9WkX2SSuF6n6uEOvbBNoTuhBT7Rk6bFS5ho= +cloud.google.com/go/channel v1.21.0/go.mod h1:8v3TwHtgLmFxTpL2U+e10CLFOQN8u/Vr9RhYcJUS3y8= +cloud.google.com/go/cloudbuild v1.25.0 h1:Fkg+iJdN7bfICZJzLr/XV+k9aVxXS/hakIlhjDIRIDw= +cloud.google.com/go/cloudbuild v1.25.0/go.mod h1:lCu+T6IPkobPo2Nw+vCE7wuaAl9HbXLzdPx/tcF+oWo= +cloud.google.com/go/clouddms v1.8.8 h1:YWsmRXTyK6Ba0hm4qTBak5g1oLhryuM8rSBxHWC8iq4= +cloud.google.com/go/clouddms v1.8.8/go.mod h1:QtCyw+a73dlkDb2q20aTAPvfaTZCepDDi6Gb1AKq0a4= +cloud.google.com/go/cloudtasks v1.13.7 h1:H2v8GEolNtMFfYzUpZBaZbydqU7drpyo99GtAgA+m4I= +cloud.google.com/go/cloudtasks v1.13.7/go.mod h1:H0TThOUG+Ml34e2+ZtW6k6nt4i9KuH3nYAJ5mxh7OM4= +cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= +cloud.google.com/go/compute v1.54.0/go.mod h1:RfBj0L1x/pIM84BrzNX2V21oEv16EKRPBiTcBRRH1Ww= +cloud.google.com/go/contactcenterinsights v1.17.4 h1:wA4j99BhsoeYlLx6xEIqrNN1aOTtUme0wimHZegg80s= +cloud.google.com/go/contactcenterinsights v1.17.4/go.mod h1:kZe6yOnKDfpPz2GphDHynxk/Spx+53UX/pGf+SmWAKM= +cloud.google.com/go/container v1.46.0 h1:xX94Lo3xrS5OkdMWKvpEVAbBwjN9uleVv6vOi02fL4s= +cloud.google.com/go/container v1.46.0/go.mod h1:A7gMqdQduTk46+zssWDTKbGS2z46UsJNXfKqvMI1ZO4= +cloud.google.com/go/containeranalysis v0.14.2 h1:OW2dlMPtR5VnjQGyAP+uJlZahc1l+JFxFlH/J3+l7gw= +cloud.google.com/go/containeranalysis v0.14.2/go.mod h1:FjppROiUtP9cyMegdWdY/TsBSGc6kqh1GjA2NOJXXL8= +cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCblhgC2k= +cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= +cloud.google.com/go/dataflow v0.11.1 h1:Z+UYlGrE+IoB+5IAN4/qWdPKO0IpIK9bs2Dy40HK6lg= 
+cloud.google.com/go/dataflow v0.11.1/go.mod h1:3s6y/h5Qz7uuxTmKJKBifkYZ3zs63jS+6VGtSu8Cf7Y= +cloud.google.com/go/dataform v0.13.0 h1:z4nzTOqGSkJ5ePyJLQiUDTBsPHdokzvNNDhGebGQEUM= +cloud.google.com/go/dataform v0.13.0/go.mod h1:U3fqrPY5jAcFh1a8rQb4a+PQ7zKlc5qfgotFZ+luKPo= +cloud.google.com/go/datafusion v1.8.7 h1:tLCV+xYuOrSjdrRTkc9Cqsb5mBSQEsNfFmuTNYl5/rA= +cloud.google.com/go/datafusion v1.8.7/go.mod h1:4dkFb1la41qCEXh1AzYtFwl842bu2ikTUXyKhjvFCb0= +cloud.google.com/go/datalabeling v0.9.7 h1:wwoct7mw38s75XvEmLoItQ2TY0RFsGiRDb0iNbXUcX4= +cloud.google.com/go/datalabeling v0.9.7/go.mod h1:EEUVn+wNn3jl19P2S13FqE1s9LsKzRsPuuMRq2CMsOk= +cloud.google.com/go/dataplex v1.28.0 h1:rROI3iqMVI9nXT701ULoFRETQVAOAPC3mPSWFDxXFl0= +cloud.google.com/go/dataplex v1.28.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA= +cloud.google.com/go/dataproc/v2 v2.16.0 h1:0g2hnjlQ8SQTnNeu+Bqqa61QPssfSZF3t+9ldRmx+VQ= +cloud.google.com/go/dataproc/v2 v2.16.0/go.mod h1:HlzFg8k1SK+bJN3Zsy2z5g6OZS1D4DYiDUgJtF0gJnE= +cloud.google.com/go/dataqna v0.9.8 h1:3FREvU+sjaEHSjlKrKF6KjUmafdOvM8CbZ897rttxNs= +cloud.google.com/go/dataqna v0.9.8/go.mod h1:2lHKmGPOqzzuqCc5NI0+Xrd5om4ulxGwPpLB4AnFgpA= +cloud.google.com/go/datastore v1.22.0 h1:FOyx2Ag6ibD2wFkz9S8EiNrmBugia8pQOfpyJxi2yqA= +cloud.google.com/go/datastore v1.22.0/go.mod h1:aopSX+Whx0lHspWWBj+AjWt68/zjYsPfDe3LjWtqZg8= +cloud.google.com/go/datastream v1.15.1 h1:7PKeDpksi8nbOR4gspmNokzsr0q/uRzDIt20bR3BtRs= +cloud.google.com/go/datastream v1.15.1/go.mod h1:aV1Grr9LFon0YvqryE5/gF1XAhcau2uxN2OvQJPpqRw= +cloud.google.com/go/deploy v1.27.3 h1:QU8gLXsXDRqLyEWNrI6zJiVzuuOBX/WpMi4p0oexV+c= +cloud.google.com/go/deploy v1.27.3/go.mod h1:7LFIYYTSSdljYRqY3n+JSmIFdD4lv6aMD5xg0crB5iw= +cloud.google.com/go/dialogflow v1.76.0 h1:hP9GY9TSVlZ277IGCPQjem9RW1PDtfYJw98DkRGt+t4= +cloud.google.com/go/dialogflow v1.76.0/go.mod h1:mdLkMmSCghfcP85X9dFBlirC1OssS65KE5hrrSz2GXY= +cloud.google.com/go/dlp v1.28.0 h1:+aMQYODOxCCZHpdzKvv/rIc9CbKd6XVmjVBRjaF8UvQ= 
+cloud.google.com/go/dlp v1.28.0/go.mod h1:C3od1fIK8lf7Kr62aU1Uh0z4OL5Z8s3do3znAiEupAw= +cloud.google.com/go/documentai v1.42.0 h1:FErf7mEjf3TBGiwcXQCsLrQ3mUqryTKa09NiO1L8Y90= +cloud.google.com/go/documentai v1.42.0/go.mod h1:CABOUzRNOuvb/QwJS2LS80Hpqbu3UW2afyRKTYuW7bo= +cloud.google.com/go/domains v0.10.7 h1:G3kUq0vKBMhyOj5GqAfEYbVuez05U+ENHZUAtrEp/pI= +cloud.google.com/go/domains v0.10.7/go.mod h1:T3WG/QUAO/52z4tUPooKS8AY7yXaFxPYn1V3F0/JbNQ= +cloud.google.com/go/edgecontainer v1.4.4 h1:6KTQo6Qf0iEtfPVotlG7orazEO1I93Ham0PMlkHYpdQ= +cloud.google.com/go/edgecontainer v1.4.4/go.mod h1:yyNVHsCKtsX/0mqFdbljQw0Uo660q2dlMPaiqYiC2Tg= +cloud.google.com/go/errorreporting v0.4.0 h1:uLcasn2hKpj6iSPvHrzRjkJcaNVaKx8yKQcP3VTS6aI= +cloud.google.com/go/errorreporting v0.4.0/go.mod h1:dZGEhqzdHZSRxxWLVjC3Ue5CVaROzvP58D9rU6zbBfw= +cloud.google.com/go/essentialcontacts v1.7.7 h1:v9sO4IHFuwplaOuDnEXZFtfOrjw2bi11TSIVp5PnAU4= +cloud.google.com/go/essentialcontacts v1.7.7/go.mod h1:ytycWAEn/aKUMRKQPMVgMrAtphEMgjbzL8vFwM3tqXs= +cloud.google.com/go/eventarc v1.18.0 h1:8WWG1/ogInYur1NQjML6EMHQ0ZBzAdMDGlUVpLD56cI= +cloud.google.com/go/eventarc v1.18.0/go.mod h1:/6SDoqh5+9QNUqCX4/oQcJVK16fG/snHBSXu7lrJtO8= +cloud.google.com/go/filestore v1.10.3 h1:3KZifUVTqGhNNv6MLeONYth1HjlVM4vDhaH+xrdPljU= +cloud.google.com/go/filestore v1.10.3/go.mod h1:94ZGyLTx9j+aWKozPQ6Wbq1DuImie/L/HIdGMshtwac= +cloud.google.com/go/firestore v1.21.0 h1:BhopUsx7kh6NFx77ccRsHhrtkbJUmDAxNY3uapWdjcM= +cloud.google.com/go/firestore v1.21.0/go.mod h1:1xH6HNcnkf/gGyR8udd6pFO4Z7GWJSwLKQMx/u6UrP4= +cloud.google.com/go/functions v1.19.7 h1:7LcOD18euIVGRUPaeCmgO6vfWSLNIsi6STWRQcdANG8= +cloud.google.com/go/functions v1.19.7/go.mod h1:xbcKfS7GoIcaXr2FSwmtn9NXal1JR4TV6iYZlgXffwA= +cloud.google.com/go/gkebackup v1.8.1 h1:gUgI3lZJYALZsHXE7YJOKI8bMpoAX/tF6jnNugvzT1g= +cloud.google.com/go/gkebackup v1.8.1/go.mod h1:GAaAl+O5D9uISH5MnClUop2esQW4pDa2qe/95A4l7YQ= +cloud.google.com/go/gkeconnect v0.12.5 
h1:EFql3zRaFw74yATt5lf+mcPDqPZ4EeLvoIJ+0NaEkag= +cloud.google.com/go/gkeconnect v0.12.5/go.mod h1:wMD2RXcsAWlkREZWJDVeDV70PYka1iEb9stFmgpw+5o= +cloud.google.com/go/gkehub v0.16.0 h1:Jk5pAXG54FlQzTRXhuKyym/NzOgS8oWRs0XNatZYDf4= +cloud.google.com/go/gkehub v0.16.0/go.mod h1:ADp27Ucor8v81wY+x/5pOxTorxkPj/xswH3AUpN62GU= +cloud.google.com/go/gkemulticloud v1.6.0 h1:m0FX9o7t7xVmSZhqzm/m8nEZn8LnC5Kh60Wg4Yx1lyQ= +cloud.google.com/go/gkemulticloud v1.6.0/go.mod h1:bGpd4o/Z5Z/XFlaojkgdVisHRwb+fLJvUPzsmV0I9ok= +cloud.google.com/go/gsuiteaddons v1.7.8 h1:Dayrv57XW8kZIvmQjAc89Tp7Kr3O9Am/hf6pXkTjYFY= +cloud.google.com/go/gsuiteaddons v1.7.8/go.mod h1:DBKNHH4YXAdd/rd6zVvtOGAJNGo0ekOh+nIjTUDEJ5U= +cloud.google.com/go/iap v1.11.3 h1:Nheb77nO0/pECm/thoE3wHVAbkQSI+G8KBWviqBepiA= +cloud.google.com/go/iap v1.11.3/go.mod h1:+gXO0ClH62k2LVlfhHzrpiHQNyINlEVmGAE3+DB4ShU= +cloud.google.com/go/ids v1.5.7 h1:V0pSk+KKW+5/AVpeQMhM9D1VI7aMZkayj5jddNETJos= +cloud.google.com/go/ids v1.5.7/go.mod h1:N3ZQOIgIBwwOu2tzyhmh3JDT+kt8PcoKkn2BRT9Qe4A= +cloud.google.com/go/iot v1.8.7 h1:PDUtxCzlFwFHODEFAgaGJy/Zv4tdvLbZ+lvZ1mKQXE4= +cloud.google.com/go/iot v1.8.7/go.mod h1:HvVcypV8LPv1yTXSLCNK+YCtqGHhq+p0F3BXETfpN+U= +cloud.google.com/go/language v1.14.6 h1:/0Fbd3/T4oNmpPqIq5/hrWdHc/eoYGtVH5lDNkuHH3k= +cloud.google.com/go/language v1.14.6/go.mod h1:7y3J9OexQsfkWNGCxhT+7lb64pa60e12ZCoWDOHxJ1M= +cloud.google.com/go/lifesciences v0.10.7 h1:MO5aBahcYv7JeuCpHbg/11h7KL/BYt1+PpgHhleLDbI= +cloud.google.com/go/lifesciences v0.10.7/go.mod h1:v3AbTki9iWttEls/Wf4ag3EqeLRHofploOcpsLnu7iY= +cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= +cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= +cloud.google.com/go/managedidentities v1.7.7 h1:vC/q7D+97PZfb0UNf7r/+/clHauuaf1PqWwP7neuaeg= +cloud.google.com/go/managedidentities v1.7.7/go.mod h1:nwNlMxtBo2YJMvsKXRtAD1bL41qiCI9npS7cbqrsJUs= +cloud.google.com/go/maps v1.29.0 
h1:iAlFpnckCAshFpmHPDUYpasXn0pH4OVMDfkb3jB/fDQ= +cloud.google.com/go/maps v1.29.0/go.mod h1:FNATcM5ziB2TDE2IVWH4f/yeXc+SbUk1X+bmKjR8HEA= +cloud.google.com/go/mediatranslation v0.9.7 h1:JXbjms+JxgaWkj/YuaQm1OeCzuF+IZCDV17uUcZgFOU= +cloud.google.com/go/mediatranslation v0.9.7/go.mod h1:mz3v6PR7+Fd/1bYrRxNFGnd+p4wqdc/fyutqC5QHctw= +cloud.google.com/go/memcache v1.11.7 h1:ZDIfIMZsKKPzwdbvTMOL1il0shX24J7B9DC+sEt4Yj4= +cloud.google.com/go/memcache v1.11.7/go.mod h1:AU1jYlUqCihxapcJ1GGMtlMWDVhzjbfUWBXqsXa4rBg= +cloud.google.com/go/metastore v1.14.8 h1:nfyUDD9AeKIs6btY5buQ1No0OVco20WpX9wIruL8UOA= +cloud.google.com/go/metastore v1.14.8/go.mod h1:h1XI2LpD4ohJhQYn9TwXqKb5sVt6KSo47ft96SiFF1s= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/networkconnectivity v1.21.0 h1:WS5XTNWyLODLO5YmftQmDIZtAa2DYYmRf/neRCLIWHA= +cloud.google.com/go/networkconnectivity v1.21.0/go.mod h1:XC1UJ+tqBsLWz73dqrMc7kUvdTv0FIxtDGv6YntTBO0= +cloud.google.com/go/networkmanagement v1.23.0 h1:PiteUY9H2u+wMgT1dQjD93PKI90o10RgL8+OhUov970= +cloud.google.com/go/networkmanagement v1.23.0/go.mod h1:QTYCWp5UxUnU280SqF7AX/mf6NhsqKblmLeCALQmx5c= +cloud.google.com/go/networksecurity v0.11.0 h1:+ahtCqEqwHw3a3UIeG21vT817xt9kkDDAO6k9+LCc18= +cloud.google.com/go/networksecurity v0.11.0/go.mod h1:JLgDsg4tOyJ3eMO8lypjqMftbfd60SJ+P7T+DUmWBsM= +cloud.google.com/go/notebooks v1.12.7 h1:g5LTI1LHa/86abDTWd8nrq7/4qq8oFhVx1SmnNpZLVg= +cloud.google.com/go/notebooks v1.12.7/go.mod h1:uR9pxAkKmlNloibMr9Q1t8WhIu4P2JeqJs7c064/0Mo= +cloud.google.com/go/optimization v1.7.7 h1:dMtxINB6G7wULbdm8nZ/x1NMa579Q+GfJc5gaN8VeDw= +cloud.google.com/go/optimization v1.7.7/go.mod h1:OY2IAlX23o52qwMAZ0w65wibKuV12a4x6IHDTCq6kcU= +cloud.google.com/go/orchestration v1.11.10 h1:TVWDiZyvcflLFeTQH2GexHmtJ6iUSjzr0zsSiT338dA= +cloud.google.com/go/orchestration v1.11.10/go.mod 
h1:tz7m1s4wNEvhNNIM3JOMH0lYxBssu9+7si5MCPw/4/0= +cloud.google.com/go/orgpolicy v1.15.1 h1:0hq12wxNwcfUMojr5j3EjWECSInIuyYDhkAWXTomRhc= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.16.0 h1:0L635e0OSdWylzE/v40Riko6p142PVmWL8Rt+9fbPO4= +cloud.google.com/go/osconfig v1.16.0/go.mod h1:PRmLgZ1loD1hGaqnTBww1nETbqcqAvmTQOLYiIZ7Nvk= +cloud.google.com/go/oslogin v1.14.7 h1:YQ8P/+MLwH0tpENYU9QOgwKQxe8DYfAKxIfm6y+OBtA= +cloud.google.com/go/oslogin v1.14.7/go.mod h1:NB6NqBHfDMwznePdBVX+ILllc1oPCdNSGp5u/WIyndY= +cloud.google.com/go/phishingprotection v0.9.7 h1:ZJqHirY2/H6s+uTq1y1iiVASzm3ZuDiMglT5NXywPBE= +cloud.google.com/go/phishingprotection v0.9.7/go.mod h1:JTI4HNGyAbWolBoNOoCyCF0e3cqPNrYnlievHU49EwE= +cloud.google.com/go/policytroubleshooter v1.11.7 h1:Bbj1EiVh96u9mfO2p+JNoHrvvyC0Ms6zP+vxqQnsaG8= +cloud.google.com/go/policytroubleshooter v1.11.7/go.mod h1:JP/aQ+bUkt4Gz6lQXBi/+A/6nyNRZ0Pvxui5Xl9ieyk= +cloud.google.com/go/privatecatalog v0.10.8 h1:yOdy85WDvSCPxAMixkhs5X0Z96D74kosgOTp7aJEYvU= +cloud.google.com/go/privatecatalog v0.10.8/go.mod h1:BkLHi+rtAGYBt5DocXLytHhF0n6F03Tegxgty40Y7aA= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= +cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= +cloud.google.com/go/pubsublite v1.8.2 h1:jLQozsEVr+c6tOU13vDugtnaBSUy/PD5zK6mhm+uF1Y= +cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= +cloud.google.com/go/recaptchaenterprise/v2 v2.21.0 h1:zHaPdgmV3LmzaUfn9Xiiqp5zE1Y16f0O8XCwERrAs2E= +cloud.google.com/go/recaptchaenterprise/v2 v2.21.0/go.mod h1:HxQYqZC2/zl2CvKN7jJEv71vEdDi1GMGNUiZxnpiuVI= +cloud.google.com/go/recommendationengine v0.9.7 
h1:NH89CyKQP8e98kpdKLwV0jXkQGzSEEZia0V867vkoy8= +cloud.google.com/go/recommendationengine v0.9.7/go.mod h1:snZ/FL147u86Jqpv1j95R+CyU5NvL/UzYiyDo6UByTM= +cloud.google.com/go/recommender v1.13.6 h1:ZVZg4wr1G7yzjIPcYUNSUJAaz9+2o78rmBU4QJgC7kg= +cloud.google.com/go/recommender v1.13.6/go.mod h1:y5/5womtdOaIM3xx+76vbsiA+8EBTIVfWnxHDFHBGJM= +cloud.google.com/go/redis v1.18.3 h1:6LI8zSt+vmE3WQ7hE5GsJ13CbJBLV1qUw6B7CY31Wcw= +cloud.google.com/go/redis v1.18.3/go.mod h1:x8HtXZbvMBDNT6hMHaQ022Pos5d7SP7YsUH8fCJ2Wm4= +cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= +cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= +cloud.google.com/go/resourcesettings v1.8.3 h1:13HOFU7v4cEvIHXSAQbinF4wp2Baybbq7q9FMctg1Ek= +cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw= +cloud.google.com/go/retail v1.26.0 h1:yOoyJs/IlLmohXzgDgF9N8xQYbJKIKtCw4oGAoYZpNY= +cloud.google.com/go/retail v1.26.0/go.mod h1:gMfh6s174Mvy1rK4g50J9TH5sRim8px+Krml25kdrqo= +cloud.google.com/go/run v1.15.0 h1:4cwyNv9SUQEsQOf5/DfPKyMWYSA52p38/o119BgMhO4= +cloud.google.com/go/run v1.15.0/go.mod h1:rgFHMdAopLl++57vzeqA+a1o2x0/ILZnEacRD6nC0EA= +cloud.google.com/go/scheduler v1.11.8 h1:BoXY2BvBsaRw3ggVMzC9tborZqJBu+NcJcD9PqeC5Kc= +cloud.google.com/go/scheduler v1.11.8/go.mod h1:bNKU7/f04eoM6iKQpwVLvFNBgGyJNS87RiFN73mIPik= +cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= +cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= +cloud.google.com/go/security v1.19.2 h1:cF3FkCRRbRC1oXuaGZFl3qU2sdu2gP3iOAHKzL5y04Y= +cloud.google.com/go/security v1.19.2/go.mod h1:KXmf64mnOsLVKe8mk/bZpU1Rsvxqc0Ej0A6tgCeN93w= +cloud.google.com/go/securitycenter v1.38.1 h1:D9zpeguY4frQU35GBw8+M6Gw79CiuTF9iVs4sFm3FDY= +cloud.google.com/go/securitycenter v1.38.1/go.mod h1:Ge2D/SlG2lP1FrQD7wXHy8qyeloRenvKXeB4e7zO6z0= 
+cloud.google.com/go/servicedirectory v1.12.7 h1:je2yZlVcVFI/TshPXjjF9ZAlWedj0s5EbO2kozJrzBo= +cloud.google.com/go/servicedirectory v1.12.7/go.mod h1:gOtN+qbuCMH6tj2dqlDY3qQL7w3V0+nkWaZElnJK8Ps= +cloud.google.com/go/shell v1.8.7 h1:K1C9sh9EuNNhGpyCoqRdeudcU9zmfYTA95bhF5cokK8= +cloud.google.com/go/shell v1.8.7/go.mod h1:OTke7qc3laNEW5Jr5OV9VR3IwU5x5VqGOE6705zFex4= +cloud.google.com/go/spanner v1.88.0 h1:HS+5TuEYZOVOXj9K+0EtrbTw7bKBLrMe3vgGsbnehmU= +cloud.google.com/go/spanner v1.88.0/go.mod h1:MzulBwuuYwQUVdkZXBBFapmXee3N+sQrj2T/yup6uEE= +cloud.google.com/go/speech v1.30.0 h1:R+KGIbRMrj8jA4U6Qea8hqCMsAEdg576ShNsmRr4gcQ= +cloud.google.com/go/speech v1.30.0/go.mod h1:F2+NJujR8uzDLd6bwy5kgtVycxvEq06nzvzz5eQ/gMo= +cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI= +cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU= +cloud.google.com/go/storagetransfer v1.13.1 h1:Sjukr1LtUt7vLTHNvGc2gaAqlXNFeDFRIRmWGrFaJlY= +cloud.google.com/go/storagetransfer v1.13.1/go.mod h1:S858w5l383ffkdqAqrAA+BC7KlhCqeNieK3sFf5Bj4Y= +cloud.google.com/go/talent v1.8.4 h1:1kJJ+WCY5LZ1A4rCa32zKh3N2xT3I8koiS63+vV0WC4= +cloud.google.com/go/talent v1.8.4/go.mod h1:3yukBXUTVFNyKcJpUExW/k5gqEy8qW6OCNj7WdN0MWo= +cloud.google.com/go/texttospeech v1.16.0 h1:Ra4w+6qmaeb12ozlPBqGw8Jzdge1yfzhvZgcXWdXw30= +cloud.google.com/go/texttospeech v1.16.0/go.mod h1:AeSkoH3ziPvapsuyI07TWY4oGxluAjntX+pF4PJ2jy0= +cloud.google.com/go/tpu v1.8.4 h1:5DDheA1f7yZ/KUbVT/9lL+Yhgd3IqHDSVVrSqDVkAFY= +cloud.google.com/go/tpu v1.8.4/go.mod h1:ul0cyWSHr6jHGZYElZe6HvQn35VY93RAlwpDiSBRnPA= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= +cloud.google.com/go/translate v1.12.7 h1:aSxMbfJ3MVmEdQzu5jGXmPPxCAb1ySsor2yBMCI5MT4= +cloud.google.com/go/translate v1.12.7/go.mod h1:wwJp14NZyWvcrFANhIXutXj0pOBkYciBHwSlUOykcjI= +cloud.google.com/go/video 
v1.27.1 h1:Hp+2AeM7b3AagdHcyh2820UTzSbGyqpFJVMu0nHbBcw= +cloud.google.com/go/video v1.27.1/go.mod h1:xzfAC77B4vtnbi/TT3UUxEjCa/+Ehy5EA8w470ytOig= +cloud.google.com/go/videointelligence v1.12.7 h1:FisUrSZ+y3oLuGdlFQQgZoNTDm7FAfb2hwSTsSqX+9g= +cloud.google.com/go/videointelligence v1.12.7/go.mod h1:XAk5hCMY+GihxJ55jNoMdwdXSNZnCl3wGs2+94gK7MA= +cloud.google.com/go/vision/v2 v2.9.6 h1:9UtOINPF8p9VACQ6KAyR/ZtkpuBHGmJsprutYupDcN0= +cloud.google.com/go/vision/v2 v2.9.6/go.mod h1:lJC+vP15D5znJvHQYjEoTKnpToX1L93BUlvBmzM0gyg= +cloud.google.com/go/vmmigration v1.10.0 h1:6AvttGxASQTiuIsNKUKOKsRiQG4qTMOY4KMyBhdZa1w= +cloud.google.com/go/vmmigration v1.10.0/go.mod h1:LDztCWEb+RwS1bPg4Xzt0fcJS9kVrFxa3ejhH7OW9vg= +cloud.google.com/go/vmwareengine v1.3.6 h1:TKvULKbk44QrIx674cnoVjcZueXhyCAm2sNAJu/S1ds= +cloud.google.com/go/vmwareengine v1.3.6/go.mod h1:ps0rb+Skgpt9ppHYC0o5DqtJ5ld2FyS8sAqtbHH8t9s= +cloud.google.com/go/vpcaccess v1.8.7 h1:K6siDR1T4HgSTv6sy6CAwupY7UGza6TQ1O8jtvEYoX4= +cloud.google.com/go/vpcaccess v1.8.7/go.mod h1:9RYw5bVvk4Z51Rc8vwXT63yjEiMD/l7XyEaDyrNHgmk= +cloud.google.com/go/webrisk v1.11.2 h1:q6zEdVgD8Ka+4fQl3azDcSNRug8clNnQ9iVS2iLh+MM= +cloud.google.com/go/webrisk v1.11.2/go.mod h1:yH44GeXz5iz4HFsIlGeoVvnjwnmfbni7Lwj1SelV4f0= +cloud.google.com/go/websecurityscanner v1.7.7 h1:udhvvDDRryM3nrITJk/eQe74D06KK2N3SF60/FH2njQ= +cloud.google.com/go/websecurityscanner v1.7.7/go.mod h1:ng/PzARaus3Bj4Os4LpUnyYHsbtJky1HbBDmz148v1o= +cloud.google.com/go/workflows v1.14.3 h1:FGF6QEl3rtOSIHPOMZofWRVy3KNx26jDdgoYzJZ6ZhY= +cloud.google.com/go/workflows v1.14.3/go.mod h1:CC9+YdVI2Kvp0L58WajHpEfKJxhrtRh3uQ0SYWcmAk4= +codeberg.org/go-fonts/liberation v0.5.0 h1:SsKoMO1v1OZmzkG2DY+7ZkCL9U+rrWI09niOLfQ5Bo0= +codeberg.org/go-fonts/liberation v0.5.0/go.mod h1:zS/2e1354/mJ4pGzIIaEtm/59VFCFnYC7YV6YdGl5GU= +codeberg.org/go-latex/latex v0.1.0 h1:hoGO86rIbWVyjtlDLzCqZPjNykpWQ9YuTZqAzPcfL3c= +codeberg.org/go-latex/latex v0.1.0/go.mod h1:LA0q/AyWIYrqVd+A9Upkgsb+IqPcmSTKc9Dny04MHMw= 
+codeberg.org/go-pdf/fpdf v0.10.0 h1:u+w669foDDx5Ds43mpiiayp40Ov6sZalgcPMDBcZRd4= +codeberg.org/go-pdf/fpdf v0.10.0/go.mod h1:Y0DGRAdZ0OmnZPvjbMp/1bYxmIPxm0ws4tfoPOc4LjU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= +git.sr.ht/~sbinet/gg v0.6.0 h1:RIzgkizAk+9r7uPzf/VfbJHBMKUr0F5hRFxTUGMnt38= +git.sr.ht/~sbinet/gg v0.6.0/go.mod h1:uucygbfC9wVPQIfrmwM2et0imr8L7KQWywX0xpFMm94= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Jorropo/jsync v1.0.1 
h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= +github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6 h1:5kUcJJAKWWI82Xnp/CaU0eu5hLlHkmm9acjowSkwCd0= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5 h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/antihax/optional v1.0.0 
h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1 h1:w/fPGB0t5rWwA43mux4e9ozFSH5zF1moQemlA131PWc= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU= +github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= +github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= +github.com/casbin/casbin/v2 v2.37.0 h1:/poEwPSovi4bTOcP752/CsTQiRz2xycyVKFG7GUhbDw= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/clbanning/mxj 
v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk= +github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod 
h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= +github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= +github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034 h1:BuCyszxPxUjBrYW2HNVrimC0rBUs2U27jCJGVh0IKTM= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/francoispqt/gojay v1.2.13 
h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM= +github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gammazero/chanqueue v1.1.2 h1:dZEsxlyANZMyeTRemABqZF8QM9BnE4NBI43Oh3y5fIU= +github.com/gammazero/chanqueue v1.1.2/go.mod h1:XDN1X/jjAbmSceNFOQbtKToeSkxtdVdpKu90LiEdBEE= +github.com/gammazero/deque v1.2.1 h1:9fnQVFCCZ9/NOc7ccTNqzoKd1tCWOqeI05/lPqFPMGQ= +github.com/gammazero/deque v1.2.1/go.mod h1:5nSFkzVm+afG9+gy0VIowlqVAW4N8zNcMne+CMQVD2g= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= +github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/goccmack/gocc v1.0.2 h1:PHv20lcM1Erz+kovS+c07DnDFp6X5cvghndtTXuEyfE= +github.com/goccmack/gocc v1.0.2/go.mod h1:LXX2tFVUggS/Zgx/ICPOr3MLyusuM7EcbfkPvNsjdO8= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/go-pkcs11 v0.3.0 
h1:PVRnTgtArZ3QQqTGtbtjtnIkzl2iY2kt24yqbrf7td8= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/guillaumemichel/reservedpool v0.3.0 h1:eqqO/QvTllLBrit7LVtVJBqw4cD0WdV9ajUe7WNTajw= +github.com/guillaumemichel/reservedpool v0.3.0/go.mod h1:sXSDIaef81TFdAJglsCFCMfgF5E5Z5xK1tFhjDhvbUc= +github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= +github.com/hashicorp/consul/api v1.14.0/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= 
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= +github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/serf v0.10.0 h1:89qvvpfMQnz6c2y4pv7j2vUUmeT1+5TSZMexuTbtsPs= +github.com/hashicorp/serf v0.10.0/go.mod h1:bXN03oZc5xlH46k/K1qTrpXb9ERKyY1/i/N5mxvgrZw= +github.com/hudl/fargo v1.4.0 h1:ZDDILMbB37UlAVLlWcJ2Iz1XuahZZTDZfdCKeclfq2s= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= +github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= +github.com/ipfs/go-cidutil v0.1.1 
h1:COuby6H8C2ml0alvHYX3WdbFM4F07YtbY0UlT5j+sgI= +github.com/ipfs/go-cidutil v0.1.1/go.mod h1:SCoUftGEUgoXe5Hjeyw5CiLZF8cwYn/TbtpFQXJCP6k= +github.com/ipfs/go-ds-pebble v0.5.9 h1:D1FEuMxjbEmDADNqsyT74n9QHVAn12nv9i9Qa15AFYc= +github.com/ipfs/go-ds-pebble v0.5.9/go.mod h1:XmUBN05l6B+tMg7mpMS75ZcKW/CX01uZMhhWw85imQA= +github.com/ipfs/go-dsqueue v0.2.0 h1:MBi9w3oSiX98Xc+Y7NuJ9G8MI6mAT4IGdO9dHEMCZzU= +github.com/ipfs/go-dsqueue v0.2.0/go.mod h1:8FfNQC4DMF/KkzBXRNB9Rb3MKDW0Sh98HMtXYl1mLQE= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-pq v0.0.4 h1:U7jjENWJd1jhcrR8X/xHTaph14PTAK9O+yaLJbjqgOw= +github.com/ipfs/go-ipfs-pq v0.0.4/go.mod h1:9UdLOIIb99IFrgT0Fc53pvbvlJBhpUb4GJuAQf3+O2A= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-ipld-cbor v0.2.1 h1:H05yEJbK/hxg0uf2AJhyerBDbjOuHX4yi+1U/ogRa7E= +github.com/ipfs/go-ipld-cbor v0.2.1/go.mod h1:x9Zbeq8CoE5R2WicYgBMcr/9mnkQ0lHddYWJP2sMV3A= +github.com/ipfs/go-ipld-format v0.6.3 h1:9/lurLDTotJpZSuL++gh3sTdmcFhVkCwsgx2+rAh4j8= +github.com/ipfs/go-ipld-format v0.6.3/go.mod h1:74ilVN12NXVMIV+SrBAyC05UJRk0jVvGqdmrcYZvCBk= +github.com/ipfs/go-ipld-legacy v0.2.2 h1:DThbqCPVLpWBcGtU23KDLiY2YRZZnTkXQyfz8aOfBkQ= +github.com/ipfs/go-ipld-legacy v0.2.2/go.mod h1:hhkj+b3kG9b2BcUNw8IFYAsfeNo8E3U7eYlWeAOPyDU= +github.com/ipfs/go-libdht v0.5.0 h1:ZN+eCqwahZvUeT0e4DsIxRtm78Mc9UR5tmZUiMsrGjQ= +github.com/ipfs/go-libdht v0.5.0/go.mod h1:L3YiuFXecLeZZFuuVRM0hjg1GgVhARzUdahFsuqSa7w= +github.com/ipfs/go-metrics-interface v0.3.0 
h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= +github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= +github.com/ipfs/go-peertaskqueue v0.8.3 h1:tBPpGJy+A92RqtRFq5amJn0Uuj8Pw8tXi0X3eHfHM8w= +github.com/ipfs/go-peertaskqueue v0.8.3/go.mod h1:OqVync4kPOcXEGdj/LKvox9DCB5mkSBeXsPczCxLtYA= +github.com/ipfs/go-unixfsnode v1.10.3 h1:c8sJjuGNkxXAQH75P+f5ngPda/9T+DrboVA0TcDGvGI= +github.com/ipfs/go-unixfsnode v1.10.3/go.mod h1:2Jlc7DoEwr12W+7l8Hr6C7XF4NHST3gIkqSArLhGSxU= +github.com/ipld/go-car/v2 v2.16.0 h1:LWe0vmN/QcQmUU4tr34W5Nv5mNraW+G6jfN2s+ndBco= +github.com/ipld/go-car/v2 v2.16.0/go.mod h1:RqFGWN9ifcXVmCrTAVnfnxiWZk1+jIx67SYhenlmL34= +github.com/ipld/go-codec-dagpb v1.7.0 h1:hpuvQjCSVSLnTnHXn+QAMR0mLmb1gA6wl10LExo2Ts0= +github.com/ipld/go-codec-dagpb v1.7.0/go.mod h1:rD3Zg+zub9ZnxcLwfol/OTQRVjaLzXypgy4UqHQvilM= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jordanlewis/gcassert v0.0.0-20250430164644-389ef753e22e h1:a+PGEeXb+exwBS3NboqXHyxarD9kaboBbrSp+7GuBuc= +github.com/jordanlewis/gcassert v0.0.0-20250430164644-389ef753e22e/go.mod h1:ZybsQk6DWyN5t7An1MuPm1gtSZ1xDaTXS9ZjIOxvQrk= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= 
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/libp2p/go-doh-resolver v0.5.0 h1:4h7plVVW+XTS+oUBw2+8KfoM1jF6w8XmO7+skhePFdE= +github.com/libp2p/go-doh-resolver v0.5.0/go.mod h1:aPDxfiD2hNURgd13+hfo29z9IC22fv30ee5iM31RzxU= +github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= +github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU= +github.com/libp2p/go-yamux/v4 v4.0.2/go.mod h1:C808cCRgOs1iBwY4S71T5oxgMxgLmqUw56qh4AeBW2o= +github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI= +github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= 
+github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= +github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= +github.com/nats-io/nats.go v1.15.0 h1:3IXNBolWrwIUf2soxh6Rla8gPzYWEZQBUBK6RV21s+o= +github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= 
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/performancecopilot/speed/v4 v4.0.0 h1:VxEDCmdkfbQYDlcr/GC9YoN9PQ6p8ulk9xVsepYy9ZY= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 
h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0= +github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20= +github.com/rabbitmq/amqp091-go v1.2.0 h1:1pHBxAsQh54R9eX/xo679fUEAfv3loMqi0pvRFOj2nk= +github.com/rabbitmq/amqp091-go v1.2.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/rs/xid v1.6.0 
h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M= +github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/slok/go-http-metrics v0.13.0 h1:lQDyJJx9wKhmbliyUsZ2l6peGnXRHjsjoqPt5VYzcP8= +github.com/slok/go-http-metrics v0.13.0/go.mod h1:HIr7t/HbN2sJaunvnt9wKP9xoBBVZFo1/KiHU3b0w+4= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e 
h1:mOtuXaRAbVZsxAHVdPR3IjfmN8T1h2iczJLynhLybf8= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= +github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/yuin/goldmark v1.4.13 
h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= +go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= +go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ= +golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= 
+golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= +gonum.org/v1/plot v0.15.2 h1:Tlfh/jBk2tqjLZ4/P8ZIwGrLEWQSPDLRm/SNWKNXiGI= +gonum.org/v1/plot v0.15.2/go.mod h1:DX+x+DWso3LTha+AdkJEv5Txvi+Tql3KAGkehP0/Ubg= +gonum.org/v1/tools v0.0.0-20200318103217-c168b003ce8c h1:cJWOvXtcaFSGXz2F4z2AMM0VV7edDDGrxb5GLQH7ayQ= +gonum.org/v1/tools v0.0.0-20200318103217-c168b003ce8c/go.mod h1:fy6Otjqbk477ELp8IXTpw1cObQtLbRCBVonY+bTTfcM= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260319201613-d00831a3d3e7 h1:6jJ8xNMxB3mmH3HsxEMPjJU+WeFOiwBJ+cLm60OvSZs= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:6TABGosqSqU2l1+fJ3jdvOYPPVryeKybxYF0cCZkTBE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= +gopkg.in/vmihailenco/msgpack.v2 v2.9.2 h1:gjPqo9orRVlSAH/065qw3MsFCDpH7fa1KpiizXyllY4= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= 
+honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= diff --git a/pkg/config/config.go b/pkg/config/config.go index 6a8bd48d05..7cbb780a21 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -343,6 +343,33 @@ func (d *DAConfig) IsFiberEnabled() bool { return d.Fiber.Enabled } +// ApplyFiberDefaults flips the DA client to Fiber-friendly defaults +// when the Fiber profile is enabled — adaptive batching, a 1 s +// DA.BlockTime so inclusion-tracking keeps pace with Fibre's +// settlement, and a bounded pending-cache window so a Fibre stall +// can't grow memory unbounded. Caller-provided non-zero values for +// the tunables (BatchSizeThreshold, BatchMinItems) are preserved. +// +// Intended to be invoked once at runner startup, after parsing the +// usual config but before constructing the DA client. +func (c *Config) ApplyFiberDefaults() { + if !c.DA.IsFiberEnabled() { + return + } + + c.DA.BatchingStrategy = "adaptive" + if c.DA.BatchSizeThreshold <= 0 || c.DA.BatchSizeThreshold > 1 { + c.DA.BatchSizeThreshold = 0.5 + } + c.DA.BatchMaxDelay = DurationWrapper{Duration: 8 * time.Second} + if c.DA.BatchMinItems == 0 { + c.DA.BatchMinItems = 1 + } + + c.DA.BlockTime = DurationWrapper{Duration: 1 * time.Second} + c.Node.MaxPendingHeadersAndData = 50 +} + // GetNamespace returns the namespace for header submissions. 
func (d *DAConfig) GetNamespace() string { return d.Namespace diff --git a/tools/celestia-node-fiber/cmd/evnode-fibre/main.go b/tools/celestia-node-fiber/cmd/evnode-fibre/main.go new file mode 100644 index 0000000000..7b8b73080f --- /dev/null +++ b/tools/celestia-node-fiber/cmd/evnode-fibre/main.go @@ -0,0 +1,553 @@ +// Command evnode-fibre runs a long-lived ev-node aggregator wired to +// a celestia-node-fiber adapter. It is the binary that ships to the +// ev-node instance during a `talis deploy` for the multi-FSP +// throughput experiment. +// +// Topology (smallest variant of the experiment): +// +// [ load-gen ] +// │ POST /tx +// ▼ +// [ evnode-fibre (this binary) aggregator + InMem executor ] +// │ block.NewFiberDAClient → cnfiber.New +// ▼ +// [ celestia-node bridge ] +// │ blob.Subscribe / blob.Submit +// ▼ +// [ Fibre Server (per validator) ] + [ celestia-app validators ] +// +// CLI flags map to talis/SSM-friendly env vars; everything that +// changes per-deploy can be set via flag *or* CELES_* env var. +// +// This binary is intentionally not part of the testapp tree — testapp +// is the canonical small-chain example and we don't want to drag the +// celestia-node-fiber adapter (with its celestia-node + celestia-app +// deps) into testapp's go.mod. By living under tools/celestia-node-fiber +// the runner reuses the adapter's existing dep set as-is. 
+package main + +import ( + "context" + "crypto/rand" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + "path/filepath" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/rs/zerolog" + + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + "github.com/celestiaorg/celestia-node/api/client" + cnp2p "github.com/celestiaorg/celestia-node/nodebuilder/p2p" + + "github.com/evstack/ev-node/block" + coreexecution "github.com/evstack/ev-node/core/execution" + "github.com/evstack/ev-node/node" + "github.com/evstack/ev-node/pkg/config" + genesispkg "github.com/evstack/ev-node/pkg/genesis" + "github.com/evstack/ev-node/pkg/p2p" + "github.com/evstack/ev-node/pkg/p2p/key" + "github.com/evstack/ev-node/pkg/sequencers/solo" + pkgsigner "github.com/evstack/ev-node/pkg/signer" + "github.com/evstack/ev-node/pkg/signer/file" + "github.com/evstack/ev-node/pkg/store" + + cnfiber "github.com/evstack/ev-node/tools/celestia-node-fiber" +) + +type cliFlags struct { + homeDir string + chainID string + headerNS string + dataNS string + bridgeAddr string + bridgeTokenFp string + coreGRPCAddr string + coreNetwork string + keyringPath string + keyName string + signerPpFp string + httpListen string + rpcListen string + p2pListen string + pprofListen string + blockTime time.Duration + scrapeInterval time.Duration + logLevel string +} + +func parseFlags() cliFlags { + var c cliFlags + + flag.StringVar(&c.homeDir, "home", envOr("EVNODE_HOME", filepath.Join(os.Getenv("HOME"), ".evnode-fibre")), + "ev-node home directory (datastore, signer, node key live here)") + flag.StringVar(&c.chainID, "chain-id", envOr("CHAIN_ID", ""), + "app chain id (must match validators' chain id)") + flag.StringVar(&c.headerNS, "header-namespace", envOr("HEADER_NS", "ev-fib-ht"), + 
"DA header namespace string") + flag.StringVar(&c.dataNS, "data-namespace", envOr("DATA_NS", "ev-fib-da"), + "DA data namespace string") + flag.StringVar(&c.bridgeAddr, "bridge-addr", envOr("BRIDGE_ADDR", ""), + "celestia-node bridge RPC address (host:port, no scheme)") + flag.StringVar(&c.bridgeTokenFp, "bridge-token-file", envOr("BRIDGE_TOKEN_FILE", "/root/bridge-jwt.txt"), + "path to a file containing the bridge admin JWT") + flag.StringVar(&c.coreGRPCAddr, "core-grpc-addr", envOr("CORE_GRPC_ADDR", ""), + "celestia-app validator gRPC address (host:port) used by Fibre's submit path for state queries") + flag.StringVar(&c.coreNetwork, "core-network", envOr("CORE_NETWORK", "private"), + "celestia-node Network identifier (matches bridge's --p2p.network)") + flag.StringVar(&c.keyringPath, "keyring-path", envOr("KEYRING_PATH", ""), + "directory holding the cosmos-sdk file keyring with the Fibre payment account") + flag.StringVar(&c.keyName, "key-name", envOr("KEY_NAME", "default-fibre"), + "keyring entry name for the Fibre payment account") + flag.StringVar(&c.signerPpFp, "signer-passphrase-file", envOr("SIGNER_PASSPHRASE_FILE", ""), + "path to a file holding the file-backed signer passphrase") + flag.StringVar(&c.httpListen, "http-listen", envOr("HTTP_LISTEN", "0.0.0.0:7777"), + "listen addr for the tx-injection HTTP endpoint (POST /tx)") + flag.StringVar(&c.rpcListen, "rpc-listen", envOr("RPC_LISTEN", "0.0.0.0:7331"), + "ev-node RPC listen addr") + flag.StringVar(&c.p2pListen, "p2p-listen", envOr("P2P_LISTEN", "/ip4/0.0.0.0/tcp/7676"), + "libp2p listen address (kept up for shutdown symmetry; never gossips when Fiber is on)") + flag.StringVar(&c.pprofListen, "pprof-listen", envOr("PPROF_LISTEN", ""), + "if set (e.g. 
127.0.0.1:6060), serve net/http/pprof on this addr — heap/goroutine/profile useful for diagnosing memory growth") + flag.DurationVar(&c.blockTime, "block-time", durFromEnv("BLOCK_TIME", 200*time.Millisecond), + "ev-node BlockTime") + flag.DurationVar(&c.scrapeInterval, "scrape-interval", durFromEnv("SCRAPE_INTERVAL", 100*time.Millisecond), + "reaper scrape interval (lower = smaller per-block batches)") + flag.StringVar(&c.logLevel, "log-level", envOr("LOG_LEVEL", "info"), "log level") + + flag.Parse() + return c +} + +func envOr(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +func durFromEnv(name string, def time.Duration) time.Duration { + if v := os.Getenv(name); v != "" { + if d, err := time.ParseDuration(v); err == nil { + return d + } + } + return def +} + +func main() { + cfg := parseFlags() + if err := run(cfg); err != nil { + fmt.Fprintln(os.Stderr, "fatal:", err) + os.Exit(1) + } +} + +func run(cli cliFlags) error { + // Validate the inputs that have no sensible default. + if cli.chainID == "" { + return errors.New("--chain-id is required") + } + if cli.bridgeAddr == "" { + return errors.New("--bridge-addr is required") + } + if cli.coreGRPCAddr == "" { + return errors.New("--core-grpc-addr is required (validator-0 gRPC, host:port)") + } + if cli.keyringPath == "" { + return errors.New("--keyring-path is required") + } + + level, err := zerolog.ParseLevel(cli.logLevel) + if err != nil { + level = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(level) + logger := zerolog.New(os.Stderr).With().Timestamp().Str("component", "evnode-fibre").Logger() + + if err := os.MkdirAll(cli.homeDir, 0o755); err != nil { + return fmt.Errorf("create home dir: %w", err) + } + + // Bridge JWT: read once at startup. The bridge_init.sh writes it + // to /root/bridge-jwt.txt on the bridge box; the ev-node init + // script scp's it onto this box at the same path by default. 
+ authBytes, err := os.ReadFile(cli.bridgeTokenFp) + if err != nil { + return fmt.Errorf("read bridge token from %s: %w", cli.bridgeTokenFp, err) + } + authToken := string(authBytes) + for len(authToken) > 0 && (authToken[len(authToken)-1] == '\n' || authToken[len(authToken)-1] == '\r' || authToken[len(authToken)-1] == ' ') { + authToken = authToken[:len(authToken)-1] + } + + // Cosmos-sdk file keyring with the Fibre payment account. + // The deploy step copies this from a validator (or a dedicated + // pre-funded account) so the keyring already contains cli.keyName. + encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...) + kr, err := keyring.New(app.Name, keyring.BackendTest, cli.keyringPath, nil, encCfg.Codec) + if err != nil { + return fmt.Errorf("open keyring at %s: %w", cli.keyringPath, err) + } + if _, err := kr.Key(cli.keyName); err != nil { + return fmt.Errorf("keyring entry %q not found in %s: %w", cli.keyName, cli.keyringPath, err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Construct the celestia-node-fiber adapter. We don't override + // SubmitConfig.Fibre — the Fibre client defaults (UploadMemoryBudget + // 512 MiB, RPCTimeout 15 s) are sized for the FSP-side concurrency + // the validators can actually absorb. We tried bumping the budget + // to 4 GiB to allow more in-flight blobs; with 16 upload workers + // the FSPs couldn't keep up and the box OOM'd at 63.9 GB. Leaving + // the defaults in place means the upload pipeline self-bounds at + // roughly what the FSPs can sustain. 
+ adapter, err := cnfiber.New(ctx, cnfiber.Config{ + Client: client.Config{ + ReadConfig: client.ReadConfig{ + BridgeDAAddr: cli.bridgeAddr, + DAAuthToken: authToken, + EnableDATLS: false, + }, + SubmitConfig: client.SubmitConfig{ + DefaultKeyName: cli.keyName, + Network: cnp2p.Network(cli.coreNetwork), + CoreGRPCConfig: client.CoreGRPCConfig{ + Addr: cli.coreGRPCAddr, + }, + }, + }, + }, kr) + if err != nil { + return fmt.Errorf("construct fiber adapter: %w", err) + } + defer adapter.Close() + + // File-backed signer for ev-node block-signing key (separate from + // the cosmos-sdk keyring used to sign Fibre payment promises). + signerDir := filepath.Join(cli.homeDir, "signer") + if err := os.MkdirAll(signerDir, 0o755); err != nil { + return fmt.Errorf("create signer dir: %w", err) + } + passphrase, err := readSignerPassphrase(cli.signerPpFp) + if err != nil { + return fmt.Errorf("read signer passphrase: %w", err) + } + if !signerExists(signerDir) { + if _, err := file.CreateFileSystemSigner(signerDir, []byte(passphrase)); err != nil { + return fmt.Errorf("create file signer: %w", err) + } + } + + // Generate a libp2p node key. With the syncer's P2P worker gated + // off in Fiber mode, this key is mostly cosmetic — the host comes + // up but never gossips. Keeping it ephemeral per restart is fine. + nodePrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return fmt.Errorf("generate libp2p key: %w", err) + } + nodeKey := &key.NodeKey{PrivKey: nodePrivKey} + + // File signer factory needs the address before genesis is built, + // so construct it here and read the address back. 
+	fs, err := file.LoadFileSystemSigner(signerDir, []byte(passphrase))
+	if err != nil {
+		return fmt.Errorf("load signer: %w", err)
+	}
+	signerAddr, err := fs.GetAddress()
+	if err != nil {
+		return fmt.Errorf("get signer address: %w", err)
+	}
+
+	// Fresh genesis per-run is fine: the chain we're talking to via
+	// Fibre is the celestia-app testnet; ev-node's own genesis is
+	// self-consistent and never gossiped.
+	genesis := genesispkg.NewGenesis(cli.chainID, 1, time.Now(), signerAddr)
+	if err := genesis.Validate(); err != nil {
+		return fmt.Errorf("validate genesis: %w", err)
+	}
+
+	cfg := config.DefaultConfig()
+	cfg.RootDir = cli.homeDir
+	cfg.DBPath = "data"
+	cfg.Node.Aggregator = true
+	cfg.Node.BlockTime = config.DurationWrapper{Duration: cli.blockTime}
+	cfg.Node.LazyMode = false
+	cfg.Node.ScrapeInterval = config.DurationWrapper{Duration: cli.scrapeInterval}
+	cfg.DA.Namespace = cli.headerNS
+	cfg.DA.DataNamespace = cli.dataNS
+	cfg.DA.Fiber.Enabled = true
+	cfg.DA.Fiber.ConsensusAddress = cli.coreGRPCAddr
+	cfg.DA.Fiber.ConsensusChainID = cli.chainID
+	cfg.DA.Fiber.BridgeAddress = cli.bridgeAddr
+	cfg.DA.Fiber.KeyringPath = cli.keyringPath
+	cfg.DA.Fiber.KeyName = cli.keyName
+	cfg.DA.RequestTimeout = config.DurationWrapper{Duration: 60 * time.Second}
+	// Fiber-tuned profile: BatchingStrategy=adaptive, BatchMaxDelay=8s,
+	// DA.BlockTime=1s, MaxPendingHeadersAndData=50, plus 120 MiB blob cap.
+ cfg.ApplyFiberDefaults() + block.SetMaxBlobSize(120 * 1024 * 1024) + cfg.P2P.ListenAddress = cli.p2pListen + cfg.P2P.DisableConnectionGater = true + cfg.RPC.Address = cli.rpcListen + cfg.Log.Level = cli.logLevel + cfg.Signer.SignerType = "file" + cfg.Signer.SignerPath = signerDir + + signer, err := pkgsigner.NewSigner(ctx, &cfg, passphrase) + if err != nil { + return fmt.Errorf("construct signer via factory: %w", err) + } + + ds, err := store.NewDefaultKVStore(cli.homeDir, cfg.DBPath, "evnode-fibre") + if err != nil { + return fmt.Errorf("create datastore: %w", err) + } + + executor := newInMemExecutor() + sequencer := solo.NewSoloSequencer(logger, []byte(genesis.ChainID), executor) + daClient := block.NewFiberDAClient(adapter, cfg, logger, 0) + p2pClient, err := p2p.NewClient(cfg.P2P, nodeKey.PrivKey, datastore.NewMapDatastore(), genesis.ChainID, logger, nil) + if err != nil { + return fmt.Errorf("create p2p client: %w", err) + } + + rollnode, err := node.NewNode( + cfg, + executor, + sequencer, + daClient, + signer, + p2pClient, + genesis, + ds, + node.DefaultMetricsProvider(cfg.Instrumentation), + logger, + node.NodeOptions{}, + ) + if err != nil { + return fmt.Errorf("create ev-node: %w", err) + } + + // pprof on a separate listener (off by default). The `_ "net/http/pprof"` + // import registers handlers on http.DefaultServeMux; we serve that + // mux on cli.pprofListen so heap / goroutine / profile dumps don't + // share a port with the tx-ingress mux. Used to diagnose where the + // daemon's RSS goes — the AWS run held ~49 GiB at steady state and + // we don't yet have a breakdown. 
+ if cli.pprofListen != "" { + pprofSrv := &http.Server{Addr: cli.pprofListen, ReadHeaderTimeout: 5 * time.Second} + go func() { + logger.Info().Str("addr", cli.pprofListen).Msg("starting pprof HTTP server") + if err := pprofSrv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + logger.Warn().Err(err).Msg("pprof server exited") + } + }() + } + + // HTTP tx ingestion endpoint. + httpServer := &http.Server{ + Addr: cli.httpListen, + Handler: txIngressHandler(executor, logger), + } + httpDone := make(chan error, 1) + go func() { + logger.Info().Str("addr", cli.httpListen).Msg("starting tx-ingress HTTP server") + err := httpServer.ListenAndServe() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + httpDone <- err + } else { + httpDone <- nil + } + }() + + // Run the node and trap signals. + nodeDone := make(chan error, 1) + go func() { + nodeDone <- rollnode.Run(ctx) + }() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + select { + case <-sigCh: + logger.Info().Msg("signal received, shutting down") + case err := <-nodeDone: + if err != nil { + logger.Error().Err(err).Msg("ev-node exited with error") + } + case err := <-httpDone: + logger.Error().Err(err).Msg("HTTP server exited") + } + cancel() + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() + _ = httpServer.Shutdown(shutdownCtx) + + return nil +} + +func readSignerPassphrase(path string) (string, error) { + if path == "" { + // Default: deterministic-but-non-empty passphrase. This is a + // long-lived testnet daemon, not a custodial wallet. 
+ return "evnode-fibre-passphrase", nil + } + b, err := os.ReadFile(path) + if err != nil { + return "", err + } + s := string(b) + for len(s) > 0 && (s[len(s)-1] == '\n' || s[len(s)-1] == '\r' || s[len(s)-1] == ' ') { + s = s[:len(s)-1] + } + if s == "" { + return "", errors.New("passphrase file is empty") + } + return s, nil +} + +func signerExists(dir string) bool { + _, err := os.Stat(filepath.Join(dir, "signer.json")) + return err == nil +} + +// ─────────────────────────── tx ingress ──────────────────────────────── + +func txIngressHandler(exec *inMemExecutor, logger zerolog.Logger) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("/tx", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + w.Header().Set("Allow", "POST") + http.Error(w, "POST only", http.StatusMethodNotAllowed) + return + } + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(body) == 0 { + http.Error(w, "empty body", http.StatusBadRequest) + return + } + exec.InjectTx(body) + w.WriteHeader(http.StatusAccepted) + }) + mux.HandleFunc("/stats", func(w http.ResponseWriter, _ *http.Request) { + s := exec.Stats() + fmt.Fprintf(w, "blocks=%d txs=%d\n", s.BlocksProduced, s.TotalExecutedTxs) + }) + return mux +} + +// ─────────────────────── in-memory executor ──────────────────────────── +// +// Mirrors the test executor in tools/celestia-node-fiber/testing — accepts +// arbitrary tx bytes, drains a buffered channel into blocks, and tracks +// counts for /stats. Not a real chain; just a generic blob factory for +// the experiment. + +type inMemExecutor struct { + mu sync.Mutex + txChan chan []byte + maxBlockTxs int + blocks atomic.Uint64 + totalTxs atomic.Uint64 +} + +// txChan capacity caps in-flight memory: at 10 KB tx and 500 slots +// we hold ≤ 5 MB queued before /tx blocks the ingress goroutine — +// which is exactly the backpressure we want against a hot loadgen. 
+// Reaper drains every 100 ms into the solo sequencer, which then +// accumulates batches between block-production ticks; without a tight +// cap a single block can balloon past the 120 MiB DA blob limit and +// the rest of the daemon's per-block allocations push the box past +// its RAM budget within seconds. +// +// maxBlockTxs caps GetTxs's per-call return so reaper-cycle batches +// are bounded too. With 500 ≤ 5 MB per block at 10 KB tx-size, we +// stay an order of magnitude under the DA cap so headers/data signing +// + envelope cache + retry buffers all fit. +func newInMemExecutor() *inMemExecutor { + return &inMemExecutor{ + txChan: make(chan []byte, 500), + maxBlockTxs: 500, + } +} + +func (e *inMemExecutor) InjectTx(tx []byte) { + e.txChan <- tx +} + +type execStats struct { + BlocksProduced uint64 + TotalExecutedTxs uint64 +} + +func (e *inMemExecutor) Stats() execStats { + return execStats{ + BlocksProduced: e.blocks.Load(), + TotalExecutedTxs: e.totalTxs.Load(), + } +} + +func (e *inMemExecutor) InitChain(_ context.Context, _ time.Time, _ uint64, _ string) ([]byte, error) { + return []byte("evnode-fibre-genesis-root"), nil +} + +func (e *inMemExecutor) GetTxs(_ context.Context) ([][]byte, error) { + var txs [][]byte + for len(txs) < e.maxBlockTxs { + select { + case tx := <-e.txChan: + txs = append(txs, tx) + default: + return txs, nil + } + } + return txs, nil +} + +func (e *inMemExecutor) ExecuteTxs(_ context.Context, txs [][]byte, height uint64, _ time.Time, _ []byte) ([]byte, error) { + e.blocks.Add(1) + e.totalTxs.Add(uint64(len(txs))) + root := make([]byte, 32) + binary.BigEndian.PutUint64(root, height) + binary.BigEndian.PutUint64(root[8:], uint64(len(txs))) + return root, nil +} + +func (e *inMemExecutor) SetFinal(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) Rollback(_ context.Context, _ uint64) error { return nil } +func (e *inMemExecutor) GetExecutionInfo(_ context.Context) (coreexecution.ExecutionInfo, error) { 
+ return coreexecution.ExecutionInfo{MaxGas: 0}, nil +} + +func (e *inMemExecutor) FilterTxs(_ context.Context, txs [][]byte, _, _ uint64, _ bool) ([]coreexecution.FilterStatus, error) { + st := make([]coreexecution.FilterStatus, len(txs)) + for i := range st { + st[i] = coreexecution.FilterOK + } + return st, nil +} + +var _ coreexecution.Executor = (*inMemExecutor)(nil) diff --git a/tools/talis/.gitignore b/tools/talis/.gitignore new file mode 100644 index 0000000000..f48fd8e806 --- /dev/null +++ b/tools/talis/.gitignore @@ -0,0 +1,2 @@ +/talis +/cmd/evnode-txsim/evnode-txsim diff --git a/tools/talis/Makefile b/tools/talis/Makefile new file mode 100644 index 0000000000..291b6d36dd --- /dev/null +++ b/tools/talis/Makefile @@ -0,0 +1,175 @@ +# Build helpers for the talis fibre-experiment deploy. +# +# Targets: +# make build-bins build all three: celestia-appd, celestia, evnode-fibre +# make build-app celestia-appd from $(CELESTIA_APP_REPO) at $(CELESTIA_APP_REF) +# make build-node celestia from $(CELESTIA_NODE_REPO) at $(CELESTIA_NODE_REF) +# make build-evnode evnode-fibre runner from this repo (tools/celestia-node-fiber/cmd/evnode-fibre) +# make clean wipe build/ +# +# All binaries are cross-compiled to linux/amd64 — talis instances are +# Ubuntu 24.04 amd64 regardless of provider — and dropped into ./build/. +# Pass that directory to `talis genesis -b ./build` and the validator, +# bridge, and ev-node deploys all pick up the same artefacts. 
+# +# Repo locations and refs are overridable via env / make var, with +# sensible defaults that match the rest of fibre-experiment: +# +# CELESTIA_APP_REPO default: ../../../celestia-app (sibling clone) +# CELESTIA_APP_REF default: feat/fibre-payments +# CELESTIA_NODE_REPO default: ../../../celestia-node (sibling clone) +# CELESTIA_NODE_REF default: feature/fibre-experimental +# +# If the repos are not cloned next to this checkout, override the path +# variables at the make invocation: +# +# make build-bins CELESTIA_APP_REPO=/path/to/celestia-app + +CELESTIA_APP_REPO ?= ../../../celestia-app +CELESTIA_APP_REF ?= feat/fibre-payments + +CELESTIA_NODE_REPO ?= ../../../celestia-node +CELESTIA_NODE_REF ?= feature/fibre-experimental + +EVNODE_RUNNER_PATH ?= ../celestia-node-fiber/cmd/evnode-fibre +TXSIM_PATH ?= ./cmd/evnode-txsim + +BUILD_DIR := $(CURDIR)/build + +GOOS ?= linux +GOARCH ?= amd64 + +# -trimpath strips local paths from binaries so they're reproducible +# across machines. -s -w drops debug + symbol tables — saves ~30% size, +# matters for upload over slow links. +GOFLAGS_COMMON := -trimpath -ldflags='-s -w' + +# The fibre / valaddr modules in celestia-app + celestia-node are +# gated behind the `fibre` build tag. Without it, the keepers are +# stubbed out and the gRPC Query services aren't registered, which +# breaks both setup-fibre's host registry lookup and the bridge's +# blob.Subscribe path. +GOTAGS_FIBRE := -tags fibre + +.PHONY: build-bins build-app build-fibre build-fibre-txsim build-node build-evnode build-txsim clean check-repos + +build-bins: build-app build-fibre build-fibre-txsim build-node build-evnode build-txsim + @echo + @echo "✅ All binaries built into $(BUILD_DIR):" + @ls -lh $(BUILD_DIR) + +$(BUILD_DIR): + @mkdir -p $(BUILD_DIR) + +# celestia-appd from celestia-app's feat/fibre-payments. 
We compile +# from cmd/celestia-appd; the make target in celestia-app's Makefile +# does the same plus version-stamps but we don't need that for the +# experiment (any commit on feat/fibre-payments will do). +build-app: $(BUILD_DIR) check-repos + @echo "==> Building celestia-appd ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + git fetch --quiet origin $(CELESTIA_APP_REF) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || { \ + echo " NOTE: branch checked out in another worktree; building from current ref $$current"; \ + }; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/celestia-appd ./cmd/celestia-appd + @echo " -> $(BUILD_DIR)/celestia-appd" + +# fibre server (FSP) — separate cmd in celestia-app's repo at +# fibre/cmd/. Each validator runs one of these colocated; talis +# start-fibre tmuxs it on port 7980. Required at deploy time; +# without it `start-fibre` fails with "fibre: command not found". +build-fibre: $(BUILD_DIR) check-repos + @echo "==> Building fibre server ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || true; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/fibre ./fibre/cmd + @echo " -> $(BUILD_DIR)/fibre" + +# fibre-txsim — celestia-app's blob load generator. Used by talis +# fibre-txsim. Not strictly required for the ev-node experiment but +# baked into talis genesis's encoder-payload staging, so build it for +# completeness. 
+build-fibre-txsim: $(BUILD_DIR) check-repos + @echo "==> Building fibre-txsim ($(CELESTIA_APP_REF))" + @cd $(CELESTIA_APP_REPO) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_APP_REF)" ]; then \ + git checkout --quiet $(CELESTIA_APP_REF) || true; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/fibre-txsim ./tools/fibre-txsim + @echo " -> $(BUILD_DIR)/fibre-txsim" + +# celestia (bridge / light node binary) from celestia-node's +# feature/fibre-experimental. +build-node: $(BUILD_DIR) check-repos + @echo "==> Building celestia ($(CELESTIA_NODE_REF))" + @cd $(CELESTIA_NODE_REPO) && \ + git fetch --quiet origin $(CELESTIA_NODE_REF) && \ + current=$$(git rev-parse --abbrev-ref HEAD); \ + if [ "$$current" != "$(CELESTIA_NODE_REF)" ]; then \ + git checkout --quiet $(CELESTIA_NODE_REF) || { \ + echo " NOTE: branch checked out in another worktree; building from current ref $$current"; \ + }; \ + fi && \ + GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/celestia ./cmd/celestia + @echo " -> $(BUILD_DIR)/celestia" + +# evnode-fibre is the long-lived ev-node aggregator wired to +# celestia-node-fiber. Lives next to the adapter so it inherits the +# adapter's go.mod and avoids dragging celestia-node into testapp's +# tree. We rename the artefact to "evnode" so talis genesis --build-dir +# picks it up under that name (same convention as celestia / celestia-appd). +build-evnode: $(BUILD_DIR) + @echo "==> Building evnode-fibre (this repo, current HEAD)" + @cd $(EVNODE_RUNNER_PATH) && \ + GOWORK=off GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/evnode . + @echo " -> $(BUILD_DIR)/evnode" + +# evnode-baseline: same wiring as evnode-fibre but uses +# block.NewDAClient (celestia-node bridge JSON-RPC) instead of the +# Fibre adapter. 
Lives next to evnode-fibre so it shares the cnf +# go.mod and replace directives. +build-evnode-baseline: $(BUILD_DIR) + @echo "==> Building evnode-baseline (this repo, current HEAD)" + @cd $(EVNODE_RUNNER_PATH)/../evnode-baseline && \ + GOWORK=off GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) $(GOTAGS_FIBRE) -o $(BUILD_DIR)/evnode-baseline . + @echo " -> $(BUILD_DIR)/evnode-baseline" + +# evnode-txsim is the load generator that ships to loadgen-* nodes. +# Stdlib-only, so the build is fast and has no sibling-repo deps. +build-txsim: $(BUILD_DIR) + @echo "==> Building evnode-txsim (this repo, current HEAD)" + @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 \ + go build $(GOFLAGS_COMMON) -o $(BUILD_DIR)/evnode-txsim $(TXSIM_PATH) + @echo " -> $(BUILD_DIR)/evnode-txsim" + +# Sanity check: make sure the sibling clones exist so we fail with a +# friendly message instead of a deep go-tool error. .git can be either a +# directory (regular clone) or a file (git worktree), so test for either. +check-repos: + @if [ ! -e "$(CELESTIA_APP_REPO)/.git" ]; then \ + echo "ERROR: celestia-app repo not found at $(CELESTIA_APP_REPO)"; \ + echo " set CELESTIA_APP_REPO=/path/to/celestia-app or clone the repo there"; \ + exit 1; \ + fi + @if [ ! -e "$(CELESTIA_NODE_REPO)/.git" ]; then \ + echo "ERROR: celestia-node repo not found at $(CELESTIA_NODE_REPO)"; \ + echo " set CELESTIA_NODE_REPO=/path/to/celestia-node or clone the repo there"; \ + exit 1; \ + fi + +clean: + @rm -rf $(BUILD_DIR) diff --git a/tools/talis/README.md b/tools/talis/README.md new file mode 100644 index 0000000000..d89e1f8463 --- /dev/null +++ b/tools/talis/README.md @@ -0,0 +1,400 @@ +# talis + +## Prerequisites + +Talis supports DigitalOcean and Google Cloud. 
**Use only one provider per experiment.**
+
+### DigitalOcean Setup
+
+#### DigitalOcean Account
+
+- If you're part of the Celestia engineering team, ask for access to Celestia's DigitalOcean account or alternatively use a personal account.
+- **Generate the API token:** Go to Settings → API → Generate New Token.
+- Save the token somewhere that's easily accessible.
+
+### Google Cloud Setup
+
+#### Google Cloud Account
+
+- If you're part of the Celestia engineering team, ask for access to Celestia's Google Cloud account. **Make sure to use Google Cloud only when the experiment requires beefy hardware and high bandwidth. Otherwise, use DO**
+- Create a service account with Compute Engine Admin permissions.
+- Download the service account key JSON file.
+
+#### Firewall
+
+Firewall rules are automatically created when spinning up instances. They allow all incoming and outgoing traffic.
+
+### SSH Key
+
+- For quick and easy testing, create a new SSH key without a passphrase:
+
+```sh
+ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_no_passphrase -N ""
+```
+
+- Upload the SSH key to DigitalOcean:
+- Navigate to Settings → Security → SSH Keys.
+- Click "Add SSH Key".
+- Paste your public key.
+
+```sh
+cat ~/.ssh/id_ed25519_no_passphrase.pub
+```
+
+- Give the key a recognizable name (e.g. your own name) for quick and easy access — we'll need it later. Now your key should appear in the "SSH Keys" list.
+
+## Running talis
+
+You have two options when it comes to running talis. You can run it on your local machine (note that this has high RAM requirements) or you can run it inside of a DigitalOcean droplet. The guide for this will be at the end of the file.
+
+## Install
+
+```sh
+go install ./tools/talis/
+```
+
+All binaries used by nodes in the network are compiled on the user's local machine. Either change the target when compiling celestia-app, or use the docker image to ensure complete compatibility.
+ +```sh +make build-talis-bins +``` + +Note that this doesn't install binaries in the `$GOPATH/bin`, so you must specify the path when creating the payload with the `genesis` subcommand using `-b` (`--build-dir`) to copy an entire build directory, or the per-binary flags such as `-a` (`--app-binary`) and `-t` (`--txsim-binary`). See `genesis` subcommand usage below. + +## Usage + +If the relevant binaries are installed via go, and the celestia-app repo is +downloaded, then the talis defaults should work. Your `$GOPATH` is used to copy the scripts from this repo to the payload, along with default locations for the binaries. + +### init-env (Optional) + +Generate a `.env` template file with required environment variables for your provider. + +```sh +# Generate .env template for Google Cloud +talis init-env --provider googlecloud + +# Generate .env template for DigitalOcean (default) +talis init-env --provider digitalocean +``` + +This creates a `.env` file with all required and optional fields for the provider for you to fill in. + +### init + +Talis supports setting up an observability stack (Prometheus, Grafana, and Loki) for monitoring your network. Observability nodes can be deployed on either **DigitalOcean** or **Google Cloud**. + +**Note:** Set environment variables (or create `.env` file with `talis init-env`) **before** running `talis init` for automatic config population. + +```sh +# initializes the repo w/ editable scripts and configs +talis init -c -e + +# with observability node in case you want to view the metrics (DigitalOcean by default) +talis init -c -e --with-observability + +# with observability node on Google Cloud +talis init -c -e --with-observability --provider googlecloud +``` + +This will initialize the directory that contains directory structure used for conducting an experiment. + +```text +. 
+├── app.toml +├── config.json +├── config.toml +├── data/ +├── payload/ +└── scripts/ +``` + +the celestia-app configs (config.toml and app.toml) can be manually edited here, and they will be copied to each node. `config.json` is the talis specific configuration file that contains all info related to spinning up the network. This is updated after the nodes have been spun up. Basic defaults are set, but the relevant fields can either be edited after generation or via using a flag. At this point, it looks something like this: + +```json +{ + "validators": [], + "chain_id": "talis-test-3", + "experiment": "test-3", + "ssh_pub_key_path": "/home/HOSTNAME/.ssh/id_ed25519.pub", + "ssh_key_name": "HOSTNAME", + "digitalocean_token": "pulled from env var if available", + "google_cloud_project": "pulled from env var if available", + "google_cloud_key_json_path": "pulled from env var if available", + "s3_config": { + "region": "pulled from AWS_DEFAULT_REGION env var if available", + "access_key_id": "pulled from AWS_ACCESS_KEY_ID env var if available", + "secret_access_key": "pulled from AWS_SECRET_ACCESS_KEY env var if available", + "bucket_name": "pulled from AWS_S3_BUCKET env var if available", + "endpoint": "pulled from AWS_S3_ENDPOINT env var if available. Can be left empty if targeting an AWS S3 bucket" + } +} +``` + +Notes: + +- **Only use one cloud provider per experiment.** Fill out either DigitalOcean or Google Cloud fields, not both. Filling them both might end up ruining other experiments or having stuck experiments that need to be removed by hand. +- The AWS config supports any S3-compatible bucket. So it can be used with Digital Ocean and other cloud providers. +- Example: The S3 endpoint for Digital Ocean is: `https://.digitaloceanspaces.com/`. 
+ +### add + +```sh +# adds specific nodes to the config (see flags for further configuration) +talis add -t -c + +# specify provider (digitalocean or googlecloud) +talis add -t -c --provider +``` + +If we call: + +```sh +talis add -t validator -c 1 +``` + +`node-type` options: `validator`, `observability` (bridges/lights are still not supported). +`provider` options: `digitalocean` (default), `googlecloud`. + +The config will look like: + +```json +{ + "validators": [ + { + "node_type": "validator", + "public_ip": "TBD", + "private_ip": "TBD", + "provider": "digitalocean", + "slug": "c2-16vcpu-32gb", + "region": "nyc3", // randomly determined unless specified. + "name": "validator-0", + "tags": [ + "talis", + "validator", + "validator-0", + "chainID" + ] + } + ], + ... + "chain_id": "talis-test", + "experiment": "test", + "ssh_pub_key_path": "/home/HOSTNAME/.ssh/id_ed25519.pub", + "ssh_key_name": "HOSTNAME", + ... +} +``` + +### up + +`up` uses the configuration to spin up the cloud instances. Note that this doesn't start the network! + +```sh +# uses the config to spin up nodes on the relevant cloud services +talis up + +# use more workers for faster instance creation. DigitalOcean has a 5000 requests/hour rate limit per API token. +# For droplet creation, each worker makes ~3-5 API calls per droplet, so ~20 workers should be safe for most use cases. +talis up --workers 20 +``` + +### genesis + +Before we can start the network, we need to create a payload that contains everything each instance needs to actually start the network. This includes all the required keys, configs, genesis.json, and startup scripts. The `--square-size` flag will change the `GovMaxSquareSize`. 
By default, the binaries in the $GOPATH/bin will be used, however if specific binaries are needed (likely unless you are running some flavor of debian), use the `-b` (`--build-dir`) flag to copy every binary from a build directory, or the individual flags such as `-a` (`--app-binary`) and `-t` (`--txsim-binary`) when you only need to override specific executables. + +```sh +# creates the payload for the network. This contains all addresses, configs, binaries (from your local GOPATH if not specified), genesis.json, and startup scripts. The `--square-size` flag will change the `GovMaxSquareSize` +talis genesis -s 256 -b /home/$HOSTNAME/go/src/github.com/celestiaorg/celestia-app/build +``` + +Keep in mind that we can still edit anything in the payload before deploying the network. + +Note: When increasing the genesis square size, ensure you also increase the `SquareSizeUpperBound` constant to allow blocks to be created at the new size. + +### deploy + +This step is when the network is actually started. The payload is uploaded to each instance in the network directly from the user's machine. After delivering the payload, the start script is executed in a tmux session called "app" on each machine. + +```sh +# sends the payload to each node and boots the network by executing the relevant startup scripts +talis deploy + +# use more workers for faster deployment (when using direct upload) +talis deploy --direct-payload-upload --workers 20 +``` + +Note: By default, the `deploy` command will upload the payload to the configured S3 bucket, and then download it in the nodes. To upload the payload directly without passing by S3, use the `--direct-payload-upload` flag. The `--workers` flag only affects the direct upload method. + +### txsim + +To load the network we can use `talis` to start txsim on as many validator nodes as we want for that experiment. 
+ +```sh +# start txsim on some number of the validator instances +talis txsim -i -s --min-blob-size --max-blob-size +``` + +### status + +Often, it's useful to quickly check if all the nodes have caught up to the tip of the chain. This can be done via the status command, which simply prints the height of each validator after querying the `Status` endpoint. + +```sh +# check which height each validator is at +talis status +``` + +### traces + +To download traces from the network, we can use `talis` to download traces from as many validator nodes as we want for that experiment. + +```sh +# download some number of traces directly from nodes to your machine via sftp +talis download -n -t [flags] + +# use more workers for faster downloads from many nodes +talis download -n -t
--workers 20 +``` + +To quickly view block times, assuming this table was being traced we can run: + +```sh +talis download -n validator-0 -t consensus_block +``` + +or if we needed to quickly see all of the mempool traces: + +```sh +talis download -n validator-* -t mempool_tx +``` + +or if we want to check on the logs we can call: + +```sh +talis download -n validator-* -t logs +``` + +### Collecting all traces to an s3 bucket + +At the end of the experiment, we can quickly save all of the traces to an s3 bucket assuming that we filled out the s3 config in the config.json. + +```sh +talis upload-data +``` + +This could take a few minutes if there is a ton of trace data, but often is completed in <30s. To download this data from the s3 bucket, we can use the s3 subcommand: + +```sh +talis download s3 +``` + +### Modifying the nodes in place + +Instead of shutting down all of the nodes, if we want to run a slightly modified experiment, we can simply run the [reset](#reset) command then rerun the `genesis` and `deploy` commands. This will create a new payload and restart the network without tearing down the cloud instances. This will delete any trace data. + +### reset + +This command allows you to stop running services and clean up files created by the `deploy` command for either specific validators or all validators in the network. + +```sh +# Reset all validators in the network +talis reset + +# Reset specific validators +talis reset -v validator-0,validator-1 +``` + +### down + +Finally, remember to tear down the cloud instances. This should work first try, but it's a good habit to re-run or check the webUI for large experiments to make sure nodes were shut down successfully. 
+ +```sh +# tears down the network +talis down + +# use more workers for faster teardown of many instances +talis down --workers 20 +``` + +## Running Talis inside of a DigitalOcean droplet + +Create a new droplet: + +- Recommended Size: 32GB RAM 16CPU +- SSH Keys: Add your SSH key + +SSH into the Droplet: + +```sh +ssh root@YOUR_DROPLET_IP +``` + +Install Deps: + +```sh +# Install Go +snap install go --channel=1.26/stable --classic + +# Install Docker +apt install docker.io -y +systemctl start docker +usermod -aG docker $USER + +# Install misc tools +apt install git curl jq -y +``` + +Set up Go env: + +```sh +echo 'export GOPATH="$HOME/go"' >> ~/.profile +echo 'export GOBIN="$GOPATH/bin"' >> ~/.profile +echo 'export PATH="$GOBIN:$PATH"' >> ~/.profile +source ~/.profile +``` + +Clone and build: + +```sh +# Clone celestia-app and cd into it +git clone https://github.com/celestiaorg/celestia-app.git +cd celestia-app + +# Build binaries (celestia, celestia-appd, txsim) +make build-talis-bins + +# Install talis +go install ./tools/talis/ +``` + +Set env variables: + +```sh +export DIGITALOCEAN_TOKEN="your_api_token_here" +export TALIS_SSH_KEY_PATH="~/.ssh/id_ed25519_no_passphrase" +``` + +**Run Talis:** + +Talis assumes that you're using your default SSH key, so if you created a new key above you need to specify it in the commands. + +```sh +# Initialize +talis init -c your-chain-id -e your-experiment + +# Add validators +talis add -t validator -c + +# Spin up talis (use more workers if creating many instances) +talis up -n -s --workers 20 + +# Create payload +talis genesis -s 128 -b build + +# Deploy (use more workers for faster direct deployment) +talis deploy -s --direct-payload-upload --workers 20 +``` + +**Save Snapshot:** + +After you're done running experiments, make sure to take a snapshot of your deployment droplet and destroy the original. 
diff --git a/tools/talis/add.go b/tools/talis/add.go new file mode 100644 index 0000000000..b6eeb0ca48 --- /dev/null +++ b/tools/talis/add.go @@ -0,0 +1,142 @@ +package main + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" +) + +func addCmd() *cobra.Command { + var ( + rootDir string + count int + nodeType string + provider string + region string + slug string + ) + cmd := &cobra.Command{ + Use: "add", + Short: "Adds a new instances to the configuration", + Aliases: []string{"a"}, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config %q: %w", rootDir, err) + } + + if provider == "" { + provider = "digitalocean" + } + + switch nodeType { + case "validator": + start := len(cfg.Validators) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanValidator(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudValidator(region) + case "aws": + cfg = cfg.WithAWSValidator(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Validators, start, slug) + case "encoder": + start := len(cfg.Encoders) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanEncoder(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudEncoder(region) + case "aws": + cfg = cfg.WithAWSEncoder(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Encoders, start, slug) + case "bridge": + start := len(cfg.Bridges) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanBridge(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudBridge(region) + case "aws": + cfg = cfg.WithAWSBridge(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + 
applySlug(cfg.Bridges, start, slug) + case "evnode": + start := len(cfg.Evnodes) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanEvnode(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudEvnode(region) + case "aws": + cfg = cfg.WithAWSEvnode(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Evnodes, start, slug) + case "loadgen": + start := len(cfg.Loadgens) + for range count { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanLoadgen(region) + case "googlecloud": + cfg = cfg.WithGoogleCloudLoadgen(region) + case "aws": + cfg = cfg.WithAWSLoadgen(region) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + } + applySlug(cfg.Loadgens, start, slug) + case "light": + log.Println("light nodes are not yet supported") + return nil + default: + return fmt.Errorf("unknown node type %q", nodeType) + } + + return cfg.Save(rootDir) + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().IntVarP(&count, "count", "c", 0, "Number of nodes to deploy") + _ = cmd.MarkFlagRequired("count") + cmd.Flags().StringVarP(&nodeType, "type", "t", "", "Type of the node (validator, encoder, bridge, evnode, loadgen, light)") + _ = cmd.MarkFlagRequired("type") + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "Provider for the node (digitalocean, googlecloud, aws)") + cmd.Flags().StringVarP(®ion, "region", "r", "random", "the region to deploy the instance in (random if blank)") + cmd.Flags().StringVar(&slug, "slug", "", "provider-specific instance type override (e.g. c6in.4xlarge). Empty = provider default for the node type.") + + return cmd +} + +// applySlug overrides the Slug field on the just-added instances in the +// slice. 
It only touches entries at index [start, len(instances)) so a +// second `add` with a different `--slug` does not re-stamp earlier ones. +func applySlug(instances []Instance, start int, slug string) { + if slug == "" { + return + } + for i := start; i < len(instances); i++ { + instances[i].Slug = slug + } +} diff --git a/tools/talis/aws.go b/tools/talis/aws.go new file mode 100644 index 0000000000..249dc89bbd --- /dev/null +++ b/tools/talis/aws.go @@ -0,0 +1,1033 @@ +package main + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "math/rand" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" +) + +const ( + // c6in.4xlarge: 16 vCPU / 32 GiB / 25 Gbps baseline network with ENA + // Express (SRD). The "n" suffix marks network-enhanced variants, which + // is what talis fibre experiments care about — they're networking-bound. + AWSDefaultValidatorInstanceType = "c6in.4xlarge" + // c6in.2xlarge: 8 vCPU / 16 GiB — encoders submit blobs via gRPC and + // don't need the full validator footprint. + AWSDefaultEncoderInstanceType = "c6in.2xlarge" + // Bridge nodes are network-bound (relay headers / blob events from + // validators to ev-node). c6in.2xlarge gives the same 25 Gbps as + // the validators while halving CPU. + AWSDefaultBridgeInstanceType = "c6in.2xlarge" + // ev-node aggregators are CPU + network bound (block production + // + DA submit pipeline). Same shape as bridges; can be sized up + // per-experiment via `--slug`. + AWSDefaultEvnodeInstanceType = "c6in.2xlarge" + // Load generators are network-bound; same shape as evnode/bridge. 
+ AWSDefaultLoadgenInstanceType = "c6in.2xlarge" + AWSDefaultObservabilityInstanceType = "t3.medium" + AWSDefaultRootVolumeGB = int32(400) + + // AWSSecurityGroupName is the name of the security group used by every + // talis instance. It is created per-region on demand and permits all + // inbound traffic — same posture as the GCP firewall rule. + AWSSecurityGroupName = "talis-allow-all" + // AWSPlacementGroupName is the name of the cluster placement group used + // by every talis instance in a region. Cluster strategy gives the lowest + // inter-instance latency within an AZ — critical for fibre/p2p. + AWSPlacementGroupName = "talis-cluster" + + // AWSCanonicalOwnerID is Canonical's AWS account ID. It owns the + // official Ubuntu AMIs we filter against. + AWSCanonicalOwnerID = "099720109477" + // AWSUbuntuImageNamePattern matches Ubuntu 24.04 LTS amd64 EBS SSD + // images (matches talis' default OS image for DO / GCP). + AWSUbuntuImageNamePattern = "ubuntu/images/hvm-ssd*/ubuntu-noble-24.04-amd64-server-*" + + // AWSDefaultZone is the AZ used for launches when Config.AWSZone is + // unset. Single-AZ launches keep all cross-instance traffic intra-AZ + // (free) and enable a cluster placement group for minimum latency. + AWSDefaultZone = "us-east-1a" +) + +// AWSRegions is the pool used when "random" is selected for an AWS +// instance. We ship a single region by default: cross-region traffic on +// AWS is billed at $0.09/GB (~9× DO), so running networking-heavy +// experiments across regions is wildly expensive. Operators who need +// multi-region can set an explicit Region on each Instance. +var AWSRegions = []string{"us-east-1"} + +// amiCache memoises the resolved Ubuntu AMI per region — AMIs are +// region-scoped and resolving them costs an API round-trip. 
+var amiCache sync.Map // map[region]string + +type AWSClient struct { + ClientInfo + defaultRegion string +} + +func NewAWSClient(cfg Config) (*AWSClient, error) { + if cfg.AWSRegion == "" { + return nil, errors.New("AWS region is required") + } + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at %s: %w", cfg.SSHPubKeyPath, err) + } + return &AWSClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + defaultRegion: cfg.AWSRegion, + }, nil +} + +func (c *AWSClient) Up(ctx context.Context, workers int) error { + zone := c.cfg.AWSZone + if zone == "" { + zone = AWSDefaultZone + } + + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != AWS { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomAWSRegion() + } + if v.Zone == "" { + v.Zone = zone + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + insts, err := CreateAWSInstances(ctx, insts, string(c.sshKey), c.cfg.SSHKeyName, workers) + if err != nil { + return fmt.Errorf("failed to create instances: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + return nil +} + +func (c *AWSClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) 
+ for _, v := range allInstances { + if v.Provider != AWS { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomAWSRegion() + } + insts = append(insts, v) + } + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + _, err := DestroyAWSInstances(ctx, insts, workers) + return err +} + +func (c *AWSClient) List(ctx context.Context) error { + cnt := 0 + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + return fmt.Errorf("failed to create EC2 client in %s: %w", region, err) + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + return fmt.Errorf("describe instances in %s: %w", region, err) + } + for _, inst := range insts { + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Region", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + state := "" + if inst.State != nil { + state = string(inst.State.Name) + } + publicIP := "" + if inst.PublicIpAddress != nil { + publicIP = *inst.PublicIpAddress + } + created := "" + if inst.LaunchTime != nil { + created = inst.LaunchTime.Format(time.RFC3339) + } + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + instanceNameFromTags(inst.Tags), state, region, publicIP, created) + cnt++ + } + } + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *AWSClient) GetConfig() Config { + return c.cfg +} + +func NewAWSValidator(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Validator) + i.Provider = AWS + i.Slug = AWSDefaultValidatorInstanceType + i.Region = region + return i +} + +func NewAWSEncoder(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Encoder) + i.Provider = AWS + i.Slug = AWSDefaultEncoderInstanceType + i.Region = region + return 
i +} + +func NewAWSBridge(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Bridge) + i.Provider = AWS + i.Slug = AWSDefaultBridgeInstanceType + i.Region = region + return i +} + +func NewAWSEvnode(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Evnode) + i.Provider = AWS + i.Slug = AWSDefaultEvnodeInstanceType + i.Region = region + return i +} + +func NewAWSLoadgen(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Loadgen) + i.Provider = AWS + i.Slug = AWSDefaultLoadgenInstanceType + i.Region = region + return i +} + +func NewAWSObservability(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomAWSRegion() + } + i := NewBaseInstance(Observability) + i.Provider = AWS + i.Slug = AWSDefaultObservabilityInstanceType + i.Region = region + return i +} + +func RandomAWSRegion() string { + return AWSRegions[rand.Intn(len(AWSRegions))] +} + +// awsRegionFromEnv returns the region stamped into Config when +// `--provider aws` is selected. Falls back to us-east-1 to match AWS +// SDK's historical implicit default. +func awsRegionFromEnv() string { + if r := os.Getenv(EnvVarAWSRegion); r != "" { + return r + } + return "us-east-1" +} + +// resolveAWSZone returns the given zone or AWSDefaultZone. +func resolveAWSZone(zone string) string { + if zone != "" { + return zone + } + return AWSDefaultZone +} + +// newEC2Client constructs a regional EC2 client using the SDK default +// credential chain (env vars, shared credentials file, IAM role, ...). 
+func newEC2Client(ctx context.Context, region string) (*ec2.Client, error) { + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(region)) + if err != nil { + return nil, fmt.Errorf("failed to load AWS config for region %s: %w", region, err) + } + return ec2.NewFromConfig(awsCfg), nil +} + +// CreateAWSInstances launches EC2 instances in parallel, each pinned to +// its Instance.Zone + the cluster placement group (where supported), +// waits for public + private IPs, and returns the filled-in slice. +func CreateAWSInstances(ctx context.Context, insts []Instance, sshKey, keyName string, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + insts, existing, err := filterExistingAWSInstances(ctx, insts) + if err != nil { + return nil, err + } + if len(existing) > 0 { + log.Println("Existing instances found, so they are not being created.") + for _, v := range existing { + log.Println("Skipping", v.Name, v.PublicIP, v.Tags) + } + } + + total := len(insts) + results := make(chan result, total) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(total) + + for _, v := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + + ctx, cancel := context.WithTimeout(ctx, 7*time.Minute) + defer cancel() + + start := time.Now() + log.Println("Creating instance", inst.Name, "in region", inst.Region, start.Format(time.RFC3339)) + + pubIP, privIP, err := createAWSInstance(ctx, inst, sshKey, keyName) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("create %s: %w", inst.Name, err)} + return + } + inst.PublicIP = pubIP + inst.PrivateIP = privIP + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(v) + } + + go func() { + wg.Wait() + close(results) + }() + + var created []Instance + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed after %v 
%v\n", res.inst.Name, res.timeRequired, res.err) + } else { + created = append(created, res.inst) + fmt.Printf("✅ %s is up (public=%s) in %v\n", res.inst.Name, res.inst.PublicIP, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(created), total) + } + return created, nil +} + +// createAWSInstance runs the full per-instance provisioning: resolve +// AMI, ensure key pair + security group + placement group, resolve +// default subnet in the target AZ, RunInstances, wait for IPs. +func createAWSInstance(ctx context.Context, inst Instance, sshKey, keyName string) (string, string, error) { + client, err := newEC2Client(ctx, inst.Region) + if err != nil { + return "", "", err + } + + amiID, err := resolveUbuntuAMI(ctx, client, inst.Region) + if err != nil { + return "", "", fmt.Errorf("resolve AMI: %w", err) + } + if err := ensureAWSKeyPair(ctx, client, keyName, sshKey); err != nil { + return "", "", fmt.Errorf("ensure key pair: %w", err) + } + sgID, err := ensureAWSSecurityGroup(ctx, client) + if err != nil { + return "", "", fmt.Errorf("ensure security group: %w", err) + } + + useCPG := supportsClusterPlacement(inst.Slug) + if useCPG { + if err := ensureAWSPlacementGroup(ctx, client); err != nil { + return "", "", fmt.Errorf("ensure placement group: %w", err) + } + } + + zone := inst.Zone + if zone == "" { + zone = AWSDefaultZone + } + subnetID, err := defaultSubnetInAZ(ctx, client, zone) + if err != nil { + return "", "", fmt.Errorf("resolve subnet in %s: %w", zone, err) + } + + tags := awsTagsFromInstance(inst) + userData := base64.StdEncoding.EncodeToString([]byte(awsRootSSHUserData(sshKey, inst.Name))) + + placement := &ec2types.Placement{AvailabilityZone: aws.String(zone)} + if useCPG { + placement.GroupName = aws.String(AWSPlacementGroupName) + } + + runOut, err := client.RunInstances(ctx, &ec2.RunInstancesInput{ + ImageId: aws.String(amiID), + InstanceType: ec2types.InstanceType(inst.Slug), + MinCount: aws.Int32(1), + MaxCount: aws.Int32(1), + 
KeyName: aws.String(keyName), + UserData: aws.String(userData), + // Use a single NIC so we can force public-IP assignment regardless + // of the subnet's MapPublicIpOnLaunch setting. SubnetId and + // SecurityGroupIds must live on the interface — the API rejects + // both top-level and interface-level settings together. + NetworkInterfaces: []ec2types.InstanceNetworkInterfaceSpecification{{ + DeviceIndex: aws.Int32(0), + SubnetId: aws.String(subnetID), + Groups: []string{sgID}, + AssociatePublicIpAddress: aws.Bool(true), + DeleteOnTermination: aws.Bool(true), + }}, + Placement: placement, + BlockDeviceMappings: []ec2types.BlockDeviceMapping{{ + DeviceName: aws.String("/dev/sda1"), + Ebs: &ec2types.EbsBlockDevice{ + VolumeSize: aws.Int32(AWSDefaultRootVolumeGB), + VolumeType: ec2types.VolumeTypeGp3, + DeleteOnTermination: aws.Bool(true), + }, + }}, + MetadataOptions: &ec2types.InstanceMetadataOptionsRequest{ + HttpTokens: ec2types.HttpTokensStateRequired, + HttpEndpoint: ec2types.InstanceMetadataEndpointStateEnabled, + }, + TagSpecifications: []ec2types.TagSpecification{ + {ResourceType: ec2types.ResourceTypeInstance, Tags: tags}, + {ResourceType: ec2types.ResourceTypeVolume, Tags: tags}, + }, + }) + if err != nil { + return "", "", fmt.Errorf("run instance: %w", err) + } + if len(runOut.Instances) == 0 || runOut.Instances[0].InstanceId == nil { + return "", "", fmt.Errorf("RunInstances returned no instances") + } + + return waitForAWSNetworkIP(ctx, client, *runOut.Instances[0].InstanceId) +} + +// supportsClusterPlacement reports whether the given EC2 instance type +// can join a cluster placement group. Cluster placement groups require +// compute/network-optimised families; burstable (t*) is explicitly +// rejected by the API. Observability nodes default to t3.medium, which +// falls back to AZ-only placement. 
+func supportsClusterPlacement(slug string) bool { + return slug != "" && !strings.HasPrefix(slug, "t") +} + +func waitForAWSNetworkIP(ctx context.Context, client *ec2.Client, instanceID string) (string, string, error) { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + case <-ticker.C: + out, err := client.DescribeInstances(ctx, &ec2.DescribeInstancesInput{ + InstanceIds: []string{instanceID}, + }) + if err != nil { + return "", "", err + } + inst, ok := firstInstance(out) + if !ok { + continue + } + var pubIP, privIP string + if inst.PublicIpAddress != nil { + pubIP = *inst.PublicIpAddress + } + if inst.PrivateIpAddress != nil { + privIP = *inst.PrivateIpAddress + } + if pubIP != "" && privIP != "" { + return pubIP, privIP, nil + } + } + } +} + +func firstInstance(out *ec2.DescribeInstancesOutput) (ec2types.Instance, bool) { + for _, r := range out.Reservations { + for _, i := range r.Instances { + return i, true + } + } + return ec2types.Instance{}, false +} + +// filterExistingAWSInstances removes instances whose experiment tag +// already exists in any region covered by the request. Groups by region +// so each region is queried once. 
+func filterExistingAWSInstances(ctx context.Context, insts []Instance) ([]Instance, []Instance, error) { + regions := make(map[string]struct{}) + for _, inst := range insts { + regions[inst.Region] = struct{}{} + } + + existingTags := make(map[string]bool) + for region := range regions { + client, err := newEC2Client(ctx, region) + if err != nil { + return nil, nil, err + } + tags, err := collectTalisTagKeys(ctx, client) + if err != nil { + return nil, nil, fmt.Errorf("list existing tags in %s: %w", region, err) + } + for tag := range tags { + existingTags[tag] = true + } + } + + var newInsts, existing []Instance + for _, inst := range insts { + experimentTag := GetExperimentTag(inst.Tags) + if experimentTag == "" || !existingTags[experimentTag] { + newInsts = append(newInsts, inst) + } else { + existing = append(existing, inst) + } + } + return newInsts, existing, nil +} + +func collectTalisTagKeys(ctx context.Context, client *ec2.Client) (map[string]bool, error) { + out := make(map[string]bool) + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag-key"), Values: []string{"talis"}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + for _, r := range page.Reservations { + for _, i := range r.Instances { + for _, t := range i.Tags { + if t.Key != nil { + out[*t.Key] = true + } + } + } + } + } + return out, nil +} + +func DestroyAWSInstances(ctx context.Context, insts []Instance, workers int) ([]Instance, error) { + return destroyAWSInstancesInternal(ctx, insts, workers) +} + +func destroyAWSInstancesInternal(ctx context.Context, insts []Instance, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, 
len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + fmt.Println("⏳ Deleting instance", inst.Name, inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + region := inst.Region + if region == "" { + found, err := findAWSInstanceRegion(delCtx, inst.Name) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find region for %s: %w", inst.Name, err)} + return + } + region = found + } + + client, err := newEC2Client(delCtx, region) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("ec2 client %s: %w", region, err)} + return + } + + instanceID, err := findAWSInstanceID(delCtx, client, inst) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find instance %s: %w", inst.Name, err)} + return + } + + if _, err := client.TerminateInstances(delCtx, &ec2.TerminateInstancesInput{ + InstanceIds: []string{instanceID}, + }); err != nil { + results <- result{inst: inst, err: fmt.Errorf("terminate %s: %w", inst.Name, err)} + return + } + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(inst) + } + + go func() { + wg.Wait() + close(results) + }() + + var removed []Instance + var failed []result + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed to delete after %v: %v\n", res.inst.Name, res.timeRequired, res.err) + failed = append(failed, res) + } else { + removed = append(removed, res.inst) + fmt.Printf("✅ %s terminated (took %v)\n", res.inst.Name, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(removed)+len(failed), len(insts)) + } + return removed, nil +} + +// findAWSInstanceID resolves an Instance (by its experiment tag if +// present, otherwise by Name) to an EC2 instance ID. 
Filters out +// already-terminated instances so repeated calls don't return ghosts. +func findAWSInstanceID(ctx context.Context, client *ec2.Client, inst Instance) (string, error) { + filters := []ec2types.Filter{ + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + } + if experimentTag := GetExperimentTag(inst.Tags); experimentTag != "" { + filters = append(filters, ec2types.Filter{Name: aws.String("tag-key"), Values: []string{experimentTag}}) + } else { + filters = append(filters, ec2types.Filter{Name: aws.String("tag:Name"), Values: []string{inst.Name}}) + } + + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{Filters: filters}) + var ids []string + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return "", err + } + for _, r := range page.Reservations { + for _, i := range r.Instances { + if i.InstanceId != nil { + ids = append(ids, *i.InstanceId) + } + } + } + } + + switch len(ids) { + case 0: + return "", fmt.Errorf("no instances found for %s", inst.Name) + case 1: + return ids[0], nil + default: + return "", fmt.Errorf("multiple instances match %s: %v", inst.Name, ids) + } +} + +func findAWSInstanceRegion(ctx context.Context, name string) (string, error) { + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + continue + } + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag:Name"), Values: []string{name}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + break + } + for _, r := range page.Reservations { + if len(r.Instances) > 0 { + return region, nil + } + } + } + } + return "", fmt.Errorf("instance %s not found in any known AWS 
region", name) +} + +// destroyAllTalisAWSInstances terminates every EC2 instance tagged +// "talis" across every known region. Called via `down --all`. +func destroyAllTalisAWSInstances(ctx context.Context, workers int) ([]Instance, error) { + var talisInstances []Instance + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + log.Printf("⚠️ failed to build EC2 client for %s: %v", region, err) + continue + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + log.Printf("⚠️ failed to describe instances in %s: %v", region, err) + continue + } + for _, i := range insts { + publicIP := "" + if i.PublicIpAddress != nil { + publicIP = *i.PublicIpAddress + } + talisInstances = append(talisInstances, Instance{ + Name: instanceNameFromTags(i.Tags), + PublicIP: publicIP, + Region: region, + }) + } + } + + if len(talisInstances) == 0 { + log.Println("No talis AWS instances found to destroy") + return nil, nil + } + return destroyAWSInstancesInternal(ctx, talisInstances, workers) +} + +func describeTalisInstances(ctx context.Context, client *ec2.Client) ([]ec2types.Instance, error) { + var out []ec2types.Instance + paginator := ec2.NewDescribeInstancesPaginator(client, &ec2.DescribeInstancesInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("tag-key"), Values: []string{"talis"}}, + {Name: aws.String("instance-state-name"), Values: []string{"pending", "running", "stopping", "stopped"}}, + }, + }) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + for _, r := range page.Reservations { + out = append(out, r.Instances...) 
+ } + } + return out, nil +} + +func checkForRunningAWSExperiments(ctx context.Context, awsRegionConfigured bool, experimentID, chainID string) (bool, error) { + if !awsRegionConfigured { + return false, nil + } + for _, region := range AWSRegions { + client, err := newEC2Client(ctx, region) + if err != nil { + return false, fmt.Errorf("failed to create EC2 client in %s: %w", region, err) + } + insts, err := describeTalisInstances(ctx, client) + if err != nil { + return false, fmt.Errorf("describe instances in %s: %w", region, err) + } + for _, i := range insts { + for _, t := range i.Tags { + if t.Key == nil { + continue + } + if hasAWSExperimentTag(*t.Key, experimentID, chainID) { + return true, nil + } + } + } + } + return false, nil +} + +func hasAWSExperimentTag(tag, experimentID, chainID string) bool { + if !strings.HasPrefix(tag, "validator-") && + !strings.HasPrefix(tag, "bridge-") && + !strings.HasPrefix(tag, "light-") && + !strings.HasPrefix(tag, "encoder-") { + return false + } + return strings.Contains(tag, experimentID) && strings.Contains(tag, chainID) +} + +// resolveUbuntuAMI finds the most recent Ubuntu 24.04 AMI in the region. +// Results are cached in-process since AMI IDs rarely change and the +// lookup costs an API round-trip. 
+func resolveUbuntuAMI(ctx context.Context, client *ec2.Client, region string) (string, error) { + if cached, ok := amiCache.Load(region); ok { + return cached.(string), nil + } + + out, err := client.DescribeImages(ctx, &ec2.DescribeImagesInput{ + Owners: []string{AWSCanonicalOwnerID}, + Filters: []ec2types.Filter{ + {Name: aws.String("name"), Values: []string{AWSUbuntuImageNamePattern}}, + {Name: aws.String("state"), Values: []string{"available"}}, + {Name: aws.String("architecture"), Values: []string{"x86_64"}}, + {Name: aws.String("virtualization-type"), Values: []string{"hvm"}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe images: %w", err) + } + if len(out.Images) == 0 { + return "", fmt.Errorf("no Ubuntu AMIs found in %s", region) + } + + sort.Slice(out.Images, func(i, j int) bool { + a, b := "", "" + if out.Images[i].CreationDate != nil { + a = *out.Images[i].CreationDate + } + if out.Images[j].CreationDate != nil { + b = *out.Images[j].CreationDate + } + return a > b + }) + + amiID := "" + if out.Images[0].ImageId != nil { + amiID = *out.Images[0].ImageId + } + if amiID == "" { + return "", fmt.Errorf("selected AMI has no ID in %s", region) + } + amiCache.Store(region, amiID) + return amiID, nil +} + +// ensureAWSKeyPair imports the SSH public key under keyName if it's not +// already registered in the region. EC2 key pairs are region-scoped, so +// this runs once per region. +func ensureAWSKeyPair(ctx context.Context, client *ec2.Client, keyName, publicKey string) error { + if keyName == "" { + return errors.New("SSH key name is required for AWS — set via --ssh-key-name or TALIS_SSH_KEY_NAME") + } + if _, err := client.DescribeKeyPairs(ctx, &ec2.DescribeKeyPairsInput{ + KeyNames: []string{keyName}, + }); err == nil { + return nil + } + // Any error is treated as "not found"; let ImportKeyPair surface the + // real problem if something else is wrong. 
+ if _, err := client.ImportKeyPair(ctx, &ec2.ImportKeyPairInput{ + KeyName: aws.String(keyName), + PublicKeyMaterial: []byte(strings.TrimSpace(publicKey)), + }); err != nil { + // `talis up` parallelises CreateAWSInstances, so each + // goroutine races to import the same key. Only one wins; + // the rest see Duplicate. Treat that as success — the key + // is now available for everyone. + if strings.Contains(err.Error(), "InvalidKeyPair.Duplicate") { + return nil + } + return fmt.Errorf("import key pair: %w", err) + } + return nil +} + +// ensureAWSSecurityGroup creates (or looks up) a security group in the +// region's default VPC that allows all inbound traffic from 0.0.0.0/0 — +// same posture as the GCP firewall rule. +func ensureAWSSecurityGroup(ctx context.Context, client *ec2.Client) (string, error) { + vpcID, err := defaultVPCID(ctx, client) + if err != nil { + return "", err + } + + desc, err := client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("group-name"), Values: []string{AWSSecurityGroupName}}, + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + }, + }) + if err == nil && len(desc.SecurityGroups) > 0 && desc.SecurityGroups[0].GroupId != nil { + return *desc.SecurityGroups[0].GroupId, nil + } + + create, err := client.CreateSecurityGroup(ctx, &ec2.CreateSecurityGroupInput{ + GroupName: aws.String(AWSSecurityGroupName), + Description: aws.String("Talis: allow all inbound traffic on all ports"), + VpcId: aws.String(vpcID), + }) + if err != nil { + // Another goroutine may have raced us; try to look it up again. 
+ if desc2, err2 := client.DescribeSecurityGroups(ctx, &ec2.DescribeSecurityGroupsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("group-name"), Values: []string{AWSSecurityGroupName}}, + {Name: aws.String("vpc-id"), Values: []string{vpcID}}, + }, + }); err2 == nil && len(desc2.SecurityGroups) > 0 && desc2.SecurityGroups[0].GroupId != nil { + return *desc2.SecurityGroups[0].GroupId, nil + } + return "", fmt.Errorf("create security group: %w", err) + } + if create.GroupId == nil { + return "", fmt.Errorf("CreateSecurityGroup returned empty group id") + } + groupID := *create.GroupId + + if _, err := client.AuthorizeSecurityGroupIngress(ctx, &ec2.AuthorizeSecurityGroupIngressInput{ + GroupId: aws.String(groupID), + IpPermissions: []ec2types.IpPermission{{ + IpProtocol: aws.String("-1"), // all protocols + IpRanges: []ec2types.IpRange{{CidrIp: aws.String("0.0.0.0/0")}}, + }}, + }); err != nil && !strings.Contains(err.Error(), "InvalidPermission.Duplicate") { + return "", fmt.Errorf("authorize ingress: %w", err) + } + return groupID, nil +} + +// ensureAWSPlacementGroup creates a cluster placement group in the +// region if one doesn't already exist. Idempotent and race-safe. +func ensureAWSPlacementGroup(ctx context.Context, client *ec2.Client) error { + out, err := client.DescribePlacementGroups(ctx, &ec2.DescribePlacementGroupsInput{ + GroupNames: []string{AWSPlacementGroupName}, + }) + if err == nil && len(out.PlacementGroups) > 0 { + return nil + } + if _, err := client.CreatePlacementGroup(ctx, &ec2.CreatePlacementGroupInput{ + GroupName: aws.String(AWSPlacementGroupName), + Strategy: ec2types.PlacementStrategyCluster, + }); err != nil && !strings.Contains(err.Error(), "InvalidPlacementGroup.Duplicate") { + return fmt.Errorf("create placement group: %w", err) + } + return nil +} + +// defaultSubnetInAZ returns the SubnetId of the default VPC's default +// subnet in the given AZ. 
Relies on default-VPC semantics (every account +// has one unless explicitly deleted) rather than managing subnets. +func defaultSubnetInAZ(ctx context.Context, client *ec2.Client, az string) (string, error) { + out, err := client.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("default-for-az"), Values: []string{"true"}}, + {Name: aws.String("availability-zone"), Values: []string{az}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe subnets: %w", err) + } + if len(out.Subnets) == 0 || out.Subnets[0].SubnetId == nil { + return "", fmt.Errorf("no default subnet in %s — the account may be missing a default VPC/subnet", az) + } + return *out.Subnets[0].SubnetId, nil +} + +func defaultVPCID(ctx context.Context, client *ec2.Client) (string, error) { + out, err := client.DescribeVpcs(ctx, &ec2.DescribeVpcsInput{ + Filters: []ec2types.Filter{ + {Name: aws.String("is-default"), Values: []string{"true"}}, + }, + }) + if err != nil { + return "", fmt.Errorf("describe default VPC: %w", err) + } + if len(out.Vpcs) == 0 || out.Vpcs[0].VpcId == nil { + return "", fmt.Errorf("no default VPC found — create one or extend talis to accept an explicit VPC") + } + return *out.Vpcs[0].VpcId, nil +} + +func awsTagsFromInstance(inst Instance) []ec2types.Tag { + tags := make([]ec2types.Tag, 0, len(inst.Tags)+1) + tags = append(tags, ec2types.Tag{Key: aws.String("Name"), Value: aws.String(inst.Name)}) + for _, t := range inst.Tags { + tags = append(tags, ec2types.Tag{Key: aws.String(t), Value: aws.String("true")}) + } + return tags +} + +func instanceNameFromTags(tags []ec2types.Tag) string { + for _, t := range tags { + if t.Key != nil && *t.Key == "Name" && t.Value != nil { + return *t.Value + } + } + return "" +} + +// awsRootSSHUserData returns cloud-init user-data that (1) sets the +// instance hostname to the talis name (validator_init.sh parses +// `hostname` to pick per-validator keys — AWS's default `ip-172-…` +// 
format breaks that parser), and (2) installs the operator's SSH +// public key into /root/.ssh/authorized_keys so deployment.go can keep +// using `root@`. +func awsRootSSHUserData(sshKey, instanceName string) string { + key := strings.TrimSpace(sshKey) + return fmt.Sprintf(`#cloud-config +disable_root: false +preserve_hostname: false +hostname: %s +fqdn: %s +runcmd: + - hostnamectl set-hostname %s + - mkdir -p /root/.ssh + - 'echo "%s" > /root/.ssh/authorized_keys' + - chmod 700 /root/.ssh + - chmod 600 /root/.ssh/authorized_keys + - chown -R root:root /root/.ssh +`, + instanceName, + instanceName, + instanceName, + strings.ReplaceAll(key, `"`, `\"`), + ) +} diff --git a/tools/talis/client.go b/tools/talis/client.go new file mode 100644 index 0000000000..5402d86bcd --- /dev/null +++ b/tools/talis/client.go @@ -0,0 +1,232 @@ +package main + +import ( + "context" + "errors" + "fmt" + "log" + "os" + + "github.com/digitalocean/godo" + "golang.org/x/oauth2" +) + +const ( + DODropletLimit = 100 +) + +type Client interface { + Up(ctx context.Context, workers int) error + Down(ctx context.Context, workers int) error + List(ctx context.Context) error + GetConfig() Config +} + +type ClientInfo struct { + sshKey []byte + cfg Config +} + +type DOClient struct { + ClientInfo + do *godo.Client + doSSHKey godo.Key +} + +func NewClient(cfg Config) (Client, error) { + if cfg.DigitalOceanToken != "" { + return NewDOClient(cfg) + } + if cfg.GoogleCloudProject != "" { + return NewGCClient(cfg) + } + if cfg.AWSRegion != "" { + return NewAWSClient(cfg) + } + return nil, errors.New("no cloud provider credentials found") +} + +func NewDOClient(cfg Config) (*DOClient, error) { + if cfg.DigitalOceanToken == "" { + return nil, errors.New("DigitalOcean token is required") + } + + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken}) + client := godo.NewClient(oauth2.NewClient(context.Background(), tokenSource)) + + if client == nil { + return nil, 
errors.New("failed to create DigitalOcean client") + } + + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at: %s %w", cfg.SSHPubKeyPath, err) + } + + key, err := GetDOSSHKeyMeta(context.Background(), client, string(sshKey)) + if err != nil { + return nil, fmt.Errorf("failed to get SSH key ID: %w", err) + } + + return &DOClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + do: client, + doSSHKey: key, + }, nil +} + +func (c *DOClient) Up(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != DigitalOcean { + log.Println("unexpectedly skipping instance since only DO is supported", v.Name, "in region", v.Region) + continue + } + + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomDORegion() + } + + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + // Check if spinning up these instances would exceed the 100-droplet limit + currentCount, err := c.countRunningDroplets(ctx) + if err != nil { + log.Printf("⚠️ Warning: failed to count running droplets: %v", err) + } else { + totalAfterUp := currentCount + len(insts) + if totalAfterUp > DODropletLimit { + excess := totalAfterUp - DODropletLimit + return fmt.Errorf("cannot spin up %d instances: would exceed DigitalOcean's %d droplet limit (currently %d running, would be %d total). 
Please reduce the number of instances by %d", len(insts), DODropletLimit, currentCount, totalAfterUp, excess) + } + } + + insts, err = CreateDroplets(ctx, c.do, insts, c.doSSHKey, workers) + if err != nil { + return fmt.Errorf("failed to create droplets: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + + return err +} + +func (c *DOClient) countRunningDroplets(ctx context.Context) (int, error) { + opts := &godo.ListOptions{} + count := 0 + for { + droplets, resp, err := c.do.Droplets.List(ctx, opts) + if err != nil { + return 0, fmt.Errorf("failed to list droplets: %w", err) + } + + count += len(droplets) + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + page, err := resp.Links.CurrentPage() + if err != nil { + return 0, fmt.Errorf("failed to paginate droplets list: %w", err) + } + + opts.Page = page + 1 + } + + return count, nil +} + +func (c *DOClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) 
+ for _, v := range allInstances { + if v.Provider != DigitalOcean { + log.Println("unexpectedly skipping instance since only DO is supported", v.Name, "in region", v.Region) + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomDORegion() + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + + _, err := DestroyDroplets(ctx, c.do, insts, workers) + return err +} + +func (c *DOClient) List(ctx context.Context) error { + opts := &godo.ListOptions{} + cnt := 0 + for { + droplets, resp, err := c.do.Droplets.List(ctx, opts) + if err != nil { + return fmt.Errorf("failed to list droplets: %w", err) + } + + for _, droplet := range droplets { + if hasAllTags(droplet.Tags, []string{"talis"}) { + publicIP := "" + privateIP := "" + if len(droplet.Networks.V4) > 0 { + for _, network := range droplet.Networks.V4 { + if network.Type == "public" && publicIP == "" { + publicIP = network.IPAddress + } + if network.Type == "private" && privateIP == "" { + privateIP = network.IPAddress + } + } + } + + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Region", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + droplet.Name, + droplet.Status, + droplet.Region.Slug, + publicIP, + droplet.Created) + cnt++ + } + } + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + page, err := resp.Links.CurrentPage() + if err != nil { + return fmt.Errorf("failed to paginate droplets list: %w", err) + } + + opts.Page = page + 1 + } + + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *DOClient) GetConfig() Config { + return c.cfg +} diff --git a/tools/talis/cmd/evnode-txsim/main.go b/tools/talis/cmd/evnode-txsim/main.go new file mode 100644 index 0000000000..d8fd1a9435 --- /dev/null +++ 
b/tools/talis/cmd/evnode-txsim/main.go @@ -0,0 +1,242 @@ +// Command evnode-txsim drives the ev-node aggregator's HTTP /tx endpoint +// at a fixed rate for a fixed duration. Stdlib-only; deployed by talis +// onto a dedicated load-gen instance that lives separately from +// ev-node and bridge boxes so its own CPU / network do not bias the +// measurement. +// +// Output format (final line, machine-grep'able): +// +// TXSIM: target=http://X:7777/tx duration=30s tx_size=10240 +// concurrency=8 sent=300000 ok=300000 err=0 +// wall_s=30.00 sent_per_s=10000 mb_per_s=97.66 +// rtt_p50_us=145 rtt_p99_us=820 +// +// Concurrency model: N goroutines, each posting txs at most as fast as +// the server accepts them. There's no client-side rate cap by design — +// the goal is to back-pressure the server and measure its absorption +// rate, not to simulate a paced client. +package main + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/binary" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sort" + "sync" + "sync/atomic" + "syscall" + "time" +) + +type cliFlags struct { + target string + txSize int + concurrency int + duration time.Duration + timeout time.Duration + verbose bool +} + +func parseFlags() cliFlags { + var c cliFlags + flag.StringVar(&c.target, "target", envOr("TARGET", "http://127.0.0.1:7777/tx"), + "ev-node tx-ingest endpoint (POST raw bytes)") + flag.IntVar(&c.txSize, "tx-size", intFromEnv("TX_SIZE", 10*1024), + "per-tx payload size in bytes") + flag.IntVar(&c.concurrency, "concurrency", intFromEnv("CONCURRENCY", 8), + "number of concurrent posters") + flag.DurationVar(&c.duration, "duration", durFromEnv("DURATION", 30*time.Second), + "how long to pump (0 = until SIGTERM)") + flag.DurationVar(&c.timeout, "timeout", durFromEnv("TIMEOUT", 5*time.Second), + "per-request HTTP timeout") + flag.BoolVar(&c.verbose, "verbose", false, "log every error to stderr") + flag.Parse() + return c +} + +func envOr(name, def string) string { + if v := 
os.Getenv(name); v != "" { + return v + } + return def +} + +func intFromEnv(name string, def int) int { + if v := os.Getenv(name); v != "" { + var n int + if _, err := fmt.Sscanf(v, "%d", &n); err == nil && n > 0 { + return n + } + } + return def +} + +func durFromEnv(name string, def time.Duration) time.Duration { + if v := os.Getenv(name); v != "" { + if d, err := time.ParseDuration(v); err == nil { + return d + } + } + return def +} + +func main() { + cli := parseFlags() + if err := run(cli); err != nil { + fmt.Fprintln(os.Stderr, "fatal:", err) + os.Exit(1) + } +} + +func run(cli cliFlags) error { + if cli.txSize <= 16 { + return fmt.Errorf("--tx-size must be > 16 (header is 16 bytes: seq + emit_time)") + } + + // Pre-fill a randomness pool sized for cheap per-tx sampling. At + // 100 MiB/s of tx bytes, calling rand.Read per tx is itself the + // hot path; sampling from a fixed pool is dramatically cheaper + // and the experiment doesn't care about cryptographic uniqueness. + poolSize := 8 * cli.txSize + if poolSize < (1 << 20) { + poolSize = 1 << 20 + } + pool := make([]byte, poolSize) + if _, err := rand.Read(pool); err != nil { + return fmt.Errorf("seed random pool: %w", err) + } + + httpClient := &http.Client{Timeout: cli.timeout} + + ctx, cancel := context.WithCancel(context.Background()) + if cli.duration > 0 { + ctx, cancel = context.WithTimeout(ctx, cli.duration) + } + defer cancel() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + cancel() + }() + + var ( + sent atomic.Uint64 + ok atomic.Uint64 + fail atomic.Uint64 + totalBytes atomic.Uint64 + ) + + // RTT samples are collected in per-worker buffers and merged at + // the end. With a 30 s pump at 10 KiB/s per worker × 8 workers, + // that's ~30 000 samples per worker, which fits in memory comfortably. 
+ rttBufs := make([][]int64, cli.concurrency) // microseconds + + wg := sync.WaitGroup{} + start := time.Now() + for i := range cli.concurrency { + wg.Add(1) + go func(idx int) { + defer wg.Done() + rtts := make([]int64, 0, 16384) + defer func() { rttBufs[idx] = rtts }() + + buf := make([]byte, cli.txSize) + poolLen := len(pool) + var localSeq uint64 + for { + if ctx.Err() != nil { + return + } + localSeq++ + now := time.Now() + binary.BigEndian.PutUint64(buf, uint64(idx)<<32|localSeq) + binary.BigEndian.PutUint64(buf[8:], uint64(now.UnixNano())) + offset := int((localSeq * 7919) % uint64(poolLen-cli.txSize+16)) + copy(buf[16:], pool[offset:offset+cli.txSize-16]) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, cli.target, bytes.NewReader(buf)) + if err != nil { + sent.Add(1) + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "request build:", err) + } + continue + } + req.Header.Set("Content-Type", "application/octet-stream") + + rttStart := time.Now() + resp, err := httpClient.Do(req) + rtt := time.Since(rttStart) + sent.Add(1) + if err != nil { + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "post:", err) + } + continue + } + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + ok.Add(1) + totalBytes.Add(uint64(cli.txSize)) + } else { + fail.Add(1) + if cli.verbose { + fmt.Fprintln(os.Stderr, "http:", resp.StatusCode) + } + } + rtts = append(rtts, rtt.Microseconds()) + } + }(i) + } + + wg.Wait() + elapsed := time.Since(start) + + // Merge RTT buffers and compute percentiles. Sorting in place is + // fine — the buffer goroutines have all returned by now. + var allRTT []int64 + totalRTT := 0 + for _, b := range rttBufs { + totalRTT += len(b) + } + allRTT = make([]int64, 0, totalRTT) + for _, b := range rttBufs { + allRTT = append(allRTT, b...) 
+ } + sort.Slice(allRTT, func(i, j int) bool { return allRTT[i] < allRTT[j] }) + + p50 := percentileMicros(allRTT, 0.50) + p99 := percentileMicros(allRTT, 0.99) + + mb := float64(totalBytes.Load()) / (1024 * 1024) + mbPerS := mb / elapsed.Seconds() + sentPerS := float64(sent.Load()) / elapsed.Seconds() + + fmt.Printf("TXSIM: target=%s duration=%s tx_size=%d concurrency=%d sent=%d ok=%d err=%d wall_s=%.2f sent_per_s=%.0f mb_per_s=%.2f rtt_p50_us=%d rtt_p99_us=%d\n", + cli.target, cli.duration, cli.txSize, cli.concurrency, + sent.Load(), ok.Load(), fail.Load(), + elapsed.Seconds(), sentPerS, mbPerS, + p50, p99, + ) + return nil +} + +func percentileMicros(sorted []int64, p float64) int64 { + if len(sorted) == 0 { + return 0 + } + idx := int(float64(len(sorted)-1) * p) + return sorted[idx] +} diff --git a/tools/talis/config.go b/tools/talis/config.go new file mode 100644 index 0000000000..b3ec4e3ed5 --- /dev/null +++ b/tools/talis/config.go @@ -0,0 +1,469 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "sync/atomic" +) + +type NodeType string + +const ( + // Validator represents a validator node in the network. + Validator NodeType = "validator" + // Bridge represents a bridge node in the network. + Bridge NodeType = "bridge" + // Light represents a light node in the network. + Light NodeType = "light" + // Observability represents a observability monitoring node for Prometheus/Grafana. + Observability NodeType = "observability" + // Encoder represents a dedicated fibre-txsim encoder node. + Encoder NodeType = "encoder" + + // Evnode represents an ev-node aggregator wired to celestia-node-fiber. + // Runs the evnode-fibre runner from + // tools/celestia-node-fiber/cmd/evnode-fibre. One per experiment in + // the smallest topology. + Evnode NodeType = "evnode" + + // Loadgen is a dedicated load-generator instance running + // tools/talis/cmd/evnode-txsim. 
Lives on its own EC2 instance to + // keep its CPU + network footprint from biasing measurements on + // the ev-node box. + Loadgen NodeType = "loadgen" +) + +var ( + valCount = atomic.Uint32{} + nodeCount = atomic.Uint32{} + lightCount = atomic.Uint32{} + observabilityCount = atomic.Uint32{} + encoderCount = atomic.Uint32{} + evnodeCount = atomic.Uint32{} + loadgenCount = atomic.Uint32{} +) + +// NodeName returns the name of the node based on its type and index. The +// name is in the format "-". For example, "validator-0" or +// "bridge-1". Index is a global counter that is incremented for each node created. +func NodeName(nodeType NodeType) string { + index := 0 + switch nodeType { + case Validator: + index = int(valCount.Add(1)) - 1 + case Bridge: + index = int(nodeCount.Add(1)) - 1 + case Light: + index = int(lightCount.Add(1)) - 1 + case Observability: + index = int(observabilityCount.Add(1)) - 1 + case Encoder: + index = int(encoderCount.Add(1)) - 1 + case Evnode: + index = int(evnodeCount.Add(1)) - 1 + case Loadgen: + index = int(loadgenCount.Add(1)) - 1 + default: + panic(fmt.Sprintf("unknown node type: %s", nodeType)) + } + return fmt.Sprintf("%s-%d", nodeType, index) +} + +// Provider simply marks the provider the instance config should target. +type Provider string + +const ( + DigitalOcean Provider = "digitalocean" + GoogleCloud Provider = "googlecloud" + AWS Provider = "aws" +) + +// Instance represents a single instance in the network. It contains +// information about the instance such as its public and private IP address, +// provider, region, and name. It also contains a list of tags that are +// attached to the instance. +type Instance struct { + NodeType NodeType `json:"node_type"` + // PublicIP is the public IP address of the instance. + PublicIP string `json:"public_ip"` + // PrivateIP is the private IP address of the instance. + PrivateIP string `json:"private_ip"` + // Provider is the provider of the instance. 
For example, "digitalocean" or + // "aws". + Provider Provider `json:"provider"` + // Slug is a provider specific string that determines what type of instance + // the node is ran on. + Slug string `json:"slug"` + // Region is the region in which the instance is created. For example, + // "nyc1" for DigitalOcean or "us-east-1" for AWS. + Region string `json:"region"` + // Zone is the provider-specific availability zone within Region. Empty + // means "any zone". Currently only populated for AWS (e.g. "us-east-1a") + // so instances can be pinned to a single AZ for free intra-AZ traffic + // and minimum latency within a cluster placement group. + Zone string `json:"zone,omitempty"` + // Name is the name of the instance. This is used to identify the instance + // in the network and is also used as the hostname of the instance. It + // therefore should be unique. + Name string `json:"name"` + // Tags are attached to every spun up instance. They are used to identify + // the instance in the network, associate the instance with an experiment + // and network, and mark as a talis instance. 
+ Tags []string `json:"tags"` +} + +func NewBaseInstance(nodeType NodeType) Instance { + name := NodeName(nodeType) + return Instance{ + NodeType: nodeType, + PublicIP: "TBD", + PrivateIP: "TBD", + Name: name, + Tags: []string{"talis"}, + } +} + +func (i Instance) WithExperiment(experimentID, chainID string) Instance { + index := extractIndexFromName(i.Name) + experimentTag := ExperimentTag(i.NodeType, index, experimentID, chainID) + i.Tags = append(i.Tags, experimentTag) + return i +} + +func extractIndexFromName(name string) int { + parts := strings.Split(name, "-") + if len(parts) < 2 { + return 0 + } + index, _ := strconv.Atoi(parts[len(parts)-1]) + return index +} + +func ExperimentTag(nodeType NodeType, index int, experimentID, chainID string) string { + return fmt.Sprintf("%s-%d-%s-%s", nodeType, index, experimentID, chainID) +} + +func GetExperimentTag(tags []string) string { + for _, tag := range tags { + if strings.HasPrefix(tag, "validator-") || strings.HasPrefix(tag, "bridge-") || strings.HasPrefix(tag, "light-") || strings.HasPrefix(tag, "observability-") || strings.HasPrefix(tag, "encoder-") || strings.HasPrefix(tag, "evnode-") || strings.HasPrefix(tag, "loadgen-") { + return tag + } + } + return "" +} + +// Config describes the desired state of the network. +type Config struct { + Validators []Instance `json:"validators"` + Bridges []Instance `json:"bridges,omitempty"` + Lights []Instance `json:"lights,omitempty"` + Observability []Instance `json:"observability,omitempty"` + Encoders []Instance `json:"encoders,omitempty"` + Evnodes []Instance `json:"evnodes,omitempty"` + Loadgens []Instance `json:"loadgens,omitempty"` + + // ChainID is the chain ID of the network. This is used to identify the + // network and is also used as the chain ID of the network. It is + // automatically prefixed with "talis-" by default. This is required to + // increase the square size beyond the v4 limit of 128. 
+ ChainID string `json:"chain_id"` + // Experiment is the experiment ID of the network. This is used to index which experiment + // the network is associated with. + Experiment string `json:"experiment"` + // SSHPubKeyPath is the path to the SSH public key that will be added to + // every instance. + SSHPubKeyPath string `json:"ssh_pub_key_path"` + // SSHKeyName is the name of the SSH key that will be used to access the + // instances. This is used to identify the SSH key in the provider's + // dashboard. If it's not already kept by the provider, the key will be + // added. + SSHKeyName string `json:"ssh_key_name"` + // DigitalOceanToken is used to authenticate with DigitalOcean. It can be + // provided via an env var or flag. + DigitalOceanToken string `json:"digitalocean_token"` + GoogleCloudProject string `json:"google_cloud_project"` + GoogleCloudKeyJSONPath string `json:"google_cloud_key_json_path"` + // AWSRegion is the default region for launching EC2 instances. When set + // (and DigitalOceanToken / GoogleCloudProject are empty), NewClient + // uses AWS as the compute provider. Credentials come from the standard + // AWS SDK credential chain (env vars, ~/.aws/credentials, IAM role). + AWSRegion string `json:"aws_region"` + // AWSZone is the availability zone within AWSRegion. All AWS instances + // get pinned to this AZ + a cluster placement group so intra-cluster + // traffic stays free and latency is minimised. Empty means "default AZ". 
+ AWSZone string `json:"aws_zone"` + S3Config S3Config `json:"s3_config"` +} + +func NewConfig(experiment, chainID string) Config { + return Config{ + Validators: []Instance{}, + Bridges: []Instance{}, + Lights: []Instance{}, + Observability: []Instance{}, + Encoders: []Instance{}, + Evnodes: []Instance{}, + Loadgens: []Instance{}, + Experiment: experiment, + ChainID: TalisChainID(chainID), + S3Config: S3Config{ + AccessKeyID: os.Getenv(EnvVarAWSAccessKeyID), + SecretAccessKey: os.Getenv(EnvVarAWSSecretAccessKey), + BucketName: os.Getenv(EnvVarS3Bucket), + Region: os.Getenv(EnvVarAWSRegion), + Endpoint: os.Getenv(EnvVarS3Endpoint), + }, + } +} + +func (cfg Config) WithSSHPubKeyPath(path string) Config { + cfg.SSHPubKeyPath = path + return cfg +} + +func (cfg Config) WithSSHKeyName(name string) Config { + cfg.SSHKeyName = name + return cfg +} + +func (cfg Config) WithDigitalOceanToken(token string) Config { + cfg.DigitalOceanToken = token + return cfg +} + +func (cfg Config) WithGoogleCloudProject(project string) Config { + cfg.GoogleCloudProject = project + return cfg +} + +func (cfg Config) WithGoogleCloudKeyJSONPath(keyJSONPath string) Config { + cfg.GoogleCloudKeyJSONPath = keyJSONPath + return cfg +} + +func (cfg Config) WithAWSRegion(region string) Config { + cfg.AWSRegion = region + return cfg +} + +func (cfg Config) WithAWSZone(zone string) Config { + cfg.AWSZone = zone + return cfg +} + +func (cfg Config) WithS3Config(s3 S3Config) Config { + cfg.S3Config = s3 + return cfg +} + +func (cfg Config) WithDigitalOceanValidator(region string) Config { + i := NewDigitalOceanValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + +func (cfg Config) WithDigitalOceanObservability(region string) Config { + i := NewDigitalOceanObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) 
WithGoogleCloudValidator(region string) Config { + i := NewGoogleCloudValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + +func (cfg Config) WithGoogleCloudObservability(region string) Config { + i := NewGoogleCloudObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) WithDigitalOceanEncoder(region string) Config { + i := NewDigitalOceanEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithGoogleCloudEncoder(region string) Config { + i := NewGoogleCloudEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithAWSValidator(region string) Config { + i := NewAWSValidator(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Validators = append(cfg.Validators, i) + return cfg +} + +func (cfg Config) WithAWSObservability(region string) Config { + i := NewAWSObservability(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Observability = append(cfg.Observability, i) + return cfg +} + +func (cfg Config) WithAWSEncoder(region string) Config { + i := NewAWSEncoder(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Encoders = append(cfg.Encoders, i) + return cfg +} + +func (cfg Config) WithDigitalOceanBridge(region string) Config { + i := NewDigitalOceanBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithGoogleCloudBridge(region string) Config { + i := NewGoogleCloudBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithAWSBridge(region string) Config { + i := NewAWSBridge(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Bridges = 
append(cfg.Bridges, i) + return cfg +} + +func (cfg Config) WithDigitalOceanEvnode(region string) Config { + i := NewDigitalOceanEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithGoogleCloudEvnode(region string) Config { + i := NewGoogleCloudEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithAWSEvnode(region string) Config { + i := NewAWSEvnode(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Evnodes = append(cfg.Evnodes, i) + return cfg +} + +func (cfg Config) WithDigitalOceanLoadgen(region string) Config { + i := NewDigitalOceanLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithGoogleCloudLoadgen(region string) Config { + i := NewGoogleCloudLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithAWSLoadgen(region string) Config { + i := NewAWSLoadgen(region).WithExperiment(cfg.Experiment, cfg.ChainID) + cfg.Loadgens = append(cfg.Loadgens, i) + return cfg +} + +func (cfg Config) WithChainID(chainID string) Config { + cfg.ChainID = TalisChainID(chainID) + return cfg +} + +// Save writes cfg as indented JSON to <root>/config.json, creating root if needed. +func (cfg Config) Save(root string) error { + // Create the directory if it doesn't exist + if err := os.MkdirAll(root, 0o755); err != nil { + return err + } + + // Create the config file path + configFilePath := filepath.Join(root, "config.json") + + // O_TRUNC is required: Save is called repeatedly (e.g. after `up` fills in + // IPs) and without truncation a shorter payload would leave stale JSON + // bytes from the previous write at the end of the file. 0o644 because the + // config is data, not an executable. + cfgFile, err := os.OpenFile(configFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0o644) + if err != nil { + return err + } + defer cfgFile.Close() + + // Write the config to the file + encoder := json.NewEncoder(cfgFile) + encoder.SetIndent("", " ") + return encoder.Encode(cfg) +} + +// LoadConfig loads the config from the specified path.
+func LoadConfig(rootDir string) (Config, error) { + cfgFile, err := os.Open(filepath.Join(rootDir, "config.json")) + if err != nil { + return Config{}, err + } + defer cfgFile.Close() + + var cfg Config + decoder := json.NewDecoder(cfgFile) + if err := decoder.Decode(&cfg); err != nil { + return Config{}, err + } + + return cfg, nil +} + +func TalisChainID(chainID string) string { + return "talis-" + chainID +} + +func (cfg Config) UpdateInstance(name, publicIP, privateIP string) (Config, error) { + for i := range cfg.Validators { + if cfg.Validators[i].Name == name { + cfg.Validators[i].PublicIP = publicIP + cfg.Validators[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Bridges { + if cfg.Bridges[i].Name == name { + cfg.Bridges[i].PublicIP = publicIP + cfg.Bridges[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Lights { + if cfg.Lights[i].Name == name { + cfg.Lights[i].PublicIP = publicIP + cfg.Lights[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Observability { + if cfg.Observability[i].Name == name { + cfg.Observability[i].PublicIP = publicIP + cfg.Observability[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Encoders { + if cfg.Encoders[i].Name == name { + cfg.Encoders[i].PublicIP = publicIP + cfg.Encoders[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Evnodes { + if cfg.Evnodes[i].Name == name { + cfg.Evnodes[i].PublicIP = publicIP + cfg.Evnodes[i].PrivateIP = privateIP + return cfg, nil + } + } + for i := range cfg.Loadgens { + if cfg.Loadgens[i].Name == name { + cfg.Loadgens[i].PublicIP = publicIP + cfg.Loadgens[i].PrivateIP = privateIP + return cfg, nil + } + } + return cfg, fmt.Errorf("instance %s not found", name) +} diff --git a/tools/talis/deployment.go b/tools/talis/deployment.go new file mode 100644 index 0000000000..72d0cfebee --- /dev/null +++ b/tools/talis/deployment.go @@ -0,0 +1,928 @@ +package main + +import ( + "context" + 
"errors" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/digitalocean/godo" + "github.com/spf13/cobra" + "golang.org/x/oauth2" +) + +func upCmd() *cobra.Command { + var rootDir string + var cfgPath string + var SSHPubKeyPath string + var SSHKeyName string + var DOAPIToken string + var GCProject string + var GCKeyJSONPath string + var AWSRegion string + var workers int + + cmd := &cobra.Command{ + Use: "up", + Short: "Uses the config to spin up a distributed network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + // overwrite the config values if flags or env vars are set + // flag > env > config + cfg.SSHKeyName = resolveValue(SSHKeyName, EnvVarSSHKeyName, cfg.SSHKeyName) + cfg.SSHPubKeyPath = resolveValue(SSHPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath) + cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken) + cfg.GoogleCloudProject = resolveValue(GCProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion) + + if err := checkForRunningExperiments(cmd.Context(), cfg); err != nil { + return err + } + + client, err := NewClient(cfg) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + if err := client.Up(cmd.Context(), workers); err != nil { + return fmt.Errorf("failed to spin up network: %w", err) + } + + if err := client.GetConfig().Save(rootDir); err 
!= nil { + return fmt.Errorf("failed to save config: %w", err) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", "", "path to the user's SSH public key") + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", "", "name for the SSH key") + cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)") + cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)") + cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)") + cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} + +func deployCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + directUpload bool + ignoreFailed bool + workers int + ) + + cmd := &cobra.Command{ + Use: "deploy", + Short: "Uses the config to spin up a distributed network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + tarPath := filepath.Join(rootDir, "payload.tar.gz") + log.Printf("Compressing payload to %s\n", tarPath) + tarCmd := exec.Command("tar", "-czf", tarPath, "-C", rootDir, "payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") // suppress macOS ._* resource-fork files + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress payload: %w, output: %s", err, string(output)) + } + log.Printf("✅ Payload compressed to %s\n", tarPath) + + cfg, 
err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + log.Printf("Sending payload to validators...") + if directUpload { + if err := deployPayloadDirect(cfg.Validators, tarPath, SSHKeyPath, "/root", "payload/validator_init.sh", 7*time.Minute, workers); err != nil { + if !ignoreFailed { + return err + } + log.Printf("continuing despite validator deployment errors: %v", err) + } + if err := deployObservabilityIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload); err != nil { + return err + } + if err := deployEncodersIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployBridgesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployEvnodesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + return deployLoadgensIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers) + } + if err := deployPayloadViaS3(cmd.Context(), rootDir, cfg.Validators, tarPath, SSHKeyPath, "/root", "payload/validator_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + if !ignoreFailed { + return err + } + log.Printf("continuing despite validator deployment errors: %v", err) + } + if err := deployObservabilityIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload); err != nil { + return err + } + if err := deployEncodersIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployBridgesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + if err := deployEvnodesIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers); err != nil { + return err + } + return 
deployLoadgensIfConfigured(cmd.Context(), cfg, rootDir, SSHKeyPath, directUpload, workers) + }, + } + + homeDir, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get user home directory: %v", err) + } + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-pub-key-path", "s", defaultKeyPath, "path to the user's SSH key") + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") + cmd.Flags().BoolVar(&directUpload, "direct-payload-upload", false, "Upload payload directly to nodes instead of using S3") + cmd.Flags().BoolVar(&ignoreFailed, "ignore-failed-validators", false, "Continue deploying observability monitoring even if some validators fail") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} + +func deployObservabilityIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool) error { + if len(cfg.Observability) == 0 { + return nil + } + + observabilityNode := cfg.Observability[0] + + observabilityTarPath := filepath.Join(rootDir, "observability-payload.tar.gz") + log.Printf("Compressing observability payload to %s\n", observabilityTarPath) + tarCmd := exec.Command("tar", "-czf", observabilityTarPath, "-C", filepath.Join(rootDir, "payload"), "observability") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") // suppress macOS ._* resource-fork files + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress observability payload: %w, output: %s", err, string(output)) + } + log.Printf("✅ Observability payload compressed to %s\n", observabilityTarPath) + + log.Printf("Sending observability payload to observability monitoring node...") + var err error + if directUpload { + err = 
deployObservabilityPayloadDirect(observabilityNode, observabilityTarPath, sshKeyPath, "/root", 15*time.Minute) + } else { + err = deployObservabilityPayloadViaS3(ctx, rootDir, observabilityNode, observabilityTarPath, sshKeyPath, "/root", 15*time.Minute, cfg.S3Config) + } + if err != nil { + return err + } + + printGrafanaInfo(observabilityNode, rootDir) + return nil +} + +// deployBridgesIfConfigured tars the bridge-payload directory (celestia +// binary + genesis + bridge_init.sh) and ships it to each bridge +// instance. The init script then runs `celestia bridge init/start` in +// a tmux session and generates a JWT to /root/bridge-jwt.txt. +func deployBridgesIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Bridges) == 0 { + return nil + } + + bridgePayloadDir := filepath.Join(rootDir, "bridge-payload") + if _, err := os.Stat(bridgePayloadDir); os.IsNotExist(err) { + return fmt.Errorf("bridge-payload directory not found — run 'talis genesis' first") + } + + bridgeTarPath := filepath.Join(rootDir, "bridge-payload.tar.gz") + log.Printf("Compressing bridge payload to %s\n", bridgeTarPath) + tarCmd := exec.Command("tar", "-czf", bridgeTarPath, "-C", rootDir, "bridge-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress bridge payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending bridge payload to %d bridge(s)...\n", len(cfg.Bridges)) + + if directUpload { + if err := deployPayloadDirect(cfg.Bridges, bridgeTarPath, sshKeyPath, "/root", "bridge-payload/bridge_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("bridge deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Bridges, bridgeTarPath, sshKeyPath, "/root", "bridge-payload/bridge_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return 
fmt.Errorf("bridge deployment: %w", err) + } + } + + log.Printf("Bridge deployment complete\n") + return nil +} + +// deployLoadgensIfConfigured tars the loadgen-payload directory +// (evnode-txsim binary + templated init script) and ships it to each +// load-gen instance. The init script poll-waits for ev-node's /stats +// endpoint to become reachable, then bursts traffic at /tx for the +// configured duration, writing a final TXSIM: line to /root/txsim.log. +func deployLoadgensIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Loadgens) == 0 { + return nil + } + + lgPayloadDir := filepath.Join(rootDir, "loadgen-payload") + if _, err := os.Stat(lgPayloadDir); os.IsNotExist(err) { + return fmt.Errorf("loadgen-payload directory not found — run 'talis genesis' first") + } + + lgTarPath := filepath.Join(rootDir, "loadgen-payload.tar.gz") + log.Printf("Compressing loadgen payload to %s\n", lgTarPath) + tarCmd := exec.Command("tar", "-czf", lgTarPath, "-C", rootDir, "loadgen-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress loadgen payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending loadgen payload to %d loadgen(s)...\n", len(cfg.Loadgens)) + + if directUpload { + if err := deployPayloadDirect(cfg.Loadgens, lgTarPath, sshKeyPath, "/root", "loadgen-payload/loadgen_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("loadgen deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Loadgens, lgTarPath, sshKeyPath, "/root", "loadgen-payload/loadgen_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("loadgen deployment: %w", err) + } + } + + log.Printf("loadgen deployment complete (init script will poll-wait for ev-node /stats then start txsim)\n") + return nil +} + +// 
deployEvnodesIfConfigured tars the evnode-payload directory (evnode +// binary + templated init script) and ships it to each ev-node +// instance. The init script poll-waits for the bridge JWT + fibre +// keyring, both scp'd in separately, before starting the daemon. +func deployEvnodesIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Evnodes) == 0 { + return nil + } + + evPayloadDir := filepath.Join(rootDir, "evnode-payload") + if _, err := os.Stat(evPayloadDir); os.IsNotExist(err) { + return fmt.Errorf("evnode-payload directory not found — run 'talis genesis' first") + } + + evTarPath := filepath.Join(rootDir, "evnode-payload.tar.gz") + log.Printf("Compressing evnode payload to %s\n", evTarPath) + tarCmd := exec.Command("tar", "-czf", evTarPath, "-C", rootDir, "evnode-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress evnode payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending evnode payload to %d evnode(s)...\n", len(cfg.Evnodes)) + + if directUpload { + if err := deployPayloadDirect(cfg.Evnodes, evTarPath, sshKeyPath, "/root", "evnode-payload/evnode_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("evnode deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Evnodes, evTarPath, sshKeyPath, "/root", "evnode-payload/evnode_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("evnode deployment: %w", err) + } + } + + log.Printf("evnode deployment complete (init script will poll-wait for bridge JWT + fibre keyring on each box)\n") + return nil +} + +// deployEncodersIfConfigured creates a lightweight encoder-payload tar and deploys +// it to all configured encoder instances. 
+func deployEncodersIfConfigured(ctx context.Context, cfg Config, rootDir, sshKeyPath string, directUpload bool, workers int) error { + if len(cfg.Encoders) == 0 { + return nil + } + + encoderPayloadDir := filepath.Join(rootDir, "encoder-payload") + if _, err := os.Stat(encoderPayloadDir); os.IsNotExist(err) { + return fmt.Errorf("encoder-payload directory not found — run 'talis genesis' first") + } + + encoderTarPath := filepath.Join(rootDir, "encoder-payload.tar.gz") + log.Printf("Compressing encoder payload to %s\n", encoderTarPath) + tarCmd := exec.Command("tar", "-czf", encoderTarPath, "-C", rootDir, "encoder-payload") + tarCmd.Env = append(os.Environ(), "COPYFILE_DISABLE=1") + if output, err := tarCmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to compress encoder payload: %w, output: %s", err, string(output)) + } + log.Printf("Sending encoder payload to %d encoder(s)...\n", len(cfg.Encoders)) + + if directUpload { + if err := deployPayloadDirect(cfg.Encoders, encoderTarPath, sshKeyPath, "/root", "encoder-payload/encoder_init.sh", 7*time.Minute, workers); err != nil { + return fmt.Errorf("encoder deployment: %w", err) + } + } else { + if err := deployPayloadViaS3(ctx, rootDir, cfg.Encoders, encoderTarPath, sshKeyPath, "/root", "encoder-payload/encoder_init.sh", 7*time.Minute, cfg.S3Config, workers); err != nil { + return fmt.Errorf("encoder deployment: %w", err) + } + } + + log.Printf("Encoder deployment complete\n") + return nil +} + +// printGrafanaInfo prints the Grafana URL and a pointer to where credentials can be found. 
+func printGrafanaInfo(node Instance, rootDir string) { + envPath := filepath.Join(rootDir, "payload", "observability", "docker", ".env") + fmt.Println() + fmt.Println("Grafana available at:") + fmt.Printf(" http://%s:3000\n", node.PublicIP) + // The admin password lives in the observability .env file; the original + // format string had no verb for envPath, printing "%!(EXTRA string=…)". + fmt.Printf(" Credentials: admin / see %s\n", envPath) + fmt.Println() +} + +// deployPayloadDirect copies a local archive to each remote host, unpacks it, +// and launches the specified remote script inside a detached tmux session. +// It runs all operations in parallel and returns an error if any host fails. +func deployPayloadDirect( + ips []Instance, + archivePath string, // e.g. "./payload.tar.gz" + sshKeyPath string, // e.g. "~/.ssh/id_ed25519" + remoteDir string, // e.g. "/root" + remoteScript string, // e.g. "start.sh" + timeout time.Duration, // per‐host timeout + workers int, // number of concurrent workers +) error { + var wg sync.WaitGroup + errCh := make(chan error, len(ips)) + archiveFile := path.Base(archivePath) + + counter := atomic.Uint32{} + + workerChan := make(chan struct{}, workers) + for _, inst := range ips { + workerChan <- struct{}{} + wg.Add(1) + go func(inst Instance) { + defer func() { + <-workerChan + wg.Done() + }() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + scp := exec.CommandContext(ctx, + "scp", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + archivePath, + fmt.Sprintf("root@%s:%s/", inst.PublicIP, remoteDir), + ) + if out, err := scp.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] scp error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + + log.Printf("sent payload to instance 📦 %s: %s\n", inst.Name, inst.PublicIP) + + remoteCmd := strings.Join([]string{ + // unpack + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + // make sure script is executable + fmt.Sprintf("chmod +x %s", filepath.Join(remoteDir, 
remoteScript)), + // start in a named, detached tmux session + fmt.Sprintf("tmux new-session -d -s app '%s'", filepath.Join(remoteDir, remoteScript)), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + log.Printf("started instance ✅ %s: %s (total %d/%d)\n", inst.Name, inst.PublicIP, counter.Add(1), len(ips)) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + var sb strings.Builder + sb.WriteString("deployment errors:\n") + for _, e := range errs { + sb.WriteString("- " + e.Error() + "\n") + } + return errors.New(sb.String()) + } + return nil +} + +// deployPayloadViaS3 uploads the payload to S3 first, then has each node download it +func deployPayloadViaS3( + ctx context.Context, + rootDir string, + ips []Instance, + archivePath string, + sshKeyPath string, + remoteDir string, + remoteScript string, + timeout time.Duration, + s3cfg S3Config, + workers int, +) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + s3Client, err := createS3Client(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to create S3 client: %w", err) + } + + log.Printf("Uploading payload to S3...\n") + s3URL, err := uploadToS3(ctx, s3Client, s3cfg, archivePath) + if err != nil { + return fmt.Errorf("failed to upload to S3: %w", err) + } + + log.Printf("✅ Payload uploaded to S3: %s\n", s3URL) + + var wg sync.WaitGroup + errCh := make(chan error, len(ips)) + counter := atomic.Uint32{} + workersChan := make(chan struct{}, workers) + + for _, inst := range ips { + wg.Add(1) + go 
func(inst Instance) { + workersChan <- struct{}{} + defer func() { + wg.Done() + <-workersChan + }() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + archiveFile := filepath.Base(archivePath) + remoteCmd := strings.Join([]string{ + fmt.Sprintf("curl -L '%s' -o %s", s3URL, filepath.Join(remoteDir, archiveFile)), + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + fmt.Sprintf("chmod +x %s", filepath.Join(remoteDir, remoteScript)), + fmt.Sprintf("tmux new-session -d -s app '%s'", filepath.Join(remoteDir, remoteScript)), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + log.Printf("started instance ✅ %s: %s (total %d/%d)\n", inst.Name, inst.PublicIP, counter.Add(1), len(ips)) + }(inst) + } + + wg.Wait() + close(errCh) + + errs := make([]error, 0) + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + var sb strings.Builder + sb.WriteString("deployment errors:\n") + for _, e := range errs { + sb.WriteString("- " + e.Error() + "\n") + } + return errors.New(sb.String()) + } + return nil +} + +// deployObservabilityPayloadDirect copies an observability archive to the observability monitoring host, unpacks it, +// installs prerequisites, and launches the observability stack in a detached tmux session. 
+func deployObservabilityPayloadDirect( + inst Instance, + archivePath string, + sshKeyPath string, + remoteDir string, + timeout time.Duration, +) error { + archiveFile := path.Base(archivePath) + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + scp := exec.CommandContext(ctx, + "scp", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + archivePath, + fmt.Sprintf("root@%s:%s/", inst.PublicIP, remoteDir), + ) + if out, err := scp.CombinedOutput(); err != nil { + return fmt.Errorf("[%s:%s] scp error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + } + + log.Printf("sent observability payload to instance 📦 %s: %s\n", inst.Name, inst.PublicIP) + + remoteCmd := strings.Join([]string{ + fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir), + fmt.Sprintf("chmod +x %s %s", + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + ), + filepath.Join(remoteDir, "observability/install_metrics.sh"), + filepath.Join(remoteDir, "observability/start_metrics.sh"), + }, " && ") + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + remoteCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out) + } + log.Printf("started observability instance ✅ %s: %s\n", inst.Name, inst.PublicIP) + + return nil +} + +// deployObservabilityPayloadViaS3 uploads the observability payload to S3 first, then has the node download it. 
func deployObservabilityPayloadViaS3(
	ctx context.Context,
	rootDir string,
	inst Instance,
	archivePath string,
	sshKeyPath string,
	remoteDir string,
	timeout time.Duration,
	s3cfg S3Config,
) error {
	cfg, err := LoadConfig(rootDir)
	if err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}
	s3Client, err := createS3Client(ctx, cfg)
	if err != nil {
		return fmt.Errorf("failed to create S3 client: %w", err)
	}

	log.Printf("Uploading observability payload to S3...\n")
	s3URL, err := uploadToS3(ctx, s3Client, s3cfg, archivePath)
	if err != nil {
		return fmt.Errorf("failed to upload observability payload to S3: %w", err)
	}

	log.Printf("✅ Observability payload uploaded to S3: %s\n", s3URL)

	// The timeout only covers the remote download/unpack/start phase,
	// not the S3 upload above.
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Remote side: fetch the (presigned) archive URL with curl, unpack,
	// then run install followed by start; "&&" aborts on first failure.
	archiveFile := filepath.Base(archivePath)
	remoteCmd := strings.Join([]string{
		fmt.Sprintf("curl -L '%s' -o %s", s3URL, filepath.Join(remoteDir, archiveFile)),
		fmt.Sprintf("tar -xzf %s -C %s", filepath.Join(remoteDir, archiveFile), remoteDir),
		fmt.Sprintf("chmod +x %s %s",
			filepath.Join(remoteDir, "observability/install_metrics.sh"),
			filepath.Join(remoteDir, "observability/start_metrics.sh"),
		),
		filepath.Join(remoteDir, "observability/install_metrics.sh"),
		filepath.Join(remoteDir, "observability/start_metrics.sh"),
	}, " && ")

	ssh := exec.CommandContext(ctx,
		"ssh",
		"-i", sshKeyPath,
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		fmt.Sprintf("root@%s", inst.PublicIP),
		remoteCmd,
	)
	if out, err := ssh.CombinedOutput(); err != nil {
		return fmt.Errorf("[%s:%s] ssh error in region %s: %v\n%s", inst.Name, inst.PublicIP, inst.Region, err, out)
	}
	log.Printf("started observability instance ✅ %s: %s\n", inst.Name, inst.PublicIP)

	return nil
}

// uploadToS3 uploads localPath to the configured bucket (keyed by base name)
// and returns a presigned GET URL for it.
func uploadToS3(ctx context.Context, client *s3.Client, cfg S3Config, localPath string) (string, error) {
	file, err := os.Open(localPath)
	if err != nil {
		return "", fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()

	filename := filepath.Base(localPath)
	uploader := manager.NewUploader(client)

	if _, err := uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: &cfg.BucketName,
		Key:    &filename,
		Body:   file,
	}); err != nil {
		return "", fmt.Errorf("failed to upload file: %w", err)
	}

	// Return a presigned GET URL valid for an hour so remote hosts can curl
	// the object without the bucket/object needing public-read ACLs. Works
	// for real AWS S3 (where public access is blocked by default) and for
	// S3-compatible providers like DigitalOcean Spaces.
	presign := s3.NewPresignClient(client)
	req, err := presign.PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: &cfg.BucketName,
		Key:    &filename,
	}, s3.WithPresignExpires(time.Hour))
	if err != nil {
		return "", fmt.Errorf("failed to presign GET: %w", err)
	}

	return req.URL, nil
}

// downCmd builds the "down" subcommand, which tears down the network described
// by the config (or, with --all, every talis instance across all providers).
func downCmd() *cobra.Command {
	var rootDir string
	var cfgPath string
	var SSHPubKeyPath string
	var SSHKeyName string
	var DOAPIToken string
	var GCProject string
	var GCKeyJSONPath string
	var AWSRegion string
	var workers int
	var all bool

	cmd := &cobra.Command{
		Use:   "down",
		Short: "Uses the config to spin down a distributed network",
		Long:  "Destroys the Talis network with the provided configuration.",
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg, err := LoadConfig(rootDir)
			// With --all a missing config is tolerated: cfg stays zero-valued
			// and credentials come from flags/env below.
			if err != nil && !all {
				return fmt.Errorf("failed to load config: %w", err)
			}

			// overwrite the config values if flags or env vars are set
			// flag > env > config
			cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken)
			cfg.GoogleCloudProject = resolveValue(GCProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject)
			cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath)
			cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion)

			if all {
				return destroyAllInstances(cmd.Context(), cfg, workers)
			}

			if len(cfg.Validators) == 0 {
				return fmt.Errorf("no validators found in config")
			}

			cfg.SSHKeyName = resolveValue(SSHKeyName, EnvVarSSHKeyName, cfg.SSHKeyName)
			cfg.SSHPubKeyPath = resolveValue(SSHPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath)

			client, err := NewClient(cfg)
			if err != nil {
				return fmt.Errorf("failed to create client: %w", err)
			}

			if err := client.Down(cmd.Context(), workers); err != nil {
				return fmt.Errorf("failed to spin down network: %w", err)
			}

			return nil
		},
	}

	cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", "", "path to the user's SSH public key")
	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize")
	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config")
	cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", "", "name for the SSH key")
	cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)")
	cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)")
	cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)")
	cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)")
	cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)")
	cmd.Flags().BoolVar(&all, "all", false, "destroy all talis instances across all providers and all experiments")

	return cmd
}

// resolveValue selects a value based on priority: flag > env > config
func resolveValue(flagVal, envKey, configVal string) string {
	if flagVal != "" {
		return flagVal
	}
	if env := os.Getenv(envKey); env != "" {
		// Warn when the env var silently overrides a config-file value.
		if configVal != "" {
			log.Printf("Using %s from environment variable instead of config", envKey)
		}
		return env
	}
	return configVal
}

// listCmd builds the "list" subcommand, which enumerates live instances so
// operators can see whether experiments are currently running.
func listCmd() *cobra.Command {
	var rootDir string
	var cfgPath string
	var DOAPIToken string
	var GCProject string
	var GCKeyJSONPath string
	var AWSRegion string

	cmd := &cobra.Command{
		Use:   "list",
		Short: "Lists the instances in the network",
		Long:  "Lists the instances in the network. Can be used to see if someone is running experiments at the moment",
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg, err := LoadConfig(rootDir)
			if err != nil {
				return fmt.Errorf("failed to load config: %w", err)
			}

			// overwrite the config values if flags or env vars are set
			// flag > env > config
			cfg.DigitalOceanToken = resolveValue(DOAPIToken, EnvVarDigitalOceanToken, cfg.DigitalOceanToken)
			cfg.GoogleCloudProject = resolveValue(GCProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject)
			cfg.GoogleCloudKeyJSONPath = resolveValue(GCKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath)
			cfg.AWSRegion = resolveValue(AWSRegion, EnvVarAWSRegion, cfg.AWSRegion)

			client, err := NewClient(cfg)
			if err != nil {
				return fmt.Errorf("failed to create client: %w", err)
			}

			return client.List(cmd.Context())
		},
	}

	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize")
	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config")
	cmd.Flags().StringVarP(&DOAPIToken, "do-api-token", "t", "", "digital ocean api token (defaults to config or env)")
	cmd.Flags().StringVar(&GCProject, "gc-project", "", "google cloud project (defaults to config or env)")
	cmd.Flags().StringVar(&GCKeyJSONPath, "gc-key-json-path", "", "path to google cloud service account key JSON file (defaults to config or env)")
	cmd.Flags().StringVar(&AWSRegion, "aws-region", "", "AWS default region for EC2 (defaults to config or AWS_DEFAULT_REGION)")

	return cmd
}

// checkForRunningExperiments probes every configured provider for instances
// already tagged with this experiment/chainID pair. Provider probe failures
// are logged as warnings (best-effort); a positive hit anywhere is an error.
func checkForRunningExperiments(ctx context.Context, cfg Config) error {
	var hasRunningExperiments bool

	if cfg.DigitalOceanToken != "" {
		tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken})
		doClient := godo.NewClient(oauth2.NewClient(ctx, tokenSource))
		running, err := checkForRunningDOExperiments(ctx, doClient, cfg.Experiment, cfg.ChainID)
		if err != nil {
			log.Printf("⚠️ Warning: failed to check DigitalOcean for running experiments: %v", err)
		} else if running {
			hasRunningExperiments = true
			log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in DigitalOcean", cfg.Experiment, cfg.ChainID)
		}
	}

	if cfg.GoogleCloudProject != "" {
		opts, err := gcClientOptions(cfg)
		if err != nil {
			log.Printf("⚠️ Warning: failed to create Google Cloud client options: %v", err)
		} else {
			running, err := checkForRunningGCExperiments(ctx, cfg.GoogleCloudProject, opts, cfg.Experiment, cfg.ChainID)
			if err != nil {
				log.Printf("⚠️ Warning: failed to check Google Cloud for running experiments: %v", err)
			} else if running {
				hasRunningExperiments = true
				log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in Google Cloud", cfg.Experiment, cfg.ChainID)
			}
		}
	}

	if cfg.AWSRegion != "" {
		running, err := checkForRunningAWSExperiments(ctx, true, cfg.Experiment, cfg.ChainID)
		if err != nil {
			log.Printf("⚠️ Warning: failed to check AWS for running experiments: %v", err)
		} else if running {
			hasRunningExperiments = true
			log.Printf("⚠️ Found experiment '%s' with chainID '%s' already running in AWS", cfg.Experiment, cfg.ChainID)
		}
	}

	if hasRunningExperiments {
		return fmt.Errorf("experiment '%s' with chainID '%s' is already running", cfg.Experiment, cfg.ChainID)
	}

	return nil
}

// destroyAllInstances tears down every talis instance on each configured
// provider concurrently and aggregates per-provider failures into one error.
func destroyAllInstances(ctx context.Context, cfg Config, workers int) error {
	var wg sync.WaitGroup
	// One slot per potential provider goroutine (DO + GCP + AWS). Sized
	// to match max writers so a three-way all-fail doesn't deadlock on
	// errCh<- (wg.Wait() below blocks on the goroutine, which blocks on
	// the channel send if capacity < writers).
	errCh := make(chan error, 3)

	if cfg.DigitalOceanToken != "" {
		wg.Go(func() {
			log.Println("Destroying all DigitalOcean instances...")
			tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cfg.DigitalOceanToken})
			doClient := godo.NewClient(oauth2.NewClient(ctx, tokenSource))
			if _, err := destroyAllTalisDroplets(ctx, doClient, workers); err != nil {
				errCh <- fmt.Errorf("DigitalOcean: %w", err)
			}
		})
	}

	if cfg.GoogleCloudProject != "" {
		wg.Go(func() {
			log.Println("Destroying all Google Cloud instances...")
			opts, err := gcClientOptions(cfg)
			if err != nil {
				errCh <- fmt.Errorf("google Cloud client options: %w", err)
				return
			}
			if _, err := destroyAllTalisGCInstances(ctx, cfg.GoogleCloudProject, opts, workers); err != nil {
				errCh <- fmt.Errorf("google Cloud: %w", err)
			}
		})
	}

	if cfg.AWSRegion != "" || os.Getenv(EnvVarAWSAccessKeyID) != "" {
		wg.Go(func() {
			log.Println("Destroying all AWS instances...")
			if _, err := destroyAllTalisAWSInstances(ctx, workers); err != nil {
				errCh <- fmt.Errorf("AWS: %w", err)
			}
		})
	}

	wg.Wait()
	close(errCh)

	errs := make([]error, 0, 3)
	for err := range errCh {
		errs = append(errs, err)
	}

	if len(errs) > 0 {
		var sb strings.Builder
		sb.WriteString("errors destroying instances:\n")
		for _, err := range errs {
			sb.WriteString("- " + err.Error() + "\n")
		}
		return errors.New(sb.String())
	}

	log.Println("✅ All talis instances destroyed")
	return nil
}
diff --git a/tools/talis/digital_ocean.go b/tools/talis/digital_ocean.go
new file mode 100644
index 0000000000..bb17711d00
--- /dev/null
+++ b/tools/talis/digital_ocean.go
@@ -0,0 +1,529 @@
package main

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"net/http"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/digitalocean/godo"
)

// Default DigitalOcean droplet sizes and image for each instance role.
const (
	DODefaultValidatorSlug     = "c2-16vcpu-32gb"
	DODefaultEncoderSlug       = "c2-8vcpu-16gb"
	DODefaultBridgeSlug        = "c2-8vcpu-16gb"
	DODefaultEvnodeSlug        = "c2-8vcpu-16gb"
	DODefaultLoadgenSlug       = "c2-8vcpu-16gb"
	DODefaultObservabilitySlug = "s-2vcpu-4gb"
	DODefaultImage             = "ubuntu-24-04-x64"
	RandomRegion               = "random"
)

var (
	// DORegions lists the candidate regions used by RandomDORegion.
	DORegions = []string{
		"nyc1", "nyc3", "tor1", "sfo2", "sfo3", "ams3", "sgp1", "lon1", "fra1", "syd1",
	}

	// NOTE(review): the three maps below look like per-region capacity
	// weightings for large/medium/small deployments; no consumer is visible
	// in this chunk — confirm before relying on the counts.
	DOLargeRegions = map[string]int{
		"nyc3": 6, "tor1": 6, "sfo2": 2, "sfo3": 6, "ams3": 8, "sgp1": 4, "lon1": 8, "fra1": 6, "syd1": 6,
	}

	DOMediumRegions = map[string]int{
		"nyc3": 2, "tor1": 2, "sfo3": 2, "ams3": 2, "lon1": 2,
	}

	DOSmallRegions = map[string]int{
		"ams3": 1, "tor1": 1, "nyc3": 1, "lon1": 1,
	}
)

// NewDigitalOceanValidator returns a validator Instance in the given region
// (random region when empty or "random").
func NewDigitalOceanValidator(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Validator)
	i.Provider = DigitalOcean
	i.Slug = DODefaultValidatorSlug
	i.Region = region
	return i
}

// NewDigitalOceanEncoder returns an encoder Instance in the given region
// (random region when empty or "random").
func NewDigitalOceanEncoder(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Encoder)
	i.Provider = DigitalOcean
	i.Slug = DODefaultEncoderSlug
	i.Region = region
	return i
}

// NewDigitalOceanBridge returns a bridge Instance in the given region
// (random region when empty or "random").
func NewDigitalOceanBridge(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Bridge)
	i.Provider = DigitalOcean
	i.Slug = DODefaultBridgeSlug
	i.Region = region
	return i
}

// NewDigitalOceanEvnode returns an ev-node Instance in the given region
// (random region when empty or "random").
func NewDigitalOceanEvnode(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Evnode)
	i.Provider = DigitalOcean
	i.Slug = DODefaultEvnodeSlug
	i.Region = region
	return i
}

// NewDigitalOceanLoadgen returns a load-generator Instance in the given region
// (random region when empty or "random").
func NewDigitalOceanLoadgen(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Loadgen)
	i.Provider = DigitalOcean
	i.Slug = DODefaultLoadgenSlug
	i.Region = region
	return i
}

// NewDigitalOceanObservability returns an observability Instance in the given
// region (random region when empty or "random").
func NewDigitalOceanObservability(region string) Instance {
	if region == "" || region == RandomRegion {
		region = RandomDORegion()
	}
	i := NewBaseInstance(Observability)
	i.Provider = DigitalOcean
	i.Slug = DODefaultObservabilitySlug
	i.Region = region
	return i
}

// RandomDORegion picks a uniformly random region from DORegions.
func RandomDORegion() string {
	return DORegions[rand.Intn(len(DORegions))]
}

// GetDOSSHKeyMeta checks if the provided raw SSH public key is registered in DigitalOcean
// and returns its ID and Name. If not found, returns an error instructing to upload the key.
func GetDOSSHKeyMeta(ctx context.Context, client *godo.Client, publicKey string) (godo.Key, error) {
	pubKeySplit := strings.Split(publicKey, " ")
	if len(pubKeySplit) <= 1 {
		return godo.Key{}, fmt.Errorf("invalid public key format")
	}
	// Normalize to "<type><base64>" (no separator) for comparison below.
	publicKey = strings.Join(pubKeySplit[:2], "")

	// Pagination options
	opt := &godo.ListOptions{PerPage: 200}

	for {
		keys, resp, err := client.Keys.List(ctx, opt)
		if err != nil {
			return godo.Key{}, fmt.Errorf("failed to list SSH keys: %w", err)
		}

		for _, key := range keys {
			// only compare the first two parts of the public key. The third part is the host
			// which can be ignored.
			if strings.Join(strings.Split(key.PublicKey, " ")[:2], "") == publicKey {
				return key, nil
			}
		}

		// Break if we're at the last page
		if resp.Links.IsLastPage() {
			break
		}
		// Advance to next page
		page, err := resp.Links.CurrentPage()
		if err != nil {
			return godo.Key{}, fmt.Errorf("unable to parse pagination: %w", err)
		}
		opt.Page = page + 1
	}

	return godo.Key{}, fmt.Errorf(
		"ssh public key not found in DigitalOcean. Please upload it via the control panel or API before proceeding",
	)
}

// CreateDroplets launches all droplets in parallel, waits for their IPs, and
// returns the filled-out []Instance slice.
func CreateDroplets(ctx context.Context, client *godo.Client, insts []Instance, key godo.Key, workers int) ([]Instance, error) {
	type result struct {
		inst         Instance
		err          error
		timeRequired time.Duration
	}

	// Instances whose experiment tag already exists are skipped, not recreated.
	insts, existing, err := filterExistingInstances(ctx, client, insts)
	if err != nil {
		return nil, err
	}

	if len(existing) > 0 {
		log.Println("Existing instances found, so they are not being created.")
		for _, v := range existing {
			log.Println("Skipping", v.Name, v.PublicIP, v.Tags)
		}
	}

	total := len(insts)
	results := make(chan result, total)
	// workerChan is a counting semaphore bounding concurrent creations.
	workerChan := make(chan struct{}, workers)
	var wg sync.WaitGroup
	wg.Add(total)

	for _, v := range insts {
		go func() {
			workerChan <- struct{}{}
			defer func() {
				<-workerChan
				wg.Done()
			}()

			// Per-droplet budget: creation plus IP assignment.
			ctx, cancel := context.WithTimeout(ctx, 7*time.Minute)
			defer cancel()

			req := &godo.DropletCreateRequest{
				Name:   v.Name,
				Region: v.Region,
				Size:   v.Slug,
				Image: godo.DropletCreateImage{
					Slug: DODefaultImage,
				},
				SSHKeys: []godo.DropletCreateSSHKey{{ID: key.ID, Fingerprint: key.Fingerprint}},
				Tags:    v.Tags,
			}

			start := time.Now()

			log.Println("Creating droplet", v.Name, "in region", v.Region, start.Format(time.RFC3339))

			d, _, err := client.Droplets.Create(ctx, req)
			if err != nil {
				results <- result{inst: v, err: fmt.Errorf("create %s: %w", v.Name, err)}
				return
			}

			pubIP, privIP, err := waitForNetworkIP(ctx, client, d.ID)
			if err != nil {
				results <- result{inst: v, err: fmt.Errorf("public IP %s: %w", v.Name, err)}
				return
			}

			v.PublicIP = pubIP
			v.PrivateIP = privIP
			results <- result{inst: v, err: nil, timeRequired: time.Since(start)}
		}()
	}

	// Close the results channel once every worker has reported.
	go func() {
		wg.Wait()
		close(results)
	}()

	var created []Instance
	for res := range results {
		if res.err != nil {
			fmt.Printf("❌ %s failed after %v %v\n", res.inst.Name, res.timeRequired, res.err)
		} else {
			created = append(created, res.inst)
			fmt.Printf("✅ %s is up (public=%s) in %v\n",
				res.inst.Name, res.inst.PublicIP, res.timeRequired)
		}
		fmt.Printf("---- Progress: %d/%d\n", len(created), total)
	}

	return created, nil
}

// filterExistingInstances splits insts into (to-create, already-existing) by
// checking each instance's experiment tag against live droplets. Instances
// without an experiment tag are always treated as new.
func filterExistingInstances(ctx context.Context, client *godo.Client, insts []Instance) ([]Instance, []Instance, error) {
	droplets, err := listAllDroplets(ctx, client)
	if err != nil {
		return nil, nil, fmt.Errorf("listing before delete: %w", err)
	}

	var existing []Instance //nolint:prealloc
	var newInsts []Instance //nolint:prealloc
	for _, inst := range insts {
		var exists bool
		experimentTag := GetExperimentTag(inst.Tags)
		if experimentTag == "" {
			newInsts = append(newInsts, inst)
			continue
		}
		for _, d := range droplets {
			if slices.Contains(d.Tags, experimentTag) {
				exists = true
				break
			}
		}

		if !exists {
			newInsts = append(newInsts, inst)
			continue
		}

		existing = append(existing, inst)
	}

	return newInsts, existing, nil
}

// waitForNetworkIP polls until the droplet has both a public and a private
// IPv4 address assigned, or ctx is done.
+func waitForNetworkIP(ctx context.Context, client *godo.Client, dropletID int) (pub, priv string, err error) { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return "", "", ctx.Err() + case <-ticker.C: + d, _, err := client.Droplets.Get(ctx, dropletID) + if err != nil { + return "", "", err + } + for _, net := range d.Networks.V4 { + if net.Type == "public" { + pub = net.IPAddress + } + if net.Type == "private" { + priv = net.IPAddress + } + if pub != "" && priv != "" { + return pub, priv, nil + } + } + } + } +} + +func DestroyDroplets(ctx context.Context, client *godo.Client, insts []Instance, workers int) ([]Instance, error) { + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return nil, fmt.Errorf("listing droplets: %w", err) + } + + return destroyDropletsByMatch(ctx, client, droplets, insts, workers, matchByExperimentTag) +} + +func destroyAllTalisDroplets(ctx context.Context, client *godo.Client, workers int) ([]Instance, error) { + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return nil, fmt.Errorf("listing droplets: %w", err) + } + + var talisInstances []Instance + for _, d := range droplets { + if slices.Contains(d.Tags, "talis") { + publicIP := "" + for _, net := range d.Networks.V4 { + if net.Type == "public" { + publicIP = net.IPAddress + break + } + } + talisInstances = append(talisInstances, Instance{ + Name: d.Name, + PublicIP: publicIP, + }) + } + } + + if len(talisInstances) == 0 { + log.Println("No talis droplets found to destroy") + return nil, nil + } + + return destroyDropletsByMatch(ctx, client, droplets, talisInstances, workers, matchByName) +} + +type dropletMatcher func(inst Instance, d godo.Droplet) bool + +func matchByExperimentTag(inst Instance, d godo.Droplet) bool { + experimentTag := GetExperimentTag(inst.Tags) + return experimentTag != "" && slices.Contains(d.Tags, experimentTag) +} + +func matchByName(inst Instance, d godo.Droplet) 
bool { + return d.Name == inst.Name +} + +func destroyDropletsByMatch(ctx context.Context, client *godo.Client, droplets []godo.Droplet, insts []Instance, workers int, matcher dropletMatcher) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + + fmt.Println("⏳ Deleting droplet", inst.Name, inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + var matchIDs []int + for _, d := range droplets { + if matcher(inst, d) { + matchIDs = append(matchIDs, d.ID) + } + } + + if len(matchIDs) > 1 { + results <- result{inst: inst, err: fmt.Errorf("multiple droplets match %s", inst.Name)} + } + + if len(matchIDs) == 0 { + results <- result{inst: inst, err: fmt.Errorf("no droplets found for %s", inst.Name)} + return + } + + for _, id := range matchIDs { + _, err := client.Droplets.Delete(delCtx, id) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("delete %s: %w", inst.Name, err)} + return + } + + if err := waitForDeletion(delCtx, client, id); err != nil { + results <- result{inst: inst, err: fmt.Errorf("confirm delete %s: %w", inst.Name, err)} + return + } + + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + } + }(inst) + } + + go func() { + wg.Wait() + close(results) + }() + + var removed []Instance + var failed []result + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed to delete after %v: %v\n", + res.inst.Name, res.timeRequired, res.err) + failed = append(failed, res) + } else { + removed = append(removed, res.inst) + fmt.Printf("✅ %s deleted (took %v)\n", res.inst.Name, res.timeRequired) + } + fmt.Printf("---- Progress: 
%d/%d\n", len(removed)+len(failed), len(insts)) + } + + return removed, nil +} + +// waitForDeletion polls until Get() returns a 404 Not Found or ctx is done. +func waitForDeletion(ctx context.Context, client *godo.Client, dropletID int) error { + ticker := time.NewTicker(4 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + _, resp, err := client.Droplets.Get(ctx, dropletID) + if err != nil { + // godo returns a non-nil resp when it's an HTTP error + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + // other errors: continue polling or exit? + return err + } + // still exists; try again + } + } +} + +// listAllDroplets pages through your account’s droplets. +func listAllDroplets(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) { + var all []godo.Droplet + opt := &godo.ListOptions{PerPage: 200} + for { + page, resp, err := client.Droplets.List(ctx, opt) + if err != nil { + return nil, err + } + all = append(all, page...) + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + pageNum, _ := resp.Links.CurrentPage() + opt.Page = pageNum + 1 + } + return all, nil +} + +// hasAllTags returns true if candidate contains every tag in want. 
+func hasAllTags(candidate, want []string) bool { + tagset := make(map[string]struct{}, len(candidate)) + for _, t := range candidate { + tagset[t] = struct{}{} + } + for _, w := range want { + if _, ok := tagset[w]; !ok { + return false + } + } + return true +} + +func checkForRunningDOExperiments(ctx context.Context, client *godo.Client, experimentID, chainID string) (bool, error) { + if client == nil { + return false, nil + } + + droplets, err := listAllDroplets(ctx, client) + if err != nil { + return false, fmt.Errorf("failed to list droplets: %w", err) + } + + for _, d := range droplets { + if slices.Contains(d.Tags, "talis") && hasExperimentTag(d.Tags, experimentID, chainID) { + return true, nil + } + } + + return false, nil +} + +func hasExperimentTag(tags []string, experimentID, chainID string) bool { + for _, tag := range tags { + if (strings.HasPrefix(tag, "validator-") || strings.HasPrefix(tag, "bridge-") || strings.HasPrefix(tag, "light-") || strings.HasPrefix(tag, "encoder-")) && + strings.Contains(tag, experimentID) && strings.Contains(tag, chainID) { + return true + } + } + return false +} diff --git a/tools/talis/download.go b/tools/talis/download.go new file mode 100644 index 0000000000..99284d0326 --- /dev/null +++ b/tools/talis/download.go @@ -0,0 +1,261 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/spf13/cobra" +) + +func downloadCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + nodes string + table string + workers int + noCompress bool + ) + + cmd := &cobra.Command{ + Use: "download -n -t
", + Short: "Download a file from the Talis network", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators (nodes) found in config") + } + + nodes, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + + if len(nodes) == 0 { + return fmt.Errorf("no matching nodes found") + } + + baseTracesRemotePath := "/root/.celestia-app/data/traces" + remotePaths := []string{} + switch table { + case "logs": + remotePaths = append(remotePaths, "/root/logs") + case "latency-monitor": + remotePaths = append(remotePaths, "/root/talis-latency-monitor.log") + case "txsim": + remotePaths = append(remotePaths, "/root/talis-txsim.log") + case "*", "": + path := filepath.Join(baseTracesRemotePath, "*") + remotePaths = append(remotePaths, path) + default: + if strings.Contains(table, ",") { + tables := strings.SplitSeq(table, ",") + for table := range tables { + remotePaths = append(remotePaths, filepath.Join(baseTracesRemotePath, table+".jsonl")) + } + } else { + remotePaths = append(remotePaths, filepath.Join(baseTracesRemotePath, table+".jsonl")) + } + } + + workers := make(chan struct{}, workers) + var wg sync.WaitGroup + for _, node := range nodes { + wg.Add(1) + go func() { + workers <- struct{}{} + defer func() { + wg.Done() + <-workers + }() + localPath := filepath.Join(rootDir, "data/", node.Name) + if strings.Contains(table, ",") { + filepath.Join(localPath, "traces") + } + if err := os.MkdirAll(localPath, 0o755); err != nil { + fmt.Printf("failed to create directory %s: %v\n", localPath, err) + return + } + if noCompress { + for _, remotePath := range remotePaths { + err := sftpDownload(remotePath, localPath, "root", node.PublicIP, SSHKeyPath) + if err != nil { + fmt.Printf("failed to download from %s: %v\n", 
node.PublicIP, err) + } + } + } else { + if err := compressAndDownload(table, localPath, "root", node.PublicIP, SSHKeyPath); err != nil { + fmt.Printf("failed to download from %s: %v\n", node.PublicIP, err) + return + } + } + if table == "logs" { + // usually, the logs from tmux also include color codes. So we will clean them up. + logFile := filepath.Join(localPath, "logs") + content, err := os.ReadFile(logFile) + if err != nil { + fmt.Printf("Error reading file: %v\n", err) + return + } + cleaned := stripANSI(string(content)) + // Write back to the same file + err = os.WriteFile(logFile, []byte(cleaned), 0o644) + if err != nil { + fmt.Printf("Error writing file: %v\n", err) + return + } + } + }() + } + + wg.Wait() + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "path to your network config file") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "*", "specify the node(s) to download from. * or specific nodes.") + cmd.Flags().StringVarP(&table, "tables", "t", "*", "specify tables to download (comma-separated) or logs to download logs. default is all tables.") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + cmd.Flags().BoolVar(&noCompress, "no-compress", false, "disable remote compression before download (compression is enabled by default)") + + cmd.AddCommand(downloadS3DataCmd()) + + return cmd +} + +// compressAndDownload compresses data on the remote server using xz -6 +// before downloading, then extracts locally. This significantly reduces +// bandwidth for JSONL trace files which compress very well (often 15-25x). 
func compressAndDownload(table, localPath, user, host, sshKeyPath string) error {
	baseTracesRemotePath := "/root/.celestia-app/data/traces"
	remoteArchive := "/tmp/talis-traces.tar.xz"

	// Build the remote tar|xz pipeline for the selected table(s); mirrors
	// the selector cases handled by downloadCmd.
	var compressCmd string
	switch table {
	case "logs":
		compressCmd = fmt.Sprintf("tar -cf - -C /root logs | xz -6 -T0 > %s", remoteArchive)
	case "latency-monitor":
		compressCmd = fmt.Sprintf("tar -cf - -C /root talis-latency-monitor.log | xz -6 -T0 > %s", remoteArchive)
	case "txsim":
		compressCmd = fmt.Sprintf("tar -cf - -C /root talis-txsim.log | xz -6 -T0 > %s", remoteArchive)
	case "*", "":
		compressCmd = fmt.Sprintf("tar -cf - -C %s . | xz -6 -T0 > %s", baseTracesRemotePath, remoteArchive)
	default:
		var files []string
		if strings.Contains(table, ",") {
			for t := range strings.SplitSeq(table, ",") {
				files = append(files, strings.TrimSpace(t)+".jsonl")
			}
		} else {
			files = append(files, table+".jsonl")
		}
		compressCmd = fmt.Sprintf("tar -cf - -C %s %s | xz -6 -T0 > %s",
			baseTracesRemotePath, strings.Join(files, " "), remoteArchive)
	}

	fmt.Printf("[%s] Compressing data on remote server...\n", host)
	out, err := sshExec(user, host, sshKeyPath, compressCmd)
	if err != nil {
		return fmt.Errorf("remote compression failed: %v\n%s", err, string(out))
	}

	fmt.Printf("[%s] Downloading compressed archive...\n", host)
	if err := sftpDownload(remoteArchive, localPath, user, host, sshKeyPath); err != nil {
		// Best-effort cleanup of the remote archive on failure.
		_, _ = sshExec(user, host, sshKeyPath, "rm -f "+remoteArchive)
		return fmt.Errorf("download failed: %v", err)
	}

	localArchive := filepath.Join(localPath, filepath.Base(remoteArchive))
	fmt.Printf("[%s] Extracting archive...\n", host)
	extractCmd := exec.Command("tar", "-xJf", localArchive, "-C", localPath)
	if extractOut, err := extractCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("local extraction failed: %v\n%s", err, string(extractOut))
	}

	// Remove both the local and remote archives; failures are ignored
	// since the payload has already been extracted.
	os.Remove(localArchive)
	_, _ = sshExec(user, host, sshKeyPath, "rm -f "+remoteArchive)

	fmt.Printf("[%s] Download complete.\n", host)
	return nil
}

// sshExec runs a command on a remote host via SSH and returns the combined output.
func sshExec(user, host, sshKeyPath, command string) ([]byte, error) {
	cmd := exec.Command("ssh",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-i", sshKeyPath,
		fmt.Sprintf("%s@%s", user, host),
		command,
	)
	return cmd.CombinedOutput()
}

// sftpDownload copies remotePath from the host into localPath via sftp.
func sftpDownload(remotePath, localPath, user, host, sshKeyPath string) error {
	target := fmt.Sprintf("%s@%s:%s", user, host, remotePath)

	// Use `-r` always — safe for both files and dirs in practice
	cmd := exec.Command("sftp",
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-i", sshKeyPath,
		"-r", target,
		localPath,
	)

	fmt.Printf("Running: sftp -i %s -r %s %s\n", sshKeyPath, target, localPath)
	return cmd.Run()
}

// filterMatchingInstances returns the instances whose names match the
// wildcard pattern (see matchPattern).
func filterMatchingInstances(insts []Instance, pattern string) ([]Instance, error) {
	var filtered []Instance
	for _, inst := range insts {
		match, err := matchPattern(pattern, inst.Name)
		if err != nil {
			return nil, err
		}
		if match {
			filtered = append(filtered, inst)
		}
	}
	return filtered, nil
}

// matchPattern compiles a wildcard pattern (e.g., "validator-*")
// to a regex and returns whether it matches the input string.
func matchPattern(pattern, input string) (bool, error) {
	// Escape regex special characters
	escaped := regexp.QuoteMeta(pattern)

	// Convert wildcard '*' to '.*'
	regexPattern := "^" + strings.ReplaceAll(escaped, "\\*", ".*") + "$"

	re, err := regexp.Compile(regexPattern)
	if err != nil {
		return false, err
	}

	return re.MatchString(input), nil
}

// Regex to match ANSI escape codes
var ansiEscape = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]`)

// stripANSI removes ANSI escape codes from the input string, returning a plain text version without formatting codes.
+func stripANSI(input string) string { + return ansiEscape.ReplaceAllString(input, "") +} diff --git a/tools/talis/download_monitoring.go b/tools/talis/download_monitoring.go new file mode 100644 index 0000000000..a73187f6df --- /dev/null +++ b/tools/talis/download_monitoring.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/spf13/cobra" +) + +func downloadResourcesCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + nodes string + output string + workers int + ) + + cmd := &cobra.Command{ + Use: "download-resources", + Short: "Download monitoring JSONL files from remote validators", + Long: `Downloads /root/monitor.jsonl from each matching validator. +Files are saved to {output}/{validator-name}/monitor.jsonl.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + validators, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + if len(validators) == 0 { + return fmt.Errorf("no matching validators found for pattern %q", nodes) + } + + sem := make(chan struct{}, workers) + var wg sync.WaitGroup + var mu sync.Mutex + downloaded := 0 + + for _, val := range validators { + wg.Add(1) + go func(val Instance) { + sem <- struct{}{} + defer func() { + wg.Done() + <-sem + }() + + localDir := filepath.Join(output, val.Name) + if err := os.MkdirAll(localDir, 0o755); err != nil { + fmt.Printf("[%s] failed to create directory %s: %v\n", val.Name, localDir, err) + return + } + + err := sftpDownload("/root/monitor.jsonl", localDir, "root", val.PublicIP, resolvedSSHKeyPath) + if err != nil { + 
fmt.Printf("[%s] failed to download monitor.jsonl: %v\n", val.Name, err) + return + } + + mu.Lock() + downloaded++ + mu.Unlock() + fmt.Printf("[%s] downloaded monitor.jsonl\n", val.Name) + }(val) + } + + wg.Wait() + + fmt.Printf("\nDownloaded monitoring data from %d/%d validator(s) to %s/\n", downloaded, len(validators), output) + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "validator-*", "glob pattern for which validators to download from") + cmd.Flags().StringVarP(&output, "output", "o", "./data/monitoring/resources", "local directory to save downloaded files") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent download workers") + + return cmd +} diff --git a/tools/talis/env.go b/tools/talis/env.go new file mode 100644 index 0000000000..fb5a6eb30f --- /dev/null +++ b/tools/talis/env.go @@ -0,0 +1,137 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +func initEnvCmd() *cobra.Command { + var provider string + + cmd := &cobra.Command{ + Use: "init-env", + Short: "Generate a .env template file", + Long: "Generate a .env template file with the required environment variables for the specified cloud provider.", + RunE: func(cmd *cobra.Command, args []string) error { + if provider == "" { + provider = "digitalocean" + } + + var envContent string + + switch provider { + case "digitalocean": + envContent = generateDigitalOceanEnv() + case "googlecloud": + envContent = generateGoogleCloudEnv() + case "aws": + envContent = generateAWSEnv() + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + + // Check if .env already exists + if _, err := os.Stat(".env"); err == nil { + return fmt.Errorf(".env file already exists. 
Delete it first or edit manually") + } + + // Write .env file + if err := os.WriteFile(".env", []byte(envContent), 0o600); err != nil { + return fmt.Errorf("failed to write .env file: %w", err) + } + + fmt.Printf("✅ Created .env template for %s\n", provider) + fmt.Println("\nNext steps:") + fmt.Println("1. Edit .env and fill in your credentials") + fmt.Println("2. Run: talis init -c -e --with-observability --provider", provider) + + return nil + }, + } + + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "Cloud provider (digitalocean, googlecloud, aws)") + + return cmd +} + +func generateDigitalOceanEnv() string { + return `# Provider Configuration +PROVIDER=digitalocean + +# DigitalOcean Configuration +# Get your API token from: https://cloud.digitalocean.com/account/api/tokens +DIGITALOCEAN_TOKEN= + +# SSH Configuration (optional - will use defaults if not set) +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3/DigitalOcean Spaces Configuration (optional - for payload distribution) +# Create a Space and generate API keys at: https://cloud.digitalocean.com/spaces +# AWS_DEFAULT_REGION=fra1 +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_S3_BUCKET= +# AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com +` +} + +func generateGoogleCloudEnv() string { + return `# Provider Configuration +PROVIDER=googlecloud + +# Google Cloud Configuration +# Project ID from: https://console.cloud.google.com/ +GOOGLE_CLOUD_PROJECT= + +# Service account key JSON path +# Create at: https://console.cloud.google.com/iam-admin/serviceaccounts +# Download the JSON key file and set the path below +GOOGLE_CLOUD_KEY_JSON_PATH= + +# SSH Configuration (optional - will use defaults if not set) +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3/DigitalOcean Spaces Configuration (optional - for payload distribution) +# You can use DigitalOcean Spaces for S3-compatible storage +# 
AWS_DEFAULT_REGION=fra1 +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_S3_BUCKET= +# AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com +` +} + +func generateAWSEnv() string { + return `# Provider Configuration +PROVIDER=aws + +# AWS Credentials (used for both EC2 and the S3 payload bucket) +# Create an access key at: https://console.aws.amazon.com/iam/home#/security_credentials +# The user must have EC2 permissions (RunInstances, Terminate, Describe*, +# ImportKeyPair, CreateSecurityGroup/AuthorizeSecurityGroupIngress, +# CreatePlacementGroup, DescribeVpcs/DescribeSubnets, DescribeImages) and +# S3 (PutObject, GetObject) on the payload bucket. +# +# You can also leave these unset and use 'aws configure --profile ' + +# AWS_PROFILE= — the Go SDK picks up shared credentials automatically. +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= + +# Region for EC2 and (by default) the S3 payload bucket. +AWS_DEFAULT_REGION=us-east-1 + +# SSH Configuration +# TALIS_SSH_KEY_PATH is the local path to your SSH public key. The key is +# imported to EC2 (once per region) under TALIS_SSH_KEY_NAME. +# TALIS_SSH_KEY_PATH=~/.ssh/id_ed25519.pub +# TALIS_SSH_KEY_NAME=your-username + +# S3 Payload Bucket (optional — omit and use 'deploy --direct-payload-upload') +# Must be an S3 bucket you own in AWS_DEFAULT_REGION. +# AWS_S3_BUCKET= +` +} diff --git a/tools/talis/execution.go b/tools/talis/execution.go new file mode 100644 index 0000000000..79bdcedb35 --- /dev/null +++ b/tools/talis/execution.go @@ -0,0 +1,159 @@ +package main + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "os/exec" + "strings" + "sync" + "sync/atomic" + "time" +) + +// runScriptInTMux SSHes into each remote host in parallel, and launches +// the specified remoteScript inside a detached tmux session named sessionName. +// It uses the same timeout per host and returns a combined error if any fail. +func runScriptInTMux( + instances []Instance, + sshKeyPath string, // e.g. 
"~/.ssh/id_ed25519" + remoteScript string, // e.g. "source /root/start.sh" or "celestia-appd start" + sessionName string, // e.g. "app" + timeout time.Duration, +) error { + var wg sync.WaitGroup + errCh := make(chan error, len(instances)) + counter := atomic.Uint32{} + + for _, inst := range instances { + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Launch in tmux and capture output to a per-session log. + logPath := fmt.Sprintf("/root/talis-%s.log", sessionName) + scriptPath := fmt.Sprintf("/root/talis-%s.sh", sessionName) + encodedScript := base64.StdEncoding.EncodeToString([]byte("#!/usr/bin/env bash\n" + remoteScript + "\n")) + fullCmd := fmt.Sprintf( + "printf '%%s' %q | base64 -d > %s && chmod +x %s && tmux new-session -d -s %s %q", + encodedScript, + scriptPath, + scriptPath, + sessionName, + fmt.Sprintf("bash %s > %s 2>&1", scriptPath, logPath), + ) + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + fullCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + errCh <- fmt.Errorf("[%s:%s] ssh error in %s: %v\n%s", + inst.Name, inst.PublicIP, inst.Region, err, out) + return + } + + log.Printf("started %s session on %s (%s) 🏁 – %d/%d\n", + sessionName, inst.Name, inst.PublicIP, counter.Add(1), len(instances)) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + sb := strings.Builder{} + sb.WriteString("❌ errors running remote script:\n") + for _, e := range errs { + sb.WriteString("- ") + sb.WriteString(e.Error()) + sb.WriteByte('\n') + } + return errors.New(sb.String()) + } + return nil +} + +// waitForTmuxSessions polls all instances until the named tmux session no longer +// exists on any of them (i.e. 
the script finished), or until the timeout expires. +func waitForTmuxSessions(instances []Instance, sshKeyPath, sessionName string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + poll := 10 * time.Second + + remaining := make(map[string]Instance, len(instances)) + for _, inst := range instances { + remaining[inst.Name] = inst + } + + for len(remaining) > 0 && time.Now().Before(deadline) { + time.Sleep(poll) + + // Check all remaining validators in parallel + type result struct { + name string + finished bool + } + results := make(chan result, len(remaining)) + for name, inst := range remaining { + go func(name string, inst Instance) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName), + ) + err := ssh.Run() + switch { + case err == nil: + // tmux has-session exited 0 → session still running. + results <- result{name: name, finished: false} + case errors.As(err, new(*exec.ExitError)): + // Remote command ran but returned non-zero → session gone. + results <- result{name: name, finished: true} + default: + // SSH connection error (network blip, refused, etc.) → + // cannot determine session state; treat as still running. 
+ log.Printf("warning: SSH probe failed for %s (%s): %v", name, inst.PublicIP, err) + results <- result{name: name, finished: false} + } + }(name, inst) + } + for range len(remaining) { + r := <-results + if r.finished { + log.Printf("%s session finished on %s (%s)\n", sessionName, r.name, remaining[r.name].PublicIP) + delete(remaining, r.name) + } + } + + if len(remaining) > 0 { + fmt.Printf(" still waiting on %d validator(s)...\n", len(remaining)) + } + } + + if len(remaining) > 0 { + names := make([]string, 0, len(remaining)) + for name := range remaining { + names = append(names, name) + } + return fmt.Errorf("timeout waiting for %s sessions on: %s", sessionName, strings.Join(names, ", ")) + } + return nil +} diff --git a/tools/talis/fibre.md b/tools/talis/fibre.md new file mode 100644 index 0000000000..fadff4def0 --- /dev/null +++ b/tools/talis/fibre.md @@ -0,0 +1,196 @@ +# Running Fibre Experiments with Talis + +This guide covers running Fibre throughput experiments. For general talis setup (prerequisites, installation, cloud provider config, spinning up nodes, and tearing them down), see the main [README.md](README.md). + +## Overview + +A fibre experiment has four phases: + +1. **Setup** — Register fibre host addresses and fund escrow accounts on each validator. +2. **Start fibre server** — Start the fibre server on each validator. +3. **Load generation** — Start `fibre-txsim` on one or more validators to submit blobs via the Fibre protocol. +4. **Monitoring** — Run `fibre-throughput` to observe per-block throughput in real time and optionally write structured traces to a JSONL file. + +## Prerequisites + +Follow the main [README.md](README.md) through the **deploy** step so you have a running network: + +```sh +talis init --chain-id --experiment +talis add --type validator --count +talis up +talis genesis --square-size 256 --build-dir build +talis deploy --direct-payload-upload --workers 20 +``` + +## 1. 
Fibre setup + +Register each validator's fibre host address and deposit tokens into escrow for all fibre worker accounts: + +```sh +talis setup-fibre +``` + +| Flag | Default | Description | +|--------------------|-----------------------|------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--escrow-amount` | `200000000000000utia` | Amount to deposit into escrow per account | +| `--fibre-port` | `7980` | Fibre gRPC port on validators | +| `--fees` | `5000utia` | Transaction fees | +| `--workers` | `10` | Number of validators to set up in parallel | +| `--fibre-accounts` | `100` | Number of fibre worker accounts to deposit escrow for| + +This SSHes into every validator and runs the `set-host` and `deposit-to-escrow` transactions (one per fibre account). It polls tmux sessions to wait for all transactions to complete before returning. + +## 2. Start fibre server + +Start the fibre server on validators: + +```sh +talis start-fibre +``` + +| Flag | Default | Description | +|---------------------|---------------------|---------------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--instances` | `0` (all) | Number of validators to start fibre on | +| `--otel-endpoint` | *(auto)* | OTLP HTTP endpoint for metrics/traces (auto-enabled with observability) | + +The fibre server delegates signing to the colocated validator node's PrivValidatorAPI gRPC endpoint (default `127.0.0.1:26659`). Override with `--signer-grpc-address` if needed. Metrics and traces are auto-enabled via OTLP when observability nodes are configured. + +Each validator runs the fibre server inside a tmux session called `fibre`. To stop: + +```sh +talis kill-session --session fibre +``` + +## 3. 
Start fibre-txsim + +Start blob submission on one or more validators: + +```sh +talis fibre-txsim --instances 4 \ + --concurrency 2 \ + --blob-size 1000000 +``` + +| Flag | Default | Description | +|------------------|---------------------|--------------------------------------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--ssh-key-path` | *(from env/config)* | Path to SSH private key | +| `--instances` | `1` | Number of validators to start fibre-txsim on | +| `--concurrency` | `1` | Concurrent blob submissions per instance (each gets its own account) | +| `--blob-size` | `1000000` | Size of each blob in bytes | +| `--interval` | `0` | Delay between submissions per worker (`0` = no delay) | +| `--duration` | `0` | How long to run (`0` = until killed) | +| `--key-prefix` | `fibre` | Key name prefix in keyring (keys are named `-0`, `-1`, ...) | + +Each concurrent worker gets its own signing key and account (e.g. `fibre-0`, `fibre-1`, ...), eliminating sequence number conflicts. + +Each instance runs inside a tmux session called `fibre-txsim` on the remote validator. To stop all instances: + +```sh +talis kill-session --session fibre-txsim +``` + +To view logs on a specific validator: + +```sh +ssh root@ 'cat /root/talis-fibre-txsim.log' +``` + +## 4. 
Monitor throughput + +Run `fibre-throughput` from your local machine to poll blocks and print per-block stats: + +```sh +talis fibre-throughput +``` + +This connects to the first validator's RPC endpoint and prints a line per block: + +```text +height=350 pff_txs=4 pfb_txs=0 pff_bytes=3MB pfb_bytes=0MB block_time=3.06s pff_throughput=1.02MB/s pfb_throughput=0.00MB/s +``` + +### Flags + +| Flag | Default | Description | +|------------------|------------------------------|-----------------------------------------------| +| `--directory` | `.` | Experiment root directory | +| `--rpc-endpoint` | *(first validator IP:26657)* | CometBFT RPC endpoint to poll | +| `--duration` | `0` | How long to run (`0` = until Ctrl+C) | +| `--start-height` | `0` | Block height to start from (`0` = latest + 1) | +| `--with-traces` | `false` | Enable JSONL trace file output | +| `--traces-dir` | `traces/throughput` | Directory where trace files are written | + +### Writing traces + +To record structured per-block data for later analysis, enable the `--with-traces` flag: + +```sh +talis fibre-throughput --directory --with-traces +``` + +This creates a timestamped JSONL file inside the traces directory: + +```text +traces/throughput/throughput_2026-02-18T20:59:35Z.jsonl +``` + +Each run creates a new file. 
To use a custom directory: + +```sh +talis fibre-throughput --directory --with-traces --traces-dir my/traces +``` + +Each line in the JSONL file is a JSON object with the following fields: + +```json +{ + "height": 350, + "timestamp": "2026-02-18T20:59:33Z", + "block_time_sec": 3.06, + "pff_count": 4, + "pfb_count": 0, + "total_pff_bytes": 4000000, + "total_pfb_bytes": 0, + "pff_throughput_mbs": 1.25, + "pfb_throughput_mbs": 0 +} +``` + +| Field | Description | +|----------------------|------------------------------------------------------------| +| `height` | Block height | +| `timestamp` | Block header timestamp (RFC 3339) | +| `block_time_sec` | Seconds since the previous block | +| `pff_count` | Number of `MsgPayForFibre` transactions | +| `pfb_count` | Number of `MsgPayForBlobs` transactions | +| `total_pff_bytes` | Total PFF blob bytes in the block | +| `total_pfb_bytes` | Total PFB blob bytes in the block | +| `pff_throughput_mbs` | PFF throughput in MB/s (`pff_bytes / block_time / 1024^2`) | +| `pfb_throughput_mbs` | PFB throughput in MB/s (`pfb_bytes / block_time / 1024^2`) | + +### Replaying past blocks + +To analyze blocks from a past experiment, use `--start-height`: + +```sh +talis fibre-throughput --directory --with-traces --start-height 100 +``` + +## 5. 
Teardown + +When the experiment is complete: + +```sh +# Stop fibre-txsim and fibre server on all validators +talis kill-session --session fibre-txsim +talis kill-session --session fibre + +# Tear down cloud instances +talis down --workers 20 +``` diff --git a/tools/talis/fibre_bootstrap_evnode.go b/tools/talis/fibre_bootstrap_evnode.go new file mode 100644 index 0000000000..5df12d41ae --- /dev/null +++ b/tools/talis/fibre_bootstrap_evnode.go @@ -0,0 +1,212 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "sync" + "time" + + "github.com/spf13/cobra" +) + +// fibreBootstrapEvnodeCmd wires the two operator-supplied dependencies +// that ev-node needs before its init script will start the daemon: +// +// 1. Bridge admin JWT (/root/bridge-jwt.txt on the bridge box, written +// by bridge_init.sh). +// 2. cosmos-sdk file keyring with the Fibre payment account (lives at +// /root/.celestia-app/keyring-test on validator-0, populated during +// validator_init.sh + setup-fibre). +// +// Both get pulled to the operator's local machine first (keeps the +// transfers serial and observable), then pushed to every evnode-* in +// the config. After this command returns, evnode_init.sh's poll loop +// observes the files and starts the daemon. +// +// Run after `talis up && talis genesis && talis deploy && talis +// setup-fibre`. Idempotent — re-running just overwrites the files. +func fibreBootstrapEvnodeCmd() *cobra.Command { + var ( + rootDir string + sshKeyPath string + sshUser string + jwtTimeout time.Duration + ) + + cmd := &cobra.Command{ + Use: "fibre-bootstrap-evnode", + Short: "Pull bridge JWT + validator-0 keyring and push them to every ev-node instance", + Long: `After deploy + setup-fibre, this command stitches the two operator- +supplied dependencies onto each ev-node box so its init script's poll +loop unblocks and starts the daemon. 
SSHes to bridge-0 + validator-0 +to fetch, then SCPs to each evnode-*.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + if len(cfg.Bridges) == 0 { + return fmt.Errorf("no bridges in config — run `talis add --type bridge` first") + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators in config") + } + if len(cfg.Evnodes) == 0 { + return fmt.Errorf("no evnodes in config — nothing to bootstrap") + } + bridge := cfg.Bridges[0] + validator := cfg.Validators[0] + if bridge.PublicIP == "" || bridge.PublicIP == "TBD" { + return fmt.Errorf("bridge-0 has no public IP — run `talis up` first") + } + if validator.PublicIP == "" || validator.PublicIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP — run `talis up` first") + } + + tmpDir, err := os.MkdirTemp("", "talis-evnode-bootstrap-") + if err != nil { + return fmt.Errorf("create temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + + localJWT := filepath.Join(tmpDir, "bridge-jwt.txt") + localKeyringRoot := filepath.Join(tmpDir, "keyring-fibre") + if err := os.MkdirAll(localKeyringRoot, 0o700); err != nil { + return fmt.Errorf("create local keyring root: %w", err) + } + + // Pull JWT (poll-retry: bridge_init.sh writes it after + // `celestia bridge auth admin`, which can take ~30s after + // the bridge process starts). 
+ log.Printf("Fetching bridge JWT from bridge-0 (%s) — up to %s", bridge.PublicIP, jwtTimeout) + deadline := time.Now().Add(jwtTimeout) + for { + if err := scpFromRemote(sshUser, bridge.PublicIP, sshKeyPath, "/root/bridge-jwt.txt", localJWT, false); err == nil { + if info, statErr := os.Stat(localJWT); statErr == nil && info.Size() > 0 { + break + } + } + if time.Now().After(deadline) { + return fmt.Errorf("bridge JWT not ready at /root/bridge-jwt.txt within %s — check bridge tmux session: tmux attach -t bridge", jwtTimeout) + } + time.Sleep(5 * time.Second) + } + log.Printf("✓ pulled JWT to %s", localJWT) + + // Pull validator-0's keyring directory. The cosmos-sdk + // file backend stores per-account keys under + // keyring-test/, so we mirror that layout locally so the + // outbound push lands at /root/keyring-fibre/keyring-test/ + // — exactly where evnode_init.sh expects it. + log.Printf("Fetching keyring-test from validator-0 (%s)", validator.PublicIP) + if err := scpFromRemote(sshUser, validator.PublicIP, sshKeyPath, "/root/.celestia-app/keyring-test", localKeyringRoot, true); err != nil { + return fmt.Errorf("scp keyring from validator-0: %w", err) + } + if _, err := os.Stat(filepath.Join(localKeyringRoot, "keyring-test")); err != nil { + return fmt.Errorf("keyring-test directory not present after pull (got %s): %w", localKeyringRoot, err) + } + log.Printf("✓ pulled keyring to %s/keyring-test", localKeyringRoot) + + // Push to every ev-node in parallel. The init script's + // poll loop checks every 5s, so a successful push here + // means daemon startup within ~10s. 
+ var wg sync.WaitGroup + errCh := make(chan error, len(cfg.Evnodes)) + for _, ev := range cfg.Evnodes { + if ev.PublicIP == "" || ev.PublicIP == "TBD" { + errCh <- fmt.Errorf("evnode %s has no public IP", ev.Name) + continue + } + wg.Add(1) + go func(ev Instance) { + defer wg.Done() + log.Printf("[%s] pushing JWT + keyring", ev.Name) + + if err := scpToRemote(sshUser, ev.PublicIP, sshKeyPath, localJWT, "/root/bridge-jwt.txt", false); err != nil { + errCh <- fmt.Errorf("[%s] push JWT: %w", ev.Name, err) + return + } + + // mkdir the parent so scp lands at the exact path + // evnode_init.sh waits for. + if _, err := sshExec(sshUser, ev.PublicIP, sshKeyPath, "mkdir -p /root/keyring-fibre && rm -rf /root/keyring-fibre/keyring-test"); err != nil { + errCh <- fmt.Errorf("[%s] mkdir keyring-fibre: %w", ev.Name, err) + return + } + if err := scpToRemote(sshUser, ev.PublicIP, sshKeyPath, filepath.Join(localKeyringRoot, "keyring-test"), "/root/keyring-fibre/keyring-test", true); err != nil { + errCh <- fmt.Errorf("[%s] push keyring: %w", ev.Name, err) + return + } + + log.Printf("[%s] ✓ pushed; daemon should start within ~10s", ev.Name) + }(ev) + } + wg.Wait() + close(errCh) + var errs []error + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + for _, e := range errs { + log.Println(e) + } + return fmt.Errorf("%d evnode(s) failed to bootstrap", len(errs)) + } + log.Printf("✓ bootstrap complete for %d evnode(s)", len(cfg.Evnodes)) + return nil + }, + } + + homeDir, _ := os.UserHomeDir() + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519") + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "experiment root directory") + cmd.Flags().StringVarP(&sshKeyPath, "ssh-key-path", "s", defaultKeyPath, "SSH private key for talis instances") + cmd.Flags().StringVar(&sshUser, "ssh-user", "root", "SSH user (talis instances boot as root)") + cmd.Flags().DurationVar(&jwtTimeout, "jwt-timeout", 5*time.Minute, "max wall time to wait for the bridge 
JWT to appear on bridge-0") + + return cmd +} + +// scpFromRemote pulls a file or directory off a remote box. recursive=true +// uses scp -r so directories transfer with their contents. +func scpFromRemote(user, host, sshKeyPath, remotePath, localPath string, recursive bool) error { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + } + if recursive { + args = append(args, "-r") + } + args = append(args, fmt.Sprintf("%s@%s:%s", user, host, remotePath), localPath) + cmd := exec.Command("scp", args...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("scp pull: %w (%s)", err, string(out)) + } + return nil +} + +// scpToRemote pushes a file or directory onto a remote box. +func scpToRemote(user, host, sshKeyPath, localPath, remotePath string, recursive bool) error { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-i", sshKeyPath, + } + if recursive { + args = append(args, "-r") + } + args = append(args, localPath, fmt.Sprintf("%s@%s:%s", user, host, remotePath)) + cmd := exec.Command("scp", args...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("scp push: %w (%s)", err, string(out)) + } + return nil +} diff --git a/tools/talis/fibre_setup.go b/tools/talis/fibre_setup.go new file mode 100644 index 0000000000..28f7115b80 --- /dev/null +++ b/tools/talis/fibre_setup.go @@ -0,0 +1,161 @@ +package main + +import ( + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" +) + +const SetupFibreSessionName = "setup-fibre" + +func setupFibreCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + escrowAmount string + fibrePort int + fees string + workers int + fibreAccounts int + encoderFibreAccounts int + ) + + cmd := &cobra.Command{ + Use: "setup-fibre", + Short: "Register fibre host addresses and fund escrow accounts on remote validators", + Long: "SSHes into each validator and runs transactions: register the fibre host address and fund escrow accounts for the validator and all fibre worker accounts.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + sem := make(chan struct{}, workers) + var ( + wg sync.WaitGroup + mu sync.Mutex + errs []error + ) + + for _, val := range cfg.Validators { + // Build script: register host + deposit escrow for validator + all fibre accounts + var sb strings.Builder + + // 1. Register fibre host address. Plain `host:port` form — + // x/valaddr requires it; the gRPC client dials it via the + // passthrough resolver. Don't prefix `dns:///` here. 
+ sb.WriteString(fmt.Sprintf( + "celestia-appd tx valaddr set-host %s:%d "+ + "--from validator --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --yes\n", + val.PublicIP, fibrePort, + cfg.ChainID, fees, + )) + sb.WriteString("sleep 10\n") + + // 2. Deposit escrow for each fibre worker account + for i := range fibreAccounts { + keyName := fmt.Sprintf("fibre-%d", i) + sb.WriteString(fmt.Sprintf( + "celestia-appd tx fibre deposit-to-escrow %s "+ + "--from %s --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --yes\n", + escrowAmount, + keyName, + cfg.ChainID, fees, + )) + } + + script := sb.String() + + sem <- struct{}{} + wg.Add(1) + go func(inst Instance, s string) { + defer wg.Done() + defer func() { <-sem }() + + fmt.Printf("Running setup-fibre on %s (%s) — registering host + %d escrow deposits\n", inst.Name, inst.PublicIP, fibreAccounts) + if err := runScriptInTMux([]Instance{inst}, resolvedSSHKeyPath, s, SetupFibreSessionName, time.Minute*30); err != nil { + mu.Lock() + errs = append(errs, fmt.Errorf("%s: %w", inst.Name, err)) + mu.Unlock() + } + }(val, script) + } + + wg.Wait() + + if len(errs) > 0 { + return errors.Join(errs...) + } + + fmt.Printf("Waiting for fibre setup to complete (%d accounts per validator)...\n", fibreAccounts) + if err := waitForTmuxSessions(cfg.Validators, resolvedSSHKeyPath, SetupFibreSessionName, 10*time.Minute); err != nil { + return fmt.Errorf("waiting for setup-fibre sessions: %w", err) + } + fmt.Println("Validator setup done!") + + // Deposit escrow for encoder accounts. + // Each encoder runs deposit-to-escrow from its own machine using its + // own keyring, broadcasting via the first validator's RPC endpoint. 
+ if len(cfg.Encoders) > 0 && len(cfg.Validators) > 0 { + rpcNode := fmt.Sprintf("tcp://%s:26657", cfg.Validators[0].PublicIP) + fmt.Printf("Setting up escrow for %d encoder(s) via %s...\n", len(cfg.Encoders), rpcNode) + + for _, enc := range cfg.Encoders { + encIndex := extractIndexFromName(enc.Name) + keyPrefix := fmt.Sprintf("enc%d", encIndex) + nAccounts := encoderFibreAccounts + + var sb strings.Builder + for i := range nAccounts { + keyName := fmt.Sprintf("%s-%d", keyPrefix, i) + sb.WriteString(fmt.Sprintf( + "celestia-appd tx fibre deposit-to-escrow %s "+ + "--from %s --keyring-backend=test --home .celestia-app "+ + "--chain-id %s --fees %s --node %s --yes\n", + escrowAmount, + keyName, + cfg.ChainID, fees, rpcNode, + )) + } + + script := sb.String() + fmt.Printf("Running escrow deposits on encoder %s (%s) — %d accounts\n", enc.Name, enc.PublicIP, nAccounts) + if err := runScriptInTMux([]Instance{enc}, resolvedSSHKeyPath, script, SetupFibreSessionName, 30*time.Minute); err != nil { + return fmt.Errorf("encoder %s escrow setup: %w", enc.Name, err) + } + } + + fmt.Printf("Waiting for encoder escrow deposits to complete...\n") + if err := waitForTmuxSessions(cfg.Encoders, resolvedSSHKeyPath, SetupFibreSessionName, 15*time.Minute); err != nil { + return fmt.Errorf("waiting for encoder setup-fibre sessions: %w", err) + } + fmt.Println("Encoder escrow setup done!") + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key") + cmd.Flags().StringVar(&escrowAmount, "escrow-amount", "200000000000000utia", "amount to deposit into escrow") + cmd.Flags().IntVar(&fibrePort, "fibre-port", 7980, "fibre gRPC port on validators") + cmd.Flags().StringVar(&fees, "fees", "5000utia", "transaction fees") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of validators to set up in parallel") + 
// blockTrace is one JSONL record written per block when --with-traces is
// enabled; it mirrors exactly the fields printed to stdout for that block.
type blockTrace struct {
	Height           int64   `json:"height"`
	Timestamp        string  `json:"timestamp"`
	BlockTimeSec     float64 `json:"block_time_sec"`
	PFFCount         int     `json:"pff_count"`
	PFBCount         int     `json:"pfb_count"`
	TotalPFFBytes    int64   `json:"total_pff_bytes"`
	TotalPFBBytes    int64   `json:"total_pfb_bytes"`
	PFFThroughputMBs float64 `json:"pff_throughput_mbs"`
	PFBThroughputMBs float64 `json:"pfb_throughput_mbs"`
}

// fibreThroughputCmd builds the `fibre-throughput` cobra command. It polls a
// CometBFT RPC endpoint once per second, decodes every tx in each new block,
// tallies MsgPayForFibre (PFF) and MsgPayForBlobs (PFB) payload bytes, and
// prints (and optionally records as JSONL) per-block throughput until Ctrl+C
// or --duration elapses, then prints a summary.
func fibreThroughputCmd() *cobra.Command {
	var (
		rootDir     string
		rpcEndpoint string
		duration    time.Duration
		withTraces  bool
		tracesDir   string
		startHeight int64
	)

	cmd := &cobra.Command{
		Use:   "fibre-throughput",
		Short: "Monitor real-time fibre throughput per block",
		Long:  "Polls blocks from a validator's RPC endpoint, decodes MsgPayForFibre transactions, and prints throughput per block.",
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg, err := LoadConfig(rootDir)
			if err != nil {
				return fmt.Errorf("failed to load config: %w", err)
			}
			if len(cfg.Validators) == 0 {
				return fmt.Errorf("no validators found in config")
			}

			// Default to the first validator's public RPC endpoint.
			if rpcEndpoint == "" {
				rpcEndpoint = fmt.Sprintf("http://%s:26657", cfg.Validators[0].PublicIP)
			}

			fmt.Printf("RPC endpoint: %s\n", rpcEndpoint)

			client, err := http.New(rpcEndpoint, "/websocket")
			if err != nil {
				return fmt.Errorf("failed to create RPC client: %w", err)
			}

			// Tx decoder built from the full app codec so PFF/PFB message
			// types resolve when decoding raw block txs.
			encCfg := encoding.MakeConfig(app.ModuleEncodingRegisters...)
			txDecoder := encCfg.TxConfig.TxDecoder()

			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			// Ctrl+C cancels the context so the loop exits cleanly and the
			// summary below still prints.
			sigCh := make(chan os.Signal, 1)
			signal.Notify(sigCh, os.Interrupt)
			go func() {
				<-sigCh
				fmt.Println("\nReceived interrupt, shutting down...")
				cancel()
			}()

			// Optional wall-clock bound; wraps (and is reassigned over) the
			// cancel ctx, so either path ends the poll loop.
			if duration > 0 {
				ctx, cancel = context.WithTimeout(ctx, duration)
				defer cancel()
			}

			// Start at --start-height if given, otherwise at the current
			// chain tip + 1 (only new blocks are measured).
			var nextHeight int64
			if startHeight > 0 {
				nextHeight = startHeight
			} else {
				statusResp, err := client.Status(ctx)
				if err != nil {
					return fmt.Errorf("failed to get status: %w", err)
				}
				nextHeight = statusResp.SyncInfo.LatestBlockHeight + 1
			}
			fmt.Printf("Starting from height %d\n\n", nextHeight)

			var (
				totalBlocks     int64
				totalBytes      int64
				prevBlockTime   time.Time
				totalThroughput float64
			)

			// Optional JSONL trace output, one blockTrace per line.
			var traceEncoder *json.Encoder
			var traceFile *os.File
			if withTraces {
				if err := os.MkdirAll(tracesDir, 0o755); err != nil {
					return fmt.Errorf("failed to create traces directory: %w", err)
				}
				traceFileName := filepath.Join(tracesDir, fmt.Sprintf("throughput_%s.jsonl", time.Now().Format(time.RFC3339)))
				traceFile, err = os.Create(traceFileName)
				if err != nil {
					return fmt.Errorf("failed to create trace file: %w", err)
				}
				defer traceFile.Close()
				traceEncoder = json.NewEncoder(traceFile)
				fmt.Printf("Writing traces to %s\n", traceFileName)
			}

			ticker := time.NewTicker(time.Second)
			defer ticker.Stop()

			// Poll loop: once per second fetch the tip, then process every
			// block in [nextHeight, tip]. Transient RPC errors are printed
			// and retried on the next tick rather than aborting.
			for ctx.Err() == nil {
				select {
				case <-ctx.Done():
					continue
				case <-ticker.C:
				}

				// Fetch the latest height
				st, err := client.Status(ctx)
				if err != nil {
					if ctx.Err() != nil {
						continue
					}
					fmt.Printf("error fetching status: %v\n", err)
					continue
				}
				latestHeight := st.SyncInfo.LatestBlockHeight

				// Process all new blocks
				for h := nextHeight; h <= latestHeight && ctx.Err() == nil; h++ {
					height := h
					block, err := client.Block(ctx, &height)
					if err != nil {
						if ctx.Err() != nil {
							break
						}
						// nextHeight is not advanced, so this block is
						// retried on the next tick.
						fmt.Printf("error fetching block %d: %v\n", h, err)
						continue
					}

					// Block-to-block timestamp delta; zero for the first
					// block observed, which excludes it from throughput.
					blockTime := block.Block.Time
					var blockTimeDelta float64
					if !prevBlockTime.IsZero() {
						blockTimeDelta = blockTime.Sub(prevBlockTime).Seconds()
					}
					prevBlockTime = blockTime

					// Tally PFF/PFB message counts and payload bytes for
					// this block. Undecodable txs are skipped.
					var pffCount int
					var pffBytes int64
					var pfbCount int
					var pfbBytes int64
					for _, rawTx := range block.Block.Txs {
						sdkTx, err := txDecoder(rawTx)
						if err != nil {
							continue
						}
						for _, msg := range sdkTx.GetMsgs() {
							if pff, ok := msg.(*fibretypes.MsgPayForFibre); ok {
								pffCount++
								pffBytes += int64(pff.PaymentPromise.BlobSize)
								continue
							}
							if pfb, ok := msg.(*blobtypes.MsgPayForBlobs); ok {
								pfbCount++
								for _, size := range pfb.BlobSizes {
									pfbBytes += int64(size)
								}
							}
						}
					}

					// Throughput is bytes over the block-time delta; left
					// at zero when the delta is unknown/zero.
					var pffThroughputMBs float64
					var pfbThroughputMBs float64
					if blockTimeDelta > 0 {
						pffThroughputMBs = float64(pffBytes) / blockTimeDelta / (1024 * 1024)
						pfbThroughputMBs = float64(pfbBytes) / blockTimeDelta / (1024 * 1024)
					}

					fmt.Printf("height=%d pff_txs=%d pfb_txs=%d pff_bytes=%dMB pfb_bytes=%dMB block_time=%.2fs pff_throughput=%.2fMB/s pfb_throughput=%.2fMB/s\n",
						h, pffCount, pfbCount, pffBytes/(1024*1024), pfbBytes/(1024*1024), blockTimeDelta, pffThroughputMBs, pfbThroughputMBs)

					if traceEncoder != nil {
						trace := blockTrace{
							Height:           h,
							Timestamp:        blockTime.Format(time.RFC3339),
							BlockTimeSec:     blockTimeDelta,
							PFFCount:         pffCount,
							PFBCount:         pfbCount,
							TotalPFFBytes:    pffBytes,
							TotalPFBBytes:    pfbBytes,
							PFFThroughputMBs: pffThroughputMBs,
							PFBThroughputMBs: pfbThroughputMBs,
						}
						// Trace write failures are non-fatal: monitoring
						// continues, stdout output is unaffected.
						if err := traceEncoder.Encode(trace); err != nil {
							fmt.Printf("error writing trace: %v\n", err)
						}
					}

					// Summary counters: only PFF bytes count toward the
					// total; only blocks with a measurable delta enter the
					// throughput average.
					totalBytes += pffBytes
					if blockTimeDelta > 0 {
						totalBlocks++
						totalThroughput += pffThroughputMBs
					}

					nextHeight = h + 1
				}
			}

			fmt.Printf("\n--- Summary ---\n")
			fmt.Printf("Total blocks: %d\n", totalBlocks)
			fmt.Printf("Total bytes: %d\n", totalBytes)
			if totalBlocks > 0 {
				// Average of per-block throughputs (not total/total).
				fmt.Printf("Avg throughput: %.2f MB/s\n", totalThroughput/float64(totalBlocks))
			}

			return nil
		},
	}

	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize")
	cmd.Flags().StringVar(&rpcEndpoint, "rpc-endpoint", "", "CometBFT RPC endpoint (default: first validator IP:26657)")
	cmd.Flags().DurationVar(&duration, "duration", 0, "how long to run (0 = until Ctrl+C)")
	cmd.Flags().BoolVar(&withTraces, "with-traces", false, "enable JSONL trace file output")
	cmd.Flags().StringVar(&tracesDir, "traces-dir", "./data/monitoring/throughput", "directory for trace files")
	cmd.Flags().Int64Var(&startHeight, "start-height", 0, "block height to start from (0 = latest + 1)")

	return cmd
}
writing trace: %v\n", err) + } + } + + totalBytes += pffBytes + if blockTimeDelta > 0 { + totalBlocks++ + totalThroughput += pffThroughputMBs + } + + nextHeight = h + 1 + } + } + + fmt.Printf("\n--- Summary ---\n") + fmt.Printf("Total blocks: %d\n", totalBlocks) + fmt.Printf("Total bytes: %d\n", totalBytes) + if totalBlocks > 0 { + fmt.Printf("Avg throughput: %.2f MB/s\n", totalThroughput/float64(totalBlocks)) + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVar(&rpcEndpoint, "rpc-endpoint", "", "CometBFT RPC endpoint (default: first validator IP:26657)") + cmd.Flags().DurationVar(&duration, "duration", 0, "how long to run (0 = until Ctrl+C)") + cmd.Flags().BoolVar(&withTraces, "with-traces", false, "enable JSONL trace file output") + cmd.Flags().StringVar(&tracesDir, "traces-dir", "./data/monitoring/throughput", "directory for trace files") + cmd.Flags().Int64Var(&startHeight, "start-height", 0, "block height to start from (0 = latest + 1)") + + return cmd +} diff --git a/tools/talis/fibre_txsim.go b/tools/talis/fibre_txsim.go new file mode 100644 index 0000000000..3cc2b5ba29 --- /dev/null +++ b/tools/talis/fibre_txsim.go @@ -0,0 +1,178 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const FibreTxSimSessionName = "fibre-txsim" + +func fibreTxsimCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + instances int + concurrency int + blobSize int + interval time.Duration + duration time.Duration + keyPrefix string + download bool + uploadOnly bool + pyroscopeEndpoint string + onEncoders bool + ) + + cmd := &cobra.Command{ + Use: "fibre-txsim", + Short: "Start fibre-txsim on remote validators or encoder instances via SSH + tmux", + Long: "Starts fibre-txsim tmux sessions on remote validators or dedicated encoder instances. 
The fibre-txsim binary must already be deployed via 'talis deploy' (built by 'make build-talis-bins').", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + if onEncoders { + return startFibreTxsimOnEncoders(cfg, resolvedSSHKeyPath, instances, concurrency, blobSize, interval, duration, download, uploadOnly, pyroscopeEndpoint) + } + + // Legacy mode: run fibre-txsim on validators themselves + n := min(instances, len(cfg.Validators)) + validators := cfg.Validators[:n] + + // Build the remote command — binaries are copied to /bin/ by validator_init.sh + // OTEL_METRICS_EXEMPLAR_FILTER=always_on attaches trace exemplars to all metric observations + remoteCmd := fmt.Sprintf( + "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre-txsim --chain-id %s --grpc-endpoint localhost:9091 --keyring-dir .celestia-app --key-prefix %s --blob-size %d --concurrency %d --interval %s --duration %s --download=%t --upload-only=%t", + cfg.ChainID, + keyPrefix, + blobSize, + concurrency, + interval, + duration, + download, + uploadOnly, + ) + + // Auto-wire observability endpoints when observability nodes are configured + if len(cfg.Observability) > 0 { + remoteCmd += fmt.Sprintf(" --otel-endpoint http://%s:4318", cfg.Observability[0].PublicIP) + if pyroscopeEndpoint == "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint http://%s:4040", cfg.Observability[0].PublicIP) + } + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf("Starting fibre-txsim sessions on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, remoteCmd, FibreTxSimSessionName, 
5*time.Minute); err != nil { + return fmt.Errorf("failed to start remote sessions: %w", err) + } + + printFibreTxsimSummary(validators) + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().IntVar(&instances, "instances", 1, "number of instances to start fibre-txsim on") + cmd.Flags().IntVar(&concurrency, "concurrency", 1, "number of concurrent blob submissions per instance") + cmd.Flags().IntVar(&blobSize, "blob-size", 1000000, "size of each blob in bytes") + cmd.Flags().DurationVar(&interval, "interval", 0, "delay between blob submissions (0 = no delay)") + cmd.Flags().DurationVar(&duration, "duration", 0, "how long to run (0 = until killed)") + cmd.Flags().StringVar(&keyPrefix, "key-prefix", "fibre", "key name prefix in keyring (keys are named -0, -1, ...)") + cmd.Flags().BoolVar(&download, "download", false, "enable download verification after each successful upload (downloads blob back and compares with original data)") + cmd.Flags().BoolVar(&uploadOnly, "upload-only", false, "skip PFF transaction — only upload shards to validators without on-chain confirmation") + cmd.Flags().StringVar(&pyroscopeEndpoint, "pyroscope-endpoint", "", "Pyroscope endpoint for continuous profiling (default: auto-detected from observability config, e.g. http://host:4040)") + cmd.Flags().BoolVar(&onEncoders, "on-encoders", false, "run fibre-txsim on dedicated encoder instances instead of validators") + + return cmd +} + +// startFibreTxsimOnEncoders launches fibre-txsim on each encoder instance. +// Each encoder is mapped to a validator (round-robin) and uses a unique key +// prefix (enc0, enc1, ...) so that their escrow accounts are independent. 
+func startFibreTxsimOnEncoders(cfg Config, sshKeyPath string, instances, concurrency, blobSize int, interval, duration time.Duration, download, uploadOnly bool, pyroscopeEndpoint string) error { + if len(cfg.Encoders) == 0 { + return fmt.Errorf("no encoder instances found in config — add encoders via 'talis add -t encoder'") + } + + n := min(instances, len(cfg.Encoders)) + encoders := cfg.Encoders[:n] + + fmt.Printf("Starting fibre-txsim on %d encoder(s)...\n", len(encoders)) + + for _, enc := range encoders { + encIndex := extractIndexFromName(enc.Name) + // Round-robin map encoder → validator for gRPC endpoint + valIndex := encIndex % len(cfg.Validators) + grpcEndpoint := fmt.Sprintf("%s:9091", cfg.Validators[valIndex].PublicIP) + encKeyPrefix := fmt.Sprintf("enc%d", encIndex) + + remoteCmd := fmt.Sprintf( + // Encoders keep their per-encoder keyring under + // /root/encoder-payload//keyring-test/, never copied to + // the default ~/.celestia-app/keyring-test by the deploy step; + // point fibre-txsim at the right directory directly so it can + // load enc-* keys. 
+ "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre-txsim --chain-id %s --grpc-endpoint %s --keyring-dir encoder-payload/%s --key-prefix %s --blob-size %d --concurrency %d --interval %s --duration %s --download=%t --upload-only=%t", + cfg.ChainID, + grpcEndpoint, + enc.Name, + encKeyPrefix, + blobSize, + concurrency, + interval, + duration, + download, + uploadOnly, + ) + + // Auto-wire observability endpoints + if len(cfg.Observability) > 0 { + remoteCmd += fmt.Sprintf(" --otel-endpoint http://%s:4318", cfg.Observability[0].PublicIP) + if pyroscopeEndpoint == "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint http://%s:4040", cfg.Observability[0].PublicIP) + } + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf(" encoder %s → validator %s (grpc=%s, keys=%s-*)\n", + enc.Name, cfg.Validators[valIndex].Name, grpcEndpoint, encKeyPrefix) + + if err := runScriptInTMux([]Instance{enc}, sshKeyPath, remoteCmd, FibreTxSimSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start fibre-txsim on encoder %s: %w", enc.Name, err) + } + } + + printFibreTxsimSummary(encoders) + return nil +} + +func printFibreTxsimSummary(instances []Instance) { + fmt.Println() + fmt.Println("=== fibre-txsim sessions started ===") + fmt.Printf(" tmux session: %s\n", FibreTxSimSessionName) + fmt.Printf(" log file: /root/talis-%s.log\n", FibreTxSimSessionName) + fmt.Println(" instances:") + for _, inst := range instances { + fmt.Printf(" - %s (%s)\n", inst.Name, inst.PublicIP) + } + fmt.Println() + fmt.Printf(" To kill all: talis kill-session -s %s\n", FibreTxSimSessionName) + fmt.Printf(" To view logs: ssh root@ 'cat /root/talis-%s.log'\n", FibreTxSimSessionName) +} diff --git a/tools/talis/genesis.go b/tools/talis/genesis.go new file mode 100644 index 0000000000..9a4f481a81 --- /dev/null +++ b/tools/talis/genesis.go @@ -0,0 +1,779 @@ +package main + +import ( + "fmt" + "log" + "os" + "path/filepath" + + 
"github.com/celestiaorg/celestia-app/v9/pkg/appconsts" + "github.com/celestiaorg/celestia-app/v9/test/util/genesis" + "github.com/spf13/cobra" +) + +const ( + chainIDFlag = "chainID" + rootDirFlag = "directory" +) + +// generateCmd is the Cobra command for creating the payload for the experiment. +func generateCmd() *cobra.Command { + var ( + rootDir string + chainID string // will overwrite that in the config + squareSize int + buildDirPath string + appBinaryPath string + nodeBinaryPath string + txsimBinaryPath string + latencyMonitorBinaryPath string + fibreBinaryPath string + fibreTxsimBinaryPath string + observabilityDirPath string + useMainnetStakingDistribution bool + fibreAccounts int + encoderFibreAccounts int + ) + cmd := &cobra.Command{ + Use: "genesis", + Short: "Create a genesis for the network.", + Long: "Create a genesis for the network along with everything else needed to start the network. Call this only after init and add.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if chainID != "" { + cfg = cfg.WithChainID(chainID) + } + + payloadDir := filepath.Join(rootDir, "payload") + + if err := os.RemoveAll(payloadDir); err != nil { + return fmt.Errorf("failed to remove old payload directory: %w", err) + } + if err := os.RemoveAll(filepath.Join(rootDir, "encoder-payload")); err != nil { + return fmt.Errorf("failed to remove old encoder-payload directory: %w", err) + } + + err = createPayload(cfg.Validators, cfg.Encoders, cfg.ChainID, payloadDir, squareSize, useMainnetStakingDistribution, fibreAccounts, encoderFibreAccounts) + if err != nil { + log.Fatalf("Failed to create payload: %v", err) + } + + srcAppConfig := filepath.Join(rootDir, "app.toml") + + for _, v := range cfg.Validators { + valDir := filepath.Join(payloadDir, v.Name) + // Note: per-validator config.toml is written by Network.InitNodes + // with the correct 
persistent_peers list. Don't overwrite it + // here — that would clobber the peer list and the chain comes + // up with zero peers. + + if err := copyFile(srcAppConfig, filepath.Join(valDir, "app.toml"), 0o755); err != nil { + return fmt.Errorf("failed to copy app.toml: %w", err) + } + } + + if err := copyDir(filepath.Join(rootDir, "scripts"), filepath.Join(rootDir, "payload")); err != nil { + return fmt.Errorf("failed to copy scripts: %w", err) + } + + buildDest := filepath.Join(payloadDir, "build") + if buildDirPath != "" { + info, err := os.Stat(buildDirPath) + if err != nil { + return fmt.Errorf("failed to stat build directory %q: %w", buildDirPath, err) + } + if !info.IsDir() { + return fmt.Errorf("build path %q is not a directory", buildDirPath) + } + if err := copyDir(buildDirPath, buildDest); err != nil { + return fmt.Errorf("failed to copy build directory: %w", err) + } + } else { + if err := copyFile(appBinaryPath, filepath.Join(buildDest, "celestia-appd"), 0o755); err != nil { + return fmt.Errorf("failed to copy app binary: %w", err) + } + + if err := copyFile(nodeBinaryPath, filepath.Join(buildDest, "celestia"), 0o755); err != nil { + log.Println("failed to copy celestia binary, bridge and light nodes will not be able to start") + } + + if err := copyFile(txsimBinaryPath, filepath.Join(buildDest, "txsim"), 0o755); err != nil { + return fmt.Errorf("failed to copy txsim binary: %w", err) + } + + // Copy latency monitor binary + if err := copyFile(latencyMonitorBinaryPath, filepath.Join(buildDest, "latency-monitor"), 0o755); err != nil { + log.Printf("failed to copy latency monitor binary: %v", err) + } + + // Copy fibre server binary + if err := copyFile(fibreBinaryPath, filepath.Join(buildDest, "fibre"), 0o755); err != nil { + log.Printf("failed to copy fibre binary: %v", err) + } + + // Copy fibre-txsim binary + if err := copyFile(fibreTxsimBinaryPath, filepath.Join(buildDest, "fibre-txsim"), 0o755); err != nil { + log.Printf("failed to copy fibre-txsim 
binary: %v", err) + } + } + + if err := writeAWSEnv(filepath.Join(payloadDir, "vars.sh"), cfg); err != nil { + return fmt.Errorf("failed to write aws env: %w", err) + } + + if err := stageObservabilityPayload(cfg, observabilityDirPath, payloadDir); err != nil { + return fmt.Errorf("failed to stage observability payload: %w", err) + } + + // Stage encoder payload: copy binaries, genesis, and vars to the + // encoder-payload directory so deploy can create a lightweight tar. + if len(cfg.Encoders) > 0 { + if err := stageEncoderPayload(rootDir, payloadDir, appBinaryPath, fibreTxsimBinaryPath, buildDirPath); err != nil { + return fmt.Errorf("failed to stage encoder payload: %w", err) + } + } + + // Stage bridge payload: celestia-node binary + genesis + init + // script. Each bridge points at validator-0's RPC for header + // sync; talis up has already populated cfg.Validators[0].PublicIP. + if len(cfg.Bridges) > 0 { + if len(cfg.Validators) == 0 { + return fmt.Errorf("bridges configured but no validators — bring up validators first") + } + if err := stageBridgePayload(rootDir, payloadDir, nodeBinaryPath, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage bridge payload: %w", err) + } + } + + // Stage ev-node payload: evnode binary + templated init script. + // ev-node needs the bridge JWT + a funded fibre keyring, both + // of which are scp'd in a separate `talis fibre-bootstrap-evnode` + // step (or by hand) — the init script polls for them and only + // starts the daemon once they exist. 
+ if len(cfg.Evnodes) > 0 { + if len(cfg.Validators) == 0 { + return fmt.Errorf("evnodes configured but no validators — bring up validators first") + } + if len(cfg.Bridges) == 0 { + return fmt.Errorf("evnodes configured but no bridges — at least one bridge is required") + } + if err := stageEvnodePayload(rootDir, payloadDir, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage evnode payload: %w", err) + } + } + + // Stage loadgen payload: evnode-txsim binary + init script + // templated with evnode-0's HTTP endpoint as the target. + if len(cfg.Loadgens) > 0 { + if len(cfg.Evnodes) == 0 { + return fmt.Errorf("loadgens configured but no evnodes — at least one ev-node is required") + } + if err := stageLoadgenPayload(rootDir, payloadDir, buildDirPath, cfg); err != nil { + return fmt.Errorf("failed to stage loadgen payload: %w", err) + } + } + + return cfg.Save(rootDir) + }, + } + + gopath := os.Getenv("GOPATH") + if gopath == "" { + home, err := os.UserHomeDir() + if err != nil { + panic("failed to determine home dir: " + err.Error()) + } + gopath = filepath.Join(home, "go") + } + gopath = filepath.Join(gopath, "bin") + + cmd.Flags().StringVarP(&chainID, chainIDFlag, "c", "", "Override the chainID in the config") + cmd.Flags().StringVarP(&rootDir, rootDirFlag, "d", ".", "root directory in which to initialize (default is the current directory)") + cmd.Flags().IntVarP(&squareSize, "ods-size", "s", appconsts.SquareSizeUpperBound, "The size of the ODS for the network (make sure to also build a celestia-app binary with a greater SquareSizeUpperBound)") + cmd.Flags().StringVarP(&buildDirPath, "build-dir", "b", "", "directory containing binaries to include in the payload") + cmd.Flags().StringVarP(&appBinaryPath, "app-binary", "a", filepath.Join(gopath, "celestia-appd"), "app binary to include in the payload (assumes the binary is installed") + cmd.Flags().StringVarP(&nodeBinaryPath, "node-binary", "n", filepath.Join(gopath, "celestia"), "node binary to 
include in the payload (assumes the binary is installed") + cmd.Flags().StringVarP(&txsimBinaryPath, "txsim-binary", "t", filepath.Join(gopath, "txsim"), "txsim binary to include in the payload (assumes the binary is installed)") + cmd.Flags().StringVar(&latencyMonitorBinaryPath, "latency-monitor-binary", filepath.Join(gopath, "latency-monitor"), "latency monitor binary to include in the payload") + cmd.Flags().StringVar(&fibreBinaryPath, "fibre-binary", filepath.Join(gopath, "fibre"), "fibre server binary to include in the payload") + cmd.Flags().StringVar(&fibreTxsimBinaryPath, "fibre-txsim-binary", filepath.Join(gopath, "fibre-txsim"), "fibre-txsim binary to include in the payload") + cmd.Flags().StringVar(&observabilityDirPath, "observability-dir", "", "path to observability directory containing docker-compose, Prometheus config, and scripts (required if observability nodes are configured)") + cmd.Flags().BoolVarP(&useMainnetStakingDistribution, "mainnet-staking-distribution", "m", false, "replace the default uniform staking distribution with the actual mainnet distribution") + cmd.Flags().IntVar(&fibreAccounts, "fibre-accounts", 100, "number of pre-funded fibre accounts to create per validator") + cmd.Flags().IntVar(&encoderFibreAccounts, "encoder-fibre-accounts", 100, "number of pre-funded fibre accounts to create per encoder instance") + + return cmd +} + +// createPayload takes ips created by pulumi and the path to the payload directory +// to create the payload required for the experiment. +func createPayload(ips, encoders []Instance, chainID, ppath string, squareSize int, useMainnetDistribution bool, fibreAccounts, encoderFibreAccounts int, mods ...genesis.Modifier) error { + n, err := NewNetwork(chainID, squareSize, mods...) 
+ if err != nil { + return err + } + + stake := int64(genesis.DefaultInitialBalance) / 2 + for index, info := range ips { + if useMainnetDistribution { + stake = getMainnetStake(index) + } + err = n.AddValidator( + info.Name, + info.PublicIP, + ppath, + info.Region, + stake, + fibreAccounts, + ) + if err != nil { + return err + } + } + + // Create encoder-payload directory and keyrings for each encoder. + // Encoder keyrings are stored in /../encoder-payload// + // so that a separate, lighter tar can be built during deploy. + encoderPayloadDir := filepath.Join(filepath.Dir(ppath), "encoder-payload") + if len(encoders) > 0 { + if err := os.MkdirAll(encoderPayloadDir, 0o755); err != nil { + return fmt.Errorf("failed to create encoder-payload dir: %w", err) + } + } + for _, enc := range encoders { + if err := n.AddEncoder(enc.Name, encoderPayloadDir, encoderFibreAccounts); err != nil { + return fmt.Errorf("failed to add encoder %s: %w", enc.Name, err) + } + } + + for _, val := range n.genesis.Validators() { + fmt.Println(val.Name, val.ConsensusKey.PubKey()) + } + + err = n.InitNodes(ppath) + if err != nil { + return err + } + + err = n.SaveAddressBook(ppath, n.Peers()) + if err != nil { + return err + } + + return nil +} + +// mainnetVotingPowers contains the current Celestia mainnet staking distribution for more realistic tests. +var mainnetVotingPowers []int + +func getMainnetStake(index int) int64 { + if index < 0 { + return 0 + } + if len(mainnetVotingPowers) == 0 { + // these figures reflect the exact staking values on 09/07/25. 
// mainnetVotingPowers contains the current Celestia mainnet staking distribution for more realistic tests.
// these figures reflect the exact staking values on 09/07/25.
// Initialized at declaration (instead of lazily inside getMainnetStake) so
// the table is never mutated after package init and is safe to read from
// multiple goroutines.
var mainnetVotingPowers = []int{
	44706511, 44437002, 37932228, 37544929, 29421912, 27045838, 25722376, 25574864, 19573478, 17083572,
	14156979, 10990505, 10228508, 8017107, 7985256, 7465738, 7156557, 7000454, 6957695, 6816721,
	6497714, 6133878, 6061770, 6023778, 5837045, 5817421, 5788259, 5571126, 5504182, 5500773,
	5070168, 4672609, 4360060, 4326293, 3978439, 3894538, 3746172, 3608145, 3606324, 3606128,
	3600486, 3560552, 3538637, 3456887, 3449504, 3365860, 3330140, 3329077, 3242441, 3231836,
	3163103, 3162476, 3139329, 3132732, 3117200, 3071253, 3059325, 3043103, 3039694, 3038574,
	3038322, 3025332, 3025137, 3013047, 3011854, 3010337, 3004185, 3001607, 3000732, 3000592,
	3000433, 3000236, 3000215, 3000207, 3000142, 3000128, 3000126, 2689474, 2500012, 2329666,
	2242943, 2083890, 2038490, 1957574, 1619120, 1615290, 1482045, 1291544, 1286175, 1204480,
	1202416, 1156152, 1137365, 1101315, 1045017, 1000381, 977562, 948538, 820448, 445353,
}

// getMainnetStake returns the mainnet voting power for the validator at
// index. Negative indexes return 0; indexes past the end of the table are
// clamped to the last (smallest) recorded stake so networks larger than
// the snapshot still get a sane tail value.
func getMainnetStake(index int) int64 {
	if index < 0 {
		return 0
	}
	if index >= len(mainnetVotingPowers) {
		return int64(mainnetVotingPowers[len(mainnetVotingPowers)-1])
	}
	return int64(mainnetVotingPowers[index])
}
+func stageEncoderPayload(rootDir, payloadDir, appBinaryPath, fibreTxsimBinaryPath, buildDirPath string) error { + encPayload := filepath.Join(rootDir, "encoder-payload") + + // Build directory with only the two binaries an encoder needs + encBuild := filepath.Join(encPayload, "build") + if err := os.MkdirAll(encBuild, 0o755); err != nil { + return err + } + + if buildDirPath != "" { + for _, name := range []string{"celestia-appd", "fibre-txsim"} { + src := filepath.Join(buildDirPath, name) + if err := copyFile(src, filepath.Join(encBuild, name), 0o755); err != nil { + return fmt.Errorf("copy %s from build dir: %w", name, err) + } + } + } else { + if err := copyFile(appBinaryPath, filepath.Join(encBuild, "celestia-appd"), 0o755); err != nil { + return fmt.Errorf("copy celestia-appd: %w", err) + } + if err := copyFile(fibreTxsimBinaryPath, filepath.Join(encBuild, "fibre-txsim"), 0o755); err != nil { + return fmt.Errorf("copy fibre-txsim: %w", err) + } + } + + // Copy genesis and vars.sh + if err := copyFile(filepath.Join(payloadDir, "genesis.json"), filepath.Join(encPayload, "genesis.json"), 0o644); err != nil { + return fmt.Errorf("copy genesis.json: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(encPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + // Write the encoder init script + return writeEncoderInitScript(filepath.Join(encPayload, "encoder_init.sh")) +} + +// stageBridgePayload copies the celestia-node binary, the consensus +// chain's genesis.json, and a templated bridge_init.sh into a +// bridge-payload directory. Deploy uses this to ship a lightweight tar +// to each bridge instance. The first validator's public IP is baked +// into the init script as core.ip — bridges follow validator-0 for +// header / block sync. With a multi-validator chain, validator-0 is a +// fine choice since headers come from consensus regardless. 
+func stageBridgePayload(rootDir, payloadDir, nodeBinaryPath, buildDirPath string, cfg Config) error { + bridgePayload := filepath.Join(rootDir, "bridge-payload") + + if err := os.RemoveAll(bridgePayload); err != nil { + return fmt.Errorf("clean old bridge-payload: %w", err) + } + + bridgeBuild := filepath.Join(bridgePayload, "build") + if err := os.MkdirAll(bridgeBuild, 0o755); err != nil { + return err + } + + // celestia-node's binary is named "celestia". --build-dir wins over + // the per-binary path so a single packed dir can drive validator + + // bridge + ev-node deploys. + if buildDirPath != "" { + src := filepath.Join(buildDirPath, "celestia") + if err := copyFile(src, filepath.Join(bridgeBuild, "celestia"), 0o755); err != nil { + return fmt.Errorf("copy celestia from build dir: %w", err) + } + } else { + if err := copyFile(nodeBinaryPath, filepath.Join(bridgeBuild, "celestia"), 0o755); err != nil { + return fmt.Errorf("copy celestia binary: %w", err) + } + } + + if err := copyFile(filepath.Join(payloadDir, "genesis.json"), filepath.Join(bridgePayload, "genesis.json"), 0o644); err != nil { + return fmt.Errorf("copy genesis.json: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(bridgePayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + coreIP := cfg.Validators[0].PublicIP + if coreIP == "" || coreIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP yet — run `talis up` before genesis") + } + + return writeBridgeInitScript(filepath.Join(bridgePayload, "bridge_init.sh"), coreIP) +} + +// writeBridgeInitScript writes the per-bridge init script. It runs +// `celestia bridge init`, points the bridge at validator-0's gRPC for +// state sync, generates an admin JWT (printed to a known file so +// downstream ev-node deploys can scp it), and starts the bridge in a +// detached tmux session. +// +// All values that change per-experiment are baked in at staging time. 
+// CHAIN_ID comes from sourced vars.sh; coreIP is templated literally +// since it's only known after talis up has populated config.json. +func writeBridgeInitScript(path string, coreIP string) error { + script := `#!/bin/bash +set -euo pipefail + +CELES_BRIDGE_HOME="$HOME/.celestia-bridge" +CORE_IP="` + coreIP + `" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +# TCP BBR — same tuning as validators / encoders. +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Install celestia-node binary +cp bridge-payload/build/celestia /bin/celestia +chmod +x /bin/celestia + +source bridge-payload/vars.sh +echo "Bridge bootstrap: chain_id=$CHAIN_ID core_ip=$CORE_IP" + +# Initialize node store. p2p.network is the chain id; celestia-node +# accepts it because vars.sh exported CELESTIA_CUSTOM=$CHAIN_ID, +# which registers the chain id as a custom network at startup. +if [ ! -f "$CELES_BRIDGE_HOME/config.toml" ]; then + celestia bridge init --p2p.network "$CHAIN_ID" --node.store "$CELES_BRIDGE_HOME" +fi + +# Drop the consensus chain's genesis next to the bridge config so +# anything that reads it (peer discovery, header validation) sees +# the same genesis as validators. +mkdir -p "$CELES_BRIDGE_HOME/config" +cp bridge-payload/genesis.json "$CELES_BRIDGE_HOME/genesis.json" + +# Generate the admin JWT and stash it where downstream consumers +# (ev-node deploy) can scp it. With CELESTIA_CUSTOM set, celestia +# prints a multi-line "WARNING: custom network..." banner to stdout +# alongside the token, so we grep for the JWT line specifically — +# otherwise downstream consumers send the warning text as the auth +# header and get a 401. 
+celestia bridge auth admin --node.store "$CELES_BRIDGE_HOME" 2>/dev/null \ + | grep -E '^eyJ' | tail -1 > /root/bridge-jwt.txt +echo "Wrote /root/bridge-jwt.txt ($(wc -c < /root/bridge-jwt.txt) bytes)" + +ufw allow 26658/tcp || true # RPC (admin API) +ufw allow 2121/tcp || true # P2P +ufw allow 2121/udp || true + +# Run in tmux so the SSH session can detach. RPC is exposed on +# 0.0.0.0:26658 (auth required via JWT). Core gRPC connection to +# validator-0 is plaintext for testnet. +tmux kill-session -t bridge 2>/dev/null || true +# tmux sessions inherit env from the tmux server, not the caller, so +# CELESTIA_CUSTOM has to be re-exported inside the inner command. +tmux new-session -d -s bridge "env CELESTIA_CUSTOM=${CHAIN_ID} celestia bridge start \ + --p2p.network ${CHAIN_ID} \ + --node.store ${CELES_BRIDGE_HOME} \ + --core.ip ${CORE_IP} \ + --core.port 9091 \ + --core.tls=false \ + --rpc.addr 0.0.0.0 \ + --rpc.port 26658 \ + --metrics 2>&1 | tee -a /root/bridge.log" + +echo "Bridge started in tmux session 'bridge' — attach with: tmux attach -t bridge" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// stageEvnodePayload copies the evnode-fibre binary + a templated init +// script into evnode-payload/ so the deploy step can build a small tar +// per ev-node. The init script poll-waits for /root/bridge-jwt.txt and +// /root/keyring-fibre/ to exist before starting — both are scp'd in by +// a separate bootstrap step (or manually) so that JWT + keyring don't +// need to be embedded in the payload. 
+func stageEvnodePayload(rootDir, payloadDir, buildDirPath string, cfg Config) error { + evPayload := filepath.Join(rootDir, "evnode-payload") + + if err := os.RemoveAll(evPayload); err != nil { + return fmt.Errorf("clean old evnode-payload: %w", err) + } + + evBuild := filepath.Join(evPayload, "build") + if err := os.MkdirAll(evBuild, 0o755); err != nil { + return err + } + + if buildDirPath == "" { + return fmt.Errorf("--build-dir is required when evnodes are configured (must contain `evnode` binary)") + } + src := filepath.Join(buildDirPath, "evnode") + if err := copyFile(src, filepath.Join(evBuild, "evnode"), 0o755); err != nil { + return fmt.Errorf("copy evnode from build dir: %w", err) + } + + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(evPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + bridgeIP := cfg.Bridges[0].PublicIP + coreIP := cfg.Validators[0].PublicIP + if bridgeIP == "" || bridgeIP == "TBD" { + return fmt.Errorf("bridge-0 has no public IP yet — run `talis up` before genesis") + } + if coreIP == "" || coreIP == "TBD" { + return fmt.Errorf("validator-0 has no public IP yet — run `talis up` before genesis") + } + + return writeEvnodeInitScript(filepath.Join(evPayload, "evnode_init.sh"), bridgeIP, coreIP) +} + +// writeEvnodeInitScript writes the evnode aggregator init script. +// Templated values: BRIDGE_IP (bridge-0 RPC for blob.Subscribe / Submit) +// and CORE_GRPC_ADDR (validator-0 gRPC for state queries via +// celestia-node's submit path). CHAIN_ID flows through vars.sh. +// +// The script does NOT copy bridge-jwt.txt or the fibre keyring itself — +// those must already exist on the box (manually scp'd or pushed by a +// future `talis fibre-bootstrap-evnode` command). The poll loop makes +// the script restartable: re-running deploy after copying the missing +// pieces will cleanly start the daemon. 
+func writeEvnodeInitScript(path string, bridgeIP, coreIP string) error { + script := `#!/bin/bash +set -euo pipefail + +EVNODE_HOME="$HOME/.evnode-fibre" +# celestia-node API client requires a URL scheme on the bridge addr. +BRIDGE_ADDR="http://` + bridgeIP + `:26658" +# celestia-app exposes Tendermint RPC services on :9090 and the +# cosmos.* / celestia.* state services on :9091. The fiber adapter's +# submit path queries cosmos.base.tendermint.v1beta1.Service so it +# needs the :9091 endpoint. +CORE_GRPC_ADDR="` + coreIP + `:9091" +BRIDGE_JWT_FILE="/root/bridge-jwt.txt" +FIBRE_KEYRING_DIR="/root/keyring-fibre" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +cp evnode-payload/build/evnode /bin/evnode +chmod +x /bin/evnode + +source evnode-payload/vars.sh +echo "evnode bootstrap: chain_id=$CHAIN_ID bridge=$BRIDGE_ADDR core=$CORE_GRPC_ADDR" + +mkdir -p "$EVNODE_HOME" + +# Wait for the operator-supplied dependencies. These come from a +# separate step (manual scp or 'talis fibre-bootstrap-evnode'): +# 1. /root/bridge-jwt.txt admin JWT from the bridge +# 2. /root/keyring-fibre/keyring-test cosmos-sdk file keyring with +# a Fibre payment account +# Without them the daemon would crash immediately on startup. +echo "Waiting for $BRIDGE_JWT_FILE and $FIBRE_KEYRING_DIR..." +WAITED=0 +until [ -s "$BRIDGE_JWT_FILE" ] && [ -d "$FIBRE_KEYRING_DIR/keyring-test" ]; do + sleep 5 + WAITED=$((WAITED + 5)) + if [ $((WAITED % 60)) -eq 0 ]; then + echo " still waiting after ${WAITED}s..." 
+ fi +done +echo "Dependencies present after ${WAITED}s" + +ufw allow 7777/tcp || true # tx-ingest HTTP +ufw allow 7331/tcp || true # ev-node RPC +ufw allow 7676/tcp || true # libp2p (idle when Fiber on) + +# A passphrase file keeps the file-signer reproducible across restarts +# without baking creds into the script. +mkdir -p "$EVNODE_HOME/.signer" +if [ ! -f "$EVNODE_HOME/.signer/passphrase" ]; then + echo "evnode-fibre-passphrase" > "$EVNODE_HOME/.signer/passphrase" + chmod 600 "$EVNODE_HOME/.signer/passphrase" +fi + +tmux kill-session -t evnode 2>/dev/null || true +# CELESTIA_CUSTOM has to be present in the env that evnode runs under, +# not the caller's: tmux sessions inherit from the tmux server, not the +# shell that issues new-session. Without it, celestia-node refuses to +# accept --core-network=$CHAIN_ID since it's not in the hard-coded +# networksList. +tmux new-session -d -s evnode "env CELESTIA_CUSTOM=${CHAIN_ID} evnode \ + --home ${EVNODE_HOME} \ + --chain-id ${CHAIN_ID} \ + --bridge-addr ${BRIDGE_ADDR} \ + --bridge-token-file ${BRIDGE_JWT_FILE} \ + --core-grpc-addr ${CORE_GRPC_ADDR} \ + --core-network ${CHAIN_ID} \ + --keyring-path ${FIBRE_KEYRING_DIR} \ + --key-name fibre-0 \ + --signer-passphrase-file ${EVNODE_HOME}/.signer/passphrase \ + --log-level info \ + 2>&1 | tee -a /root/evnode.log" + +echo "ev-node started in tmux session 'evnode' — attach with: tmux attach -t evnode" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// stageLoadgenPayload stages the evnode-txsim binary + a templated +// init script for each load-gen instance. The script bursts traffic at +// evnode-0's HTTP /tx endpoint for a fixed duration (override via the +// TXSIM_DURATION / TXSIM_CONCURRENCY / TXSIM_TX_SIZE env vars on the +// box). Final TXSIM: line lands in /root/txsim.log. 
+func stageLoadgenPayload(rootDir, payloadDir, buildDirPath string, cfg Config) error { + lgPayload := filepath.Join(rootDir, "loadgen-payload") + + if err := os.RemoveAll(lgPayload); err != nil { + return fmt.Errorf("clean old loadgen-payload: %w", err) + } + lgBuild := filepath.Join(lgPayload, "build") + if err := os.MkdirAll(lgBuild, 0o755); err != nil { + return err + } + + if buildDirPath == "" { + return fmt.Errorf("--build-dir is required when loadgens are configured (must contain `evnode-txsim` binary)") + } + src := filepath.Join(buildDirPath, "evnode-txsim") + if err := copyFile(src, filepath.Join(lgBuild, "evnode-txsim"), 0o755); err != nil { + return fmt.Errorf("copy evnode-txsim from build dir: %w", err) + } + if err := copyFile(filepath.Join(payloadDir, "vars.sh"), filepath.Join(lgPayload, "vars.sh"), 0o755); err != nil { + return fmt.Errorf("copy vars.sh: %w", err) + } + + evnodeIP := cfg.Evnodes[0].PublicIP + if evnodeIP == "" || evnodeIP == "TBD" { + return fmt.Errorf("evnode-0 has no public IP yet — run `talis up` before genesis") + } + + return writeLoadgenInitScript(filepath.Join(lgPayload, "loadgen_init.sh"), evnodeIP) +} + +// writeLoadgenInitScript writes the per-loadgen init script. evnode-0's +// HTTP endpoint is templated literally because it's only known after +// `talis up`. Tunables (duration, concurrency, tx size) come through +// env vars at start time so a single deploy can drive multiple +// experiments via SSH-set environment. 
+func writeLoadgenInitScript(path string, evnodeIP string) error { + script := `#!/bin/bash +set -euo pipefail + +EVNODE_IP="` + evnodeIP + `" +TARGET="${TXSIM_TARGET:-http://${EVNODE_IP}:7777/tx}" +DURATION="${TXSIM_DURATION:-30s}" +CONCURRENCY="${TXSIM_CONCURRENCY:-8}" +TX_SIZE="${TXSIM_TX_SIZE:-10240}" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl chrony tmux --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +cp loadgen-payload/build/evnode-txsim /bin/evnode-txsim +chmod +x /bin/evnode-txsim + +source loadgen-payload/vars.sh +echo "loadgen bootstrap: target=$TARGET duration=$DURATION concurrency=$CONCURRENCY tx_size=$TX_SIZE chain_id=$CHAIN_ID" + +# Wait for ev-node's tx endpoint to come up (it will only start once +# bridge JWT + fibre keyring are scp'd in by the operator). +echo "Waiting for $TARGET to accept tx (testing /stats)..." +STATS_URL="${TARGET%/tx}/stats" +WAITED=0 +until curl --silent --max-time 2 --output /dev/null "$STATS_URL" 2>/dev/null; do + sleep 5 + WAITED=$((WAITED + 5)) + if [ $((WAITED % 60)) -eq 0 ]; then + echo " still waiting for ev-node after ${WAITED}s..." 
+ fi +done +echo "ev-node reachable after ${WAITED}s; starting txsim run" + +tmux kill-session -t txsim 2>/dev/null || true +tmux new-session -d -s txsim "evnode-txsim \ + --target $TARGET \ + --duration $DURATION \ + --concurrency $CONCURRENCY \ + --tx-size $TX_SIZE \ + 2>&1 | tee -a /root/txsim.log" + +echo "txsim started in tmux session 'txsim' — attach with: tmux attach -t txsim" +echo "Final summary lands at /root/txsim.log; grep TXSIM: for the machine-parseable line" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +// writeEncoderInitScript creates a minimal init script for encoder instances. +// Encoders only need the fibre-txsim binary, celestia-appd (for escrow deposits), +// a keyring, and genesis. +func writeEncoderInitScript(path string) error { + script := `#!/bin/bash +set -euo pipefail + +CELES_HOME="$HOME/.celestia-app" + +export DEBIAN_FRONTEND=noninteractive +apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" +apt-get install curl jq chrony --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" + +systemctl enable chrony +systemctl start chrony + +# TCP BBR +modprobe tcp_bbr || true +sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Install binaries +cp encoder-payload/build/celestia-appd /bin/celestia-appd +cp encoder-payload/build/fibre-txsim /bin/fibre-txsim + +source encoder-payload/vars.sh + +# Determine this encoder's directory from hostname (e.g. 
"encoder-0") +hostname=$(hostname) +parsed_hostname=$(echo "$hostname" | awk -F'-' '{print $1 "-" $2}') + +# Set up celestia-app home with keyring + genesis +rm -rf "$CELES_HOME" +mkdir -p "$CELES_HOME/config" +cp encoder-payload/genesis.json "$CELES_HOME/config/genesis.json" +cp -r "encoder-payload/$parsed_hostname/keyring-test" "$CELES_HOME/" + +echo "Encoder $parsed_hostname initialized" +` + return os.WriteFile(path, []byte(script), 0o755) +} + +func writeAWSEnv(varsPath string, cfg Config) error { + f, err := os.OpenFile(varsPath, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, + 0o755, + ) + if err != nil { + return fmt.Errorf("failed to open vars.sh for append: %w", err) + } + defer f.Close() + + exports := []string{ + fmt.Sprintf("export AWS_DEFAULT_REGION=%q\n", cfg.S3Config.Region), + fmt.Sprintf("export AWS_ACCESS_KEY_ID=%q\n", cfg.S3Config.AccessKeyID), + fmt.Sprintf("export AWS_SECRET_ACCESS_KEY=%q\n", cfg.S3Config.SecretAccessKey), + fmt.Sprintf("export AWS_S3_BUCKET=%q\n", cfg.S3Config.BucketName), + fmt.Sprintf("export AWS_S3_ENDPOINT=%q\n", cfg.S3Config.Endpoint), + fmt.Sprintf("export CHAIN_ID=%q\n", cfg.ChainID), + // celestia-node refuses any --p2p.network value that's not in + // its known networksList unless CELESTIA_CUSTOM registers a + // custom one. Format: :: + // — only netID is required. We use the chain id so bridge + + // evnode-fibre's Network identifier matches the consensus + // chain id and celestia-node's "wrong network in core.ip" + // validation passes. 
+ fmt.Sprintf("export CELESTIA_CUSTOM=%q\n", cfg.ChainID), + } + + for _, line := range exports { + if _, err := f.WriteString(line); err != nil { + return fmt.Errorf("failed to append to vars.sh: %w", err) + } + } + + return nil +} diff --git a/tools/talis/go.mod b/tools/talis/go.mod new file mode 100644 index 0000000000..4478fce32e --- /dev/null +++ b/tools/talis/go.mod @@ -0,0 +1,289 @@ +module github.com/evstack/ev-node/tools/talis + +go 1.26.1 + +// Replace directives mirror celestia-app/feat/fibre-payments at +// fe8cb867. They are required because celestia-app's own go.mod uses +// celestia-forked SDK / cometbft / IBC modules, and Go's module system +// only honors `replace` directives in the main module — so any module +// that imports celestia-app must repeat them here. +replace ( + cosmossdk.io/api => github.com/celestiaorg/cosmos-sdk/api v0.7.6 + cosmossdk.io/log => github.com/celestiaorg/cosmos-sdk/log v1.3.0 + cosmossdk.io/store => github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1 + cosmossdk.io/x/tx => github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9 + cosmossdk.io/x/upgrade => github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 + github.com/cometbft/cometbft => github.com/celestiaorg/celestia-core v0.40.2 + github.com/cosmos/cosmos-sdk => github.com/celestiaorg/cosmos-sdk v0.52.3 + github.com/cosmos/ibc-go/v8 => github.com/celestiaorg/ibc-go/v8 v8.7.2 + github.com/cosmos/ledger-cosmos-go => github.com/cosmos/ledger-cosmos-go v0.16.0 + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.55.0-tm-v0.34.35 +) + +require ( + cloud.google.com/go/compute v1.60.0 + cosmossdk.io/math v1.5.3 + github.com/aws/aws-sdk-go-v2 v1.41.6 + github.com/aws/aws-sdk-go-v2/config v1.32.14 + github.com/aws/aws-sdk-go-v2/credentials v1.19.14 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1 
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 + github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e + github.com/celestiaorg/go-square/v4 v4.0.0-rc4 + github.com/cometbft/cometbft v1.0.1 + github.com/cosmos/cosmos-sdk v0.50.13 + github.com/digitalocean/godo v1.186.0 + github.com/joho/godotenv v1.5.1 + github.com/spf13/cobra v1.10.2 + github.com/spf13/viper v1.21.0 + github.com/stretchr/testify v1.11.1 + golang.org/x/oauth2 v0.36.0 + google.golang.org/api v0.276.0 +) + +require ( + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect + cloud.google.com/go/auth v0.20.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/storage v1.61.3 // indirect + cosmossdk.io/api v1.0.0 // indirect + cosmossdk.io/client/v2 v2.0.0-beta.8 // indirect + cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core v1.1.0 // indirect + cosmossdk.io/depinject v1.2.1 // indirect + cosmossdk.io/errors v1.0.2 // indirect + cosmossdk.io/log v1.6.0 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/circuit v0.1.1 // indirect + cosmossdk.io/x/evidence v0.1.1 // indirect + cosmossdk.io/x/feegrant v0.1.1 // indirect + cosmossdk.io/x/tx v0.13.8 // indirect + cosmossdk.io/x/upgrade v0.1.4 // indirect + filippo.io/edwards25519 v1.1.1 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect + github.com/RaduBerinde/axisds v0.1.0 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 // indirect + github.com/aws/smithy-go v1.25.0 // indirect + github.com/bcp-innovations/hyperlane-cosmos v1.1.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/celestiaorg/go-square/v2 v2.3.3 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + 
github.com/celestiaorg/nmt v0.24.3 // indirect + github.com/celestiaorg/rsmt2d v0.15.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.12.0 // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect + github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/pebble/v2 v2.1.4 // indirect + github.com/cockroachdb/redact v1.1.6 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb // indirect + github.com/cometbft/cometbft-db v1.0.4 // indirect + github.com/consensys/gnark v0.14.0 // indirect + github.com/consensys/gnark-crypto v0.19.2 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-db v1.1.3 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogogateway v1.2.0 // indirect + github.com/cosmos/gogoproto v1.7.2 // indirect + github.com/cosmos/iavl v1.2.8 // indirect + github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 // indirect + github.com/cosmos/ibc-go/modules/capability v1.0.1 // indirect + github.com/cosmos/ibc-go/v8 v8.7.0 // indirect + github.com/cosmos/ics23/go v0.11.0 // indirect + github.com/cosmos/ledger-cosmos-go v0.15.0 // indirect + github.com/danieljoos/wincred v1.2.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + 
github.com/desertbit/timer v1.0.1 // indirect + github.com/dgraph-io/badger/v4 v4.5.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect + github.com/ethereum/go-ethereum v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/getsentry/sentry-go v0.42.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.4 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/flatbuffers v25.1.24+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // 
indirect + github.com/googleapis/gax-go/v2 v2.21.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grafana/otel-profiling-go v0.5.1 // indirect + github.com/grafana/pyroscope-go v1.2.8 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.8.6 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-version v1.8.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/huandu/skiplist v1.2.1 // indirect + github.com/iancoleman/orderedmap v0.3.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/klauspost/compress v1.18.5 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // 
indirect + github.com/lib/pq v1.12.3 // indirect + github.com/linxGnu/grocksdb v1.9.8 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/minio/highwayhash v1.0.4 // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.3.0 // indirect + github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ronanh/intcomp v1.1.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/rs/zerolog v1.35.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sasha-s/go-deadlock v0.3.9 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // 
indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/zondax/golem v0.27.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v1.0.1 // indirect + go.etcd.io/bbolt v1.4.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 // indirect + go.opentelemetry.io/otel/metric v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect + go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/term v0.42.0 // indirect + golang.org/x/text v0.36.0 // indirect + golang.org/x/time v0.15.0 // indirect + google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d // indirect + google.golang.org/grpc v1.80.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + 
gotest.tools/v3 v3.5.2 // indirect + nhooyr.io/websocket v1.8.17 // indirect + pgregory.net/rapid v1.2.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/tools/talis/go.sum b/tools/talis/go.sum new file mode 100644 index 0000000000..27ab17b16b --- /dev/null +++ b/tools/talis/go.sum @@ -0,0 +1,1248 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.20.0 h1:kXTssoVb4azsVDoUiF8KvxAqrsQcQtB53DcSgta74CA= +cloud.google.com/go/auth v0.20.0/go.mod h1:942/yi/itH1SsmpyrbnTMDgGfdy2BUqIKyd0cyYLc5Q= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.60.0 h1:CqGt23ysz990ZZe1vq/9aDPKKnmwM6kcC7Y1Q05H2kI= +cloud.google.com/go/compute v1.60.0/go.mod h1:Xm6PbsLgBpAg4va77ljbBdpMjzuU+uPp5Ze2dnZq7lw= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= +cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning 
v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg= +cloud.google.com/go/storage v1.61.3/go.mod h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= +cosmossdk.io/client/v2 v2.0.0-beta.8 h1:RXMJdA4V9H1H3/3BfMD6dAW3lF8W9DpNPPYnKD+ArxY= +cosmossdk.io/client/v2 v2.0.0-beta.8/go.mod h1:x+E2eji+ToMtUIqKzoJ5mJIhat+Zak47xZ8jOYjJQBA= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= +cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v1.1.0 h1:iJ7j2DjNsFzg4/z4ImNQYzy2D4LfMCsaQ8Lrz1KCmxk= +cosmossdk.io/core v1.1.0/go.mod h1:qGmJxBFHobvG1k4bROQnueslotBU5MIKZLC57xVBYYI= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= +cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= +cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= +cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= +cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= +cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= +cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= +cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= +cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= +cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= 
+cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU= +github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI= +github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= +github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap 
v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= +github.com/adlio/schema v1.4.0 h1:dekxG6P0my/bPvlyWzMULelR2Xej8RGErlnJcoY5ddw= +github.com/adlio/schema v1.4.0/go.mod h1:3/ojUldWBCWp4e+6VN9ets6unG5WdqbjF7vyzM0zTVQ= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.41.6 h1:1AX0AthnBQzMx1vbmir3Y4WsnJgiydmnJjiLu+LvXOg= +github.com/aws/aws-sdk-go-v2 v1.41.6/go.mod h1:dy0UzBIfwSeot4grGvY1AqFWN5zgziMmWGzysDnHFcQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9 h1:adBsCIIpLbLmYnkQU+nAChU5yhVTvu5PerROm+/Kq2A= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.9/go.mod h1:uOYhgfgThm/ZyAuJGNQ5YgNyOlYfqnGpTHXvk3cpykg= +github.com/aws/aws-sdk-go-v2/config v1.32.14 h1:opVIRo/ZbbI8OIqSOKmpFaY7IwfFUOCCXBsUpJOwDdI= +github.com/aws/aws-sdk-go-v2/config v1.32.14/go.mod h1:U4/V0uKxh0Tl5sxmCBZ3AecYny4UNlVmObYjKuuaiOo= +github.com/aws/aws-sdk-go-v2/credentials v1.19.14 h1:n+UcGWAIZHkXzYt87uMFBv/l8THYELoX6gVcUvgl6fI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.14/go.mod h1:cJKuyWB59Mqi0jM3nFYQRmnHVQIcgoxjEMAbLkpr62w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 h1:1hWFp+52Vq8Fevy/KUhbW/1MEApMz7uitCF/PQXRJpk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1/go.mod h1:sIec8j802/rCkCKgZV678HFR0s7lhQUYXT77tIvlaa4= 
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22 h1:GmLa5Kw1ESqtFpXsx5MmC84QWa/ZrLZvlJGa2y+4kcQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.22/go.mod h1:6sW9iWm9DK9YRpRGga/qzrzNLgKpT2cIxb7Vo2eNOp0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22 h1:dY4kWZiSaXIzxnKlj17nHnBcXXBfac6UlsAx2qL6XrU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.22/go.mod h1:KIpEUx0JuRZLO7U6cbV204cWAEco2iC3l061IxlwLtI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23 h1:FPXsW9+gMuIeKmz7j6ENWcWtBGTe1kH8r9thNt5Uxx4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.23/go.mod h1:7J8iGMdRKk6lw2C+cMIphgAnT8uTwBwNOsGkyOCm80U= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1 h1:9nfacm+uWgbdPaOplvJjxN50qgthexb7GOR/97ygc5o= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.297.1/go.mod h1:E1pnYwWFZ8N3REmeN9Fe/Zipbpps4HJj8DQGNnLUMYc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8 h1:HtOTYcbVcGABLOVuPYaIihj6IlkqubBwFj10K5fxRek= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.8/go.mod h1:VsK9abqQeGlzPgUr+isNWzPlK2vKe9INMLWnY65f5Xs= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14 h1:xnvDEnw+pnj5mctWiYuFbigrEzSm35x7k4KS/ZkCANg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.14/go.mod h1:yS5rNogD8e0Wu9+l3MUwr6eENBzEeGejvINpN5PAYfY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22 h1:PUmZeJU6Y1Lbvt9WFuJ0ugUK2xn6hIWUBBbKuOWF30s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.22/go.mod h1:nO6egFBoAaoXze24a2C0NjQCvdpk8OueRoYimvEB9jo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22 h1:SE+aQ4DEqG53RRCAIHlCf//B2ycxGH7jFkpnAh/kKPM= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.22/go.mod 
h1:ES3ynECd7fYeJIL6+oax+uIEljmfps0S70BaQzbMd/o= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1 h1:kU/eBN5+MWNo/LcbNa4hWDdN76hdcd7hocU5kvu7IsU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.99.1/go.mod h1:Fw9aqhJicIVee1VytBBjH+l+5ov6/PhbtIK/u3rt/ls= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 h1:lFd1+ZSEYJZYvv9d6kXzhkZu07si3f+GQ1AaYwa2LUM= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.15/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 h1:dzztQ1YmfPrxdrOiuZRMF6fuOwWlWpD2StNLTceKpys= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw= +github.com/aws/smithy-go v1.25.0 h1:Sz/XJ64rwuiKtB6j98nDIPyYrV1nVNJ4YU74gttcl5U= +github.com/aws/smithy-go v1.25.0/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0 h1:WXt+WrKv2DG/xVIkLvggDRbi/2law104Vj6AWZGxHNw= +github.com/bcp-innovations/hyperlane-cosmos v1.1.0/go.mod h1:NP59yKAk2qFaT7+FSCh7kkoKKLlTxXNdIlxMstAJ5no= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d 
h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM= +github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= +github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= +github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e h1:0UJZ/CA2iPKdXP/tAK1qG35NPVnuXl7iwBDLOpbt2As= +github.com/celestiaorg/celestia-app/v9 v9.0.0-20260427135040-fe8cb867259e/go.mod h1:HKqFxEeuWopDU87dCOkLktn6P4N+wEeVm1FdSEESvSQ= +github.com/celestiaorg/celestia-core v0.40.2 h1:+8D3anWx0mn0Wyp/Hahml/1ZiyDc5yGbIR/k4iFtqms= +github.com/celestiaorg/celestia-core v0.40.2/go.mod h1:ZCrmRE1UQzgZfho4Og6tAHtH1KY6s8Jpri5+EKobV5c= +github.com/celestiaorg/cosmos-sdk v0.52.3 h1:YPMFCycTw77P7tn+HQHTmmdBwXWNMDOrZ6/xVPK9nvM= 
+github.com/celestiaorg/cosmos-sdk v0.52.3/go.mod h1:2N4NRio08+WQsB7hsKo/ELXCQSWl78GiYdd9M1H6MpQ= +github.com/celestiaorg/cosmos-sdk/api v0.7.6 h1:81in9Zk+noz0ko+hZFSSK8L1aawFN8/CmdcQAUhbiUU= +github.com/celestiaorg/cosmos-sdk/api v0.7.6/go.mod h1:1BgQSufu6ZQkst3YBIHDCo/TPUrhfU4fV7tOI0ftql8= +github.com/celestiaorg/cosmos-sdk/log v1.3.0 h1:DfckA2UihWckeKHBQU3UXkF2G/qEmsPxd3LtGYB9HeM= +github.com/celestiaorg/cosmos-sdk/log v1.3.0/go.mod h1:lQTBplaW3HQLKQdPaQq+ElW6zASAoo9r3bJ7pOr8SWo= +github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1 h1:lEP9DjBMA5frZy/B1IYhAdbJrEwutwGQ+EiTOs4Lm8M= +github.com/celestiaorg/cosmos-sdk/store v1.1.3-celestia.1/go.mod h1:7+G078fe9GK42pXdYGncWm820tEJkzk+jc6K333Q7aI= +github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9 h1:YELTe9/1YksoqSd+Hm1uDZ6auHFNhyJrk5jvli0lbT4= +github.com/celestiaorg/cosmos-sdk/x/tx v0.13.9/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0 h1:GyDYfK8dLETlUI7F+w+3QYQgAszUegMXgB6cTbDm7CA= +github.com/celestiaorg/cosmos-sdk/x/upgrade v0.2.0/go.mod h1:T4K9O18zQNKNpt4YvTL3lcUt4aKOEU05ZIFWVdQi3Ak= +github.com/celestiaorg/go-square/v2 v2.3.3 h1:vhu6Lt39km19Q/Jk4nS3r2cuWJq6jFg+/1+iG8YGftY= +github.com/celestiaorg/go-square/v2 v2.3.3/go.mod h1:vY5RRv+qRmEVjPF6dAdr0dyLwKmTTDHHffENPQw8pUA= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4 h1:bh7rney5lLq4Z9OpaSg9ckY9bt6BZUW0VYnFOi1RPwQ= +github.com/celestiaorg/go-square/v4 v4.0.0-rc4/go.mod h1:7Vc4H3u3gvcfLFp84EqyMVT/9r0ZGUgZP4aYMOYXVsw= +github.com/celestiaorg/ibc-go/v8 v8.7.2 h1:AWae851fdX7pJWlGnUBKlKJzpr4c2t5m4TLs6vDfmAY= +github.com/celestiaorg/ibc-go/v8 v8.7.2/go.mod h1:E3WTax+cfyDIehNRpwEI96/0E8GBtU1g9XWr18qUGZ8= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= 
+github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.24.3 h1:ylQnRlXkVoTtq36CxtCyXYZX4JISBsHgKlAAUAnf7ig= +github.com/celestiaorg/nmt v0.24.3/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= +github.com/celestiaorg/rsmt2d v0.15.2 h1:wHqNqaBboSX5e8Czm4FnBnys4RPp5gSNm4CAcsXAyTU= +github.com/celestiaorg/rsmt2d v0.15.2/go.mod h1:1NyWG9hj7veHbLmpQUKg+77teLuVgq0kpv3FS9nEtL4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod 
h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk= +github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8= +github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= +github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss 
v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb h1:3bCgBvB8PbJVMX1ouCcSIxvsqKPYM7gs72o0zC76n9g= +github.com/cockroachdb/tokenbucket v0.0.0-20250429170803-42689b6311bb/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cometbft/cometbft-db v1.0.4 h1:cezb8yx/ZWcF124wqUtAFjAuDksS1y1yXedvtprUFxs= +github.com/cometbft/cometbft-db v1.0.4/go.mod h1:M+BtHAGU2XLrpUxo3Nn1nOCcnVCiLM9yx5OuT0u5SCA= +github.com/consensys/gnark v0.14.0 h1:RG+8WxRanFSFBSlmCDRJnYMYYKpH3Ncs5SMzg24B5HQ= +github.com/consensys/gnark v0.14.0/go.mod h1:1IBpDPB/Rdyh55bQRR4b0z1WvfHQN1e0020jCvKP2Gk= +github.com/consensys/gnark-crypto v0.19.2 h1:qrEAIXq3T4egxqiliFFoNrepkIWVEeIYwt3UL0fvS80= +github.com/consensys/gnark-crypto v0.19.2/go.mod h1:rT23F0XSZqE0mUA0+pRtnL56IbPxs6gp4CeRsBk4XS0= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto 
v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gogoproto v1.7.2 h1:5G25McIraOC0mRFv9TVO139Uh3OklV2hczr13KKVHCA= +github.com/cosmos/gogoproto v1.7.2/go.mod h1:8S7w53P1Y1cHwND64o0BnArT6RmdgIvsBuco6uTllsk= +github.com/cosmos/iavl v1.2.8 h1:55F96BGUJ7KT7h+Ky/cEqS+pEvhFqsU4O8Th3F0N1js= +github.com/cosmos/iavl v1.2.8/go.mod h1:FRHN4tO+6crf0p2zsqye+nAbsMgiwdkxpWm18DyP6+Y= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0 h1:rM+S14DFiqmu6Rc3PuhvWqwywPsnt/CbIslSnBftPFs= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v8 v8.2.0/go.mod h1:O5H9Ic3Pe6cmJn1eqlj5N48sLb8WQ1VWmDP4/11g/4E= +github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= +github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= +github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= +github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= +github.com/cosmos/ledger-cosmos-go v0.16.0 h1:YKlWPG9NnGZIEUb2bEfZ6zhON1CHlNTg0QKRRGcNEd0= +github.com/cosmos/ledger-cosmos-go v0.16.0/go.mod h1:WrM2xEa8koYoH2DgeIuZXNarF7FGuZl3mrIOnp3Dp0o= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= 
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= +github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= +github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm 
v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/godo v1.186.0 h1:aEYwSumR47vD1tX5mdPdznHrR72DBfHcmh0v9MxCwCw= +github.com/digitalocean/godo v1.186.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= +github.com/ethereum/go-ethereum v1.17.0 h1:2D+1Fe23CwZ5tQoAS5DfwKFNI1HGcTwi65/kRlAVxes= +github.com/ethereum/go-ethereum v1.17.0/go.mod h1:2W3msvdosS/MCWytpqTcqgFiRYbTH59FxDJzqah120o= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/getsentry/sentry-go v0.42.0 h1:eeFMACuZTbUQf90RE8dE4tXeSe4CZyfvR1MBL7RLEt8= +github.com/getsentry/sentry-go v0.42.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= +github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.1 h1:4hvbpePJKnIzH1B+8OR/JPbTx37NktoI9LE2QZBBkvE= +github.com/go-logfmt/logfmt v0.6.1/go.mod h1:EV2pOAQoZaT1ZXZbqDl5hrymndi4SY9ED9/z6CO0XAk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod 
h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= 
+github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o= +github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= 
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.21.0 h1:h45NjjzEO3faG9Lg/cFrBh2PgegVVgzqKzuZl/wMbiI= +github.com/googleapis/gax-go/v2 v2.21.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux 
v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= +github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/pyroscope-go v1.2.8 h1:UvCwIhlx9DeV7F6TW/z8q1Mi4PIm3vuUJ2ZlCEvmA4M= +github.com/grafana/pyroscope-go v1.2.8/go.mod h1:SSi59eQ1/zmKoY/BKwa5rSFsJaq+242Bcrr4wPix1g8= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= +github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway 
v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72 h1:vTCWu1wbdYo7PEZFem/rlr01+Un+wwVmI7wiegFdRLk= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.72/go.mod h1:Vn+BBgKQHVQYdVQ4NZDICE1Brb+JfaONyDHr3q07oQc= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.8.6 h1:9sQboWULaydVphxc4S64oAI4YqpuCk7nPmvbk131ebY= +github.com/hashicorp/go-getter v1.8.6/go.mod h1:nVH12eOV2P58dIiL3rsU6Fh3wLeJEKBOJzhMmzlSWoo= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod 
h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 
h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= +github.com/iancoleman/strcase v0.3.0 
h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 h1:B+aWVgAx+GlFLhtYjIaF0uGjU3rzpl99Wf9wZWt+Mq8= +github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2/go.mod h1:CH/cwcr21pPWH+9GtK/PFaa4OGTv4CtfkCKro6GpbRE= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a h1:aP94idRf0yhG07gBSIyW3sy/cd+XNLWnghSp11y0oIc= +github.com/klauspost/reedsolomon v1.13.4-0.20260420101718-f7e5efe6123a/go.mod h1:yjqqjgMTQkBUHSG97/rm4zipffCNbCiZcB3kTqr++sQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.12.3 h1:tTWxr2YLKwIvK90ZXEw8GP7UFHtcbTtty8zsI+YjrfQ= +github.com/lib/pq v1.12.3/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= +github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/highwayhash v1.0.4 h1:asJizugGgchQod2ja9NJlGOWq4s7KsAWr5XUc9Clgl4= +github.com/minio/highwayhash v1.0.4/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= +github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/olekukonko/tablewriter 
v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= +github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod 
h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= +github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE= +github.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4 
v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= 
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal 
v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/ronanh/intcomp v1.1.1 h1:+1bGV/wEBiHI0FvzS7RHgzqOpfbBJzLIxkqMJ9e6yxY= +github.com/ronanh/intcomp v1.1.1/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.35.0 h1:VD0ykx7HMiMJytqINBsKcbLS+BJ4WYjz+05us+LRTdI= +github.com/rs/zerolog v1.35.0/go.mod h1:EjML9kdfa/RMA7h/6z6pYmq1ykOuA8/mjWaEvGI+jcw= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.3.9 h1:fiaT9rB7g5sr5ddNZvlwheclN9IP86eFW9WgqlEQV+w= +github.com/sasha-s/go-deadlock v0.3.9/go.mod h1:KuZj51ZFmx42q/mPaYbRk0P1xcwe697zsJKE03vD4/Y= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod 
h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod 
h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zondax/golem v0.27.0 h1:IbBjGIXF3SoGOZHsILJvIM/F/ylwJzMcHAcggiqniPw= +github.com/zondax/golem v0.27.0/go.mod h1:AmorCgJPt00L8xN1VrMBe13PSifoZksnQ1Ge906bu4A= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v1.0.1 h1:Ks/2tz/dOF+dbRynfZ0dEhcdL1lqw43Sa0zMXHpQ3aQ= +github.com/zondax/ledger-go v1.0.1/go.mod h1:j7IgMY39f30apthJYMd1YsHZRqdyu4KbVmUp0nU78X0= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod 
h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0 h1:0Qx7VGBacMm9ZENQ7TnNObTYI4ShC+lHI16seduaxZo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.68.0/go.mod h1:Sje3i3MjSPKTSPvVWCaL8ugBzJwik3u4smCjUeuupqg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 h1:OyrsyzuttWTSur2qN/Lm0m2a8yqyIjUVBZcxFPuXq2o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0/go.mod h1:C2NGBr+kAB4bk3xtMXfZ94gqFDtg/GkI7e9zqGh5Beg= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= 
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= 
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.36.0 
h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.276.0 h1:nVArUtfLEihtW+b0DdcqRGK1xoEm2+ltAihyztq7MKY= +google.golang.org/api v0.276.0/go.mod h1:Fnag/EWUPIcJXuIkP1pjoTgS5vdxlk3eeemL7Do6bvw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= +google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d h1:wT2n40TBqFY6wiwazVK9/iTWbsQrgk5ZfCSVFLO9LQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260406210006-6f92a3bedf2d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= +google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod 
h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/tools/talis/google_cloud.go b/tools/talis/google_cloud.go new file mode 100644 index 0000000000..943089b2fa --- /dev/null +++ b/tools/talis/google_cloud.go @@ -0,0 +1,825 @@ +package main + +import ( + "context" + "errors" + "fmt" + "log" + "math/rand" + "os" + "strings" + "sync" + "time" + + compute "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" + "google.golang.org/api/option" +) + +const ( + GCDefaultValidatorMachineType = "c3d-highcpu-16" + GCDefaultEncoderMachineType = "c3d-highcpu-8" + GCDefaultBridgeMachineType = "c3d-highcpu-8" + GCDefaultEvnodeMachineType = "c3d-highcpu-8" + GCDefaultLoadgenMachineType = "c3d-highcpu-8" + GCDefaultObservabilityMachineType = "e2-medium" + GCDefaultImage = 
"projects/ubuntu-os-cloud/global/images/family/ubuntu-2404-lts-amd64" + GCDefaultDiskSizeGB = 400 +) + +var ( + protoTCP = "tcp" + protoUDP = "udp" + protoICMP = "icmp" + dirIngress = computepb.Firewall_INGRESS.String() + boolTrue = true + externalNAT = "External NAT" + natType = computepb.AccessConfig_ONE_TO_ONE_NAT.String() + sshKeysLabel = "ssh-keys" + diskSizeGB = int64(GCDefaultDiskSizeGB) + + GCRegions = []string{ + "us-central1", "us-east1", "us-east4", "asia-southeast1", "europe-west1", "asia-east1", + } + GCZones = map[string][]string{ + "us-central1": {"us-central1-a", "us-central1-b", "us-central1-c"}, + "us-east1": {"us-east1-b", "us-east1-c", "us-east1-d"}, + "us-east4": {"us-east4-a", "us-east4-b", "us-east4-c"}, + "asia-southeast1": {"asia-southeast1-a", "asia-southeast1-b", "asia-southeast1-c"}, + "europe-west1": {"europe-west1-b", "europe-west1-c", "europe-west1-d"}, + "asia-east1": {"asia-east1-a", "asia-east1-b", "asia-east1-c"}, + } +) + +type GCClient struct { + ClientInfo + project string +} + +func NewGCClient(cfg Config) (*GCClient, error) { + if cfg.GoogleCloudProject == "" { + return nil, errors.New("google cloud project is required") + } + + sshKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read SSH public key at: %s %w", cfg.SSHPubKeyPath, err) + } + + return &GCClient{ + ClientInfo: ClientInfo{ + sshKey: sshKey, + cfg: cfg, + }, + project: cfg.GoogleCloudProject, + }, nil +} + +func (c *GCClient) Up(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) 
+ for _, v := range allInstances { + if v.Provider != GoogleCloud { + continue + } + + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomGCRegion() + } + + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to create") + } + + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + insts, err = CreateGCInstances(ctx, c.project, insts, string(c.sshKey), opts, workers) + if err != nil { + return fmt.Errorf("failed to create instances: %w", err) + } + + for _, inst := range insts { + cfg, err := c.cfg.UpdateInstance(inst.Name, inst.PublicIP, inst.PrivateIP) + if err != nil { + return fmt.Errorf("failed to update config with instance %s: %w", inst.Name, err) + } + c.cfg = cfg + } + + return nil +} + +func (c *GCClient) Down(ctx context.Context, workers int) error { + insts := make([]Instance, 0) + allInstances := append(append(append(append(append(c.cfg.Validators, c.cfg.Observability...), c.cfg.Encoders...), c.cfg.Bridges...), c.cfg.Evnodes...), c.cfg.Loadgens...) + for _, v := range allInstances { + if v.Provider != GoogleCloud { + continue + } + if v.Region == "" || v.Region == RandomRegion { + v.Region = RandomGCRegion() + } + insts = append(insts, v) + } + + if len(insts) == 0 { + return fmt.Errorf("no instances to destroy") + } + + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + _, err = DestroyGCInstances(ctx, c.project, insts, opts, workers) + return err +} + +func (c *GCClient) List(ctx context.Context) error { + opts, err := gcClientOptions(c.cfg) + if err != nil { + return fmt.Errorf("failed to create client options: %w", err) + } + + client, err := compute.NewInstancesRESTClient(ctx, opts...) 
+ if err != nil { + return fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + cnt := 0 + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: c.project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + publicIP := "" + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + publicIP = *ni.AccessConfigs[0].NatIP + } + } + + if cnt == 0 { + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "Name", "Status", "Zone", "Public IP", "Created") + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", "----", "------", "------", "---------", "-------") + } + + status := "UNKNOWN" + if instance.Status != nil { + status = *instance.Status + } + name := "" + if instance.Name != nil { + name = *instance.Name + } + created := "" + if instance.CreationTimestamp != nil { + created = *instance.CreationTimestamp + } + + fmt.Printf("%-30s %-10s %-15s %-15s %s\n", + name, + status, + zone, + publicIP, + created) + cnt++ + } + } + } + } + } + + fmt.Println("Total number of talis instances:", cnt) + return nil +} + +func (c *GCClient) GetConfig() Config { + return c.cfg +} + +func NewGoogleCloudValidator(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Validator) + i.Provider = GoogleCloud + i.Slug = GCDefaultValidatorMachineType + i.Region = region + return i +} + +func NewGoogleCloudEncoder(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Encoder) + i.Provider = GoogleCloud + i.Slug = GCDefaultEncoderMachineType + i.Region = region + return i +} + +func 
NewGoogleCloudBridge(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Bridge) + i.Provider = GoogleCloud + i.Slug = GCDefaultBridgeMachineType + i.Region = region + return i +} + +func NewGoogleCloudEvnode(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Evnode) + i.Provider = GoogleCloud + i.Slug = GCDefaultEvnodeMachineType + i.Region = region + return i +} + +func NewGoogleCloudLoadgen(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Loadgen) + i.Provider = GoogleCloud + i.Slug = GCDefaultLoadgenMachineType + i.Region = region + return i +} + +func NewGoogleCloudObservability(region string) Instance { + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + i := NewBaseInstance(Observability) + i.Provider = GoogleCloud + i.Slug = GCDefaultObservabilityMachineType + i.Region = region + return i +} + +func RandomGCRegion() string { + return GCRegions[rand.Intn(len(GCRegions))] +} + +func gcClientOptions(cfg Config) ([]option.ClientOption, error) { + var opts []option.ClientOption + if cfg.GoogleCloudKeyJSONPath != "" { + keyJSON, err := os.ReadFile(cfg.GoogleCloudKeyJSONPath) + if err != nil { + return nil, fmt.Errorf("failed to read Google Cloud key file at %s: %w", cfg.GoogleCloudKeyJSONPath, err) + } + opts = append(opts, option.WithAuthCredentialsJSON(option.ServiceAccount, keyJSON)) + } + return opts, nil +} + +func RandomGCZone(region string) string { + zones, ok := GCZones[region] + if !ok || len(zones) == 0 { + return region + "-a" + } + return zones[rand.Intn(len(zones))] +} + +func ensureGCFirewallRule(ctx context.Context, project string, opts []option.ClientOption) error { + client, err := compute.NewFirewallsRESTClient(ctx, opts...) 
+ if err != nil { + return fmt.Errorf("failed to create firewall client: %w", err) + } + defer client.Close() + + firewallName := "talis-allow-all-ports" + + // Check if firewall rule already exists + getReq := &computepb.GetFirewallRequest{ + Project: project, + Firewall: firewallName, + } + _, err = client.Get(ctx, getReq) + if err == nil { + // Firewall rule already exists + log.Println("Firewall rule", firewallName, "already exists") + return nil + } + + // Create firewall rule to allow all incoming traffic + log.Println("Creating firewall rule", firewallName, "to allow all incoming traffic") + + firewall := &computepb.Firewall{ + Name: &firewallName, + Allowed: []*computepb.Allowed{ + { + IPProtocol: &protoTCP, + Ports: []string{"0-65535"}, + }, + { + IPProtocol: &protoUDP, + Ports: []string{"0-65535"}, + }, + { + IPProtocol: &protoICMP, + }, + }, + Direction: &dirIngress, + SourceRanges: []string{"0.0.0.0/0"}, + TargetTags: []string{"talis-allow-all"}, + } + + insertReq := &computepb.InsertFirewallRequest{ + Project: project, + FirewallResource: firewall, + } + + op, err := client.Insert(ctx, insertReq) + if err != nil { + return fmt.Errorf("failed to insert firewall rule: %w", err) + } + + if err := op.Wait(ctx); err != nil { + return fmt.Errorf("failed to wait for firewall rule creation: %w", err) + } + + log.Println("Firewall rule", firewallName, "created successfully") + return nil +} + +func CreateGCInstances(ctx context.Context, project string, insts []Instance, sshKey string, opts []option.ClientOption, workers int) ([]Instance, error) { + total := len(insts) + + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + insts, existing, err := filterExistingGCInstances(ctx, project, insts, opts) + if err != nil { + return nil, err + } + + if len(existing) > 0 { + log.Println("Existing instances found, so they are not being created.") + for _, v := range existing { + log.Println("Skipping", v.Name, v.PublicIP, v.Tags) + } + } 
+ + // Ensure a firewall rule exists to allow all ports + if err := ensureGCFirewallRule(ctx, project, opts); err != nil { + return nil, fmt.Errorf("failed to ensure firewall rule: %w", err) + } + + results := make(chan result, total) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, v := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + + ctx, cancel := context.WithTimeout(ctx, 7*time.Minute) + defer cancel() + + start := time.Now() + log.Println("Creating instance", inst.Name, "in region", inst.Region, start.Format(time.RFC3339)) + + zone := RandomGCZone(inst.Region) + pubIP, privIP, err := createGCInstance(ctx, project, inst, zone, sshKey, opts) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("create %s: %w", inst.Name, err)} + return + } + + inst.PublicIP = pubIP + inst.PrivateIP = privIP + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(v) + } + + go func() { + wg.Wait() + close(results) + }() + + var created []Instance + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed after %v %v\n", res.inst.Name, res.timeRequired, res.err) + } else { + created = append(created, res.inst) + fmt.Printf("✅ %s is up (public=%s) in %v\n", + res.inst.Name, res.inst.PublicIP, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(created), total) + } + + return created, nil +} + +func createGCInstance(ctx context.Context, project string, inst Instance, zone string, sshKey string, opts []option.ClientOption) (string, string, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) 
if err != nil {
		return "", "", fmt.Errorf("failed to create compute client: %w", err)
	}
	defer client.Close()

	// GCE labels may not contain '-', so instance tags are normalized to '_'.
	labels := make(map[string]string)
	for _, tag := range inst.Tags {
		labels[strings.ReplaceAll(tag, "-", "_")] = "true"
	}

	// NOTE(review): many GCE Ubuntu images block direct root SSH by default;
	// confirm logging in as "root" with this key actually works end to end.
	username := "root"
	sshKeyMetadata := fmt.Sprintf("%s:%s", username, strings.TrimSpace(sshKey))

	machineType := fmt.Sprintf("zones/%s/machineTypes/%s", zone, inst.Slug)
	sourceImage := GCDefaultImage

	req := &computepb.InsertInstanceRequest{
		Project: project,
		Zone:    zone,
		InstanceResource: &computepb.Instance{
			Name:        &inst.Name,
			MachineType: &machineType,
			Labels:      labels,
			// The network tag ties the instance to the talis allow-all
			// firewall rule created by ensureGCFirewallRule.
			Tags: &computepb.Tags{
				Items: []string{"talis-allow-all"},
			},
			Disks: []*computepb.AttachedDisk{
				{
					Boot:       &boolTrue,
					AutoDelete: &boolTrue,
					InitializeParams: &computepb.AttachedDiskInitializeParams{
						SourceImage: &sourceImage,
						DiskSizeGb:  &diskSizeGB,
					},
				},
			},
			// One NIC with an ephemeral external (NAT) address.
			NetworkInterfaces: []*computepb.NetworkInterface{
				{
					AccessConfigs: []*computepb.AccessConfig{
						{
							Name: &externalNAT,
							Type: &natType,
						},
					},
				},
			},
			// Install the SSH public key via instance metadata.
			Metadata: &computepb.Metadata{
				Items: []*computepb.Items{
					{
						Key:   &sshKeysLabel,
						Value: &sshKeyMetadata,
					},
				},
			},
		},
	}

	op, err := client.Insert(ctx, req)
	if err != nil {
		return "", "", fmt.Errorf("failed to insert instance: %w", err)
	}

	if err := op.Wait(ctx); err != nil {
		return "", "", fmt.Errorf("failed to wait for instance creation: %w", err)
	}

	// The NAT IP can lag behind the create operation, so poll for it.
	pubIP, privIP, err := waitForGCNetworkIP(ctx, client, project, zone, inst.Name)
	if err != nil {
		return "", "", fmt.Errorf("failed to get IPs: %w", err)
	}

	return pubIP, privIP, nil
}

// waitForGCNetworkIP polls the named instance every 4s until both its public
// (NAT) and private IPs are populated, or ctx is cancelled.
func waitForGCNetworkIP(ctx context.Context, client *compute.InstancesClient, project, zone, name string) (string, string, error) {
	ticker := time.NewTicker(4 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return "", "", ctx.Err()
		case <-ticker.C:
			req :=
&computepb.GetInstanceRequest{ + Project: project, + Zone: zone, + Instance: name, + } + instance, err := client.Get(ctx, req) + if err != nil { + return "", "", err + } + + var pubIP, privIP string + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if ni.NetworkIP != nil { + privIP = *ni.NetworkIP + } + if len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + pubIP = *ni.AccessConfigs[0].NatIP + } + } + + if pubIP != "" && privIP != "" { + return pubIP, privIP, nil + } + } + } +} + +func filterExistingGCInstances(ctx context.Context, project string, insts []Instance, opts []option.ClientOption) ([]Instance, []Instance, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return nil, nil, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + existingTags := make(map[string]bool) + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + for label := range instance.Labels { + existingTags[strings.ReplaceAll(label, "_", "-")] = true + } + } + } + } + } + + var newInsts, existing []Instance + for _, inst := range insts { + experimentTag := GetExperimentTag(inst.Tags) + if experimentTag == "" || !existingTags[experimentTag] { + newInsts = append(newInsts, inst) + } else { + existing = append(existing, inst) + } + } + + return newInsts, existing, nil +} + +func DestroyGCInstances(ctx context.Context, project string, insts []Instance, opts []option.ClientOption, workers int) ([]Instance, error) { + return destroyGCInstancesInternal(ctx, project, insts, opts, workers) +} + +func findGCInstanceZone(ctx context.Context, project, instanceName, region string, opts []option.ClientOption) (string, error) { + client, err := 
compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return "", fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + zones := GCZones[region] + if len(zones) == 0 { + zones = []string{region + "-a", region + "-b", region + "-c"} + } + + for _, zone := range zones { + req := &computepb.GetInstanceRequest{ + Project: project, + Zone: zone, + Instance: instanceName, + } + _, err := client.Get(ctx, req) + if err == nil { + return zone, nil + } + } + + return "", fmt.Errorf("instance %s not found in any zone of region %s", instanceName, region) +} + +func deleteGCInstance(ctx context.Context, project, zone, name string, opts []option.ClientOption) error { + client, err := compute.NewInstancesRESTClient(ctx, opts...) + if err != nil { + return fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + req := &computepb.DeleteInstanceRequest{ + Project: project, + Zone: zone, + Instance: name, + } + + op, err := client.Delete(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete instance: %w", err) + } + + if err := op.Wait(ctx); err != nil { + return fmt.Errorf("failed to wait for deletion: %w", err) + } + + return nil +} + +func checkForRunningGCExperiments(ctx context.Context, project string, opts []option.ClientOption, experimentID, chainID string) (bool, error) { + if project == "" { + return false, nil + } + + client, err := compute.NewInstancesRESTClient(ctx, opts...) 
+ if err != nil { + return false, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + for label := range instance.Labels { + if hasGCExperimentLabel(label, experimentID, chainID) { + return true, nil + } + } + } + } + } + } + } + + return false, nil +} + +func hasGCExperimentLabel(label, experimentID, chainID string) bool { + if !strings.HasPrefix(label, "validator_") && !strings.HasPrefix(label, "bridge_") && !strings.HasPrefix(label, "light_") && !strings.HasPrefix(label, "encoder_") { + return false + } + experimentIDLabel := strings.ReplaceAll(experimentID, "-", "_") + chainIDLabel := strings.ReplaceAll(chainID, "-", "_") + return strings.Contains(label, experimentIDLabel) && strings.Contains(label, chainIDLabel) +} + +func destroyAllTalisGCInstances(ctx context.Context, project string, opts []option.ClientOption, workers int) ([]Instance, error) { + client, err := compute.NewInstancesRESTClient(ctx, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create compute client: %w", err) + } + defer client.Close() + + var talisInstances []Instance + for _, region := range GCRegions { + zones := GCZones[region] + for _, zone := range zones { + req := &computepb.ListInstancesRequest{ + Project: project, + Zone: zone, + } + it := client.List(ctx, req) + for { + instance, err := it.Next() + if err != nil { + break + } + if instance.Labels != nil { + if _, hasTalis := instance.Labels["talis"]; hasTalis { + publicIP := "" + if len(instance.NetworkInterfaces) > 0 { + ni := instance.NetworkInterfaces[0] + if len(ni.AccessConfigs) > 0 && ni.AccessConfigs[0].NatIP != nil { + publicIP = *ni.AccessConfigs[0].NatIP + } + } + name := "" + if instance.Name != nil { + name = *instance.Name + } + talisInstances = append(talisInstances, Instance{ + Name: name, + PublicIP: publicIP, + Region: region, + }) + } + } + } + } + } + + if len(talisInstances) == 0 { + log.Println("No talis instances found to destroy") + return nil, nil + } + + return destroyGCInstancesInternal(ctx, project, talisInstances, opts, workers) +} + +func destroyGCInstancesInternal(ctx context.Context, project string, insts []Instance, opts []option.ClientOption, workers int) ([]Instance, error) { + type result struct { + inst Instance + err error + timeRequired time.Duration + } + + results := make(chan result, len(insts)) + workerChan := make(chan struct{}, workers) + var wg sync.WaitGroup + wg.Add(len(insts)) + + for _, inst := range insts { + go func(inst Instance) { + workerChan <- struct{}{} + defer func() { + <-workerChan + wg.Done() + }() + start := time.Now() + + fmt.Println("⏳ Deleting instance", inst.Name, inst.PublicIP) + + delCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + zone, err := findGCInstanceZone(delCtx, project, inst.Name, inst.Region, opts) + if err != nil { + results <- result{inst: inst, err: fmt.Errorf("find zone for %s: %w", inst.Name, err)} + return + } + + 
if err := deleteGCInstance(delCtx, project, zone, inst.Name, opts); err != nil { + results <- result{inst: inst, err: fmt.Errorf("delete %s: %w", inst.Name, err)} + return + } + + results <- result{inst: inst, err: nil, timeRequired: time.Since(start)} + }(inst) + } + + go func() { + wg.Wait() + close(results) + }() + + var removed []Instance + var failed []result + for res := range results { + if res.err != nil { + fmt.Printf("❌ %s failed to delete after %v: %v\n", + res.inst.Name, res.timeRequired, res.err) + failed = append(failed, res) + } else { + removed = append(removed, res.inst) + fmt.Printf("✅ %s deleted (took %v)\n", res.inst.Name, res.timeRequired) + } + fmt.Printf("---- Progress: %d/%d\n", len(removed)+len(failed), len(insts)) + } + + return removed, nil +} diff --git a/tools/talis/init.go b/tools/talis/init.go new file mode 100644 index 0000000000..96b34a1d45 --- /dev/null +++ b/tools/talis/init.go @@ -0,0 +1,314 @@ +package main + +import ( + "fmt" + "io" + "log" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + + "github.com/celestiaorg/celestia-app/v9/app" + cmtconfig "github.com/cometbft/cometbft/config" + serverconfig "github.com/cosmos/cosmos-sdk/server/config" + "github.com/joho/godotenv" + "github.com/spf13/cobra" +) + +const ( + EnvVarSSHKeyName = "TALIS_SSH_KEY_NAME" + EnvVarPubSSHKeyPath = "TALIS_SSH_PUB_KEY_PATH" + EnvVarSSHKeyPath = "TALIS_SSH_KEY_PATH" + EnvVarDigitalOceanToken = "DIGITALOCEAN_TOKEN" + EnvVarGoogleCloudProject = "GOOGLE_CLOUD_PROJECT" + EnvVarGoogleCloudKeyJSONPath = "GOOGLE_CLOUD_KEY_JSON_PATH" + EnvVarAWSAccessKeyID = "AWS_ACCESS_KEY_ID" + EnvVarAWSSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + EnvVarAWSRegion = "AWS_DEFAULT_REGION" + EnvVarS3Bucket = "AWS_S3_BUCKET" + EnvVarS3Endpoint = "AWS_S3_ENDPOINT" + EnvVarChainID = "CHAIN_ID" + mebibyte = 1_048_576 +) + +func initCmd() *cobra.Command { + var ( + rootDir string + srcRoot string + chainID string + experiment string + SSHPubKeyPath string + SSHKeyName 
string + tables []string + withObservability bool + provider string + observabilityRegion string + observabilitySlug string + awsZone string + ) + + cmd := &cobra.Command{ + Use: "init", + Short: "Initialize the Talis network", + Long: "Initialize the Talis network with the provided configuration.", + RunE: func(cmd *cobra.Command, args []string) error { + // Set default provider + if provider == "" { + provider = "digitalocean" + } + + // Load .env if it exists otherwise ignore + envPath := filepath.Join(rootDir, ".env") + err := godotenv.Load(envPath) + + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load .env: %w", err) + } + + // Validate provider if .env was loaded + if err == nil { + fmt.Println("✅ Loaded environment variables from .env") + envProvider := os.Getenv("PROVIDER") + if envProvider != "" && envProvider != provider { + return fmt.Errorf("provider mismatch: .env has PROVIDER=%s but --provider=%s\nRegenerate with: talis init-env --provider %s", + envProvider, provider, provider) + } + + // Override SSH config from env vars if they exist + if envSSHKeyPath := os.Getenv(EnvVarSSHKeyPath); envSSHKeyPath != "" { + SSHPubKeyPath = envSSHKeyPath + } + if envSSHKeyName := os.Getenv(EnvVarSSHKeyName); envSSHKeyName != "" { + SSHKeyName = envSSHKeyName + } + } + + if err := initDirs(rootDir); err != nil { + return fmt.Errorf("failed to initialize directories: %w", err) + } + + if err := CopyTalisScripts(rootDir, srcRoot); err != nil { + return fmt.Errorf("failed to copy scripts: %w", err) + } + + // todo: use the number of validators, bridges, and lights to create the config + cfg := NewConfig(experiment, chainID). + WithSSHPubKeyPath(SSHPubKeyPath). + WithSSHKeyName(SSHKeyName) + + // If --with-observability is set, add a observability node and enable prometheus + enablePrometheus := false + if withObservability { + switch provider { + case "digitalocean": + cfg = cfg.WithDigitalOceanObservability(observabilityRegion). 
+ WithDigitalOceanToken(os.Getenv(EnvVarDigitalOceanToken)) + case "googlecloud": + cfg = cfg.WithGoogleCloudObservability(observabilityRegion). + WithGoogleCloudProject(os.Getenv(EnvVarGoogleCloudProject)). + WithGoogleCloudKeyJSONPath(os.Getenv(EnvVarGoogleCloudKeyJSONPath)) + case "aws": + cfg = cfg.WithAWSObservability(observabilityRegion). + WithAWSRegion(awsRegionFromEnv()). + WithAWSZone(resolveAWSZone(awsZone)) + default: + return fmt.Errorf("unknown provider %q (supported: digitalocean, googlecloud, aws)", provider) + } + enablePrometheus = true + + if observabilitySlug != "" && len(cfg.Observability) > 0 { + cfg.Observability[0].Slug = observabilitySlug + } + } else if provider == "aws" { + // Stamp AWSRegion / AWSZone so NewClient later routes to + // AWSClient even when the user doesn't want an obs node. + cfg = cfg.WithAWSRegion(awsRegionFromEnv()). + WithAWSZone(resolveAWSZone(awsZone)) + } + + if err := cfg.Save(rootDir); err != nil { + return fmt.Errorf("failed to save init config: %w", err) + } + + // write the default config files that will be copied to the payload + // for each validator unless otherwise overridden + consensusConfig := app.DefaultConsensusConfig() + consConfig := DefaultConfigProfile(consensusConfig, tables, enablePrometheus) + cmtconfig.WriteConfigFile(filepath.Join(rootDir, "config.toml"), consConfig) + + // the sdk requires a global template be set just to save a toml file without panicking + serverconfig.SetConfigTemplate(serverconfig.DefaultConfigTemplate) + + appconfig := app.DefaultAppConfig() + appconfig.GRPC.Enable = true + appconfig.GRPC.Address = "0.0.0.0:9091" + + // Enable app telemetry when observability is enabled + if enablePrometheus { + appconfig.Telemetry.Enabled = true + appconfig.Telemetry.PrometheusRetentionTime = 60 + // Expose /metrics on the API server for Prometheus scraping. 
+ appconfig.API.Enable = true + appconfig.API.Address = "tcp://0.0.0.0:1317" + } + + serverconfig.WriteConfigFile(filepath.Join(rootDir, "app.toml"), appconfig) + + return nil + }, + } + + homeDir, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get user home directory: %v", err) + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&srcRoot, "src-root", "r", homeDir, "directory which is a repo root or home directory for celestia app") + cmd.Flags().StringVarP(&chainID, "chainID", "c", "", "Chain ID (required)") + _ = cmd.MarkFlagRequired("chainID") + cmd.Flags().StringVarP(&experiment, "experiment", "e", "test", "the name of the experiment (required)") + _ = cmd.MarkFlagRequired("experiment") + cmd.Flags().StringArrayVarP(&tables, "tables", "t", []string{"consensus_round_state", "consensus_block", "mempool_tx"}, "the traces that will be collected") + cmd.Flags().BoolVar(&withObservability, "with-observability", false, "add a observability node and enable Prometheus on validators") + cmd.Flags().StringVarP(&provider, "provider", "p", "digitalocean", "provider for observability node when --with-observability is set (digitalocean, googlecloud, aws)") + cmd.Flags().StringVar(&observabilityRegion, "observability-region", "random", "region for the observability node — set to match your validator region to reduce scrape latency") + cmd.Flags().StringVar(&observabilitySlug, "observability-slug", "", "instance size for the observability node (default: provider's default — "+DODefaultObservabilitySlug+" for DigitalOcean, "+GCDefaultObservabilityMachineType+" for Google Cloud, "+AWSDefaultObservabilityInstanceType+" for AWS)") + cmd.Flags().StringVar(&awsZone, "aws-zone", "", "availability zone for AWS instances (default: "+AWSDefaultZone+"). 
All AWS instances share this AZ + a cluster placement group for free intra-AZ traffic and low latency.") + + defaultKeyPath := filepath.Join(homeDir, ".ssh", "id_ed25519.pub") + cmd.Flags().StringVarP(&SSHPubKeyPath, "ssh-pub-key-path", "s", defaultKeyPath, "path to the user's SSH public key") + + user, err := user.Current() + if err != nil { + log.Fatalf("failed to get current user: %v", err) + } + defaultKeyName := user.Username + cmd.Flags().StringVarP(&SSHKeyName, "ssh-key-name", "n", defaultKeyName, "name for the SSH key") + + return cmd +} + +func DefaultConfigProfile(cfg *cmtconfig.Config, tables []string, enablePrometheus bool) *cmtconfig.Config { + cfg.Instrumentation.TracingTables = strings.Join(tables, ",") + cfg.Instrumentation.TraceType = "local" + cfg.Instrumentation.Prometheus = enablePrometheus + cfg.Instrumentation.PrometheusListenAddr = ":26660" + cfg.P2P.SendRate = 100 * mebibyte + cfg.P2P.RecvRate = 110 * mebibyte + cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" + cfg.RPC.GRPCListenAddress = "tcp://0.0.0.0:9090" + return cfg +} + +func initDirs(rootDir string) error { + // 1) create the sub‑directories + for _, d := range []string{"payload", "data", "scripts"} { + dir := filepath.Join(rootDir, d) + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("failed to create %s: %w", dir, err) + } + } + + return nil +} + +// CopyTalisScripts copies the talis scripts directory into destDir. +// It checks multiple possible locations for the scripts. 
+func CopyTalisScripts(destDir string, root string) error { + candidates := []string{ + // here root can have different meanings + // repo root: + filepath.Join(root, "tools", "talis", "scripts"), + // root of all repos: + filepath.Join(root, "celestia-app", "tools", "talis", "scripts"), + // legacy root with src: + filepath.Join(root, "src", "celestia-app", "tools", "talis", "scripts"), + } + + var src string + for _, candidate := range candidates { + if fi, err := os.Stat(candidate); err == nil && fi.IsDir() { + src = candidate + break + } + } + + // Fallback to git clone if not found locally + if src == "" { + tmp, err := os.MkdirTemp("", "celestia-scripts-*") + if err != nil { + return fmt.Errorf("mktemp: %w", err) + } + defer os.RemoveAll(tmp) + + repo := "https://github.com/celestiaorg/celestia-app.git" + cmd := exec.Command("git", "clone", "--depth=1", repo, tmp) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("git clone failed: %w", err) + } + + src = filepath.Join(tmp, "tools", "talis", "scripts") + } + + // copy directory tree including subdirectories + return copyDir(src, filepath.Join(destDir, "scripts")) +} + +// copyDir recursively copies a directory tree, attempting to preserve permissions. +func copyDir(src string, dest string) error { + // walk through source + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + + target := filepath.Join(dest, rel) + + if info.IsDir() { + // create directory + if err := os.MkdirAll(target, 0o755); err != nil { + return err + } + return nil + } + + // it's a file; copy it + return copyFile(path, target, info.Mode()) + }) +} + +// copyFile copies a single file from src to dest, preserving permissions and creating parent directories if needed. 
+func copyFile(srcFile, destFile string, perm os.FileMode) error { + destDir := filepath.Dir(destFile) + if err := os.MkdirAll(destDir, 0o755); err != nil { + return fmt.Errorf("failed to create parent directory %s: %w", destDir, err) + } + + src, err := os.Open(srcFile) + if err != nil { + return fmt.Errorf("failed to open source file %s: %w", srcFile, err) + } + defer src.Close() + + dest, err := os.OpenFile(destFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, perm) + if err != nil { + return fmt.Errorf("failed to open destination file %s: %w", destFile, err) + } + defer dest.Close() + + if _, err = io.Copy(dest, src); err != nil { + return fmt.Errorf("failed to copy data: %w", err) + } + + return nil +} diff --git a/tools/talis/kpi_reproduction_steps.md b/tools/talis/kpi_reproduction_steps.md new file mode 100644 index 0000000000..ba96260b36 --- /dev/null +++ b/tools/talis/kpi_reproduction_steps.md @@ -0,0 +1,210 @@ +# Celestia KPI Reproduction Steps + +This document provides instructions for reproducing the core-app KPIs. These KPIs measure transaction submission performance and sync to tip duration. + +## Prerequisites + +1. **Verify block time configuration for 32MB/3sec blocks:** + + Modify `app_consts.go` and set `DelayedPrecommitTimeout = time.Millisecond * 2800` for 3s block time. + +2. **Install celestia-app and dependencies:** + + ```bash + # Build all necessary binaries (must be done after modifying DelayedPrecommitTimeout) + make build-talis-bins + + # Install talis + go install ./tools/talis/ + ``` + +3. **Set up cloud provider credentials:** + + Google Cloud is recommended for high-throughput tests. Ask the DevOps team for access to Celestia's Google Cloud fibreda workspace. + + ```bash + # Create a .env file + talis init-env --provider googlecloud + + # Fill in the .env file with your credentials: + GOOGLE_CLOUD_PROJECT="fibreda" + GOOGLE_CLOUD_KEY_JSON_PATH="/path/to/service-account-key.json" + ``` + +4. 
**SSH key is required for running experiments:** + + Create a new SSH key or use an existing one. For Google Cloud the SSH key is automatically added to instance metadata by talis. + + Configure these variables in `.env`: + + ```bash + TALIS_SSH_KEY_PATH=your-key-path + TALIS_SSH_KEY_NAME=your-key-name + ``` + +5. **S3 bucket for faster deployment (optional):** + + For faster deployments using S3 upload instead of direct payload upload, configure an S3 bucket: + + ```bash + AWS_ACCESS_KEY_ID=your-access-key + AWS_SECRET_ACCESS_KEY=your-secret-key + AWS_DEFAULT_REGION=fra1 + AWS_S3_ENDPOINT=https://fra1.digitaloceanspaces.com + AWS_S3_BUCKET=your-bucket-name + ``` + +## Talis Network Deployment + +1. **Initialize Talis Network** + + ```bash + # Initialize with observability for metrics collection + talis init -c kpi-test-chain -e tx-kpi --with-observability --provider googlecloud + + # Add validator nodes (50-100 validators recommended for realistic network) + talis add -t validator -c 50 --provider googlecloud + ``` + +2. **Deploy Network** + + ```bash + # Spin up cloud instances (specify SSH key if not using defaults) + talis up --provider googlecloud --workers 20 + + # Create genesis with appropriate square size + # Square size 256 allows for ~32MB blocks + talis genesis -s 256 -b ./build + + # Deploy the network (specify SSH key if needed) + # Note: For faster deployment, use S3 upload instead of direct payload upload (omit --direct-payload-upload): + talis deploy --workers 20 + + # After deployment completes, talis will output the Grafana access information: + # URL, credentials. + + # Wait for network to start and optionally confirm all validators are online + talis status + ``` + +## Transaction Submission KPIs + +**NOTE** Reset the network between KPI experiments for fresh state/accurate results. 
+ +```bash +talis reset +talis deploy --workers 20 +``` + +### KPI 1: 8MB/1sec (Single Submitter) + +**Target:** One latency monitor submitting 8MB blobs every second + +```bash +talis latency-monitor -i 1 -b 8000000 -z 8000000 -s 1000ms +``` + +**Expected Results:** + +- Success rate: >=99.9% +- Average user latency: 6-8 seconds +- No Evictions + +### KPI 2: Load Shedding (Two Submitters, 8MB/1sec each) + +**Target:** Two latency monitors submitting 8MB blobs every second (total 16MB/1sec) + +```bash +talis latency-monitor -i 2 -b 8000000 -z 8000000 -s 1000ms +``` + +**Expected Observations:** + +- Gas price increases under load +- Some broadcast failures due to full mempool +- Higher latency due to eviction timeouts +- Sequence mismatch errors from resubmission race conditions +- Network attempts load shedding by evicting low fee transactions + +### Test 3: Parallel Submission (Multiple Workers) + +**Target:** Single latency monitor with multiple parallel workers trying to fill up the throughput. 
 + +```bash +# example: 15 workers submitting 2-8MB txs every 100ms +talis latency-monitor --instances 1 -w 15 -b 8000000 -z 2000000 --submission-delay 100ms +``` + +**Expected Results:** + +- Consistent throughput >9MB/1sec +- Good mempool distribution + +### Test 4: No Eviction (Optimal Conditions) + +This can already be measured in the first experiment but if you have to re-run: + +```bash +talis latency-monitor -i 1 -b 8000000 -z 8000000 -s 1000ms +``` + +**Expected Results:** + +- Transactions included with zero evictions + +## Collect Metrics and Results + +### From Grafana + +At `http://<observability-node-ip>:3000`, as displayed during `talis deploy`: + +- Access Celestia Grafana dashboards displaying network data +- Access latency monitor dashboards displaying submission statistics and latency monitor logs + +## Cleanup + +```bash +# Destroy cloud instances +talis down --workers 20 +``` + +## Sync to Tip KPIs + +These KPIs measure how quickly a new node can sync to the network tip using state sync and block sync. + +**Target:** Total sync time <10 minutes (state sync + block sync) + +### Running Sync Tests + +#### Option 1: Local node (Mocha Testnet) + +This script runs multiple iterations and provides statistical analysis: + +```bash +# Single iteration +./scripts/mocha-measure-tip-sync.sh + +# Multiple iterations (20 iterations with 30s cooldown) +./scripts/mocha-measure-tip-sync.sh --iterations 20 --cooldown 30 +``` + +#### Option 2: Cloud Testing on DigitalOcean + +Use the `measure-tip-sync` tool which automates droplet creation, node setup, and sync measurement: + +1. **Install the tool** + + ```bash + go install ./tools/measure-tip-sync + ``` + +1. **Running Tests:** + +```bash +# Multiple iterations (20 iterations with 30s cooldown between each) +measure-tip-sync -k ~/.ssh/id_ed25519 -n 20 -c 30 +``` + +### Analyzing Sync Results + +The combined sync (state + block sync) must take less than 10 minutes. 
diff --git a/tools/talis/latency_monitor.go b/tools/talis/latency_monitor.go new file mode 100644 index 0000000000..13f964bf37 --- /dev/null +++ b/tools/talis/latency_monitor.go @@ -0,0 +1,382 @@ +package main + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/spf13/cobra" +) + +const ( + LatencyMonitorSessionName = "latency-monitor" +) + +// startLatencyMonitorCmd creates a cobra command for starting the latency monitor on remote instances. +func startLatencyMonitorCmd() *cobra.Command { + var ( + instances int + blobSize int + blobSizeMin int + submissionDelay string + namespace string + observabilityPort int + promtailConfig string + rootDir string + SSHKeyPath string + stop bool + workers int + ) + + cmd := &cobra.Command{ + Use: "latency-monitor", + Short: "Starts or stops the latency monitor on remote validators", + Long: "Connects to remote validators and starts/stops the latency monitor in a detached tmux session.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + if promtailConfig == "" { + promtailConfig = filepath.Join(rootDir, "observability", "promtail", "promtail-config.yml") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Only operate on the number of instances that were specified + insts := []Instance{} + for i, val := range cfg.Validators { + if i >= instances || i >= len(cfg.Validators) { + break + } + insts = append(insts, val) + } + + if stop { + fmt.Printf("Stopping latency monitor on %d instance(s)...\n", len(insts)) + return stopTmuxSession(insts, resolvedSSHKeyPath, LatencyMonitorSessionName, time.Minute*5) + } + + // Derive Loki URL 
from observability public IP + var lokiURL string + if len(cfg.Observability) > 0 { + if err := updateLatencyTargets(cfg, cfg.Observability[0], resolvedSSHKeyPath, insts); err != nil { + return err + } + + if cfg.Observability[0].PublicIP != "" { + lokiURL = fmt.Sprintf("http://%s:3100", cfg.Observability[0].PublicIP) + fmt.Printf("Using Loki URL from observability node: %s\n", lokiURL) + } + } + + latencyMonitorCmd := fmt.Sprintf( + "stdbuf -oL latency-monitor -k .celestia-app -a txsim -e localhost:9091 -b %d -z %d -d %s -n %s --observability-port %d -w %d 2>&1 | tee -a /root/latency-monitor-logs", + blobSize, + blobSizeMin, + submissionDelay, + namespace, + observabilityPort, + workers, + ) + + latencyMonitorScript := latencyMonitorCmd + if lokiURL != "" { + script, err := promtailScript(rootDir, promtailConfig, lokiURL, latencyMonitorCmd) + if err != nil { + return err + } + latencyMonitorScript = script + } + + fmt.Printf("Starting latency monitor on %d instance(s)...\n", len(insts)) + + if err := runScriptInTMux(insts, resolvedSSHKeyPath, latencyMonitorScript, LatencyMonitorSessionName, time.Minute*5); err != nil { + return err + } + return verifyLatencyMonitorStart(insts, resolvedSSHKeyPath, lokiURL != "", 30*time.Second) + }, + } + + // Define flags for the command + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key (overrides environment variable and default)") + cmd.Flags().IntVarP(&instances, "instances", "i", 1, "the number of instances of latency monitor, each ran on its own validator") + cmd.Flags().IntVarP(&blobSize, "blob-size", "b", 1024, "the max number of bytes in each blob") + cmd.Flags().IntVarP(&blobSizeMin, "blob-size-min", "z", 1024, "the min number of bytes in each blob") + cmd.Flags().StringVarP(&submissionDelay, "submission-delay", "s", "4000ms", "delay between transaction submissions") + 
cmd.Flags().StringVarP(&namespace, "namespace", "n", "test", "namespace for blob submission") + cmd.Flags().IntVarP(&observabilityPort, "observability-port", "m", 9464, "port for Prometheus observability HTTP server (0 to disable)") + cmd.Flags().StringVar(&promtailConfig, "promtail-config", "", "path to promtail config template (defaults to ./observability/promtail/promtail-config.yml)") + cmd.Flags().BoolVar(&stop, "stop", false, "stop the latency monitor instead of starting it") + cmd.Flags().IntVarP(&workers, "workers", "w", 1, "number of parallel worker accounts for submission (1 = sequential, >1 = parallel)") + _ = cmd.MarkFlagRequired("instances") + + return cmd +} + +func promtailScript(rootDir, promtailConfigPath, lokiURL, latencyMonitorCmd string) (string, error) { + configBytes, err := os.ReadFile(promtailConfigPath) + if err != nil { + return "", fmt.Errorf("failed to read promtail config %q: %w", promtailConfigPath, err) + } + + normalizedLokiURL := normalizeLokiURL(strings.TrimRight(lokiURL, "/")) + configIncludesPushPath := strings.Contains(string(configBytes), "__LOKI_URL__/loki/api/v1/push") + normalizedLokiURL = ensureLokiPushURL(normalizedLokiURL, configIncludesPushPath) + renderedConfig := strings.ReplaceAll(string(configBytes), "__LOKI_URL__", normalizedLokiURL) + configB64 := base64.StdEncoding.EncodeToString([]byte(renderedConfig)) + + scriptPath := filepath.Join(rootDir, "tools", "talis", "scripts", "promtail.sh") + scriptBytes, err := os.ReadFile(scriptPath) + if err != nil { + return "", fmt.Errorf("failed to read promtail script template %q: %w", scriptPath, err) + } + + renderedScript := strings.NewReplacer( + "__PROMTAIL_CONFIG_B64__", configB64, + "__LATENCY_MONITOR_CMD__", latencyMonitorCmd, + ).Replace(string(scriptBytes)) + + return renderedScript, nil +} + +func normalizeLokiURL(raw string) string { + if strings.HasPrefix(raw, "http:/") && !strings.HasPrefix(raw, "http://") { + return "http://" + strings.TrimPrefix(raw, "http:/") + 
} + if strings.HasPrefix(raw, "https:/") && !strings.HasPrefix(raw, "https://") { + return "https://" + strings.TrimPrefix(raw, "https:/") + } + return raw +} + +func ensureLokiPushURL(lokiURL string, configIncludesPushPath bool) string { + if configIncludesPushPath { + return strings.TrimSuffix(lokiURL, "/loki/api/v1/push") + } + if strings.HasSuffix(lokiURL, "/loki/api/v1/push") { + return lokiURL + } + return lokiURL + "/loki/api/v1/push" +} + +// updateLatencyTargets updates the latency monitor targets on the observability monitoring node. It shows the nodes that are currently running the latency monitor. +func updateLatencyTargets(cfg Config, observabilityNode Instance, sshKeyPath string, instances []Instance) error { + groups, skipped, err := buildObservabilityTargetsForInstances(instances, cfg, latencyMonitorMetricsPort, "public", "validator") + if err != nil { + return err + } + + payload, err := marshalTargets(groups, true) + if err != nil { + return err + } + + if skipped > 0 { + log.Printf("skipped %d nodes for latency monitor targets (missing IP)", skipped) + } + + encoded := base64.StdEncoding.EncodeToString(payload) + remotePath := "/root/observability/docker/targets/latency_targets.json" + writeCmd := fmt.Sprintf("printf '%%s' %q | base64 -d > %s", encoded, remotePath) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + defer cancel() + + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", observabilityNode.PublicIP), + writeCmd, + ) + if out, err := ssh.CombinedOutput(); err != nil { + return fmt.Errorf("failed to update latency targets on %s: %w\n%s", observabilityNode.PublicIP, err, out) + } + + log.Printf("updated latency monitor targets on observability node %s (%d entries)", observabilityNode.PublicIP, len(groups)) + return nil +} + +func verifyLatencyMonitorStart(instances []Instance, sshKeyPath string, 
expectPromtail bool, timeout time.Duration) error { + var wg sync.WaitGroup + errCh := make(chan error, len(instances)) + + for _, inst := range instances { + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + runSSH := func(cmd string) ([]byte, error) { + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + cmd, + ) + return ssh.CombinedOutput() + } + + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if _, err := runSSH("pgrep -a latency-monitor"); err == nil { + if !expectPromtail { + return + } + if _, err := runSSH("pgrep -a promtail"); err == nil { + return + } + } + time.Sleep(2 * time.Second) + } + + promtailOut, _ := runSSH("tail -200 /root/promtail.log 2>/dev/null || true") + latmonOut, _ := runSSH("tail -200 /root/latency-monitor-logs 2>/dev/null || true") + errCh <- fmt.Errorf( + "[%s:%s] latency-monitor did not start within %s\n-- promtail.log --\n%s\n-- latency-monitor-logs --\n%s", + inst.Name, + inst.PublicIP, + timeout, + strings.TrimSpace(string(promtailOut)), + strings.TrimSpace(string(latmonOut)), + ) + }(inst) + } + + wg.Wait() + close(errCh) + + var errs []error //nolint:prealloc + for e := range errCh { + errs = append(errs, e) + } + if len(errs) > 0 { + sb := strings.Builder{} + sb.WriteString("latency-monitor failed to start on one or more hosts:\n") + for _, e := range errs { + sb.WriteString("- ") + sb.WriteString(e.Error()) + sb.WriteByte('\n') + } + return errors.New(sb.String()) + } + return nil +} + +const ( + gracefulShutdownPollInterval = 5 * time.Second + gracefulShutdownTimeout = 60 * time.Second +) + +// stopTmuxSession SSHes into each remote host in parallel and gracefully stops the tmux session. 
+// It sends Ctrl+C to initiate graceful shutdown, polls for session termination, and falls back +// to force-killing the session if it doesn't stop within the timeout. +func stopTmuxSession( + instances []Instance, + sshKeyPath string, + sessionName string, + timeout time.Duration, +) error { + var wg sync.WaitGroup + errCh := make(chan error, len(instances)) + counter := atomic.Uint32{} + + for _, inst := range instances { + wg.Add(1) + go func(inst Instance) { + defer wg.Done() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Helper to run SSH commands + runSSH := func(cmd string) ([]byte, error) { + ssh := exec.CommandContext(ctx, + "ssh", + "-i", sshKeyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + fmt.Sprintf("root@%s", inst.PublicIP), + cmd, + ) + return ssh.CombinedOutput() + } + + // Check if session exists first + if _, err := runSSH(fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName)); err != nil { + log.Printf("[%s] no %s session found, nothing to stop\n", inst.Name, sessionName) + counter.Add(1) + return + } + + // Send Ctrl+C to initiate graceful shutdown + log.Printf("[%s] sending Ctrl+C to %s session...\n", inst.Name, sessionName) + if _, err := runSSH(fmt.Sprintf("tmux send-keys -t %s C-c", sessionName)); err != nil { + errCh <- fmt.Errorf("[%s:%s] failed to send Ctrl+C: %v", inst.Name, inst.PublicIP, err) + return + } + + // Poll for session termination + deadline := time.Now().Add(gracefulShutdownTimeout) + for time.Now().Before(deadline) { + time.Sleep(gracefulShutdownPollInterval) + + // Check if session still exists + if _, err := runSSH(fmt.Sprintf("tmux has-session -t %s 2>/dev/null", sessionName)); err != nil { + // Session no longer exists - graceful shutdown succeeded + log.Printf("[%s] %s session gracefully stopped ✓ – %d/%d\n", + inst.Name, sessionName, counter.Add(1), len(instances)) + return + } + + log.Printf("[%s] %s session still running, 
waiting...\n", inst.Name, sessionName) + } + + // Timeout reached - force kill the session + log.Printf("[%s] timeout reached, force killing %s session...\n", inst.Name, sessionName) + if out, err := runSSH(fmt.Sprintf("tmux kill-session -t %s 2>/dev/null || true", sessionName)); err != nil { + errCh <- fmt.Errorf("[%s:%s] failed to force kill session: %v\n%s", + inst.Name, inst.PublicIP, err, out) + return + } + + log.Printf("[%s] %s session force killed ⚠️ – %d/%d\n", + sessionName, inst.Name, counter.Add(1), len(instances)) + }(inst) + } + + wg.Wait() + close(errCh) + + errs := make([]error, 0, len(instances)) + for err := range errCh { + errs = append(errs, err) + } + + if len(errs) == 0 { + return nil + } + + return fmt.Errorf("errors stopping tmux session:\n%w", errors.Join(errs...)) +} diff --git a/tools/talis/main.go b/tools/talis/main.go new file mode 100644 index 0000000000..016b1ee340 --- /dev/null +++ b/tools/talis/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "log" + + "github.com/spf13/cobra" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "talis", + Short: "Talis CLI", + Long: "Talis CLI is a command line interface for running performance experiments.", + RunE: func(cmd *cobra.Command, args []string) error { + return cmd.Help() + }, + } + + rootCmd.AddCommand( + downloadCmd(), + generateCmd(), + initCmd(), + initEnvCmd(), + statusCmd(), + listCmd(), + upCmd(), + downCmd(), + deployCmd(), + addCmd(), + startTxsimCmd(), + startLatencyMonitorCmd(), + uploadDataCmd(), + killTmuxSessionCmd(), + resetCmd(), + setupFibreCmd(), + startFibreCmd(), + fibreTxsimCmd(), + fibreThroughputCmd(), + fibreBootstrapEvnodeCmd(), + resourceMonitorCmd(), + downloadResourcesCmd(), + syncNodeCmd(), + ) + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/tools/talis/monitor.go b/tools/talis/monitor.go new file mode 100644 index 0000000000..09a4a8939e --- /dev/null +++ b/tools/talis/monitor.go @@ -0,0 +1,98 @@ +package main + 
+import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const ResourceMonitorSessionName = "monitor" + +func resourceMonitorCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + nodes string + interval int + stop bool + ) + + cmd := &cobra.Command{ + Use: "resource-monitor", + Short: "Start network and CPU monitoring on remote validators", + Long: `Deploys a monitoring script to remote validators that records per-port +network bandwidth (iptables accounting for ports 9091, 26656, 26657) and +per-process CPU/memory usage (celestia-appd, fibre-txsim, txsim). + +Output is written to /root/monitor.jsonl on each validator. Use +'talis download-resources' to collect the results.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + validators, err := filterMatchingInstances(cfg.Validators, nodes) + if err != nil { + return fmt.Errorf("failed to filter nodes: %w", err) + } + if len(validators) == 0 { + return fmt.Errorf("no matching validators found for pattern %q", nodes) + } + + if stop { + fmt.Printf("Stopping monitor on %d validator(s)...\n", len(validators)) + return stopTmuxSession(validators, resolvedSSHKeyPath, ResourceMonitorSessionName, 5*time.Minute) + } + + // Read the monitor.sh script from the scripts directory. + scriptPath := filepath.Join(rootDir, "tools", "talis", "scripts", "monitor.sh") + scriptBytes, err := os.ReadFile(scriptPath) + if err != nil { + return fmt.Errorf("failed to read monitor script %q: %w", scriptPath, err) + } + + // Prepend the interval env var so the script picks it up. 
+ script := fmt.Sprintf("export MONITOR_INTERVAL=%d\n%s", interval, string(scriptBytes)) + + fmt.Printf("Starting monitor on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, script, ResourceMonitorSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start monitor sessions: %w", err) + } + + fmt.Println() + fmt.Println("=== monitor sessions started ===") + fmt.Printf(" tmux session: %s\n", ResourceMonitorSessionName) + fmt.Printf(" output file: /root/monitor.jsonl\n") + fmt.Printf(" log file: /root/talis-%s.log\n", ResourceMonitorSessionName) + fmt.Println(" validators:") + for _, val := range validators { + fmt.Printf(" - %s (%s)\n", val.Name, val.PublicIP) + } + fmt.Println() + fmt.Printf(" To stop: talis resource-monitor --stop\n") + fmt.Printf(" To kill: talis kill-session -s %s\n", ResourceMonitorSessionName) + fmt.Printf(" To download: talis download-resources\n") + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().StringVarP(&nodes, "nodes", "n", "validator-*", "glob pattern for which validators to monitor") + cmd.Flags().IntVar(&interval, "interval", 1, "sampling interval in seconds") + cmd.Flags().BoolVar(&stop, "stop", false, "stop monitoring instead of starting it") + + return cmd +} diff --git a/tools/talis/network.go b/tools/talis/network.go new file mode 100644 index 0000000000..c55bc4d47b --- /dev/null +++ b/tools/talis/network.go @@ -0,0 +1,354 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + sdkmath "cosmossdk.io/math" + "github.com/celestiaorg/celestia-app/v9/app" + "github.com/celestiaorg/celestia-app/v9/app/encoding" + "github.com/celestiaorg/celestia-app/v9/test/util/genesis" + blobtypes 
"github.com/celestiaorg/celestia-app/v9/x/blob/types" + minfeetypes "github.com/celestiaorg/celestia-app/v9/x/minfee/types" + "github.com/celestiaorg/go-square/v4/share" + cmtconfig "github.com/cometbft/cometbft/config" + cmtjson "github.com/cometbft/cometbft/libs/json" + cmtos "github.com/cometbft/cometbft/libs/os" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/p2p/pex" + "github.com/cometbft/cometbft/privval" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + serverconfig "github.com/cosmos/cosmos-sdk/server/config" + "github.com/spf13/viper" +) + +// NodeInfo is a struct that contains the name, IP address, and network address +// of a node. +type NodeInfo struct { + Name string `json:"name"` + IP string `json:"ip"` + NetworkAddress string `json:"network_address"` + Region string `json:"region"` +} + +func (n NodeInfo) PeerID() string { + return fmt.Sprintf("%s@%s:26656", n.NetworkAddress, n.IP) +} + +// Network maintains the initial state of the network. This includes the +// genesis, all relevant validators included in the genesis, and all accounts. +type Network struct { + genesis *genesis.Genesis + ecfg encoding.Config + + validators map[string]NodeInfo + accounts []string +} + +func NewNetwork(chainID string, squareSize int, mods ...genesis.Modifier) (*Network, error) { + codec := encoding.MakeConfig(app.ModuleEncodingRegisters...) + blobParams := blobtypes.DefaultParams() + blobParams.GovMaxSquareSize = uint64(squareSize) + cparams := app.DefaultConsensusParams() + cparams.Block.MaxBytes = int64(squareSize * squareSize * share.ContinuationSparseShareContentSize) + + mods = append(mods, genesis.ImmediateProposals(codec.Codec)) + mods = append(mods, genesis.SetBlobParams(codec.Codec, blobParams)) + + g := genesis.NewDefaultGenesis(). + WithChainID(chainID). + WithModifiers(mods...). 
+ WithConsensusParams(cparams) + + return &Network{ + genesis: g, + validators: make(map[string]NodeInfo), + ecfg: codec, + }, nil +} + +func SetMinFee(codec codec.Codec, minFee float64) genesis.Modifier { + return func(state map[string]json.RawMessage) map[string]json.RawMessage { + minFeeGenState := minfeetypes.DefaultGenesis() + gasPrice, err := sdkmath.LegacyNewDecFromStr(fmt.Sprintf("%f", minFee)) + if err != nil { + panic(err) + } + minFeeGenState.NetworkMinGasPrice = gasPrice + state[minfeetypes.ModuleName] = codec.MustMarshalJSON(minFeeGenState) + return state + } +} + +// AddValidator adds a validator to the network. The validator is identified by +// its name which is assigned by pulumi as hardware is allocated. An additional +// account and keyring are saved to the payload directory that can be used by +// txsim. Pre-funded fibre accounts are also created for each validator. +// if the stake is set to 0, a default value is used. +func (n *Network) AddValidator(name, ip, payLoadRoot, region string, stake int64, fibreAccounts int) error { + n.validators[name] = NodeInfo{ + Name: name, + IP: ip, + Region: region, + } + + val := genesis.NewDefaultValidator(name) + if stake != 0 { + val.Stake = stake + } + err := n.genesis.NewValidator(val) + if err != nil { + return err + } + + // add a txsim key and keyring to each validator + kr, err := keyring.New(app.Name, keyring.BackendTest, + filepath.Join(payLoadRoot, name), nil, n.ecfg.Codec) + if err != nil { + return err + } + + // import the validator's state key to its own personal keyring + gkr := n.genesis.Keyring() + + valPriv, err := gkr.ExportPrivKeyArmor(name, "congest") + if err != nil { + return err + } + + err = kr.ImportPrivKey("validator", valPriv, "congest") + if err != nil { + return err + } + + if err := addFundedAccount(kr, n.genesis, "txsim"); err != nil { + return err + } + + fmt.Printf("creating %d fibre accounts for validator %s\n", fibreAccounts, name) + for i := range fibreAccounts { + if 
err := addFundedAccount(kr, n.genesis, fmt.Sprintf("fibre-%d", i)); err != nil { + return err + } + } + + return nil +} + +// AddEncoder creates a keyring for a dedicated encoder instance with uniquely +// prefixed fibre accounts (enc0-0, enc0-1, ...) so that multiple encoders can +// each fund their own escrow without blocking one another. +func (n *Network) AddEncoder(name, payLoadRoot string, fibreAccounts int) error { + kr, err := keyring.New(app.Name, keyring.BackendTest, + filepath.Join(payLoadRoot, name), nil, n.ecfg.Codec) + if err != nil { + return err + } + + index := extractIndexFromName(name) + keyPrefix := fmt.Sprintf("enc%d", index) + + fmt.Printf("creating %d fibre accounts for encoder %s (prefix=%s)\n", fibreAccounts, name, keyPrefix) + for i := range fibreAccounts { + if err := addFundedAccount(kr, n.genesis, fmt.Sprintf("%s-%d", keyPrefix, i)); err != nil { + return err + } + } + + return nil +} + +// addFundedAccount creates a new key in the local keyring and registers it as a +// funded account in genesis. The key lives in the validator's keyring so the +// binary (txsim, fibre-txsim) can sign transactions at runtime. +func addFundedAccount(kr keyring.Keyring, g *genesis.Genesis, name string) error { + key, _, err := kr.NewMnemonic(name, keyring.English, "", "", hd.Secp256k1) + if err != nil { + return err + } + pk, err := key.GetPubKey() + if err != nil { + return err + } + return g.AddAccount(genesis.Account{ + PubKey: pk, + Balance: 9999999999999999, + Name: name, + }) +} + +func (n *Network) Peers() []string { + var peers []string //nolint:prealloc + for _, v := range n.validators { + if v.IP == "" { + continue + } + peers = append(peers, v.PeerID()) + } + return peers +} + +func (n *Network) InitNodes(rootDir string) error { + if len(n.accounts) != 0 { + n.genesis.WithKeyringAccounts(genesis.NewKeyringAccounts(genesis.DefaultInitialBalance, n.accounts...)...) 
+ } + + // save the genesis file + genesisPath := filepath.Join(rootDir, "genesis.json") + + genDoc, err := n.genesis.Export() + if err != nil { + return err + } + + genBytes, err := cmtjson.MarshalIndent(genDoc, "", " ") + if err != nil { + return err + } + + // save the genesis file as configured + err = cmtos.WriteFile(genesisPath, genBytes, 0o644) + if err != nil { + return err + } + + fmt.Println("genesis file saved to", genesisPath, "with", len(n.validators), "validators") + + vals := n.genesis.Validators() + + // Pass 1: write per-validator node_key.json + priv_validator files, and + // stamp NetworkAddress into n.validators so pass 2 can build a complete + // persistent_peers list. + for _, v := range vals { + valPath := filepath.Join(rootDir, v.Name) + nodeKeyFile := filepath.Join(valPath, "node_key.json") + if err := cmtos.EnsureDir(filepath.Dir(nodeKeyFile), 0o777); err != nil { + return err + } + + // add the network key assigned by the genesis to that validator's payload + nodeKey := &p2p.NodeKey{ + PrivKey: v.NetworkKey, + } + if err := nodeKey.SaveAs(nodeKeyFile); err != nil { + return err + } + ninfo, has := n.validators[v.Name] + if !has { + return fmt.Errorf("no validator found %s", v.Name) + } + ninfo.NetworkAddress = string(nodeKey.ID()) + n.validators[v.Name] = ninfo + + // generate remaining private key file using the assigned consensus key + pvStateFile := filepath.Join(valPath, "priv_validator_state.json") + if err := cmtos.EnsureDir(filepath.Dir(pvStateFile), 0o777); err != nil { + return err + } + pvKeyFile := filepath.Join(valPath, "priv_validator_key.json") + if err := cmtos.EnsureDir(filepath.Dir(pvKeyFile), 0o777); err != nil { + return err + } + filePV := privval.NewFilePV(v.ConsensusKey, pvKeyFile, pvStateFile) + filePV.Save() + } + + // Pass 2: now that every validator's NetworkAddress is known, write + // config.toml with a populated persistent_peers list. 
Without this the + // chain has no bootstrap mechanism — addrbook alone is not enough — and + // validators come up with zero peers and never reach quorum. + // + // Use the templated config.toml that `talis init` wrote one level up + // (built from app.DefaultConsensusConfig + DefaultConfigProfile, see + // init.go:137-139). That carries the celestia-specific overrides + // AND the talis profile bits (TracingTables, Prometheus enable/listen + // addr, RPC.GRPCListenAddress=0.0.0.0:9090). Falling back to + // app.DefaultConsensusConfig directly would silently drop the talis + // profile — observability would break on --with-observability runs. + baseCfgPath := filepath.Join(filepath.Dir(rootDir), "config.toml") + v := viper.New() + v.SetConfigFile(baseCfgPath) + if err := v.ReadInConfig(); err != nil { + return fmt.Errorf("failed to read base config %q: %w", baseCfgPath, err) + } + + for _, val := range vals { + selfInfo := n.validators[val.Name] + selfID := selfInfo.NetworkAddress + var peers []string + for _, peer := range n.validators { + if peer.NetworkAddress == "" || peer.NetworkAddress == selfID || peer.IP == "" || peer.IP == "TBD" { + continue + } + peers = append(peers, peer.PeerID()) + } + + // Start from app.DefaultConsensusConfig so any field absent from the + // templated TOML still inherits celestia defaults, then layer the + // templated values on top. + cmtcfg := app.DefaultConsensusConfig() + if err := v.Unmarshal(cmtcfg); err != nil { + return fmt.Errorf("failed to unmarshal base config: %w", err) + } + + // Without persistent_peers the chain has no bootstrap mechanism on + // a fresh testnet — addrbook alone is not enough — and validators + // come up with zero peers and never reach quorum. + cmtcfg.P2P.PersistentPeers = strings.Join(peers, ",") + // Enable the priv-validator gRPC endpoint that fibre needs to fetch + // the validator's public key for shard-assignment verification. 
+ cmtcfg.PrivValidatorGRPCListenAddr = "127.0.0.1:26659" + cmtconfig.WriteConfigFile(filepath.Join(rootDir, val.Name, "config.toml"), cmtcfg) + + appcfg := app.DefaultAppConfig() + serverconfig.WriteConfigFile(filepath.Join(rootDir, val.Name, "app.toml"), appcfg) + } + + return nil +} + +// SaveValidatorsToFile saves the validators map as a JSON to the given file. +func (n *Network) SaveValidatorsToFile(filename string) error { + // Open the file for writing. Create it if it doesn't exist. + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Encode the validators map to JSON and write it to the file. + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") // Optional: format the JSON with indentation + err = encoder.Encode(n.validators) + if err != nil { + return err + } + + return nil +} + +func (n *Network) SaveAddressBook(payloadRoot string, peers []string) error { + addrBookFile := filepath.Join(payloadRoot, "addrbook.json") + return WriteAddressBook(peers, addrBookFile) +} + +func WriteAddressBook(peers []string, file string) error { + book := pex.NewAddrBook(file, false) + for _, peer := range peers { + addr, err := p2p.NewNetAddressString(peer) + if err != nil { + return fmt.Errorf("parsing peer address %s: %w", peer, err) + } + err = book.AddAddress(addr, addr) + if err != nil { + return fmt.Errorf("adding peer address %s: %w", peer, err) + } + } + book.Save() + return nil +} diff --git a/tools/talis/observability_payload.go b/tools/talis/observability_payload.go new file mode 100644 index 0000000000..225a4e8ed0 --- /dev/null +++ b/tools/talis/observability_payload.go @@ -0,0 +1,143 @@ +package main + +import ( + "crypto/rand" + "fmt" + "log" + "math/big" + "os" + "path/filepath" +) + +const ( + defaultMetricsPort = 26660 + appTelemetryPort = 1317 + latencyMonitorMetricsPort = 9464 + grafanaPasswordLength = 16 +) + +// generateGrafanaPassword generates a random alphanumeric password. 
+func generateGrafanaPassword() (string, error) { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + password := make([]byte, grafanaPasswordLength) + for i := range password { + n, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) + if err != nil { + return "", fmt.Errorf("failed to generate random number: %w", err) + } + password[i] = charset[n.Int64()] + } + return string(password), nil +} + +// stageObservabilityPayload copies the observability directory (docker-compose, Prometheus config, +// Grafana dashboards, and setup scripts) into the payload directory and generates +// the targets.json file from the config. +// +// If no observability monitoring nodes are configured, this function does nothing. +// If observability monitoring nodes are configured but observabilitySrcDir is empty, it returns an error. +func stageObservabilityPayload(cfg Config, observabilitySrcDir, payloadDir string) error { + // Skip if no observability monitoring nodes configured + if len(cfg.Observability) == 0 { + return nil + } + + // Error if observability monitoring nodes configured but no observability directory provided + if observabilitySrcDir == "" { + return fmt.Errorf("observability monitoring nodes are configured but --observability-dir flag not provided") + } + + // Validate source directory exists + if fi, err := os.Stat(observabilitySrcDir); err != nil || !fi.IsDir() { + return fmt.Errorf("observability directory %q does not exist or is not a directory", observabilitySrcDir) + } + + dockerSrc := filepath.Join(observabilitySrcDir, "docker") + observabilityDest := filepath.Join(payloadDir, "observability") + dockerDest := filepath.Join(observabilityDest, "docker") + + if err := copyDir(dockerSrc, dockerDest); err != nil { + return fmt.Errorf("failed to copy observability docker assets: %w", err) + } + + for _, script := range []string{"install_metrics.sh", "start_metrics.sh"} { + src := filepath.Join(observabilitySrcDir, script) 
+ dest := filepath.Join(observabilityDest, script) + if err := copyFile(src, dest, 0o755); err != nil { + return fmt.Errorf("failed to copy observability script %s: %w", script, err) + } + } + + // Generate validator observability targets (CometBFT on port 26660) + groups, skipped, err := buildObservabilityTargets(cfg, defaultMetricsPort, "public") + if err != nil { + return err + } + + payload, err := marshalTargets(groups, true) + if err != nil { + return err + } + + targetsDir := filepath.Join(dockerDest, "targets") + if err := os.MkdirAll(targetsDir, 0o755); err != nil { + return fmt.Errorf("failed to create targets directory: %w", err) + } + + targetsPath := filepath.Join(targetsDir, "targets.json") + if err := os.WriteFile(targetsPath, payload, 0o644); err != nil { + return fmt.Errorf("failed to write targets file: %w", err) + } + + // Generate latency monitor targets (same validators, port 9464) + latencyGroups, _, err := buildObservabilityTargets(cfg, latencyMonitorMetricsPort, "public") + if err != nil { + return err + } + + latencyPayload, err := marshalTargets(latencyGroups, true) + if err != nil { + return err + } + + latencyTargetsPath := filepath.Join(targetsDir, "latency_targets.json") + if err := os.WriteFile(latencyTargetsPath, latencyPayload, 0o644); err != nil { + return fmt.Errorf("failed to write latency targets file: %w", err) + } + + // Generate app telemetry targets (same validators, port 1317) + appGroups, _, err := buildObservabilityTargets(cfg, appTelemetryPort, "public") + if err != nil { + return err + } + + appPayload, err := marshalTargets(appGroups, true) + if err != nil { + return err + } + + appTargetsPath := filepath.Join(targetsDir, "app_targets.json") + if err := os.WriteFile(appTargetsPath, appPayload, 0o644); err != nil { + return fmt.Errorf("failed to write app targets file: %w", err) + } + + // Fibre metrics are pushed via OTel Collector (OTLP), not scraped directly. 
+ + // Generate random Grafana password and write .env file + grafanaPassword, err := generateGrafanaPassword() + if err != nil { + return fmt.Errorf("failed to generate Grafana password: %w", err) + } + envContent := fmt.Sprintf("GRAFANA_PASSWORD=%s\n", grafanaPassword) + envPath := filepath.Join(dockerDest, ".env") + if err := os.WriteFile(envPath, []byte(envContent), 0o644); err != nil { + return fmt.Errorf("failed to write .env file: %w", err) + } + + log.Printf("staged observability payload with %d targets", len(groups)) + if skipped > 0 { + log.Printf("⚠️ skipped %d nodes for observability targets (missing private/public IP)", skipped) + } + + return nil +} diff --git a/tools/talis/observability_targets.go b/tools/talis/observability_targets.go new file mode 100644 index 0000000000..5a3d3aa404 --- /dev/null +++ b/tools/talis/observability_targets.go @@ -0,0 +1,108 @@ +package main + +import ( + "encoding/json" + "fmt" +) + +type targetGroup struct { + Targets []string `json:"targets"` + Labels map[string]string `json:"labels,omitempty"` +} + +func buildObservabilityTargets(cfg Config, port int, addressSource string) ([]targetGroup, int, error) { + if addressSource != "public" && addressSource != "private" { + return nil, 0, fmt.Errorf("invalid address source %q (use public or private)", addressSource) + } + + var groups []targetGroup //nolint:prealloc + var skipped int + + appendTargets := func(nodes []Instance, role string) { + for _, node := range nodes { + address, ok := nodeAddress(node, port, addressSource) + if !ok { + skipped++ + continue + } + + groups = append(groups, targetGroup{ + Targets: []string{address}, + Labels: map[string]string{ + "chain_id": cfg.ChainID, + "experiment": cfg.Experiment, + "role": role, + "region": node.Region, + "provider": string(node.Provider), + "node_id": node.Name, + }, + }) + } + } + + appendTargets(cfg.Validators, "validator") + appendTargets(cfg.Bridges, "bridge") + appendTargets(cfg.Lights, "light") + + return 
groups, skipped, nil +} + +func buildObservabilityTargetsForInstances(instances []Instance, cfg Config, port int, addressSource, role string) ([]targetGroup, int, error) { + if addressSource != "public" && addressSource != "private" { + return nil, 0, fmt.Errorf("invalid address source %q (use public or private)", addressSource) + } + + var groups []targetGroup //nolint:prealloc + var skipped int + + for _, node := range instances { + address, ok := nodeAddress(node, port, addressSource) + if !ok { + skipped++ + continue + } + + groups = append(groups, targetGroup{ + Targets: []string{address}, + Labels: map[string]string{ + "chain_id": cfg.ChainID, + "experiment": cfg.Experiment, + "role": role, + "region": node.Region, + "provider": string(node.Provider), + "node_id": node.Name, + }, + }) + } + + return groups, skipped, nil +} + +func marshalTargets(groups []targetGroup, pretty bool) ([]byte, error) { + if pretty { + return json.MarshalIndent(groups, "", " ") + } + return json.Marshal(groups) +} + +func nodeAddress(node Instance, port int, source string) (string, bool) { + var ip string + switch source { + case "public": + ip = node.PublicIP + if ip == "" || ip == "TBD" { + ip = node.PrivateIP + } + case "private": + ip = node.PrivateIP + if ip == "" || ip == "TBD" { + ip = node.PublicIP + } + } + + if ip == "" || ip == "TBD" { + return "", false + } + + return fmt.Sprintf("%s:%d", ip, port), true +} diff --git a/tools/talis/reset.go b/tools/talis/reset.go new file mode 100644 index 0000000000..28c2ca4d69 --- /dev/null +++ b/tools/talis/reset.go @@ -0,0 +1,141 @@ +package main + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" +) + +func resetCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + SSHKeyPath string + validators []string + workers int + ) + + cmd := &cobra.Command{ + Use: "reset", + Short: "Reset the specified validators or all validators", + Long: "Stops the running services and removes files created by the 
deploy command for specified validators or all validators", + RunE: func(cmd *cobra.Command, args []string) error { + // Load config + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedKey := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Filter validators if specific ones were requested + targetValidators := cfg.Validators + if len(validators) > 0 { + targetValidators = make([]Instance, 0) + for _, v := range cfg.Validators { + for _, requested := range validators { + if strings.Contains(v.Name, requested) { + targetValidators = append(targetValidators, v) + break + } + } + } + if len(targetValidators) == 0 { + return fmt.Errorf("no matching validators found") + } + } + + cleanupScript := ` + tmux kill-session -t app 2>/dev/null || true + tmux kill-session -t txsim 2>/dev/null || true + tmux kill-session -t latency-monitor 2>/dev/null || true + tmux kill-session -t fibre 2>/dev/null || true + tmux kill-session -t fibre-txsim 2>/dev/null || true + rm -rf .celestia-app .celestia-fibre logs payload payload.tar.gz /bin/celestia* /bin/txsim /bin/fibre /bin/fibre-txsim + ` + // Run cleanup on each validator + var wg sync.WaitGroup + workerChan := make(chan struct{}, workers) + for _, val := range targetValidators { + wg.Add(1) + go func(v Instance) { + defer wg.Done() + workerChan <- struct{}{} + defer func() { <-workerChan }() + fmt.Printf("Resetting validator %s...\n", v.Name) + if err := runScriptInTMux([]Instance{v}, resolvedKey, cleanupScript, "cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", v.Name, err) + } + }(val) + } + wg.Wait() + + // Clean up encoder instances. 
+ if len(cfg.Encoders) > 0 { + encoderCleanup := ` + tmux kill-session -t app 2>/dev/null || true + tmux kill-session -t fibre-txsim 2>/dev/null || true + tmux kill-session -t setup-fibre 2>/dev/null || true + rm -rf .celestia-app encoder-payload encoder-payload.tar.gz /bin/celestia* /bin/fibre-txsim + ` + var encWG sync.WaitGroup + encWorkerChan := make(chan struct{}, workers) + for _, enc := range cfg.Encoders { + encWG.Add(1) + go func(e Instance) { + defer encWG.Done() + encWorkerChan <- struct{}{} + defer func() { <-encWorkerChan }() + fmt.Printf("Resetting encoder %s...\n", e.Name) + if err := runScriptInTMux([]Instance{e}, resolvedKey, encoderCleanup, "cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", e.Name, err) + } + }(enc) + } + encWG.Wait() + } + + // Clean up observability stack (Grafana/Prometheus/Loki) if configured. + if len(cfg.Observability) > 0 { + observabilityCleanup := ` + if [ -d /root/observability/docker ]; then + cd /root/observability/docker && docker compose down -v + fi + rm -rf /root/observability /root/observability-payload.tar.gz + ` + var obsWG sync.WaitGroup + obsWorkerChan := make(chan struct{}, workers) + for _, obs := range cfg.Observability { + obsWG.Add(1) + go func(o Instance) { + defer obsWG.Done() + obsWorkerChan <- struct{}{} + defer func() { <-obsWorkerChan }() + fmt.Printf("Resetting observability node %s...\n", o.Name) + if err := runScriptInTMux([]Instance{o}, resolvedKey, observabilityCleanup, "obs-cleanup", time.Minute*5); err != nil { + fmt.Printf("Warning: error while cleaning up %s: %v\n", o.Name, err) + } + }(obs) + } + obsWG.Wait() + } + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory to load config from") + cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "config file name") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key") + 
cmd.Flags().StringSliceVarP(&validators, "validators", "v", []string{}, "optional list of validator names to reset (e.g. validator-0,validator-1)") + cmd.Flags().IntVarP(&workers, "workers", "w", 10, "number of concurrent workers for parallel operations (should be > 0)") + + return cmd +} diff --git a/tools/talis/s3.go b/tools/talis/s3.go new file mode 100644 index 0000000000..7bb65f47cb --- /dev/null +++ b/tools/talis/s3.go @@ -0,0 +1,168 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/spf13/cobra" +) + +type S3Config struct { + Region string `json:"region"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + BucketName string `json:"bucket_name"` + Endpoint string `json:"endpoint"` +} + +// downloadS3DataCmd creates a cobra command for downloading a chain's data from S3. +func downloadS3DataCmd() *cobra.Command { + var ( + rootDir string + cfgPath string + outDir string + chainID string + ) + + cmd := &cobra.Command{ + Use: "s3", + Short: "Download all files from S3 under / into a local directory", + Long: `Loads the network config, instantiates an AWS S3 client using the +credentials in it, then recursively downloads everything under +"//" into the output directory you specify.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + client, err := createS3Client(cmd.Context(), cfg) + if err != nil { + return fmt.Errorf("failed to create S3 client: %w", err) + } + if chainID != "" { + cfg.ChainID = chainID + } + + // 4. 
Compute prefix and download
+			prefix := cfg.ChainID + "/"
+			if err := downloadS3Directory(cmd.Context(), client, cfg.S3Config.BucketName, prefix, outDir); err != nil {
+				return fmt.Errorf("failed to download S3 objects: %w", err)
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to find your config.json")
+	// NOTE(review): cfgPath is registered but RunE loads via LoadConfig(rootDir)
+	// only — confirm whether the --config override is meant to be honored here.
+	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config file (under the directory)")
+	cmd.Flags().StringVarP(&outDir, "out", "o", "./data", "local directory into which to download the S3 objects")
+	cmd.Flags().StringVarP(&chainID, "chain-id", "i", "", "override the chain-id in the config")
+
+	return cmd
+}
+
+// downloadS3Directory lists and downloads all objects under the given prefix
+// into dest, recreating the key hierarchy on the local filesystem. Keys equal
+// to the prefix itself ("directory" markers) are skipped. Returns the first
+// listing, filesystem, or download error encountered.
+func downloadS3Directory(ctx context.Context, client *s3.Client, bucket, prefix, dest string) error {
+	paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+		Bucket: aws.String(bucket),
+		Prefix: aws.String(prefix),
+	})
+
+	for paginator.HasMorePages() {
+		page, err := paginator.NextPage(ctx)
+		if err != nil {
+			return err
+		}
+
+		for _, obj := range page.Contents {
+			// compute local file path: strip the prefix
+			relPath := strings.TrimPrefix(*obj.Key, prefix)
+			if relPath == "" {
+				// skip the "directory" marker itself
+				continue
+			}
+			localPath := filepath.Join(dest, relPath)
+			if err := os.MkdirAll(filepath.Dir(localPath), 0o755); err != nil {
+				return err
+			}
+
+			// download each object
+			f, err := os.Create(localPath)
+			if err != nil {
+				return err
+			}
+			// NOTE(review): defer in a loop — descriptors stay open until the
+			// function returns. Acceptable for modest object counts; hoist the
+			// per-object body into a helper if buckets grow large.
+			defer f.Close()
+
+			// stream body into file; the manager issues the GET itself, so no
+			// separate GetObject round-trip is needed beforehand.
+			downloader := manager.NewDownloader(client)
+			_, err = downloader.Download(ctx, f,
+				&s3.GetObjectInput{Bucket: aws.String(bucket), Key: obj.Key},
+			)
+			if err != nil {
return fmt.Errorf("download %s: %w", *obj.Key, err) + } + + log.Println("Downloaded", *obj.Key) + } + } + + return nil +} + +func createS3Client(ctx context.Context, cfg Config) (*s3.Client, error) { + s3cfg := cfg.S3Config + + opts := []func(*config.LoadOptions) error{config.WithRegion(s3cfg.Region)} + + // If static creds are provided in config (typical for DO Spaces), use + // them. Otherwise fall back to the SDK default credential chain — env + // vars, AWS_PROFILE in ~/.aws/credentials, IAM role, etc. — so the AWS + // compute path works with named profiles. + if s3cfg.AccessKeyID != "" && s3cfg.SecretAccessKey != "" { + opts = append(opts, config.WithCredentialsProvider( + aws.NewCredentialsCache( + credentials.NewStaticCredentialsProvider( + s3cfg.AccessKeyID, + s3cfg.SecretAccessKey, + "", + ), + ), + )) + } + + if s3cfg.Endpoint != "" { + customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...any) (aws.Endpoint, error) { //nolint:staticcheck + return aws.Endpoint{ //nolint:staticcheck + URL: s3cfg.Endpoint, + SigningRegion: region, + }, nil + }) + opts = append(opts, config.WithEndpointResolverWithOptions(customResolver)) //nolint:staticcheck + } + + awsCfg, err := config.LoadDefaultConfig(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("failed to build AWS config: %w", err) + } + return s3.NewFromConfig(awsCfg), nil +} diff --git a/tools/talis/scripts/monitor.sh b/tools/talis/scripts/monitor.sh new file mode 100644 index 0000000000..8403a53be0 --- /dev/null +++ b/tools/talis/scripts/monitor.sh @@ -0,0 +1,215 @@ +#!/usr/bin/env bash +# monitor.sh — per-port network bandwidth + per-process CPU/memory monitoring. +# Writes JSONL to /root/monitor.jsonl (one sample per INTERVAL seconds). +# Designed to run on Linux validators with iptables and /proc. 
+ +set -euo pipefail + +INTERVAL="${MONITOR_INTERVAL:-1}" +OUTPUT="/root/monitor.jsonl" +PORTS="9091 26656 26657" +PROCESS_NAMES="celestia-appd fibre-txsim txsim" + +# ---------- iptables accounting setup ---------- + +setup_iptables() { + iptables -N MONITOR_IN 2>/dev/null || true + iptables -N MONITOR_OUT 2>/dev/null || true + + # Remove old jump rules (ignore errors if absent) + iptables -D INPUT -j MONITOR_IN 2>/dev/null || true + iptables -D OUTPUT -j MONITOR_OUT 2>/dev/null || true + + # Flush any previous per-port rules + iptables -F MONITOR_IN + iptables -F MONITOR_OUT + + # Insert jump rules at the top of INPUT/OUTPUT + iptables -I INPUT 1 -j MONITOR_IN + iptables -I OUTPUT 1 -j MONITOR_OUT + + # Add per-port accounting rules + for port in $PORTS; do + iptables -A MONITOR_IN -p tcp --dport "$port" + iptables -A MONITOR_OUT -p tcp --sport "$port" + done +} + +cleanup_iptables() { + iptables -D INPUT -j MONITOR_IN 2>/dev/null || true + iptables -D OUTPUT -j MONITOR_OUT 2>/dev/null || true + iptables -F MONITOR_IN 2>/dev/null || true + iptables -F MONITOR_OUT 2>/dev/null || true + iptables -X MONITOR_IN 2>/dev/null || true + iptables -X MONITOR_OUT 2>/dev/null || true +} + +trap cleanup_iptables EXIT +setup_iptables + +# ---------- helpers ---------- + +# read_iptables_bytes +# Outputs one line per rule: " " +read_iptables_bytes() { + local chain="$1" + iptables -L "$chain" -v -n -x 2>/dev/null | awk ' + /tcp/ { + # Find the port: look for dpt: or spt: field + for (i = 1; i <= NF; i++) { + if ($i ~ /^[ds]pt:/) { + split($i, a, ":") + print a[2], $2 # port, bytes + } + } + } + ' +} + +# get_proc_stat — outputs "utime stime num_threads" from /proc//stat +get_proc_stat() { + local pid="$1" + # Fields: pid (comm) state ... 
(field 14=utime, 15=stime, 20=num_threads) + awk '{print $14, $15, $20}' "/proc/$pid/stat" 2>/dev/null || echo "0 0 0" +} + +# get_proc_rss_mb — outputs VmRSS in MB from /proc//status +get_proc_rss_mb() { + local pid="$1" + awk '/^VmRSS:/ {printf "%.1f", $2/1024}' "/proc/$pid/status" 2>/dev/null || echo "0" +} + +# get_total_cpu_ticks — sum of all fields from first line of /proc/stat +get_total_cpu_ticks() { + awk '/^cpu / {sum=0; for(i=2;i<=NF;i++) sum+=$i; print sum}' /proc/stat +} + +# get_system_mem — outputs "used_mb total_mb" +get_system_mem() { + awk ' + /^MemTotal:/ { total=$2 } + /^MemAvailable:/ { avail=$2 } + END { printf "%.0f %.0f", (total-avail)/1024, total/1024 } + ' /proc/meminfo +} + +# ---------- initial snapshot ---------- + +declare -A prev_in_bytes +declare -A prev_out_bytes +declare -A prev_proc_ticks + +# Seed network counters +while IFS=' ' read -r port bytes; do + prev_in_bytes["$port"]="$bytes" +done < <(read_iptables_bytes MONITOR_IN) + +while IFS=' ' read -r port bytes; do + prev_out_bytes["$port"]="$bytes" +done < <(read_iptables_bytes MONITOR_OUT) + +# Seed CPU counters +prev_total_ticks=$(get_total_cpu_ticks) +for name in $PROCESS_NAMES; do + pid=$(pgrep -x "$name" 2>/dev/null | head -1 || true) + if [ -n "$pid" ]; then + read -r ut st _threads <<< "$(get_proc_stat "$pid")" + prev_proc_ticks["$name"]=$((ut + st)) + else + prev_proc_ticks["$name"]=0 + fi +done + +sleep "$INTERVAL" + +# ---------- main loop ---------- + +while true; do + ts=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + + # --- network deltas --- + net_json="{" + first=true + while IFS=' ' read -r port bytes; do + prev=${prev_in_bytes["$port"]:-0} + delta=$(( (bytes - prev) / INTERVAL )) + prev_in_bytes["$port"]="$bytes" + if [ "$first" = true ]; then first=false; else net_json+=","; fi + net_json+="\"in_${port}_bytes_sec\":${delta}" + done < <(read_iptables_bytes MONITOR_IN) + + while IFS=' ' read -r port bytes; do + prev=${prev_out_bytes["$port"]:-0} + delta=$(( (bytes - prev) / 
INTERVAL )) + prev_out_bytes["$port"]="$bytes" + net_json+=",\"out_${port}_bytes_sec\":${delta}" + done < <(read_iptables_bytes MONITOR_OUT) + net_json+="}" + + # --- per-process CPU + memory --- + cur_total_ticks=$(get_total_cpu_ticks) + total_delta=$((cur_total_ticks - prev_total_ticks)) + prev_total_ticks=$cur_total_ticks + + proc_json="{" + first=true + for name in $PROCESS_NAMES; do + pid=$(pgrep -x "$name" 2>/dev/null | head -1 || true) + if [ -n "$pid" ]; then + read -r ut st threads <<< "$(get_proc_stat "$pid")" + cur_ticks=$((ut + st)) + prev_t=${prev_proc_ticks["$name"]:-0} + if [ "$total_delta" -gt 0 ]; then + # cpu_pct with one decimal: (proc_delta * 10000 / total_delta) then insert decimal + raw=$(( (cur_ticks - prev_t) * 10000 / total_delta )) + cpu_pct="$((raw / 10)).$((raw % 10))" + else + cpu_pct="0.0" + fi + prev_proc_ticks["$name"]=$cur_ticks + rss_mb=$(get_proc_rss_mb "$pid") + else + cpu_pct="0.0" + rss_mb="0" + threads="0" + prev_proc_ticks["$name"]=0 + fi + + if [ "$first" = true ]; then first=false; else proc_json+=","; fi + proc_json+="\"${name}\":{\"cpu_pct\":${cpu_pct},\"rss_mb\":${rss_mb},\"threads\":${threads}}" + done + proc_json+="}" + + # --- system-wide stats --- + if [ "$total_delta" -gt 0 ]; then + # System idle ticks are field 5 of /proc/stat cpu line + idle_ticks=$(awk '/^cpu / {print $5}' /proc/stat) + # We need current and previous idle, but for simplicity compute from total usage. + # Instead, use load average which is readily available. 
+ : + fi + read -r mem_used mem_total <<< "$(get_system_mem)" + load_1m=$(awk '{print $1}' /proc/loadavg) + # System CPU%: from /proc/stat, compute as (1 - idle_delta/total_delta) * 100 + idle_now=$(awk '/^cpu / {print $5}' /proc/stat) + # We need the previous idle, store it + if [ -z "${prev_idle:-}" ]; then + prev_idle=$idle_now + fi + idle_delta=$((idle_now - prev_idle)) + prev_idle=$idle_now + if [ "$total_delta" -gt 0 ]; then + busy_delta=$((total_delta - idle_delta)) + raw=$(( busy_delta * 1000 / total_delta )) + sys_cpu="$((raw / 10)).$((raw % 10))" + else + sys_cpu="0.0" + fi + + sys_json="{\"cpu_pct\":${sys_cpu},\"load_1m\":${load_1m},\"mem_used_mb\":${mem_used},\"mem_total_mb\":${mem_total}}" + + # --- emit JSONL line --- + echo "{\"ts\":\"${ts}\",\"net\":${net_json},\"proc\":${proc_json},\"sys\":${sys_json}}" >> "$OUTPUT" + + sleep "$INTERVAL" +done diff --git a/tools/talis/scripts/promtail.sh b/tools/talis/scripts/promtail.sh new file mode 100644 index 0000000000..e200168505 --- /dev/null +++ b/tools/talis/scripts/promtail.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +export HOSTNAME=$(hostname) +PROMTAIL_CONFIG=/root/promtail-config.yml +printf "%s" "__PROMTAIL_CONFIG_B64__" | base64 -d > "$PROMTAIL_CONFIG" + +if ! 
command -v promtail >/dev/null 2>&1; then + arch=$(uname -m) + if [ "$arch" = "x86_64" ] || [ "$arch" = "amd64" ]; then arch=amd64; + elif [ "$arch" = "aarch64" ] || [ "$arch" = "arm64" ]; then arch=arm64; + else echo "unsupported arch: $arch" >&2; exit 1; fi + apt-get update -y >/dev/null + apt-get install -y curl unzip >/dev/null + tmpdir=$(mktemp -d) + curl -fsSL -o "$tmpdir/promtail.zip" "https://github.com/grafana/loki/releases/download/v2.9.3/promtail-linux-$arch.zip" + unzip -o "$tmpdir/promtail.zip" -d "$tmpdir" >/dev/null + install -m 0755 "$tmpdir/promtail-linux-$arch" /usr/local/bin/promtail +fi + +promtail -config.file="$PROMTAIL_CONFIG" -config.expand-env -server.http-listen-port=9080 > /root/promtail.log 2>&1 & +sleep 1 +pgrep -a promtail >/dev/null 2>&1 || (echo "promtail failed to start:" >&2; tail -200 /root/promtail.log >&2; exit 1) + +__LATENCY_MONITOR_CMD__ diff --git a/tools/talis/scripts/upload_traces.sh b/tools/talis/scripts/upload_traces.sh new file mode 100644 index 0000000000..1a587da1ac --- /dev/null +++ b/tools/talis/scripts/upload_traces.sh @@ -0,0 +1,45 @@ +#!/bin/bash +export DEBIAN_FRONTEND=noninteractive +export NEEDRESTART_MODE=a + +apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" apt-transport-https ca-certificates gnupg curl -y + +# ensure that the env vars are exported here +source /root/payload/vars.sh +echo "CHAIN_ID after sourcing vars.sh: $CHAIN_ID" + +# Set environment variables +PROJECT_ID="numeric-mile-433416-e9" +DATASET_ID="traces" + +CHAIN_ID=$CHAIN_ID + +LOCAL_DIR="/root/.celestia-app/data/traces" + +tmux kill-session -t app + +# Get the hostname +hostname=$(hostname) + +# Parse the first part of the hostname +nodeID=$(echo $hostname | awk -F'-' '{print $1 "-" $2}') + +source_dir="/root/.celestia-app/data/traces" +logs_path="/root/logs" + +# clean the data by removing the last line +find $source_dir -type f -name "*.jsonl" -exec sed -i '$d' {} \; + 
+AWS_DEFAULT_REGION="us-east-2"
+S3_BUCKET_NAME="block-prop-traces-ef"
+echo "All files loaded."
+
+snap install aws-cli --classic
+# destination_file="/tmp/${CHAIN_ID}_${nodeID}_traces.tar.gz"  # NOTE(review): unused — no archive is ever created; uploads below stream the dirs directly
+
+# Set the base S3 path
+base_s3_path="s3://${S3_BUCKET_NAME}/${CHAIN_ID}/${nodeID}/"
+
+# Upload the directory structure to S3
+aws s3 cp "$source_dir" "$base_s3_path" --recursive --region $AWS_DEFAULT_REGION
+aws s3 cp "$logs_path" "$base_s3_path" --region $AWS_DEFAULT_REGION
diff --git a/tools/talis/scripts/validator_init.sh b/tools/talis/scripts/validator_init.sh
new file mode 100644
index 0000000000..15345f9ea4
--- /dev/null
+++ b/tools/talis/scripts/validator_init.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+CELES_HOME=".celestia-app"
+MONIKER="validator"
+ARCHIVE_NAME="payload.tar.gz"
+
+export DEBIAN_FRONTEND=noninteractive
+apt-get update -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+apt-get install git build-essential ufw curl jq chrony snapd btop nethogs unzip --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
+
+ufw allow 26657/tcp
+ufw allow 26656/tcp
+ufw allow 26657/udp
+ufw allow 26656/udp
+
+systemctl enable chrony
+systemctl start chrony
+
+# Ensure the script is run as root
+if [ "$(id -u)" -ne 0 ]; then
+    echo "This script must be run as root. Please run with sudo or as root."
+    exit 1
+fi
+
+# Load the BBR module
+echo "Loading BBR module..."
+modprobe tcp_bbr
+
+# Verify if the BBR module is loaded
+if lsmod | grep -q "tcp_bbr"; then
+    echo "BBR module loaded successfully."
+else
+    echo "Failed to load BBR module."
+    exit 1
+fi
+
+# Add BBR to the list of available congestion control algorithms
+echo "Updating sysctl settings..."
+sysctl -w net.core.default_qdisc=fq +sysctl -w net.ipv4.tcp_congestion_control=bbr + +# Enable MPTCP +sysctl -w net.mptcp.enabled=1 + +# Set the path manager to ndiffports +sysctl -w net.mptcp.mptcp_path_manager=ndiffports + +# Specify the number of subflows +SUBFLOWS=16 +sysctl -w net.mptcp.mptcp_ndiffports=$SUBFLOWS + +# Make the changes persistent across reboots +echo "Making changes persistent..." +echo "net.core.default_qdisc=fq" >> /etc/sysctl.conf +echo "net.ipv4.tcp_congestion_control=bbr" >> /etc/sysctl.conf + +#Verify the current TCP congestion control algorithm +current_algo=$(sysctl net.ipv4.tcp_congestion_control | awk '{print $3}') +if [ "$current_algo" == "bbr" ]; then + echo "Successfully switched to BBR congestion control algorithm." +else + echo "Failed to switch to BBR. Current algorithm is $current_algo." + exit 1 +fi + +echo "Script completed successfully." + +# === Mount local NVMe instance store (if any) at fibre's data dir === +# +# c6id / i3en / i4i / i7i and similar instance families ship one or more +# unmounted ephemeral NVMe disks. They are 10–20× faster than the EBS +# root volume (gp3 baseline ~125 MB/s, instance NVMe is multi-GB/s). +# Without this block, fibre's `store.Put` writes to the EBS root and +# becomes the upload-path bottleneck — the disk saturates around 125 +# MB/s while the instance store sits idle. +# +# This is a no-op when: +# - the instance type has no second NVMe (DigitalOcean droplets, +# non-c6id EC2 types, Google Cloud) +# - the disk is already formatted+mounted from a previous run +# (idempotent on talis re-deploy) +mount_instance_nvme() { + local fibre_dir="/root/.celestia-fibre" + local label="celestia-fibre" + local dev="" + # Pick the largest unmounted whole-disk NVMe that has no partitions. + # Sort by size desc, take the first. 
+ while read -r name size mp; do + [ "$mp" = "" ] || continue + [ -n "$(ls /sys/block/${name}/${name}p* 2>/dev/null)" ] && continue + dev="/dev/${name}" + break + done < <(lsblk -bdno NAME,SIZE,MOUNTPOINT 2>/dev/null \ + | awk '$1 ~ /^nvme/ {print}' | sort -k2 -nr) + if [ -z "$dev" ]; then + echo "no spare NVMe instance store found — fibre will run on the root volume" + return 0 + fi + if ! blkid "$dev" >/dev/null 2>&1; then + echo "Formatting $dev (ephemeral NVMe instance store) as ext4..." + mkfs.ext4 -F -E lazy_itable_init=1,lazy_journal_init=1 -L "$label" "$dev" + fi + mkdir -p "$fibre_dir" + if ! mountpoint -q "$fibre_dir"; then + mount -o noatime,nodiratime "$dev" "$fibre_dir" + fi + chown root:root "$fibre_dir" + echo "Mounted $dev at $fibre_dir ($(df -h "$fibre_dir" | tail -1))" +} +mount_instance_nvme + +tar -xzf /root/$ARCHIVE_NAME -C /root/ + +source ./vars.sh + +sudo snap install go --channel=1.26/stable --classic + +echo 'export GOPATH="$HOME/go"' >> ~/.profile +echo 'export GOBIN="$GOPATH/bin"' >> ~/.profile +echo 'export PATH="$GOBIN:$PATH"' >> ~/.profile +source ~/.profile + +cd $HOME + +# Get the hostname +hostname=$(hostname) + +# Parse the first part of the hostname +parsed_hostname=$(echo $hostname | awk -F'-' '{print $1 "-" $2}') + +cp payload/build/celestia-appd /bin/celestia-appd +cp payload/build/txsim /bin/txsim +cp payload/build/latency-monitor /bin/latency-monitor +cp payload/build/fibre /bin/fibre +cp payload/build/fibre-txsim /bin/fibre-txsim + +cd $HOME + +rm -rf .celestia-app/ + +celestia-appd config chain-id $CHAIN_ID + +celestia-appd init --chain-id=$CHAIN_ID --home $CELES_HOME $MONIKER + +mv payload/$parsed_hostname/node_key.json $HOME/$CELES_HOME/config/node_key.json + +mv payload/$parsed_hostname/priv_validator_key.json $HOME/$CELES_HOME/config/priv_validator_key.json + +mv payload/$parsed_hostname/priv_validator_state.json $HOME/$CELES_HOME/data/priv_validator_state.json + +cp payload/genesis.json 
$HOME/$CELES_HOME/config/genesis.json + +cp payload/addrbook.json $HOME/$CELES_HOME/config/addrbook.json + +mv payload/$parsed_hostname/app.toml $HOME/$CELES_HOME/config/app.toml + +mv payload/$parsed_hostname/config.toml $HOME/$CELES_HOME/config/config.toml + +cp -r payload/$parsed_hostname/keyring-test $HOME/$CELES_HOME + +# run txsim script which starts a sleep timer and txsim in a different tmux session +source payload/txsim.sh + +# Get the hostname of the machine +HOSTNAME=$(hostname) + +# Base command +COMMAND="celestia-appd start" + +# Define log file path +LOG_FILE="/root/logs" + +# Execute the command and redirect output to the log file +eval $COMMAND 2>&1 | tee -a "$LOG_FILE" diff --git a/tools/talis/scripts/vars.sh b/tools/talis/scripts/vars.sh new file mode 100644 index 0000000000..69b7857893 --- /dev/null +++ b/tools/talis/scripts/vars.sh @@ -0,0 +1,2 @@ +#!/bin/bash +# this file holds env vars for remote machines diff --git a/tools/talis/start_fibre.go b/tools/talis/start_fibre.go new file mode 100644 index 0000000000..4007573d61 --- /dev/null +++ b/tools/talis/start_fibre.go @@ -0,0 +1,91 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" +) + +const StartFibreSessionName = "fibre" + +func startFibreCmd() *cobra.Command { + var ( + rootDir string + SSHKeyPath string + instances int + metricsAddress string + pyroscopeEndpoint string + ) + + cmd := &cobra.Command{ + Use: "start-fibre", + Short: "Start fibre server on remote validators via SSH + tmux", + Long: "Starts fibre server tmux sessions on remote validators. 
The fibre binary must already be deployed via 'talis deploy'.", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + + // Select first N validators (default all) + if instances <= 0 || instances > len(cfg.Validators) { + instances = len(cfg.Validators) + } + validators := cfg.Validators[:instances] + + // Build the remote command + // OTEL_METRICS_EXEMPLAR_FILTER=always_on attaches trace exemplars to all metric observations + remoteCmd := "OTEL_METRICS_EXEMPLAR_FILTER=always_on fibre start --home .celestia-fibre --app-grpc-address localhost:9091" + // Auto-enable metrics when observability nodes are configured + if metricsAddress == "" && len(cfg.Observability) > 0 { + metricsAddress = fmt.Sprintf("http://%s:4318", cfg.Observability[0].PublicIP) + } + if metricsAddress != "" { + remoteCmd += fmt.Sprintf(" --otel-endpoint %s", metricsAddress) + } + // Auto-wire Pyroscope endpoint when observability nodes are configured + if pyroscopeEndpoint == "" && len(cfg.Observability) > 0 { + pyroscopeEndpoint = fmt.Sprintf("http://%s:4040", cfg.Observability[0].PublicIP) + } + if pyroscopeEndpoint != "" { + remoteCmd += fmt.Sprintf(" --pyroscope-endpoint %s", pyroscopeEndpoint) + } + + fmt.Printf("Starting fibre sessions on %d validator(s)...\n", len(validators)) + + if err := runScriptInTMux(validators, resolvedSSHKeyPath, remoteCmd, StartFibreSessionName, 5*time.Minute); err != nil { + return fmt.Errorf("failed to start remote sessions: %w", err) + } + + // Print summary + fmt.Println() + fmt.Println("=== fibre sessions started ===") + fmt.Printf(" tmux session: %s\n", StartFibreSessionName) + fmt.Printf(" log file: /root/talis-%s.log\n", 
StartFibreSessionName) + fmt.Println(" validators:") + for _, val := range validators { + fmt.Printf(" - %s (%s)\n", val.Name, val.PublicIP) + } + fmt.Println() + fmt.Printf(" To kill all: talis kill-session -s %s\n", StartFibreSessionName) + fmt.Printf(" To view logs: ssh root@ 'cat /root/talis-%s.log'\n", StartFibreSessionName) + + return nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory (for config.json)") + cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)") + cmd.Flags().IntVar(&instances, "instances", 0, "number of validators to start fibre on (default all)") + cmd.Flags().StringVar(&metricsAddress, "otel-endpoint", "", "OTLP HTTP endpoint for metrics/traces (e.g. http://host:4318; empty = disabled)") + cmd.Flags().StringVar(&pyroscopeEndpoint, "pyroscope-endpoint", "", "Pyroscope endpoint for continuous profiling (default: auto-detected from observability config, e.g. http://host:4040)") + + return cmd +} diff --git a/tools/talis/status.go b/tools/talis/status.go new file mode 100644 index 0000000000..47bedcf6bd --- /dev/null +++ b/tools/talis/status.go @@ -0,0 +1,74 @@ +package main + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "github.com/cometbft/cometbft/rpc/client/http" + "github.com/spf13/cobra" +) + +func statusCmd() *cobra.Command { + var rootDir string + + cmd := &cobra.Command{ + Use: "status", + Short: "Ping a set of CometBFT nodes and report their latest block height", + Long: "Loads a JSON config containing validator instances, then asynchronously queries each node’s /status endpoint (port 26657) and prints its latest block height.", + Aliases: []string{"s"}, + RunE: func(cmd *cobra.Command, args []string) error { // 1) Load configuration from disk + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config from %q: %w", rootDir, err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no 
validators (nodes) found in config")
+			}
+
+			var wg sync.WaitGroup
+			for _, val := range cfg.Validators {
+				ip := val.PublicIP
+				if ip == "" {
+					fmt.Printf("Skipping %q: no public_ip defined\n", val.Name)
+					continue
+				}
+
+				wg.Add(1)
+				go func(nodeName, nodeIP string) {
+					defer wg.Done()
+
+					remote := fmt.Sprintf("http://%s:26657", nodeIP)
+					client, err := http.New(remote, "/websocket")
+					if err != nil {
+						// log.Printf appends a newline itself; no trailing \n needed.
+						log.Printf("Failed to create RPC client for %s (%s:26657): %v", nodeName, nodeIP, err)
+						return
+					}
+
+					// Call the typed Status endpoint with a 5s timeout.
+					ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+					defer cancel()
+
+					res, err := client.Status(ctx)
+					if err != nil {
+						log.Printf("Failed to get status from %s (%s:26657): %v", nodeName, nodeIP, err)
+						return
+					}
+
+					height := res.SyncInfo.LatestBlockHeight
+
+					log.Printf("%s (%s): height %d", nodeName, nodeIP, height)
+				}(val.Name, ip)
+			}
+
+			wg.Wait()
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config")
+	return cmd
+}
diff --git a/tools/talis/sync_node_cmd.go b/tools/talis/sync_node_cmd.go
new file mode 100644
index 0000000000..b4b57cb9f3
--- /dev/null
+++ b/tools/talis/sync_node_cmd.go
@@ -0,0 +1,677 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"math/rand"
+	"os"
+	"os/exec"
+	"os/signal"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+// syncResult holds the parsed timings of one sync-to-tip iteration.
+type syncResult struct {
+	stateSyncDuration int
+	blockSyncDuration int
+	totalDuration     int
+	finalHeight       int
+}
+
+// Patterns matching the summary lines printed by the remote sync script;
+// \r? tolerates CRLF output coming back over SSH.
+var (
+	syncResultRe    = regexp.MustCompile(`(?m)^State sync duration:\s+(\d+)s\r?$`)
+	blockSyncRe     = regexp.MustCompile(`(?m)^Block sync duration:\s+(\d+)s\r?$`)
+	totalDurationRe = regexp.MustCompile(`(?m)^Total sync duration:\s+(\d+)s\r?$`)
+	finalHeightRe   = regexp.MustCompile(`(?m)^Final height:\s+(\d+)\r?$`)
+)
+
+func parseSyncResult(output string) (syncResult, error) {
extract := func(re *regexp.Regexp) (int, error) { + m := re.FindStringSubmatch(output) + if m == nil { + return 0, fmt.Errorf("pattern %q not found in output", re.String()) + } + return strconv.Atoi(m[1]) + } + var r syncResult + var err error + if r.stateSyncDuration, err = extract(syncResultRe); err != nil { + return r, err + } + if r.blockSyncDuration, err = extract(blockSyncRe); err != nil { + return r, err + } + if r.totalDuration, err = extract(totalDurationRe); err != nil { + return r, err + } + if r.finalHeight, err = extract(finalHeightRe); err != nil { + return r, err + } + return r, nil +} + +func printSummary(results []syncResult) { + n := len(results) + if n == 0 { + return + } + + totals := make([]float64, n) + blocks := make([]float64, n) + states := make([]float64, n) + bps := make([]float64, n) + for i, r := range results { + totals[i] = float64(r.totalDuration) + blocks[i] = float64(r.blockSyncDuration) + states[i] = float64(r.stateSyncDuration) + if r.blockSyncDuration > 0 { + bps[i] = float64(r.finalHeight) / float64(r.blockSyncDuration) + } + } + + fmt.Printf("\n=========================================\n") + fmt.Printf("SUMMARY (%d iterations)\n", n) + fmt.Printf("=========================================\n") + fmt.Printf("%-26s %8s %8s %8s %8s\n", "", "avg", "min", "max", "p99") + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "State sync duration:", avg(states), minVal(states), maxVal(states), percentile(states, 99)) + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "Block sync duration:", avg(blocks), minVal(blocks), maxVal(blocks), percentile(blocks, 99)) + fmt.Printf("%-26s %8.1fs %8.1fs %8.1fs %8.1fs\n", "Total sync duration:", avg(totals), minVal(totals), maxVal(totals), percentile(totals, 99)) + fmt.Printf("%-26s %8.2f %8.2f %8.2f %8.2f\n", "Blocks/sec:", avg(bps), minVal(bps), maxVal(bps), percentile(bps, 99)) + fmt.Printf("=========================================\n") +} + +func minVal(vals []float64) float64 { + m := vals[0] + 
for _, v := range vals[1:] { + if v < m { + m = v + } + } + return m +} + +func maxVal(vals []float64) float64 { + m := vals[0] + for _, v := range vals[1:] { + if v > m { + m = v + } + } + return m +} + +func avg(vals []float64) float64 { + sum := 0.0 + for _, v := range vals { + sum += v + } + return sum / float64(len(vals)) +} + +func percentile(vals []float64, p float64) float64 { + sorted := make([]float64, len(vals)) + copy(sorted, vals) + sort.Float64s(sorted) + idx := (p / 100) * float64(len(sorted)-1) + lower := int(math.Floor(idx)) + upper := int(math.Ceil(idx)) + if lower == upper { + return sorted[lower] + } + frac := idx - float64(lower) + return sorted[lower]*(1-frac) + sorted[upper]*frac +} + +const ( + syncTestMachineType = "c3d-highcpu-16" + syncTestDiskSizeGB = 400 +) + +func syncNodeCmd() *cobra.Command { + var ( + rootDir string + sshPubKeyPath string + sshKeyPath string + gcProject string + gcKeyJSONPath string + region string + iterations int + cooldown int + keep bool + binaryPath string + blockSyncOnly bool + ) + + cmd := &cobra.Command{ + Use: "sync-node", + Short: "Measure sync-to-tip speed on a Talis network", + Long: `Spins up a fresh GCP instance, deploys celestia-appd, syncs to tip +using the existing Talis validators (state sync by default, or block sync +from genesis with --block-sync-only), measures sync time, and tears down the instance.`, + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := LoadConfig(rootDir) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + if len(cfg.Validators) == 0 { + return fmt.Errorf("no validators found in config") + } + + cfg.SSHPubKeyPath = resolveValue(sshPubKeyPath, EnvVarSSHKeyPath, cfg.SSHPubKeyPath) + cfg.GoogleCloudProject = resolveValue(gcProject, EnvVarGoogleCloudProject, cfg.GoogleCloudProject) + cfg.GoogleCloudKeyJSONPath = resolveValue(gcKeyJSONPath, EnvVarGoogleCloudKeyJSONPath, cfg.GoogleCloudKeyJSONPath) + + if cfg.GoogleCloudProject == "" { + 
return fmt.Errorf("google cloud project is required (use --gc-project, env GOOGLE_CLOUD_PROJECT, or config)") + } + + resolvedSSHKeyPath := resolveValue(sshKeyPath, EnvVarSSHKeyPath, strings.ReplaceAll(cfg.SSHPubKeyPath, ".pub", "")) + if resolvedSSHKeyPath == "" { + return fmt.Errorf("SSH private key path is required (use --ssh-key-path or set ssh_pub_key_path in config)") + } + + sshPubKey, err := os.ReadFile(cfg.SSHPubKeyPath) + if err != nil { + return fmt.Errorf("failed to read SSH public key at %s: %w", cfg.SSHPubKeyPath, err) + } + + if binaryPath == "" { + binaryPath = "build/celestia-appd" + } + if _, err := os.Stat(binaryPath); err != nil { + return fmt.Errorf("binary not found at %s: %w", binaryPath, err) + } + + opts, err := gcClientOptions(cfg) + if err != nil { + return fmt.Errorf("failed to create GCP client options: %w", err) + } + + // Pick region + if region == "" || region == RandomRegion { + region = RandomGCRegion() + } + + // Pick a random validator as RPC source + validator := cfg.Validators[rand.Intn(len(cfg.Validators))] + if validator.PublicIP == "" || validator.PublicIP == "TBD" { + return fmt.Errorf("selected validator %s has no public IP", validator.Name) + } + rpcEndpoint := fmt.Sprintf("http://%s:26657", validator.PublicIP) + log.Printf("Using validator %s (%s) as RPC source", validator.Name, validator.PublicIP) + + // Build peer list from all validators + var peers []string + for _, v := range cfg.Validators { + if v.PublicIP == "" || v.PublicIP == "TBD" { + continue + } + peers = append(peers, fmt.Sprintf("%s:26656", v.PublicIP)) + } + + // Create the sync test instance + syncInst := Instance{ + NodeType: Validator, + Name: fmt.Sprintf("sync-test-%d", time.Now().Unix()), + Provider: GoogleCloud, + Slug: syncTestMachineType, + Region: region, + Tags: []string{"talis", "sync-test"}, + } + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + log.Printf("Creating sync-test instance in region %s...", region) + created, 
err := CreateGCInstances(ctx, cfg.GoogleCloudProject, []Instance{syncInst}, string(sshPubKey), opts, 1) + if err != nil { + return fmt.Errorf("failed to create GCP instance: %w", err) + } + if len(created) == 0 { + return fmt.Errorf("no instance was created") + } + + inst := created[0] + log.Printf("Instance %s created with IP %s", inst.Name, inst.PublicIP) + + // Setup cleanup on interrupt + teardown := func() { + if keep { + log.Printf("--keep flag set, leaving instance %s (%s) running", inst.Name, inst.PublicIP) + return + } + log.Printf("Tearing down instance %s...", inst.Name) + teardownCtx, teardownCancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer teardownCancel() + destroyInst := inst + destroyInst.Region = region + if _, err := DestroyGCInstances(teardownCtx, cfg.GoogleCloudProject, []Instance{destroyInst}, opts, 1); err != nil { + log.Printf("Warning: failed to destroy instance %s: %v", inst.Name, err) + } + } + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + sig := <-sigCh + log.Printf("Received signal %v, cleaning up...", sig) + cancel() // Stop all in-flight operations (SSH loop, etc.) 
+ teardown() + os.Exit(1) + }() + + defer func() { + signal.Stop(sigCh) + teardown() + }() + + // Wait for SSH to become available + log.Printf("Waiting for SSH to become available on %s...", inst.PublicIP) + log.Printf(" SSH private key: %s", resolvedSSHKeyPath) + log.Printf(" SSH public key: %s", cfg.SSHPubKeyPath) + if err := waitForSSH(ctx, inst.PublicIP, resolvedSSHKeyPath, 2*time.Minute); err != nil { + return fmt.Errorf("SSH not available: %w", err) + } + log.Printf("SSH is available") + + // SCP the binary to the instance + log.Printf("Uploading celestia-appd binary to %s...", inst.PublicIP) + if err := scpFile(ctx, binaryPath, inst.PublicIP, "/usr/local/bin/celestia-appd", resolvedSSHKeyPath); err != nil { + return fmt.Errorf("failed to upload binary: %w", err) + } + log.Printf("Binary uploaded successfully") + + // Make binary executable + if err := runSSHCommand(ctx, inst.PublicIP, resolvedSSHKeyPath, "chmod +x /usr/local/bin/celestia-appd"); err != nil { + return fmt.Errorf("failed to chmod binary: %w", err) + } + + var results []syncResult + + for i := 1; i <= iterations; i++ { + if iterations > 1 { + log.Printf("=== Starting iteration %d/%d ===", i, iterations) + } + + script := buildSyncScript(cfg.ChainID, rpcEndpoint, peers, i, iterations, blockSyncOnly) + log.Printf("Starting sync measurement on %s...", inst.PublicIP) + + output, err := runSSHStreaming(ctx, inst.PublicIP, resolvedSSHKeyPath, script) + if err != nil { + return fmt.Errorf("sync test failed on iteration %d: %w", i, err) + } + + result, err := parseSyncResult(output) + if err != nil { + log.Printf("Warning: could not parse results for iteration %d: %v", i, err) + } else { + results = append(results, result) + } + + if i < iterations { + log.Printf("Cooldown for %ds before next iteration...", cooldown) + select { + case <-time.After(time.Duration(cooldown) * time.Second): + case <-ctx.Done(): + return ctx.Err() + } + } + } + + if len(results) > 1 { + printSummary(results) + } + + return 
nil + }, + } + + cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory with config.json") + cmd.Flags().StringVarP(&sshPubKeyPath, "ssh-pub-key-path", "s", "", "path to SSH public key") + cmd.Flags().StringVar(&sshKeyPath, "ssh-key-path", "", "path to SSH private key (default: derived from config's ssh_pub_key_path)") + cmd.Flags().StringVar(&gcProject, "gc-project", "", "Google Cloud project") + cmd.Flags().StringVar(&gcKeyJSONPath, "gc-key-json-path", "", "path to Google Cloud service account key JSON") + cmd.Flags().StringVarP(®ion, "region", "r", "random", "GCP region for the sync node") + cmd.Flags().IntVarP(&iterations, "iterations", "n", 1, "number of sync iterations") + cmd.Flags().IntVar(&cooldown, "cooldown", 30, "seconds between iterations") + cmd.Flags().BoolVar(&keep, "keep", false, "don't tear down the instance after (for debugging)") + cmd.Flags().BoolVar(&blockSyncOnly, "block-sync-only", false, "skip state sync and only block sync from genesis") + cmd.Flags().StringVar(&binaryPath, "binary-path", "", "path to celestia-appd binary to deploy (default: build/celestia-appd)") + + return cmd +} + +// waitForSSH polls until an SSH connection succeeds or the timeout is reached. 
+func waitForSSH(ctx context.Context, ip, sshKeyPath string, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	attempt := 0
+	var lastErr error
+	var lastOut string
+	for time.Now().Before(deadline) {
+		attempt++
+		// Bound each probe to 10s so one hung TCP connect cannot eat the whole deadline.
+		sshCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		// Host-key checking is disabled: the instance was just provisioned, so its
+		// key cannot be known in advance (same rationale for the throwaway known_hosts).
+		ssh := exec.CommandContext(sshCtx,
+			"ssh",
+			"-i", sshKeyPath,
+			"-o", "StrictHostKeyChecking=no",
+			"-o", "UserKnownHostsFile=/dev/null",
+			"-o", "ConnectTimeout=5",
+			fmt.Sprintf("root@%s", ip),
+			"echo ok",
+		)
+		out, err := ssh.CombinedOutput()
+		cancel() // release the per-attempt timer before the next iteration
+		outStr := strings.TrimSpace(string(out))
+		// Success requires both a clean exit and the probe's "ok" marker in the output.
+		if err == nil && strings.Contains(outStr, "ok") {
+			return nil
+		}
+		lastErr = err
+		lastOut = outStr
+		fmt.Fprintf(os.Stderr, "  SSH attempt %d: err=%v out=%q\n", attempt, err, truncateOutput(outStr, 200))
+		// Sleep between attempts, but abort immediately if the caller cancels.
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(5 * time.Second):
+		}
+	}
+	return fmt.Errorf("SSH not available on %s after %v (%d attempts), last error: %v, last output: %s", ip, timeout, attempt, lastErr, truncateOutput(lastOut, 500))
+}
+
+// truncateOutput trims s and, when it exceeds maxLen bytes, keeps only the
+// trailing maxLen bytes prefixed with "..." (the tail usually carries the error).
+func truncateOutput(s string, maxLen int) string {
+	// Only keep the last maxLen characters for readability
+	s = strings.TrimSpace(s)
+	if len(s) > maxLen {
+		return "..." + s[len(s)-maxLen:]
+	}
+	return s
+}
+
+// scpFile copies a local file to a remote path via SCP.
+func scpFile(ctx context.Context, localPath, ip, remotePath, sshKeyPath string) error {
+	scp := exec.CommandContext(ctx,
+		"scp",
+		"-i", sshKeyPath,
+		"-o", "StrictHostKeyChecking=no",
+		"-o", "UserKnownHostsFile=/dev/null",
+		localPath,
+		fmt.Sprintf("root@%s:%s", ip, remotePath),
+	)
+	// CombinedOutput so scp's stderr ends up in the returned error message.
+	if out, err := scp.CombinedOutput(); err != nil {
+		return fmt.Errorf("scp error: %v\n%s", err, out)
+	}
+	return nil
+}
+
+// runSSHCommand runs a command on a remote host via SSH and returns the error if any.
+func runSSHCommand(ctx context.Context, ip, sshKeyPath, command string) error {
+	ssh := exec.CommandContext(ctx,
+		"ssh",
+		"-i", sshKeyPath,
+		"-o", "StrictHostKeyChecking=no",
+		"-o", "UserKnownHostsFile=/dev/null",
+		fmt.Sprintf("root@%s", ip),
+		command,
+	)
+	// Capture stdout+stderr so the failure message shows what the remote printed.
+	if out, err := ssh.CombinedOutput(); err != nil {
+		return fmt.Errorf("ssh error: %v\n%s", err, out)
+	}
+	return nil
+}
+
+// runSSHStreaming runs a command on a remote host via SSH, streaming stdout/stderr
+// directly to the user's terminal for real-time output. It also captures stdout
+// and returns it for parsing.
+func runSSHStreaming(ctx context.Context, ip, sshKeyPath, command string) (string, error) {
+	var buf bytes.Buffer
+	ssh := exec.CommandContext(ctx,
+		"ssh",
+		"-i", sshKeyPath,
+		"-o", "StrictHostKeyChecking=no",
+		"-o", "UserKnownHostsFile=/dev/null",
+		// Keepalives so multi-hour sync runs survive idle NAT/firewall timeouts.
+		"-o", "ServerAliveInterval=30",
+		"-o", "ServerAliveCountMax=5",
+		fmt.Sprintf("root@%s", ip),
+		command,
+	)
+	// Tee stdout: the user sees progress live while buf keeps a copy for parsing.
+	ssh.Stdout = io.MultiWriter(os.Stdout, &buf)
+	ssh.Stderr = os.Stderr
+	err := ssh.Run()
+	// Return captured stdout even on error so callers can inspect partial output.
+	return buf.String(), err
+}
+
+// buildSyncScript generates the shell script that runs on the remote instance to
+// perform state sync configuration, start the node, and measure sync times.
+func buildSyncScript(chainID, rpcEndpoint string, peerIPs []string, iteration, totalIterations int, blockSyncOnly bool) string {
+	// peerIPs are "ip:port" pairs; the script resolves each peer's node ID via
+	// its RPC before assembling persistent_peers.
+	peersStr := strings.Join(peerIPs, ",")
+	// Shell-level boolean: the script branches on the literal string "true".
+	blockSyncOnlyStr := "false"
+	if blockSyncOnly {
+		blockSyncOnlyStr = "true"
+	}
+
+	// The whole script passes through fmt.Sprintf, so literal % inside it is
+	// escaped as %%. NOTE(review): in block-sync-only mode BLOCK_HEIGHT=0, so the
+	// "state sync complete" branch fires on the first poll and prints a
+	// trust-height-0 message — cosmetic, but worth confirming it is intended.
+	return fmt.Sprintf(`#!/bin/bash
+set -euo pipefail
+
+CHAIN_ID="%s"
+RPC="%s"
+PEERS="%s"
+ITERATION=%d
+TOTAL_ITERATIONS=%d
+BLOCK_SYNC_ONLY=%s
+HOME_DIR="/root/.celestia-app-sync"
+POLL_INTERVAL=5
+SYNC_TIMEOUT=7200
+
+printf "\n=========================================\n"
+printf "SYNC TEST - ITERATION %%d/%%d\n" "$ITERATION" "$TOTAL_ITERATIONS"
+printf "=========================================\n"
+printf "Chain ID: %%s\n" "$CHAIN_ID"
+printf "RPC: %%s\n" "$RPC"
+printf "=========================================\n\n"
+
+# Install jq if not present
+if ! command -v jq &>/dev/null; then
+  echo "Installing jq..."
+  apt-get update -qq && apt-get install -y -qq jq >/dev/null 2>&1
+fi
+
+# Clean up any previous run
+rm -rf "$HOME_DIR"
+
+echo "Initializing celestia-appd..."
+celestia-appd init sync-node --chain-id "$CHAIN_ID" --home "$HOME_DIR" >/dev/null 2>&1
+
+# Fetch genesis from validator RPC
+echo "Fetching genesis from $RPC..."
+for attempt in $(seq 1 5); do
+  if curl -sf "$RPC/genesis" | jq '.result.genesis' > "$HOME_DIR/config/genesis.json" 2>/dev/null; then
+    GENESIS_SIZE=$(wc -c < "$HOME_DIR/config/genesis.json")
+    echo "Genesis saved ($GENESIS_SIZE bytes)"
+    break
+  fi
+  echo "Attempt $attempt failed, retrying in 5s..."
+  sleep 5
+done
+
+if [ ! -s "$HOME_DIR/config/genesis.json" ]; then
+  echo "ERROR: Failed to fetch genesis"
+  exit 1
+fi
+
+# Fetch node IDs and build persistent_peers
+echo "Building peer list..."
+PERSISTENT_PEERS=""
+for peer_addr in $(echo "$PEERS" | tr ',' ' '); do
+  PEER_IP=$(echo "$peer_addr" | cut -d: -f1)
+  PEER_PORT=$(echo "$peer_addr" | cut -d: -f2)
+  PEER_RPC="http://${PEER_IP}:26657"
+
+  NODE_ID=$(curl -sf "$PEER_RPC/status" 2>/dev/null | jq -r '.result.node_info.id // empty' 2>/dev/null || true)
+  if [ -n "$NODE_ID" ]; then
+    if [ -n "$PERSISTENT_PEERS" ]; then
+      PERSISTENT_PEERS="${PERSISTENT_PEERS},"
+    fi
+    PERSISTENT_PEERS="${PERSISTENT_PEERS}${NODE_ID}@${PEER_IP}:${PEER_PORT}"
+    echo "  Added peer: ${NODE_ID}@${PEER_IP}:${PEER_PORT}"
+  else
+    echo "  Warning: could not get node ID for $PEER_IP"
+  fi
+done
+
+if [ -z "$PERSISTENT_PEERS" ]; then
+  echo "ERROR: No peers found"
+  exit 1
+fi
+
+echo "Found $(echo "$PERSISTENT_PEERS" | tr ',' '\n' | wc -l | tr -d ' ') peers"
+
+# Configure persistent peers
+sed -i "s|^persistent_peers *=.*|persistent_peers = \"$PERSISTENT_PEERS\"|" "$HOME_DIR/config/config.toml"
+
+# Disable block sync verification for faster sync
+sed -i -E "s|^verify_data *=.*|verify_data = false|" "$HOME_DIR/config/config.toml"
+
+# Query network for latest height
+echo "Querying network for latest height..."
+LATEST_HEIGHT=$(curl -sf "$RPC/block" | jq -r '.result.block.header.height')
+echo "Latest height: $LATEST_HEIGHT"
+
+if [ "$BLOCK_SYNC_ONLY" = "true" ]; then
+  echo "Block sync only mode: skipping state sync, syncing from genesis"
+  BLOCK_HEIGHT=0
+else
+  BLOCK_HEIGHT=$((LATEST_HEIGHT - 1000))
+  TRUST_HASH=$(curl -sf "$RPC/block?height=$BLOCK_HEIGHT" | jq -r '.result.block_id.hash')
+
+  echo "Trust height: $BLOCK_HEIGHT"
+  echo "Trust hash: $TRUST_HASH"
+
+  # Enable state sync
+  sed -i -E "s|^(enable[[:space:]]+=[[:space:]]+).*$|\1true|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(rpc_servers[[:space:]]+=[[:space:]]+).*$|\1\"$RPC,$RPC\"|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(trust_height[[:space:]]+=[[:space:]]+).*$|\1$BLOCK_HEIGHT|" "$HOME_DIR/config/config.toml"
+  sed -i -E "s|^(trust_hash[[:space:]]+=[[:space:]]+).*$|\1\"$TRUST_HASH\"|" "$HOME_DIR/config/config.toml"
+fi
+
+echo ""
+echo "Starting celestia-appd..."
+START_TIME=$(date +%%s)
+
+celestia-appd start --home "$HOME_DIR" --force-no-bbr >/root/sync-node.log 2>&1 &
+NODE_PID=$!
+
+cleanup() {
+  kill -TERM "$NODE_PID" 2>/dev/null || true
+}
+trap cleanup EXIT INT TERM
+
+# Wait for RPC to be available
+echo "Waiting for local RPC..."
+for i in $(seq 1 60); do
+  if curl -sf http://localhost:26657/status >/dev/null 2>&1; then
+    echo "Local RPC is available"
+    break
+  fi
+  sleep 2
+done
+
+if ! curl -sf http://localhost:26657/status >/dev/null 2>&1; then
+  echo "ERROR: Local RPC not available after 120s"
+  echo "Last 50 lines of log:"
+  tail -50 /root/sync-node.log || true
+  exit 1
+fi
+
+printf "\n=== Monitoring Sync Progress ===\n"
+STATE_SYNC_COMPLETE=false
+STATE_SYNC_END_TIME=""
+PREV_HEIGHT=0
+STALL_COUNT=0
+MAX_STALLS=24
+
+elapsed=0
+while [ $elapsed -lt $SYNC_TIMEOUT ]; do
+  # Check if process is still alive
+  if ! kill -0 $NODE_PID 2>/dev/null; then
+    echo "ERROR: celestia-appd process died"
+    echo "Last 50 lines of log:"
+    tail -50 /root/sync-node.log || true
+    exit 1
+  fi
+
+  STATUS=$(curl -sf http://localhost:26657/status 2>/dev/null || echo "{}")
+  CATCHING_UP=$(echo "$STATUS" | jq -r '.result.sync_info.catching_up // "true"')
+  CURRENT_HEIGHT=$(echo "$STATUS" | jq -r '.result.sync_info.latest_block_height // "0"')
+  NETWORK_TIP=$(curl -sf "$RPC/block" 2>/dev/null | jq -r '.result.block.header.height // "0"' 2>/dev/null || echo "0")
+  BLOCKS_BEHIND=$((NETWORK_TIP - CURRENT_HEIGHT))
+  [ $BLOCKS_BEHIND -lt 0 ] && BLOCKS_BEHIND=0
+
+  # Detect stalled sync
+  if [ "$CURRENT_HEIGHT" = "$PREV_HEIGHT" ] && [ "$CURRENT_HEIGHT" != "0" ] && [ "$BLOCKS_BEHIND" -gt "5" ]; then
+    STALL_COUNT=$((STALL_COUNT + 1))
+    if [ $STALL_COUNT -ge $MAX_STALLS ]; then
+      NUM_PEERS=$(curl -sf http://localhost:26657/net_info 2>/dev/null | jq -r '.result.n_peers // "0"' 2>/dev/null || echo "0")
+      echo "ERROR: Sync stalled for 2 minutes at height $CURRENT_HEIGHT"
+      echo "Peers connected: $NUM_PEERS"
+      echo "Last 50 lines of log:"
+      tail -50 /root/sync-node.log || true
+      exit 1
+    fi
+    echo "[$(date +%%T)] Height: $CURRENT_HEIGHT / $NETWORK_TIP (${BLOCKS_BEHIND} behind) | STALLED ($STALL_COUNT/${MAX_STALLS})"
+  else
+    STALL_COUNT=0
+    echo "[$(date +%%T)] Height: $CURRENT_HEIGHT / $NETWORK_TIP (${BLOCKS_BEHIND} behind) | Catching up: $CATCHING_UP"
+  fi
+  PREV_HEIGHT=$CURRENT_HEIGHT
+
+  # Check state sync completion
+  if [ "$STATE_SYNC_COMPLETE" = "false" ] && [ "$CURRENT_HEIGHT" -ge "$BLOCK_HEIGHT" ] 2>/dev/null; then
+    STATE_SYNC_END_TIME=$(date +%%s)
+    STATE_SYNC_DURATION=$((STATE_SYNC_END_TIME - START_TIME))
+    printf "\nState sync complete! Reached trust height %%s (%%ss)\n=== Monitoring Block Sync to Tip ===\n" "$BLOCK_HEIGHT" "$STATE_SYNC_DURATION"
+    STATE_SYNC_COMPLETE=true
+  fi
+
+  # Check if fully synced
+  if [ "$BLOCKS_BEHIND" -le "0" ] 2>/dev/null; then
+    TOTAL_END_TIME=$(date +%%s)
+    TOTAL_DURATION=$((TOTAL_END_TIME - START_TIME))
+    BLOCK_SYNC_DURATION=$((TOTAL_END_TIME - ${STATE_SYNC_END_TIME:-$START_TIME}))
+
+    if [ -z "${STATE_SYNC_END_TIME:-}" ]; then
+      STATE_SYNC_DURATION=$TOTAL_DURATION
+    fi
+
+    printf "\n=========================================\n"
+    printf "ITERATION %%d/%%d COMPLETE\n" "$ITERATION" "$TOTAL_ITERATIONS"
+    printf "=========================================\n"
+    printf "State sync duration: %%ss\n" "${STATE_SYNC_DURATION:-$TOTAL_DURATION}"
+    printf "Block sync duration: %%ss\n" "$BLOCK_SYNC_DURATION"
+    printf "Total sync duration: %%ss\n" "$TOTAL_DURATION"
+    printf "Final height: %%s\n" "$CURRENT_HEIGHT"
+    printf "Network tip: %%s\n" "$NETWORK_TIP"
+    printf "=========================================\n"
+
+    kill -TERM "$NODE_PID" 2>/dev/null || true
+    trap - EXIT INT TERM
+    exit 0
+  fi
+
+  sleep $POLL_INTERVAL
+  elapsed=$((elapsed + POLL_INTERVAL))
+done
+
+echo "ERROR: Sync timeout (${SYNC_TIMEOUT}s)"
+exit 1
+`, chainID, rpcEndpoint, peersStr, iteration, totalIterations, blockSyncOnlyStr)
+}
diff --git a/tools/talis/txsim.go b/tools/talis/txsim.go
new file mode 100644
index 0000000000..390b2137b7
--- /dev/null
+++ b/tools/talis/txsim.go
@@ -0,0 +1,140 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	// TxSimSessionName is the tmux session name under which txsim runs on each host.
+	TxSimSessionName = "txsim"
+)
+
+// startTxsimCmd creates a cobra command for starting txsim on remote instances.
+func startTxsimCmd() *cobra.Command {
+	var (
+		instances          int
+		seqCount           int
+		blobsPerPFB        int
+		startSize          int
+		endSize            int
+		rootDir            string
+		cfgPath            string
+		SSHKeyPath         string
+		fireAndForget      bool
+		fireAndForgetDelay time.Duration
+	)
+
+	cmd := &cobra.Command{
+		Use:   "txsim",
+		Short: "Starts the txsim command on remote validators",
+		Long:  "Connects to remote validators and starts the txsim command in a detached tmux session.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cfg, err := LoadConfig(rootDir)
+			if err != nil {
+				return fmt.Errorf("failed to load config: %w", err)
+			}
+
+			if len(cfg.Validators) == 0 {
+				return fmt.Errorf("no validators found in config")
+			}
+
+			// Derive the private-key path by stripping only the ".pub" suffix.
+			// ReplaceAll would also mangle ".pub" occurring elsewhere in the
+			// path (e.g. "/home/.pub-keys/id.pub").
+			resolvedSSHKeyPath := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.TrimSuffix(cfg.SSHPubKeyPath, ".pub"))
+
+			// Remote txsim invocation; stdout is redirected to txsim.log on the host.
+			txsimScript := fmt.Sprintf(
+				"txsim .celestia-app/ --blob %d --blob-amounts %d --blob-sizes %d-%d --grpc-endpoint localhost:9091 --feegrant",
+				seqCount,
+				blobsPerPFB,
+				startSize,
+				endSize,
+			)
+			if fireAndForget {
+				txsimScript += fmt.Sprintf(" --fire-and-forget --fire-and-forget-delay %s", fireAndForgetDelay.String())
+			}
+			txsimScript += " > txsim.log"
+
+			// Only spin up txsim on the first `instances` validators; ranging over
+			// cfg.Validators already bounds i below len(cfg.Validators).
+			insts := []Instance{}
+			for i, val := range cfg.Validators {
+				if i >= instances {
+					break
+				}
+				insts = append(insts, val)
+			}
+
+			// Printf instead of Println: passing "\n" as a Println operand inserts
+			// stray spaces and is flagged by go vet's printf check.
+			fmt.Printf("%v\n%s\n", insts, txsimScript)
+
+			return runScriptInTMux(insts, resolvedSSHKeyPath, txsimScript, TxSimSessionName, time.Minute*5)
+		},
+	}
+
+	// Define flags for the command
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory in which to initialize")
+	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "name of the config") // Keep cfgPath flag for consistency with other commands, although not strictly used after LoadConfig.
+	cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to the user's SSH key (overrides environment variable and default)")
+	cmd.Flags().IntVarP(&seqCount, "sequences", "s", 1, "the number of sequences (concurrent PFB streams) ran by each txsim instance")
+	cmd.Flags().IntVarP(&instances, "instances", "i", 1, "the number of instances of txsim, each ran on its own validator")
+	cmd.Flags().IntVarP(&blobsPerPFB, "blobs-per-pfb", "b", 1, "the number of blobs in each PFB")
+	cmd.Flags().IntVarP(&startSize, "min-blob-size", "m", 1000000, "the min number of bytes in each blob")
+	cmd.Flags().IntVarP(&endSize, "max-blob-size", "x", 1900000, "the max number of bytes in each blob")
+	cmd.Flags().BoolVar(&fireAndForget, "fire-and-forget", false, "enable fire-and-forget mode (broadcast txs without waiting for inclusion)")
+	cmd.Flags().DurationVar(&fireAndForgetDelay, "fire-and-forget-delay", 500*time.Millisecond, "delay between submissions in fire-and-forget mode")
+	_ = cmd.MarkFlagRequired("sequences")
+	_ = cmd.MarkFlagRequired("instances")
+	return cmd
+}
+
+// killTmuxSessionCmd creates a cobra command for killing a tmux session on remote validators.
+func killTmuxSessionCmd() *cobra.Command {
+	var (
+		rootDir    string
+		cfgPath    string
+		SSHKeyPath string
+		session    string
+		timeout    time.Duration
+	)
+
+	cmd := &cobra.Command{
+		Use:     "kill-session",
+		Short:   "Kills a detached tmux session on remote validators",
+		Long:    "Connects to remote validator nodes and kills the named tmux session (errors suppressed).",
+		Aliases: []string{"k"},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			// Load config
+			cfg, err := LoadConfig(rootDir)
+			if err != nil {
+				return fmt.Errorf("failed to load config: %w", err)
+			}
+			if len(cfg.Validators) == 0 {
+				return fmt.Errorf("no validators found in config")
+			}
+
+			// Derive the private-key path by stripping only the ".pub" suffix;
+			// ReplaceAll would also mangle ".pub" occurring mid-path.
+			resolvedKey := resolveValue(SSHKeyPath, EnvVarSSHKeyPath, strings.TrimSuffix(cfg.SSHPubKeyPath, ".pub"))
+
+			// Raw kill session (suppress errors so no output if session doesn't exist)
+			// NOTE(review): session is interpolated unquoted into a shell command;
+			// operator-supplied names with spaces/metacharacters would misbehave — confirm acceptable.
+			killScript := fmt.Sprintf(
+				"tmux kill-session -t %s 2>/dev/null",
+				session,
+			)
+
+			// Target all instance types: validators + encoders
+			targets := append([]Instance{}, cfg.Validators...)
+			targets = append(targets, cfg.Encoders...)
+
+			// Run the kill script in its own tmux on each host
+			return runScriptInTMux(targets, resolvedKey, killScript, "kill", timeout)
+		},
+	}
+
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory to load config from")
+	cmd.Flags().StringVarP(&cfgPath, "config", "c", "config.json", "config file name")
+	cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "path to SSH private key (overrides env/default)")
+	cmd.Flags().StringVarP(&session, "session", "s", "txsim", "name of the tmux session to kill")
+	_ = cmd.MarkFlagRequired("session")
+	cmd.Flags().DurationVarP(&timeout, "timeout", "t", time.Minute*2, "how long to wait for SSH/tmux commands to complete")
+
+	return cmd
+}
diff --git a/tools/talis/upload_data.go b/tools/talis/upload_data.go
new file mode 100644
index 0000000000..a23a5b8cb9
--- /dev/null
+++ b/tools/talis/upload_data.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+// uploadDataCmd creates a cobra command for kicking off trace collection.
+func uploadDataCmd() *cobra.Command {
+	var (
+		rootDir    string
+		SSHKeyPath string
+	)
+
+	cmd := &cobra.Command{
+		Use:     "upload-data",
+		Short:   "Upload data from the talis network",
+		Long:    "Connects to every node in the network and starts the upload_traces.sh script in a detached tmux session.",
+		Aliases: []string{"u"},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			cfg, err := LoadConfig(rootDir)
+			if err != nil {
+				return fmt.Errorf("failed to load config: %w", err)
+			}
+
+			if len(cfg.Validators) == 0 {
+				return fmt.Errorf("no validators (nodes) found in config")
+			}
+
+			// Strip only the ".pub" suffix to derive the private-key path
+			// (ReplaceAll would corrupt paths containing ".pub" mid-string).
+			resolvedKey := resolveValue(
+				SSHKeyPath,
+				EnvVarSSHKeyPath,
+				strings.TrimSuffix(cfg.SSHPubKeyPath, ".pub"),
+			)
+
+			const sessionName = "traces"
+			return runScriptInTMux(
+				cfg.Validators,
+				resolvedKey,
+				"source /root/payload/upload_traces.sh",
+				sessionName,
+				time.Minute*5,
+			)
+		},
+	}
+
+	// define your flags
+	cmd.Flags().StringVarP(&rootDir, "directory", "d", ".", "root directory containing your config")
+	cmd.Flags().StringVarP(&SSHKeyPath, "ssh-key-path", "k", "", "override path to your SSH private key")
+	return cmd
+}
diff --git a/tools/talis/util_test.go b/tools/talis/util_test.go
new file mode 100644
index 0000000000..109c1beb65
--- /dev/null
+++ b/tools/talis/util_test.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestMatchPattern exercises matchPattern's glob semantics table-style.
+func TestMatchPattern(t *testing.T) {
+	tests := []struct {
+		name     string
+		pattern  string
+		input    string
+		expected bool
+	}{
+		{"wildcard suffix match", "validator-*", "validator-0", true},
+		{"wildcard matches anything", "*", "anything", true},
+		{"wildcard matches exact", "node-*", "node-123", true},
+		{"wildcard mismatch", "node-*", "validator-1", false},
+		{"exact match", "node-1", "node-1", true},
+		{"exact mismatch", "node-1", "node-2", false},
+		{"prefix only", "*-0", "validator-0", true},
+		{"suffix only", "validator-*", "node-0", false},
+		{"empty pattern matches nothing", "", "anything", false},
+		{"wildcard middle", "val*-1", "validator-1", true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			match, err := matchPattern(tt.pattern, tt.input)
+			require.NoError(t, err)
+			require.Equal(t, tt.expected, match)
+		})
+	}
+}