From 3a27baeeef4544e24dcb8890fd1efd236fabfeab Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Thu, 7 May 2026 14:41:54 +0800 Subject: [PATCH 01/17] fix devnet docker submitter config --- ops/docker/docker-compose-4nodes.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 32ea8b79b..39febd04a 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ b/ops/docker/docker-compose-4nodes.yml @@ -403,7 +403,7 @@ services: - "7546:8546" - "7551:8551" healthcheck: - test: curl -f http://localhost:8545 + test: ["CMD-SHELL", "wget -qO- --header='Content-Type: application/json' --post-data='{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' http://localhost:8545 | grep -q '\"result\"'"] interval: 30s timeout: 5s retries: 3 @@ -483,6 +483,7 @@ services: - TX_SUBMITTER_FINALIZE=true - TX_SUBMITTER_MAX_FINALIZE_NUM=100 - TX_SUBMITTER_PRIORITY_ROLLUP=false + - TX_SUBMITTER_SEAL_BATCH=true - TX_SUBMITTER_METRICS_SERVER_ENABLE=false - TX_SUBMITTER_METRICS_HOSTNAME=0.0.0.0 - TX_SUBMITTER_METRICS_PORT=6060 @@ -525,6 +526,7 @@ services: - TX_SUBMITTER_FINALIZE=false - TX_SUBMITTER_MAX_FINALIZE_NUM=100 - TX_SUBMITTER_PRIORITY_ROLLUP=false + - TX_SUBMITTER_SEAL_BATCH=true - TX_SUBMITTER_METRICS_SERVER_ENABLE=false - TX_SUBMITTER_METRICS_HOSTNAME=0.0.0.0 - TX_SUBMITTER_METRICS_PORT=6060 @@ -567,6 +569,7 @@ services: - TX_SUBMITTER_FINALIZE=false - TX_SUBMITTER_MAX_FINALIZE_NUM=100 - TX_SUBMITTER_PRIORITY_ROLLUP=false + - TX_SUBMITTER_SEAL_BATCH=true - TX_SUBMITTER_METRICS_SERVER_ENABLE=false - TX_SUBMITTER_METRICS_HOSTNAME=0.0.0.0 - TX_SUBMITTER_METRICS_PORT=6060 @@ -609,6 +612,7 @@ services: - TX_SUBMITTER_FINALIZE=false - TX_SUBMITTER_MAX_FINALIZE_NUM=100 - TX_SUBMITTER_PRIORITY_ROLLUP=false + - TX_SUBMITTER_SEAL_BATCH=true - TX_SUBMITTER_METRICS_SERVER_ENABLE=false - TX_SUBMITTER_METRICS_HOSTNAME=0.0.0.0 - TX_SUBMITTER_METRICS_PORT=6060 From 
b793c64ea3468f631eaea6d545613069ce2e3e20 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Thu, 7 May 2026 15:56:52 +0800 Subject: [PATCH 02/17] support reth execution client in devnet --- Makefile | 46 +++++++++-- node/ops-morph/docker-compose.yml | 16 ++-- node/ops-morph/testnet/docker-compose.yml | 48 ++++++------ node/ops-morph/testnet/static-nodes.json | 2 +- ops/devnet-morph/devnet/__init__.py | 14 +++- .../test_devnet_execution_client.py | 21 +++++ .../docker-compose.override.yml | 20 ++--- ops/docker-sequencer-test/run-test.sh | 20 ++--- .../scripts/tx-generator.sh | 2 +- ops/docker/docker-compose-4nodes.yml | 76 +++++++++---------- ops/docker/docker-compose-reth.yml | 69 +++++++++++++++++ ops/docker/static-nodes.json | 8 +- oracle/docker-compose.yml | 2 +- 13 files changed, 237 insertions(+), 107 deletions(-) create mode 100644 ops/devnet-morph/test_devnet_execution_client.py create mode 100644 ops/docker/docker-compose-reth.yml diff --git a/Makefile b/Makefile index c8a72db59..2ff3a00b5 100644 --- a/Makefile +++ b/Makefile @@ -137,25 +137,52 @@ go-ubuntu-builder: ################## devnet 4 nodes #################### -devnet-up: submodules go-ubuntu-builder - python3 ops/devnet-morph/main.py --polyrepo-dir=. 
+EXECUTION_CLIENT ?= geth +MORPH_RETH_DIR ?= ../morph-reth +MORPH_RETH_BUILD_PROFILE ?= release +MORPH_RETH_RUSTFLAGS ?= +MORPH_RETH_DOCKER_TARGET ?= builder +MORPH_RETH_ENTRYPOINT ?= /app/morph-reth +export MORPH_RETH_DIR +export MORPH_RETH_BUILD_PROFILE +export MORPH_RETH_RUSTFLAGS +export MORPH_RETH_DOCKER_TARGET +export MORPH_RETH_ENTRYPOINT +DEVNET_COMPOSE_FILES := -f docker-compose-4nodes.yml + +ifeq ($(EXECUTION_CLIENT),geth) +DEVNET_EXECUTION_DEPS := submodules +else ifeq ($(EXECUTION_CLIENT),reth) +DEVNET_EXECUTION_DEPS := reth +DEVNET_COMPOSE_FILES += -f docker-compose-reth.yml +else +$(error unsupported EXECUTION_CLIENT "$(EXECUTION_CLIENT)", expected "geth" or "reth") +endif + +devnet-up: $(DEVNET_EXECUTION_DEPS) go-ubuntu-builder + python3 ops/devnet-morph/main.py --polyrepo-dir=. --execution-client=$(EXECUTION_CLIENT) .PHONY: devnet-up -devnet-up-debugccc: - python3 ops/devnet-morph/main.py --polyrepo-dir=. --debugccc +devnet-up-reth: + $(MAKE) devnet-up EXECUTION_CLIENT=reth +.PHONY: devnet-up-reth + +devnet-up-debugccc: $(DEVNET_EXECUTION_DEPS) go-ubuntu-builder + python3 ops/devnet-morph/main.py --polyrepo-dir=. 
--execution-client=$(EXECUTION_CLIENT) --debugccc .PHONY: devnet-up-debugccc devnet-down: - cd ops/docker && docker compose -f docker-compose-4nodes.yml down + cd ops/docker && docker compose $(DEVNET_COMPOSE_FILES) down .PHONY: devnet-down devnet-clean-build: devnet-l1-clean - cd ops/docker && docker compose -f docker-compose-4nodes.yml down --volumes --remove-orphans + cd ops/docker && docker compose $(DEVNET_COMPOSE_FILES) down --volumes --remove-orphans docker volume ls --filter name=docker_ --format='{{.Name}}' | xargs docker volume rm 2>/dev/null || true rm -rf ops/l2-genesis/.devnet rm -rf ops/docker/.devnet rm -rf ops/docker/consensus/beacondata ops/docker/consensus/validatordata ops/docker/consensus/genesis.ssz rm -rf ops/docker/execution/geth + rm -rf ops/docker/execution/reth .PHONY: devnet-clean-build devnet-clean: devnet-clean-build @@ -171,9 +198,14 @@ devnet-l1-clean: .PHONY: devnet-l1-clean devnet-logs: - @(cd ops/docker && docker-compose logs -f) + @(cd ops/docker && docker compose $(DEVNET_COMPOSE_FILES) logs -f) .PHONY: devnet-logs +reth: + @test -d "$(MORPH_RETH_DIR)" || (echo "morph-reth directory not found: $(MORPH_RETH_DIR)" && exit 1) + docker build -t morph-reth:latest --target "$(MORPH_RETH_DOCKER_TARGET)" --build-arg BUILD_PROFILE="$(MORPH_RETH_BUILD_PROFILE)" --build-arg RUSTFLAGS="$(MORPH_RETH_RUSTFLAGS)" "$(MORPH_RETH_DIR)" +.PHONY: reth + # tx-submitter SUBMITTERS := $(shell grep -o 'tx-submitter-[0-9]*[^:]' ops/docker/docker-compose-4nodes.yml | sort | uniq) rebuild-all-tx-submitter: diff --git a/node/ops-morph/docker-compose.yml b/node/ops-morph/docker-compose.yml index 6557ba900..4f9106aad 100644 --- a/node/ops-morph/docker-compose.yml +++ b/node/ops-morph/docker-compose.yml @@ -1,11 +1,11 @@ version: '3.8' volumes: - sequencer_geth_data: + sequencer_el_data: sequencer_node_data: services: - sequencer_geth: + morph-el-0: image: morph/l2geth:latest ports: - "8545:8545" @@ -18,7 +18,7 @@ services: timeout: 5s retries: 3 volumes: - - 
"sequencer_geth_data:${GETH_DATA_DIR}" + - "sequencer_el_data:${GETH_DATA_DIR}" - "${PWD}/jwt-secret.txt:${JWT_SECRET_PATH}" - "${PWD}/genesis_geth.json:${GENESIS_FILE_PATH}" entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments @@ -27,7 +27,7 @@ services: sequencer_node: depends_on: - sequencer_geth: + morph-el-0: condition: service_started build: context: .. @@ -37,8 +37,8 @@ services: - "26656:26656" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://sequencer_geth:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://sequencer_geth:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-0:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-0:8551 - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt ## todo need to replace it to a public network - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} @@ -54,7 +54,7 @@ services: tx-submitter: depends_on: - sequencer_geth: + morph-el-0: condition: service_started sequencer_node: condition: service_started @@ -62,7 +62,7 @@ services: command: rollup environment: - TX_SUBMITTER_L1_PRIVATE_KEY=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 - - TX_SUBMITTER_L2_RPC_URL=http://sequencer_geth:8545 + - TX_SUBMITTER_L2_RPC_URL=http://morph-el-0:8545 - TX_SUBMITTER_L1_RPC_URL=${L1_ETH_RPC} - TX_SUBMITTER_ROLLUP_CONTRACT_ADDRESS=0x6900000000000000000000000000000000000010 - TX_SUBMITTER_EVENT_NAME=SubmitBatches diff --git a/node/ops-morph/testnet/docker-compose.yml b/node/ops-morph/testnet/docker-compose.yml index 95330c83e..d0322fb09 100644 --- a/node/ops-morph/testnet/docker-compose.yml +++ b/node/ops-morph/testnet/docker-compose.yml @@ -32,7 +32,7 @@ volumes: o: bind services: - morph-geth-0: + morph-el-0: image: morph/l2geth:latest ports: - "8545:8545" @@ -48,9 +48,9 @@ services: - "/bin/bash" - "/entrypoint.sh" - morph-geth-1: + morph-el-1: depends_on: - - morph-geth-0 + - morph-el-0 image: morph/l2geth:latest ports: - "8645:8545" @@ -63,14 +63,14 @@ services: - 
"${PWD}/../genesis_geth.json:/genesis.json" - "${PWD}/static-nodes.json:/db/geth/static-nodes.json" environment: - - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-geth-0:30303 + - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-el-0:30303 entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments - "/bin/bash" - "/entrypoint.sh" - morph-geth-2: + morph-el-2: depends_on: - - morph-geth-0 + - morph-el-0 image: morph/l2geth:latest ports: - "8745:8545" @@ -83,14 +83,14 @@ services: - "${PWD}/../genesis_geth.json:/genesis.json" - "${PWD}/static-nodes.json:/db/geth/static-nodes.json" environment: - - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-geth-0:30303 + - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-el-0:30303 entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments - "/bin/bash" - "/entrypoint.sh" - morph-geth-3: + morph-el-3: depends_on: - - morph-geth-0 + - morph-el-0 image: morph/l2geth:latest ports: - "8845:8545" @@ -103,14 +103,14 @@ services: - "${PWD}/../genesis_geth.json:/genesis.json" - "${PWD}/static-nodes.json:/db/geth/static-nodes.json" environment: - - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-geth-0:30303 + - BOOT_NODES=enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-el-0:30303 entrypoint: # pass the L2 specific flags by overriding the 
entry-point and adding extra arguments - "/bin/bash" - "/entrypoint.sh" node-0: depends_on: - morph-geth-0: + morph-el-0: condition: service_started image: morph-node:latest ports: @@ -119,8 +119,8 @@ services: - "26658" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-0:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-0:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-0:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-0:8551 - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=0x6900000000000000000000000000000000000001 @@ -134,7 +134,7 @@ services: node-1: depends_on: - morph-geth-1: + morph-el-1: condition: service_started image: morph-node:latest ports: @@ -143,8 +143,8 @@ services: - "26658" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-1:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-1:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-1:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-1:8551 - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=0x6900000000000000000000000000000000000001 @@ -158,7 +158,7 @@ services: node-2: depends_on: - morph-geth-2: + morph-el-2: condition: service_started image: morph-node:latest ports: @@ -167,8 +167,8 @@ services: - "26658" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-2:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-2:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-2:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-2:8551 - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=0x6900000000000000000000000000000000000001 @@ -182,17 +182,17 @@ services: node-3: depends_on: - morph-geth-3: + morph-el-3: condition: service_started - image: -node:latest + image: morph-node:latest ports: - "26656" - "26657" - 
"26658" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-3:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-3:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-3:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-3:8551 - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=0x6900000000000000000000000000000000000001 @@ -202,4 +202,4 @@ services: command: > morphnode --dev-sequencer - --home $NODE_DATA_DIR \ No newline at end of file + --home $NODE_DATA_DIR diff --git a/node/ops-morph/testnet/static-nodes.json b/node/ops-morph/testnet/static-nodes.json index a8876e3dd..e3b48b8d5 100644 --- a/node/ops-morph/testnet/static-nodes.json +++ b/node/ops-morph/testnet/static-nodes.json @@ -1 +1 @@ -["enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-geth-0:30303"] \ No newline at end of file +["enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-el-0:30303"] \ No newline at end of file diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index 385a7a2a3..3fc3bff79 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -21,6 +21,8 @@ parser = argparse.ArgumentParser(description='devnet launcher') parser.add_argument('--polyrepo-dir', help='Directory of the polyrepo', default=os.getcwd()) parser.add_argument('--only-l1', help='Only bootstrap l1 geth', action="store_true") +parser.add_argument('--execution-client', choices=('geth', 'reth'), default='geth', + help='L2 execution client implementation to run') # parser.add_argument('--deploy', help='Whether the contracts should be predeployed or deployed', action="store_true") parser.add_argument('--debugccc', help='Whether set the debug log level for ccc', action="store_true") 
@@ -30,6 +32,13 @@ ETH = GWEI * GWEI +def compose_file_args(execution_client): + args = ['-f', 'docker-compose-4nodes.yml'] + if execution_client == 'reth': + args.extend(['-f', 'docker-compose-reth.yml']) + return args + + class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) @@ -255,12 +264,11 @@ def devnet_deploy(paths, args): envfile.truncate() envfile.close() - log.info('Bringing up L2.') + log.info(f'Bringing up L2 with {args.execution_client}.') - run_command(['docker', 'compose', '-f', 'docker-compose-4nodes.yml', 'up', - '--no-recreate','-d'], check=False, cwd=paths.ops_dir, + run_command(['docker', 'compose', *compose_file_args(args.execution_client), 'up', '-d'], check=False, cwd=paths.ops_dir, env={ 'MORPH_PORTAL': addresses['Proxy__L1MessageQueueWithGasPriceOracle'], 'MORPH_ROLLUP': addresses['Proxy__Rollup'], diff --git a/ops/devnet-morph/test_devnet_execution_client.py b/ops/devnet-morph/test_devnet_execution_client.py new file mode 100644 index 000000000..167a15bb0 --- /dev/null +++ b/ops/devnet-morph/test_devnet_execution_client.py @@ -0,0 +1,21 @@ +import unittest +from pathlib import Path +import sys + +sys.path.insert(0, str(Path(__file__).resolve().parent)) +from devnet import compose_file_args + + +class ExecutionClientComposeArgsTest(unittest.TestCase): + def test_geth_uses_base_compose_file(self): + self.assertEqual(compose_file_args("geth"), ["-f", "docker-compose-4nodes.yml"]) + + def test_reth_adds_reth_override_file(self): + self.assertEqual( + compose_file_args("reth"), + ["-f", "docker-compose-4nodes.yml", "-f", "docker-compose-reth.yml"], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/ops/docker-sequencer-test/docker-compose.override.yml b/ops/docker-sequencer-test/docker-compose.override.yml index 9cc69cae8..44aa1c3f7 100644 --- a/ops/docker-sequencer-test/docker-compose.override.yml +++ b/ops/docker-sequencer-test/docker-compose.override.yml @@ -3,20 +3,20 @@ version: '3.8' services: - morph-geth-0: 
- image: morph-geth-test:latest + morph-el-0: + image: morph-el-test:latest build: context: ../.. dockerfile: ops/docker-sequencer-test/Dockerfile.l2-geth-test - morph-geth-1: - image: morph-geth-test:latest + morph-el-1: + image: morph-el-test:latest - morph-geth-2: - image: morph-geth-test:latest + morph-el-2: + image: morph-el-test:latest - morph-geth-3: - image: morph-geth-test:latest + morph-el-3: + image: morph-el-test:latest node-0: image: morph-node-test:latest @@ -55,8 +55,8 @@ services: - MORPH_NODE_CONSENSUS_SWITCH_HEIGHT=${CONSENSUS_SWITCH_HEIGHT:-10} - sentry-geth-0: - image: morph-geth-test:latest + sentry-el-0: + image: morph-el-test:latest sentry-node-0: image: morph-node-test:latest diff --git a/ops/docker-sequencer-test/run-test.sh b/ops/docker-sequencer-test/run-test.sh index 81361fefa..9b0f0a553 100755 --- a/ops/docker-sequencer-test/run-test.sh +++ b/ops/docker-sequencer-test/run-test.sh @@ -109,9 +109,9 @@ build_test_images() { # log_warn "Build may fail due to network issues" # fi - # Build test geth image - log_info "Building morph-geth-test (using local go-ethereum)..." - docker build -t morph-geth-test:latest \ + # Build test execution image + log_info "Building morph-el-test (using local go-ethereum)..." + docker build -t morph-el-test:latest \ -f morph/ops/docker-sequencer-test/Dockerfile.l2-geth-test . # Build test node image @@ -275,17 +275,17 @@ start_l2_test() { # Stop any existing L2 containers $COMPOSE_CMD stop \ - morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 \ + morph-el-0 morph-el-1 morph-el-2 morph-el-3 \ node-0 node-1 node-2 node-3 2>/dev/null || true # Note: Test images should already be built by build_test_images() # Uncomment below if you need to rebuild during start # log_info "Building L2 containers with test images..." - # $COMPOSE_CMD build morph-geth-0 node-0 + # $COMPOSE_CMD build morph-el-0 node-0 - # Start L2 geth nodes - log_info "Starting L2 geth nodes..." 
- $COMPOSE_CMD up -d morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 + # Start L2 execution nodes + log_info "Starting L2 execution nodes..." + $COMPOSE_CMD up -d morph-el-0 morph-el-1 morph-el-2 morph-el-3 sleep 5 @@ -364,7 +364,7 @@ test_fullnode_sync() { # Start sentry node (fullnode) log_info "Starting fullnode (sentry-node-0)..." - $COMPOSE_CMD up -d sentry-geth-0 sentry-node-0 + $COMPOSE_CMD up -d sentry-el-0 sentry-node-0 sleep 10 wait_for_rpc "http://127.0.0.1:8945" @@ -522,7 +522,7 @@ case "${1:-}" in echo "Usage: $0 {build|setup|start|stop|clean|logs|test|tx|status|upgrade-height}" echo "" echo "Commands:" - echo " build - Build test Docker images (morph-geth-test, morph-node-test)" + echo " build - Build test Docker images (morph-el-test, morph-node-test)" echo " setup - Run full devnet setup (L1 + contracts + L2 genesis)" echo " start - Start L2 nodes with test images" echo " stop - Stop all containers" diff --git a/ops/docker-sequencer-test/scripts/tx-generator.sh b/ops/docker-sequencer-test/scripts/tx-generator.sh index 2311a64d5..d6ee40cdf 100644 --- a/ops/docker-sequencer-test/scripts/tx-generator.sh +++ b/ops/docker-sequencer-test/scripts/tx-generator.sh @@ -4,7 +4,7 @@ set -e -L2_RPC="${L2_RPC:-http://morph-geth-0:8545}" +L2_RPC="${L2_RPC:-http://morph-el-0:8545}" INTERVAL="${TX_INTERVAL:-5}" # seconds between txs PRIVATE_KEY="${PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}" diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 39febd04a..83d4f8b9e 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ b/ops/docker/docker-compose-4nodes.yml @@ -8,13 +8,13 @@ volumes: morph_data_1: morph_data_2: morph_data_3: - sentry_geth_data: + sentry_el_data: node_data_0: node_data_1: node_data_2: node_data_3: sentry_node_data: - validator_geth_data: + validator_el_data: validator_node_data: layer1-el-data: layer1-cl-data: @@ -116,8 +116,8 @@ services: restart: unless-stopped # 
========== L2 Services ========== - morph-geth-0: - container_name: morph-geth-0 + morph-el-0: + container_name: morph-el-0 depends_on: layer1-el: condition: service_started @@ -143,10 +143,10 @@ services: - "/bin/bash" - "/entrypoint.sh" - morph-geth-1: - container_name: morph-geth-1 + morph-el-1: + container_name: morph-el-1 depends_on: - - morph-geth-0 + - morph-el-0 image: morph-geth:latest restart: unless-stopped ports: @@ -167,10 +167,10 @@ services: - "/bin/bash" - "/entrypoint.sh" - morph-geth-2: - container_name: morph-geth-2 + morph-el-2: + container_name: morph-el-2 depends_on: - - morph-geth-0 + - morph-el-0 image: morph-geth:latest restart: unless-stopped ports: @@ -191,10 +191,10 @@ services: - "/bin/bash" - "/entrypoint.sh" - morph-geth-3: - container_name: morph-geth-3 + morph-el-3: + container_name: morph-el-3 depends_on: - - morph-geth-0 + - morph-el-0 image: morph-geth:latest restart: unless-stopped ports: @@ -219,7 +219,7 @@ services: node-0: container_name: node-0 depends_on: - morph-geth-0: + morph-el-0: condition: service_started image: morph-node:latest build: @@ -232,8 +232,8 @@ services: - "26658" - "26660" environment: - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-0:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-0:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-0:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-0:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} @@ -261,8 +261,8 @@ services: - "26658" - "26660" environment: - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-1:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-1:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-1:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-1:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - 
MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} @@ -291,8 +291,8 @@ services: - "26660" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-2:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-2:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-2:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-2:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} @@ -321,8 +321,8 @@ services: - "26660" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://morph-geth-3:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://morph-geth-3:8551 + - MORPH_NODE_L2_ETH_RPC=http://morph-el-3:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://morph-el-3:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} @@ -337,8 +337,8 @@ services: morphnode --home $NODE_DATA_DIR - sentry-geth-0: - container_name: sentry-geth-0 + sentry-el-0: + container_name: sentry-el-0 depends_on: node-3: condition: service_started @@ -354,7 +354,7 @@ services: - "6060" - "30303" volumes: - - "sentry_geth_data:/db" + - "sentry_el_data:/db" - "${PWD}/jwt-secret.txt:/jwt-secret.txt" - "${PWD}/../l2-genesis/.devnet/genesis-l2.json:/genesis.json" - "${PWD}/static-nodes.json:/db/geth/static-nodes.json" @@ -376,8 +376,8 @@ services: - "26660" environment: - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://sentry-geth-0:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://sentry-geth-0:8551 + - MORPH_NODE_L2_ETH_RPC=http://sentry-el-0:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://sentry-el-0:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} @@ 
-392,8 +392,8 @@ services: --home $NODE_DATA_DIR - validator_geth: - container_name: validator_geth + validator-el: + container_name: validator-el image: morph-geth:latest depends_on: tx-submitter-0: @@ -408,7 +408,7 @@ services: timeout: 5s retries: 3 volumes: - - "validator_geth_data:${GETH_DATA_DIR}" + - "validator_el_data:${GETH_DATA_DIR}" - "${PWD}/jwt-secret.txt:${JWT_SECRET_PATH}" - "${PWD}/../l2-genesis/.devnet/genesis-l2.json:/genesis.json" entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments @@ -418,7 +418,7 @@ services: validator_node: container_name: validator_node depends_on: - validator_geth: + validator-el: condition: service_started node-0: condition: service_started @@ -426,8 +426,8 @@ services: ports: - "26660" environment: - - MORPH_NODE_L2_ETH_RPC=http://validator_geth:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://validator_geth:8551 + - MORPH_NODE_L2_ETH_RPC=http://validator-el:8545 + - MORPH_NODE_L2_ENGINE_RPC=http://validator-el:8551 - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} ## todo need to replace it to a public network - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} @@ -470,7 +470,7 @@ services: - TX_SUBMITTER_BUILD_ENV=dev - TX_SUBMITTER_L1_ETH_RPC=${L1_ETH_RPC} - TX_SUBMITTER_L1_PRIVATE_KEY=0xd99870855d97327d20c666abc78588f1449b1fac76ed0c86c1afb9ce2db85f32 - - TX_SUBMITTER_L2_ETH_RPCS=http://morph-geth-0:8545,http://morph-geth-1:8545 + - TX_SUBMITTER_L2_ETH_RPCS=http://morph-el-0:8545,http://morph-el-1:8545 - TX_SUBMITTER_MAX_BATCH_BUILD_TIME=60s - TX_SUBMITTER_MAX_TX_SIZE=125952 - TX_SUBMITTER_POLL_INTERVAL=3s @@ -513,7 +513,7 @@ services: - TX_SUBMITTER_BUILD_ENV=dev - TX_SUBMITTER_L1_ETH_RPC=${L1_ETH_RPC} - TX_SUBMITTER_L1_PRIVATE_KEY=0x0890c388c3bf5e04fee1d8f3c117e5f44f435ced7baf7bfd66c10e1f3a3f4b10 - - TX_SUBMITTER_L2_ETH_RPCS=http://morph-geth-0:8545,http://morph-geth-1:8545 + - TX_SUBMITTER_L2_ETH_RPCS=http://morph-el-0:8545,http://morph-el-1:8545 - TX_SUBMITTER_MAX_BATCH_BUILD_TIME=60s - 
TX_SUBMITTER_MAX_TX_SIZE=125952 - TX_SUBMITTER_POLL_INTERVAL=3s @@ -556,7 +556,7 @@ services: - TX_SUBMITTER_BUILD_ENV=dev - TX_SUBMITTER_L1_ETH_RPC=${L1_ETH_RPC} - TX_SUBMITTER_L1_PRIVATE_KEY=0x6fd437eef7a83c486bd2e0a802ae071b3912d125ac31ac08f60841fd891559ae - - TX_SUBMITTER_L2_ETH_RPCS=http://morph-geth-2:8545,http://morph-geth-3:8545 + - TX_SUBMITTER_L2_ETH_RPCS=http://morph-el-2:8545,http://morph-el-3:8545 - TX_SUBMITTER_MAX_BATCH_BUILD_TIME=60s - TX_SUBMITTER_MAX_TX_SIZE=125952 - TX_SUBMITTER_POLL_INTERVAL=3s @@ -599,7 +599,7 @@ services: - TX_SUBMITTER_BUILD_ENV=dev - TX_SUBMITTER_L1_ETH_RPC=${L1_ETH_RPC} - TX_SUBMITTER_L1_PRIVATE_KEY=0x9ae53aecdaebe4dcbfec96f3123a2a8c53f9596bf4b3d5adc9a388ccb361b4c0 - - TX_SUBMITTER_L2_ETH_RPCS=http://morph-geth-2:8545,http://morph-geth-3:8545 + - TX_SUBMITTER_L2_ETH_RPCS=http://morph-el-2:8545,http://morph-el-3:8545 - TX_SUBMITTER_MAX_BATCH_BUILD_TIME=60s - TX_SUBMITTER_MAX_TX_SIZE=125952 - TX_SUBMITTER_POLL_INTERVAL=3s @@ -641,7 +641,7 @@ services: environment: - GAS_ORACLE_L1_RPC=${L1_ETH_RPC} - GAS_ORACLE_L1_BEACON_RPC=${L1_BEACON_CHAIN_RPC} - - GAS_ORACLE_L2_RPC=http://morph-geth-0:8545 + - GAS_ORACLE_L2_RPC=http://morph-el-0:8545 - GAS_THRESHOLD=5 - INTERVAL=28000 - L2_GAS_PRICE_ORACLE=0x530000000000000000000000000000000000000F diff --git a/ops/docker/docker-compose-reth.yml b/ops/docker/docker-compose-reth.yml new file mode 100644 index 000000000..7a9d0420c --- /dev/null +++ b/ops/docker/docker-compose-reth.yml @@ -0,0 +1,69 @@ +x-reth-command: &reth-command + - node + - --chain + - /genesis.json + - --datadir + - /db + - --http + - --http.addr + - 0.0.0.0 + - --http.port + - "8545" + - --http.api + - web3,debug,eth,txpool,net,trace,admin,reth + - --ws + - --ws.addr + - 0.0.0.0 + - --ws.port + - "8546" + - --ws.api + - web3,debug,eth,txpool,net,trace,admin,reth + - --authrpc.addr + - 0.0.0.0 + - --authrpc.port + - "8551" + - --authrpc.jwtsecret + - /jwt-secret.txt + - --disable-nat + - --disable-discovery + - 
--engine.persistence-threshold + - "256" + - --engine.memory-block-buffer-target + - "16" + - --engine.persistence-backpressure-threshold + - "512" + +x-reth-service: &reth-service + image: morph-reth:latest + user: "0:0" + entrypoint: + - ${MORPH_RETH_ENTRYPOINT:-/app/morph-reth} + command: *reth-command + +services: + morph-el-0: + <<: *reth-service + build: + context: ${MORPH_RETH_DIR:-../../../morph-reth} + dockerfile: Dockerfile + target: ${MORPH_RETH_DOCKER_TARGET:-builder} + args: + BUILD_PROFILE: ${MORPH_RETH_BUILD_PROFILE:-release} + RUSTFLAGS: ${MORPH_RETH_RUSTFLAGS:-} + + morph-el-1: + <<: *reth-service + + morph-el-2: + <<: *reth-service + + morph-el-3: + <<: *reth-service + + sentry-el-0: + <<: *reth-service + + validator-el: + <<: *reth-service + healthcheck: + disable: true diff --git a/ops/docker/static-nodes.json b/ops/docker/static-nodes.json index 2142637e3..7502f805e 100644 --- a/ops/docker/static-nodes.json +++ b/ops/docker/static-nodes.json @@ -1,5 +1,5 @@ -["enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-geth-0:30303", - "enode://bd755ce0bc8c06b4444b9013e8d1215a02e2b53f39f746f060c292ba2f6877d7b702374f006a49a7b1506bf1bc027b43824859d081283e6bac97c8600cdf3fee@morph-geth-1:30303", - "enode://c91a993ace50749c89d37d554f12b2f4937d2ecca0232695bb33772d95a01f53564ad9dd71465c229be21e231e5c46929c2adaa78bea9d5f0966c46fca327c46@morph-geth-2:30303", - "enode://7211a9f1d896d6fef69154b97a868f1ac59e178eadfa54c3fc9644fa0f25ba2a0771927acdc08bb1d6ae2ea7a64f7ed9ddd74e97472e7d2e0df66dae5608fb10@morph-geth-3:30303" +["enode://58e698ea2dd8a76e0cb185d13c1faabf223b60c89fef988c8b89496571056d6c2922109537bb291cd87f2ec09a23ac37d59bde2c7a4885d07b7b641cadff2921@morph-el-0:30303", + "enode://bd755ce0bc8c06b4444b9013e8d1215a02e2b53f39f746f060c292ba2f6877d7b702374f006a49a7b1506bf1bc027b43824859d081283e6bac97c8600cdf3fee@morph-el-1:30303", + 
"enode://c91a993ace50749c89d37d554f12b2f4937d2ecca0232695bb33772d95a01f53564ad9dd71465c229be21e231e5c46929c2adaa78bea9d5f0966c46fca327c46@morph-el-2:30303", + "enode://7211a9f1d896d6fef69154b97a868f1ac59e178eadfa54c3fc9644fa0f25ba2a0771927acdc08bb1d6ae2ea7a64f7ed9ddd74e97472e7d2e0df66dae5608fb10@morph-el-3:30303" ] \ No newline at end of file diff --git a/oracle/docker-compose.yml b/oracle/docker-compose.yml index 18e6e8b2c..6612f321f 100644 --- a/oracle/docker-compose.yml +++ b/oracle/docker-compose.yml @@ -13,7 +13,7 @@ services: - STAKING_ORACLE_BUILD_ENV=dev - STAKING_ORACLE_L1_ETH_RPC=${L1_ETH_RPC} - STAKING_ORACLE_RECORD_PRIVATE_KEY=${RECORD_PRIVATE_KEY} - - STAKING_ORACLE_L2_ETH_RPC=http://morph-geth-0:8545 + - STAKING_ORACLE_L2_ETH_RPC=http://morph-el-0:8545 - STAKING_ORACLE_L2_TENDERMINT_RPC=http://node-0:26657 - STAKING_ORACLE_L2_WS_ENDPOINT=http://node-0:26656 - STAKING_ORACLE_ROLLUP=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} From 67128c8795762544f768397aa3bd9261394519f7 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Thu, 7 May 2026 22:09:41 +0800 Subject: [PATCH 03/17] use published reth image by default --- Makefile | 26 +++++++++++++-- ops/docker/docker-compose-reth.yml | 52 ++++++++---------------------- 2 files changed, 37 insertions(+), 41 deletions(-) diff --git a/Makefile b/Makefile index 2ff3a00b5..c26c638dd 100644 --- a/Makefile +++ b/Makefile @@ -138,11 +138,19 @@ go-ubuntu-builder: ################## devnet 4 nodes #################### EXECUTION_CLIENT ?= geth +MORPH_RETH_BUILD_FROM_SOURCE ?= false +ifeq ($(MORPH_RETH_BUILD_FROM_SOURCE),true) +MORPH_RETH_IMAGE ?= morph-reth:latest +MORPH_RETH_ENTRYPOINT ?= /app/morph-reth +else +MORPH_RETH_IMAGE ?= ghcr.io/morph-l2/morph-reth:latest +MORPH_RETH_ENTRYPOINT ?= /usr/local/bin/morph-reth +endif MORPH_RETH_DIR ?= ../morph-reth MORPH_RETH_BUILD_PROFILE ?= release MORPH_RETH_RUSTFLAGS ?= MORPH_RETH_DOCKER_TARGET ?= builder -MORPH_RETH_ENTRYPOINT ?= /app/morph-reth +export 
MORPH_RETH_IMAGE export MORPH_RETH_DIR export MORPH_RETH_BUILD_PROFILE export MORPH_RETH_RUSTFLAGS @@ -153,8 +161,12 @@ DEVNET_COMPOSE_FILES := -f docker-compose-4nodes.yml ifeq ($(EXECUTION_CLIENT),geth) DEVNET_EXECUTION_DEPS := submodules else ifeq ($(EXECUTION_CLIENT),reth) -DEVNET_EXECUTION_DEPS := reth DEVNET_COMPOSE_FILES += -f docker-compose-reth.yml +ifeq ($(MORPH_RETH_BUILD_FROM_SOURCE),true) +DEVNET_EXECUTION_DEPS := reth +else +DEVNET_EXECUTION_DEPS := reth-image +endif else $(error unsupported EXECUTION_CLIENT "$(EXECUTION_CLIENT)", expected "geth" or "reth") endif @@ -185,6 +197,10 @@ devnet-clean-build: devnet-l1-clean rm -rf ops/docker/execution/reth .PHONY: devnet-clean-build +devnet-clean-build-reth: + $(MAKE) devnet-clean-build EXECUTION_CLIENT=reth +.PHONY: devnet-clean-build-reth + devnet-clean: devnet-clean-build docker image ls '*morph*' --format='{{.Repository}}' | xargs -r docker rmi docker image ls '*sentry-*' --format='{{.Repository}}' | xargs -r docker rmi @@ -201,9 +217,13 @@ devnet-logs: @(cd ops/docker && docker compose $(DEVNET_COMPOSE_FILES) logs -f) .PHONY: devnet-logs +reth-image: + docker pull "$(MORPH_RETH_IMAGE)" +.PHONY: reth-image + reth: @test -d "$(MORPH_RETH_DIR)" || (echo "morph-reth directory not found: $(MORPH_RETH_DIR)" && exit 1) - docker build -t morph-reth:latest --target "$(MORPH_RETH_DOCKER_TARGET)" --build-arg BUILD_PROFILE="$(MORPH_RETH_BUILD_PROFILE)" --build-arg RUSTFLAGS="$(MORPH_RETH_RUSTFLAGS)" "$(MORPH_RETH_DIR)" + docker build -t "$(MORPH_RETH_IMAGE)" --target "$(MORPH_RETH_DOCKER_TARGET)" --build-arg BUILD_PROFILE="$(MORPH_RETH_BUILD_PROFILE)" --build-arg RUSTFLAGS="$(MORPH_RETH_RUSTFLAGS)" "$(MORPH_RETH_DIR)" .PHONY: reth # tx-submitter diff --git a/ops/docker/docker-compose-reth.yml b/ops/docker/docker-compose-reth.yml index 7a9d0420c..920c8af98 100644 --- a/ops/docker/docker-compose-reth.yml +++ b/ops/docker/docker-compose-reth.yml @@ -1,55 +1,31 @@ x-reth-command: &reth-command - node - - --chain - - 
/genesis.json - - --datadir - - /db + - --chain=/genesis.json + - --datadir=/db - --http - - --http.addr - - 0.0.0.0 - - --http.port - - "8545" - - --http.api - - web3,debug,eth,txpool,net,trace,admin,reth + - --http.addr=0.0.0.0 + - --http.port=8545 + - --http.api=web3,debug,eth,txpool,net,trace,admin,reth - --ws - - --ws.addr - - 0.0.0.0 - - --ws.port - - "8546" - - --ws.api - - web3,debug,eth,txpool,net,trace,admin,reth - - --authrpc.addr - - 0.0.0.0 - - --authrpc.port - - "8551" - - --authrpc.jwtsecret - - /jwt-secret.txt - - --disable-nat + - --ws.addr=0.0.0.0 + - --ws.port=8546 + - --ws.api=web3,debug,eth,txpool,net,trace,admin,reth + - --authrpc.addr=0.0.0.0 + - --authrpc.port=8551 + - --authrpc.jwtsecret=/jwt-secret.txt + - --nat=none - --disable-discovery - - --engine.persistence-threshold - - "256" - - --engine.memory-block-buffer-target - - "16" - - --engine.persistence-backpressure-threshold - - "512" x-reth-service: &reth-service - image: morph-reth:latest + image: ${MORPH_RETH_IMAGE:-ghcr.io/morph-l2/morph-reth:latest} user: "0:0" entrypoint: - - ${MORPH_RETH_ENTRYPOINT:-/app/morph-reth} + - ${MORPH_RETH_ENTRYPOINT:-/usr/local/bin/morph-reth} command: *reth-command services: morph-el-0: <<: *reth-service - build: - context: ${MORPH_RETH_DIR:-../../../morph-reth} - dockerfile: Dockerfile - target: ${MORPH_RETH_DOCKER_TARGET:-builder} - args: - BUILD_PROFILE: ${MORPH_RETH_BUILD_PROFILE:-release} - RUSTFLAGS: ${MORPH_RETH_RUSTFLAGS:-} morph-el-1: <<: *reth-service From 74eed9eb5da8bbfe60bb04ae22dd6d71cb76a891 Mon Sep 17 00:00:00 2001 From: panos-xyz Date: Thu, 7 May 2026 22:24:53 +0800 Subject: [PATCH 04/17] remove devnet execution client test --- .../test_devnet_execution_client.py | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 ops/devnet-morph/test_devnet_execution_client.py diff --git a/ops/devnet-morph/test_devnet_execution_client.py b/ops/devnet-morph/test_devnet_execution_client.py deleted file mode 100644 index 
167a15bb0..000000000 --- a/ops/devnet-morph/test_devnet_execution_client.py +++ /dev/null @@ -1,21 +0,0 @@ -import unittest -from pathlib import Path -import sys - -sys.path.insert(0, str(Path(__file__).resolve().parent)) -from devnet import compose_file_args - - -class ExecutionClientComposeArgsTest(unittest.TestCase): - def test_geth_uses_base_compose_file(self): - self.assertEqual(compose_file_args("geth"), ["-f", "docker-compose-4nodes.yml"]) - - def test_reth_adds_reth_override_file(self): - self.assertEqual( - compose_file_args("reth"), - ["-f", "docker-compose-4nodes.yml", "-f", "docker-compose-reth.yml"], - ) - - -if __name__ == "__main__": - unittest.main() From d2bb14ec16bf7ce1b0fc0cf9182cd81164b94504 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 10:55:58 +0800 Subject: [PATCH 05/17] chore(docker-sequencer-test): rename bitget to polyrepo Replace the internal codename "bitget" with the neutral term "polyrepo" in build context references, variable names, and container paths. 
--- .../Dockerfile.l2-geth-test | 2 +- .../Dockerfile.l2-node-test | 36 +++++++++---------- ops/docker-sequencer-test/run-test.sh | 14 ++++---- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/ops/docker-sequencer-test/Dockerfile.l2-geth-test b/ops/docker-sequencer-test/Dockerfile.l2-geth-test index 1c053f44b..17de81dd1 100644 --- a/ops/docker-sequencer-test/Dockerfile.l2-geth-test +++ b/ops/docker-sequencer-test/Dockerfile.l2-geth-test @@ -1,5 +1,5 @@ # Build Geth for Sequencer Test -# Build context should be bitget/ (parent of morph) +# Build context should be the polyrepo root (parent of morph) FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder # Copy local go-ethereum (not submodule) diff --git a/ops/docker-sequencer-test/Dockerfile.l2-node-test b/ops/docker-sequencer-test/Dockerfile.l2-node-test index 1ece1eb81..c7ce80847 100644 --- a/ops/docker-sequencer-test/Dockerfile.l2-node-test +++ b/ops/docker-sequencer-test/Dockerfile.l2-node-test @@ -5,33 +5,33 @@ FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder # Order matters for cache efficiency # Copy go-ethereum dependency files -COPY ./go-ethereum/go.mod ./go-ethereum/go.sum /bitget/go-ethereum/ +COPY ./go-ethereum/go.mod ./go-ethereum/go.sum /polyrepo/go-ethereum/ # Copy tendermint dependency files -COPY ./tendermint/go.mod ./tendermint/go.sum /bitget/tendermint/ +COPY ./tendermint/go.mod ./tendermint/go.sum /polyrepo/tendermint/ # Copy morph go.work and all module dependency files -COPY ./morph/go.work ./morph/go.work.sum /bitget/morph/ -COPY ./morph/node/go.mod ./morph/node/go.sum /bitget/morph/node/ -COPY ./morph/bindings/go.mod ./morph/bindings/go.sum /bitget/morph/bindings/ -COPY ./morph/contracts/go.mod ./morph/contracts/go.sum /bitget/morph/contracts/ -COPY ./morph/oracle/go.mod ./morph/oracle/go.sum /bitget/morph/oracle/ -COPY ./morph/tx-submitter/go.mod ./morph/tx-submitter/go.sum /bitget/morph/tx-submitter/ -COPY ./morph/ops/l2-genesis/go.mod 
./morph/ops/l2-genesis/go.sum /bitget/morph/ops/l2-genesis/ -COPY ./morph/ops/tools/go.mod ./morph/ops/tools/go.sum /bitget/morph/ops/tools/ -COPY ./morph/token-price-oracle/go.mod ./morph/token-price-oracle/go.sum /bitget/morph/token-price-oracle/ +COPY ./morph/go.work ./morph/go.work.sum /polyrepo/morph/ +COPY ./morph/node/go.mod ./morph/node/go.sum /polyrepo/morph/node/ +COPY ./morph/bindings/go.mod ./morph/bindings/go.sum /polyrepo/morph/bindings/ +COPY ./morph/contracts/go.mod ./morph/contracts/go.sum /polyrepo/morph/contracts/ +COPY ./morph/oracle/go.mod ./morph/oracle/go.sum /polyrepo/morph/oracle/ +COPY ./morph/tx-submitter/go.mod ./morph/tx-submitter/go.sum /polyrepo/morph/tx-submitter/ +COPY ./morph/ops/l2-genesis/go.mod ./morph/ops/l2-genesis/go.sum /polyrepo/morph/ops/l2-genesis/ +COPY ./morph/ops/tools/go.mod ./morph/ops/tools/go.sum /polyrepo/morph/ops/tools/ +COPY ./morph/token-price-oracle/go.mod ./morph/token-price-oracle/go.sum /polyrepo/morph/token-price-oracle/ # Download dependencies (this layer is cached if go.mod/go.sum don't change) -WORKDIR /bitget/morph/node +WORKDIR /polyrepo/morph/node RUN go mod download -x # Now copy all source code -COPY ./go-ethereum /bitget/go-ethereum -COPY ./tendermint /bitget/tendermint -COPY ./morph /bitget/morph +COPY ./go-ethereum /polyrepo/go-ethereum +COPY ./tendermint /polyrepo/tendermint +COPY ./morph /polyrepo/morph # Build (no need to download again, just compile) -WORKDIR /bitget/morph/node +WORKDIR /polyrepo/morph/node RUN make build # Final Stage @@ -41,7 +41,7 @@ RUN apt-get -qq update \ && apt-get -qq install -y --no-install-recommends ca-certificates \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /bitget/morph/node/build/bin/tendermint /usr/local/bin/ -COPY --from=builder /bitget/morph/node/build/bin/morphnode /usr/local/bin/ +COPY --from=builder /polyrepo/morph/node/build/bin/tendermint /usr/local/bin/ +COPY --from=builder /polyrepo/morph/node/build/bin/morphnode /usr/local/bin/ CMD 
["morphnode", "--home", "/data"] diff --git a/ops/docker-sequencer-test/run-test.sh b/ops/docker-sequencer-test/run-test.sh index 9b0f0a553..d1928de7e 100755 --- a/ops/docker-sequencer-test/run-test.sh +++ b/ops/docker-sequencer-test/run-test.sh @@ -6,7 +6,7 @@ set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" MORPH_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -BITGET_ROOT="$(cd "$MORPH_ROOT/.." && pwd)" +POLYREPO_ROOT="$(cd "$MORPH_ROOT/.." && pwd)" OPS_DIR="$MORPH_ROOT/ops" DOCKER_DIR="$OPS_DIR/docker" DEVNET_DIR="$OPS_DIR/devnet-morph" @@ -87,17 +87,17 @@ set_upgrade_height() { } # Build test images (with -test suffix) -# Uses bitget/ as build context to access local go-ethereum and tendermint +# Uses the polyrepo root as build context to access local go-ethereum and tendermint build_test_images() { log_info "Building test Docker images..." - log_info "Using build context: $BITGET_ROOT" - + log_info "Using build context: $POLYREPO_ROOT" + # Build go-ubuntu-builder if needed cd "$MORPH_ROOT" make go-ubuntu-builder - - # Build from bitget/ directory to access all repos - cd "$BITGET_ROOT" + + # Build from the polyrepo root to access all repos + cd "$POLYREPO_ROOT" # # Copy go module cache to avoid network downloads # if [ -d "$HOME/go/pkg/mod" ]; then From 1672b69a1c4228f31203623d6c0434895c0b8c13 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 10:56:58 +0800 Subject: [PATCH 06/17] docs(devnet): add docstring to compose_file_args --- ops/devnet-morph/devnet/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index 3fc3bff79..92b92f8cb 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -33,6 +33,7 @@ def compose_file_args(execution_client): + """Return docker-compose -f flags for the chosen L2 execution client.""" args = ['-f', 'docker-compose-4nodes.yml'] if execution_client == 'reth': args.extend(['-f', 
'docker-compose-reth.yml']) From 4a2e1f3f809cf5ef3e66b8536430b7425e37e0e2 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 11:22:19 +0800 Subject: [PATCH 07/17] docs(devnet): add docstring to devnet_deploy --- ops/devnet-morph/devnet/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index 92b92f8cb..ebc73e2b7 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -154,8 +154,8 @@ def devnet_build(paths): }) -# Bring up the devnet where the contracts are deployed to L1 def devnet_deploy(paths, args): + """Bring up the devnet where the contracts are deployed to L1.""" if not test_port(9545): devnet_l1(paths) done_file = pjoin(paths.devnet_dir, 'done') From f9327506c49074ba2291721e98e0e478746372d9 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 11:32:05 +0800 Subject: [PATCH 08/17] docs(devnet): add docstrings to remaining functions Cover the rest of the file with one-line docstrings to satisfy CodeRabbit's docstring coverage threshold. 
--- ops/devnet-morph/devnet/__init__.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index ebc73e2b7..d7b5b8f01 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -41,11 +41,15 @@ def compose_file_args(execution_client): class Bunch: + """Lightweight attribute container constructed from keyword arguments.""" + def __init__(self, **kwds): + """Store all keyword arguments as attributes on the instance.""" self.__dict__.update(kwds) def main(): + """Entry point: parse CLI arguments and bring up the L1-only or full devnet.""" args = parser.parse_args() polyrepo_dir = os.path.abspath(args.polyrepo_dir) @@ -82,6 +86,7 @@ def main(): def devnet_l1(paths, result=None): + """Start the L1 execution/consensus/validator stack and fund sequencer accounts.""" log.info('Starting L1.') layer1_dir = pjoin(paths.ops_dir, 'layer1') @@ -147,6 +152,7 @@ def devnet_l1(paths, result=None): def devnet_build(paths): + """Build the docker images declared in docker-compose-4nodes.yml.""" run_command(['docker', 'compose', '-f', 'docker-compose-4nodes.yml', 'build'], cwd=paths.ops_dir, env={ 'PWD': paths.ops_dir, 'DOCKER_BUILDKIT': '1', # (should be available by default in later versions, but explicitly enable it anyway) @@ -286,6 +292,7 @@ def devnet_deploy(paths, args): def wait_for_rpc_server(url): + """Block until the JSON-RPC server at url answers an eth_chainId call successfully.""" log.info(f'Waiting for RPC server at {url}') conn = http.client.HTTPConnection(url) @@ -306,6 +313,7 @@ def wait_for_rpc_server(url): def run_command(args, check=True, shell=False, cwd=None, env=None, output=None): + """Run a subprocess with the parent environment merged with the supplied env dict.""" env = env if env else {} return subprocess.run( args, @@ -323,6 +331,7 @@ def run_command(args, check=True, shell=False, cwd=None, env=None, output=None): def 
run_command_capture_output(args, check=True, shell=False, cwd=None, env=None): + """Run a subprocess and return its CompletedProcess with stdout/stderr captured.""" env = env if env else {} return subprocess.run( args, @@ -339,6 +348,7 @@ def run_command_capture_output(args, check=True, shell=False, cwd=None, env=None def wait_up(port, retries=10, wait_secs=1): + """Poll a TCP port on 127.0.0.1 until it accepts a connection or retries are exhausted.""" for i in range(0, retries): log.info(f'Trying 127.0.0.1:{port}') s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -354,6 +364,7 @@ def wait_up(port, retries=10, wait_secs=1): def test_port(port): + """Return True if a TCP connection to 127.0.0.1:port succeeds, False otherwise.""" log.info(f'Testing 127.0.0.1:{port}') s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: @@ -366,16 +377,19 @@ def test_port(port): def write_json(path, data): + """Serialize data to path as indented JSON.""" with open(path, 'w+') as f: json.dump(data, f, indent=' ') def read_json(path): + """Load and return the JSON document stored at path.""" with open(path, 'r') as f: return json.load(f) def eth_accounts(url): + """Call eth_accounts on url and return the raw JSON-RPC response body.""" log.info(f'Fetch eth_accounts {url}') conn = http.client.HTTPConnection(url) headers = {'Content-type': 'application/json'} From 6d8d745d955b8b3a23de7c8667fe0fa73d9ad646 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 14:40:48 +0800 Subject: [PATCH 09/17] fix(devnet): isolate geth build config to prevent reth from inheriting Dockerfile.l2-geth Move the morph-el-0 build: section from docker-compose-4nodes.yml into a dedicated docker-compose-geth-build.yml, included only when EXECUTION_CLIENT=geth. The reth overlay now sees no build: on morph-el-0, eliminating the risk of docker compose up building geth code and tagging it as the reth image when the reth image is absent. 
--- Makefile | 1 + ops/devnet-morph/devnet/__init__.py | 2 ++ ops/docker/docker-compose-4nodes.yml | 3 --- ops/docker/docker-compose-geth-build.yml | 5 +++++ 4 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 ops/docker/docker-compose-geth-build.yml diff --git a/Makefile b/Makefile index c26c638dd..39b0eab6b 100644 --- a/Makefile +++ b/Makefile @@ -160,6 +160,7 @@ DEVNET_COMPOSE_FILES := -f docker-compose-4nodes.yml ifeq ($(EXECUTION_CLIENT),geth) DEVNET_EXECUTION_DEPS := submodules +DEVNET_COMPOSE_FILES += -f docker-compose-geth-build.yml else ifeq ($(EXECUTION_CLIENT),reth) DEVNET_COMPOSE_FILES += -f docker-compose-reth.yml ifeq ($(MORPH_RETH_BUILD_FROM_SOURCE),true) diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index d7b5b8f01..ce4f789a5 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -37,6 +37,8 @@ def compose_file_args(execution_client): args = ['-f', 'docker-compose-4nodes.yml'] if execution_client == 'reth': args.extend(['-f', 'docker-compose-reth.yml']) + elif execution_client == 'geth': + args.extend(['-f', 'docker-compose-geth-build.yml']) return args diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 83d4f8b9e..180199ceb 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ b/ops/docker/docker-compose-4nodes.yml @@ -122,9 +122,6 @@ services: layer1-el: condition: service_started image: morph-geth:latest - build: - context: ../.. - dockerfile: ops/docker/Dockerfile.l2-geth restart: unless-stopped ports: - "8545:8545" diff --git a/ops/docker/docker-compose-geth-build.yml b/ops/docker/docker-compose-geth-build.yml new file mode 100644 index 000000000..f8a3070ad --- /dev/null +++ b/ops/docker/docker-compose-geth-build.yml @@ -0,0 +1,5 @@ +services: + morph-el-0: + build: + context: ../.. 
+ dockerfile: ops/docker/Dockerfile.l2-geth From 8a5fc0af223d75383b8446ab04904f1180c02665 Mon Sep 17 00:00:00 2001 From: panos Date: Fri, 8 May 2026 15:44:04 +0800 Subject: [PATCH 10/17] fix(devnet): reset reth inherited geth builds Keep the base devnet compose file self-contained for geth while using the reth overlay to explicitly reset inherited geth build definitions. Constraint: Do not include the devnet execution-client test file in this commit Rejected: Keep a separate geth build compose file | changes direct base compose usage Confidence: high Scope-risk: narrow --- Makefile | 1 - ops/devnet-morph/devnet/__init__.py | 2 -- ops/docker/docker-compose-4nodes.yml | 3 +++ ops/docker/docker-compose-geth-build.yml | 5 ----- ops/docker/docker-compose-reth.yml | 2 ++ 5 files changed, 5 insertions(+), 8 deletions(-) delete mode 100644 ops/docker/docker-compose-geth-build.yml diff --git a/Makefile b/Makefile index 39b0eab6b..c26c638dd 100644 --- a/Makefile +++ b/Makefile @@ -160,7 +160,6 @@ DEVNET_COMPOSE_FILES := -f docker-compose-4nodes.yml ifeq ($(EXECUTION_CLIENT),geth) DEVNET_EXECUTION_DEPS := submodules -DEVNET_COMPOSE_FILES += -f docker-compose-geth-build.yml else ifeq ($(EXECUTION_CLIENT),reth) DEVNET_COMPOSE_FILES += -f docker-compose-reth.yml ifeq ($(MORPH_RETH_BUILD_FROM_SOURCE),true) diff --git a/ops/devnet-morph/devnet/__init__.py b/ops/devnet-morph/devnet/__init__.py index ce4f789a5..d7b5b8f01 100644 --- a/ops/devnet-morph/devnet/__init__.py +++ b/ops/devnet-morph/devnet/__init__.py @@ -37,8 +37,6 @@ def compose_file_args(execution_client): args = ['-f', 'docker-compose-4nodes.yml'] if execution_client == 'reth': args.extend(['-f', 'docker-compose-reth.yml']) - elif execution_client == 'geth': - args.extend(['-f', 'docker-compose-geth-build.yml']) return args diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 180199ceb..83d4f8b9e 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ 
b/ops/docker/docker-compose-4nodes.yml @@ -122,6 +122,9 @@ services: layer1-el: condition: service_started image: morph-geth:latest + build: + context: ../.. + dockerfile: ops/docker/Dockerfile.l2-geth restart: unless-stopped ports: - "8545:8545" diff --git a/ops/docker/docker-compose-geth-build.yml b/ops/docker/docker-compose-geth-build.yml deleted file mode 100644 index f8a3070ad..000000000 --- a/ops/docker/docker-compose-geth-build.yml +++ /dev/null @@ -1,5 +0,0 @@ -services: - morph-el-0: - build: - context: ../.. - dockerfile: ops/docker/Dockerfile.l2-geth diff --git a/ops/docker/docker-compose-reth.yml b/ops/docker/docker-compose-reth.yml index 920c8af98..fecc42f89 100644 --- a/ops/docker/docker-compose-reth.yml +++ b/ops/docker/docker-compose-reth.yml @@ -26,6 +26,7 @@ x-reth-service: &reth-service services: morph-el-0: <<: *reth-service + build: !reset null morph-el-1: <<: *reth-service @@ -38,6 +39,7 @@ services: sentry-el-0: <<: *reth-service + build: !reset null validator-el: <<: *reth-service From c82a14566673950d15b452eb948385602a67435d Mon Sep 17 00:00:00 2001 From: corey Date: Tue, 12 May 2026 18:04:39 +0800 Subject: [PATCH 11/17] refactor(node): remove validator/challenge bypass per SPEC-005 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Delete node/validator package (config.go, validator.go, validator_test.go) - Drop validator wiring from node/cmd/node/main.go and derivation.NewDerivationClient signature - Drop validator field from Derivation struct - Drop ChallengeEnable/ChallengeState invocation in derivation rollback path - Remove validator.challengeEnable / validator.privateKey CLI flags - Remove MORPH_NODE_VALIDATOR_PRIVATE_KEY env from docker compose files Refs: morph-l2/morph-specs SPEC-005 §4.1 Equivalent in intent to PR #948 commit 3e49457d, but applied directly to main without the Phase A reorg/halted/rollback context that 3e49457d brought along. 
--- node/cmd/node/main.go | 12 +- node/derivation/derivation.go | 12 +- node/flags/flags.go | 17 --- node/ops-morph/docker-compose-validator.yml | 1 - node/validator/config.go | 46 -------- node/validator/validator.go | 118 -------------------- node/validator/validator_test.go | 48 -------- ops/docker/docker-compose-4nodes.yml | 1 - 8 files changed, 2 insertions(+), 253 deletions(-) delete mode 100644 node/validator/config.go delete mode 100644 node/validator/validator.go delete mode 100644 node/validator/validator_test.go diff --git a/node/cmd/node/main.go b/node/cmd/node/main.go index 5884fe6fd..4b5d4fc3c 100644 --- a/node/cmd/node/main.go +++ b/node/cmd/node/main.go @@ -30,7 +30,6 @@ import ( "morph-l2/node/sequencer/mock" "morph-l2/node/sync" "morph-l2/node/types" - "morph-l2/node/validator" ) func main() { @@ -99,10 +98,6 @@ func L2NodeMain(ctx *cli.Context) error { if err != nil { return fmt.Errorf("failed to create syncer, error: %v", err) } - validatorCfg := validator.NewConfig() - if err := validatorCfg.SetCliContext(ctx); err != nil { - return fmt.Errorf("validator set cli context error: %v", err) - } l1Client, err := ethclient.Dial(derivationCfg.L1.Addr) if err != nil { return fmt.Errorf("dial l1 node error:%v", err) @@ -111,12 +106,7 @@ func L2NodeMain(ctx *cli.Context) error { if err != nil { return fmt.Errorf("NewRollup error:%v", err) } - vt, err := validator.NewValidator(validatorCfg, rollup, nodeConfig.Logger) - if err != nil { - return fmt.Errorf("new validator client error: %v", err) - } - - dvNode, err = derivation.NewDerivationClient(context.Background(), derivationCfg, syncer, store, vt, rollup, nodeConfig.Logger) + dvNode, err = derivation.NewDerivationClient(context.Background(), derivationCfg, syncer, store, rollup, nodeConfig.Logger) if err != nil { return fmt.Errorf("new derivation client error: %v", err) } diff --git a/node/derivation/derivation.go b/node/derivation/derivation.go index d5bf58681..565de1d89 100644 --- 
a/node/derivation/derivation.go +++ b/node/derivation/derivation.go @@ -27,7 +27,6 @@ import ( nodecommon "morph-l2/node/common" "morph-l2/node/sync" "morph-l2/node/types" - "morph-l2/node/validator" ) var ( @@ -42,7 +41,6 @@ type Derivation struct { RollupContractAddress common.Address confirmations rpc.BlockNumber l2Client *types.RetryableClient - validator *validator.Validator logger tmlog.Logger rollup *bindings.Rollup metrics *Metrics @@ -72,7 +70,7 @@ type DeployContractBackend interface { ethereum.TransactionReader } -func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, db Database, validator *validator.Validator, rollup *bindings.Rollup, logger tmlog.Logger) (*Derivation, error) { +func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, db Database, rollup *bindings.Rollup, logger tmlog.Logger) (*Derivation, error) { l1Client, err := ethclient.Dial(cfg.L1.Addr) if err != nil { return nil, err @@ -122,7 +120,6 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, db: db, l1Client: l1Client, syncer: syncer, - validator: validator, rollup: rollup, rollupABI: rollupAbi, legacyRollupABI: legacyRollupAbi, @@ -253,13 +250,6 @@ func (d *Derivation) derivationBlock(ctx context.Context) { if rootMismatch || withdrawalMismatch { d.metrics.SetBatchStatus(stateException) - // TODO The challenge switch is currently on and will be turned on in the future - if d.validator != nil && d.validator.ChallengeEnable() { - if err := d.validator.ChallengeState(batchInfo.batchIndex); err != nil { - d.logger.Error("challenge state failed", "batchIndex", batchInfo.batchIndex, "error", err) - return - } - } d.logger.Error("root hash or withdrawal hash is not equal", "originStateRootHash", batchInfo.root, "deriveStateRootHash", lastHeader.Root.Hex(), diff --git a/node/flags/flags.go b/node/flags/flags.go index 19325a4b0..5472464b5 100644 --- a/node/flags/flags.go +++ b/node/flags/flags.go @@ -168,19 +168,6 
@@ var ( EnvVar: prefixEnvVar("VALIDATOR"), } - ChallengeEnable = cli.BoolFlag{ - Name: "validator.challengeEnable", - Usage: "Enable the validator challenge", - EnvVar: prefixEnvVar("VALIDATOR_CHALLENGE_ENABLE"), - } - - // validator - ValidatorPrivateKey = cli.StringFlag{ - Name: "validator.privateKey", - Usage: "Private Key corresponding to SUBSIDY Owner", - EnvVar: prefixEnvVar("VALIDATOR_PRIVATE_KEY"), - } - // derivation RollupContractAddress = cli.StringFlag{ Name: "derivation.rollupAddress", @@ -351,10 +338,6 @@ var Flags = []cli.Flag{ TendermintConfigPath, MockEnabled, ValidatorEnable, - ChallengeEnable, - - // validator - ValidatorPrivateKey, // derivation RollupContractAddress, diff --git a/node/ops-morph/docker-compose-validator.yml b/node/ops-morph/docker-compose-validator.yml index 09a1efa74..0b0bc4d63 100644 --- a/node/ops-morph/docker-compose-validator.yml +++ b/node/ops-morph/docker-compose-validator.yml @@ -21,7 +21,6 @@ services: ## todo need to replace it to a public network - MORPH_NODE_L1_ETH_RPC=http://host.docker.internal:9545 - MORPH_NODE_L1_ETH_BEACON_RPC=http://host.docker.internal:3500 - - MORPH_NODE_VALIDATOR_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000001 - MORPH_NODE_ROLLUP_ADDRESS=0xa513e6e4b8f2a923d98304ec87f64353c4d5c853 - MORPH_NODE_DERIVATION_START_HEIGHT=1 - MORPH_NODE_DERIVATION_FETCH_BLOCK_RANGE=1000 diff --git a/node/validator/config.go b/node/validator/config.go deleted file mode 100644 index 986fd16d5..000000000 --- a/node/validator/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package validator - -import ( - "crypto/ecdsa" - "math/big" - "strings" - - "github.com/morph-l2/go-ethereum/common" - "github.com/morph-l2/go-ethereum/crypto" - "github.com/urfave/cli" - - "morph-l2/node/flags" -) - -type Config struct { - l1RPC string - PrivateKey *ecdsa.PrivateKey - L1ChainID *big.Int - rollupContract common.Address - challengeEnable bool -} - -func NewConfig() *Config { - return &Config{} -} - -func (c 
*Config) SetCliContext(ctx *cli.Context) error { - l1NodeAddr := ctx.GlobalString(flags.L1NodeAddr.Name) - l1ChainID := ctx.GlobalUint64(flags.L1ChainID.Name) - c.challengeEnable = ctx.GlobalBool(flags.ChallengeEnable.Name) - if c.challengeEnable { - hexPrvKey := ctx.GlobalString(flags.ValidatorPrivateKey.Name) - hex := strings.TrimPrefix(hexPrvKey, "0x") - privateKey, err := crypto.HexToECDSA(hex) - if err != nil { - return err - } - c.PrivateKey = privateKey - } - addrHex := ctx.GlobalString(flags.RollupContractAddress.Name) - rollupContract := common.HexToAddress(addrHex) - c.l1RPC = l1NodeAddr - c.L1ChainID = big.NewInt(int64(l1ChainID)) - c.rollupContract = rollupContract - return nil -} diff --git a/node/validator/validator.go b/node/validator/validator.go deleted file mode 100644 index 224c8c3d8..000000000 --- a/node/validator/validator.go +++ /dev/null @@ -1,118 +0,0 @@ -package validator - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "math/big" - "time" - - "github.com/morph-l2/go-ethereum" - "github.com/morph-l2/go-ethereum/accounts/abi/bind" - ethtypes "github.com/morph-l2/go-ethereum/core/types" - "github.com/morph-l2/go-ethereum/ethclient" - "github.com/morph-l2/go-ethereum/log" - tmlog "github.com/tendermint/tendermint/libs/log" - - "morph-l2/bindings/bindings" -) - -type Validator struct { - cli DeployContractBackend - privateKey *ecdsa.PrivateKey - l1ChainID *big.Int - contract *bindings.Rollup - challengeEnable bool - logger tmlog.Logger -} - -type DeployContractBackend interface { - bind.DeployBackend - bind.ContractBackend -} - -func NewValidator(cfg *Config, rollup *bindings.Rollup, logger tmlog.Logger) (*Validator, error) { - cli, err := ethclient.Dial(cfg.l1RPC) - if err != nil { - return nil, fmt.Errorf("dial l1 node error:%v", err) - } - return &Validator{ - cli: cli, - contract: rollup, - privateKey: cfg.PrivateKey, - l1ChainID: cfg.L1ChainID, - challengeEnable: cfg.challengeEnable, - logger: logger, - }, nil -} - -func (v 
*Validator) SetLogger() { - v.logger = v.logger.With("module", "validator") -} - -func (v *Validator) ChallengeEnable() bool { - return v.challengeEnable -} - -func (v *Validator) ChallengeState(batchIndex uint64) error { - if !v.ChallengeEnable() { - return fmt.Errorf("the challenge is not enabled,please set challengeEnable is true") - } - opts, err := bind.NewKeyedTransactorWithChainID(v.privateKey, v.l1ChainID) - if err != nil { - return err - } - gasPrice, err := v.cli.SuggestGasPrice(opts.Context) - if err != nil { - return err - } - opts.GasPrice = gasPrice - opts.NoSend = true - batchHash, err := v.contract.CommittedBatches( - &bind.CallOpts{ - Pending: false, - Context: opts.Context, - }, - new(big.Int).SetUint64(batchIndex), - ) - if err != nil { - return err - } - tx, err := v.contract.ChallengeState(opts, batchIndex, batchHash) - if err != nil { - return err - } - log.Info("send ChallengeState transaction ", "txHash", tx.Hash().Hex()) - if err := v.cli.SendTransaction(context.Background(), tx); err != nil { - return err - } - // Wait for the receipt - receipt, err := waitForReceipt(v.cli, tx) - if err != nil { - return err - } - log.Info("Validator has already started the challenge", "hash", tx.Hash().Hex(), - "gas-used", receipt.GasUsed, "blocknumber", receipt.BlockNumber) - return nil -} - -func waitForReceipt(backend DeployContractBackend, tx *ethtypes.Transaction) (*ethtypes.Receipt, error) { - t := time.NewTicker(300 * time.Millisecond) - receipt := new(ethtypes.Receipt) - var err error - for range t.C { - receipt, err = backend.TransactionReceipt(context.Background(), tx.Hash()) - if errors.Is(err, ethereum.NotFound) { - continue - } - if err != nil { - return nil, err - } - if receipt != nil { - t.Stop() - break - } - } - return receipt, nil -} diff --git a/node/validator/validator_test.go b/node/validator/validator_test.go deleted file mode 100644 index 038a6f978..000000000 --- a/node/validator/validator_test.go +++ /dev/null @@ -1,48 +0,0 @@ 
-package validator - -import ( - "crypto/ecdsa" - "math/big" - "testing" - - "github.com/morph-l2/go-ethereum/accounts/abi/bind" - "github.com/morph-l2/go-ethereum/accounts/abi/bind/backends" - "github.com/morph-l2/go-ethereum/core" - "github.com/morph-l2/go-ethereum/core/rawdb" - "github.com/morph-l2/go-ethereum/crypto" - "github.com/morph-l2/go-ethereum/ethdb" - "github.com/morph-l2/go-ethereum/log" - "github.com/stretchr/testify/require" - - "morph-l2/bindings/bindings" -) - -func TestValidator_ChallengeState(t *testing.T) { - key, _ := crypto.GenerateKey() - sim, _ := newSimulatedBackend(key) - opts, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - require.NoError(t, err) - addr, _, rollup, err := bindings.DeployRollup(opts, sim, 1337) - require.NoError(t, err) - sim.Commit() - v := Validator{ - cli: sim, - privateKey: key, - l1ChainID: big.NewInt(1), - contract: rollup, - challengeEnable: true, - } - err = v.ChallengeState(10) - log.Info("addr:", addr) - require.EqualError(t, err, "execution reverted: only challenger allowed") -} - -func newSimulatedBackend(key *ecdsa.PrivateKey) (*backends.SimulatedBackend, ethdb.Database) { - var gasLimit uint64 = 9_000_000 - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - genAlloc := make(core.GenesisAlloc) - genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)} - db := rawdb.NewMemoryDatabase() - sim := backends.NewSimulatedBackendWithDatabase(db, genAlloc, gasLimit) - return sim, db -} diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 83d4f8b9e..f1df910c2 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ b/ops/docker/docker-compose-4nodes.yml @@ -433,7 +433,6 @@ services: - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - MORPH_NODE_L1_ETH_BEACON_RPC=${L1_BEACON_CHAIN_RPC} - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} - - 
MORPH_NODE_VALIDATOR_PRIVATE_KEY=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 - MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} - MORPH_NODE_DERIVATION_START_HEIGHT=1 - MORPH_NODE_SYNC_START_HEIGHT=1 From e2c6f57783a0868e463d495dc95502bb75d7ab89 Mon Sep 17 00:00:00 2001 From: corey Date: Tue, 12 May 2026 18:07:23 +0800 Subject: [PATCH 12/17] refactor(derivation): extract verifyBatchRoots into verify.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the inline state-root / withdrawal-root mismatch check out of derivationBlock into a standalone verifyBatchRoots(batchInfo, lastHeader) function in a new node/derivation/verify.go. Both roots are read from L1 calldata at parse time, so verifyBatchRoots is independent of blob data — this is the SPEC-005 §3.4 invariant that later allows Path B (local-rebuild verification) to reuse this same check without modification. No behavior change: the main loop still logs + returns on mismatch, sets stateException on the metric, and continues otherwise. Only the location and error-message format change. 
Refs: morph-l2/morph-specs SPEC-005 §4.2 / §6 ("两种模式共享同一份 verifyBatchRoots") --- node/derivation/derivation.go | 20 ++---------------- node/derivation/verify.go | 38 +++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 node/derivation/verify.go diff --git a/node/derivation/derivation.go b/node/derivation/derivation.go index b710e2ecc..c6ec0d250 100644 --- a/node/derivation/derivation.go +++ b/node/derivation/derivation.go @@ -237,25 +237,9 @@ func (d *Derivation) derivationBlock(ctx context.Context) { if lastHeader.Number.Uint64() <= d.baseHeight { continue } - withdrawalRoot, err := d.L2ToL1MessagePasser.MessageRoot(&bind.CallOpts{ - BlockNumber: lastHeader.Number, - }) - if err != nil { - d.logger.Error("get withdrawal root failed", "error", err) - return - } - - rootMismatch := !bytes.Equal(lastHeader.Root.Bytes(), batchInfo.root.Bytes()) - withdrawalMismatch := !bytes.Equal(withdrawalRoot[:], batchInfo.withdrawalRoot.Bytes()) - - if rootMismatch || withdrawalMismatch { + if err := d.verifyBatchRoots(batchInfo, lastHeader); err != nil { d.metrics.SetBatchStatus(stateException) - d.logger.Error("root hash or withdrawal hash is not equal", - "originStateRootHash", batchInfo.root, - "deriveStateRootHash", lastHeader.Root.Hex(), - "batchWithdrawalRoot", batchInfo.withdrawalRoot.Hex(), - "deriveWithdrawalRoot", common.BytesToHash(withdrawalRoot[:]).Hex(), - ) + d.logger.Error("batch roots verification failed", "batchIndex", batchInfo.batchIndex, "error", err) return } d.metrics.SetBatchStatus(stateNormal) diff --git a/node/derivation/verify.go b/node/derivation/verify.go new file mode 100644 index 000000000..c69b1b345 --- /dev/null +++ b/node/derivation/verify.go @@ -0,0 +1,38 @@ +package derivation + +import ( + "bytes" + "fmt" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/common" + eth "github.com/morph-l2/go-ethereum/core/types" +) + +// verifyBatchRoots verifies the 
local state root and withdrawal root against the +// values recorded in the L1 commit batch tx calldata. +// +// SPEC-005 §3.4 invariant: this check is independent of blob data — both +// batchInfo.root (postStateRoot) and batchInfo.withdrawalRoot are extracted +// from L1 calldata at parse time, so this function runs identically under +// Path A (online beacon blob) and Path B (local-rebuild) verification modes. +// +// Returns nil on match, error describing the mismatch otherwise. +func (d *Derivation) verifyBatchRoots(batchInfo *BatchInfo, lastHeader *eth.Header) error { + withdrawalRoot, err := d.L2ToL1MessagePasser.MessageRoot(&bind.CallOpts{ + BlockNumber: lastHeader.Number, + }) + if err != nil { + return fmt.Errorf("get withdrawal root failed: %w", err) + } + + rootMismatch := !bytes.Equal(lastHeader.Root.Bytes(), batchInfo.root.Bytes()) + withdrawalMismatch := !bytes.Equal(withdrawalRoot[:], batchInfo.withdrawalRoot.Bytes()) + + if rootMismatch || withdrawalMismatch { + return fmt.Errorf("root mismatch: stateRoot(l1=%s, local=%s) withdrawalRoot(l1=%s, local=%s)", + batchInfo.root.Hex(), lastHeader.Root.Hex(), + batchInfo.withdrawalRoot.Hex(), common.BytesToHash(withdrawalRoot[:]).Hex()) + } + return nil +} From 9f01c530a2c49e41473ff824fe2e3e4a8fddef87 Mon Sep 17 00:00:00 2001 From: corey Date: Tue, 12 May 2026 18:17:11 +0800 Subject: [PATCH 13/17] =?UTF-8?q?feat(derivation):=20SPEC-005=20Path=20B?= =?UTF-8?q?=20=E2=80=94=20local-rebuild=20blob=20verification?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces Path B verification mode (SPEC-005 §4) as a startup-time mutually-exclusive alternative to Path A. Selected via `--derivation.verify-mode` (default "pathA", preserves current behaviour). The two modes do not interact: under no circumstance does the node fall back from one to the other; switching requires changing the config and restarting. Path B mode: - Skips beacon-side blob fetch entirely. 
- Reads L1 commitBatch tx calldata + tx.BlobHashes() only.
- Reads local L2 blocks for the batch range, replays the sequencer's
  encoding (parsingTxs + buildBlockContext + BatchData) to rebuild the
  blob bytes.
- Compresses + builds a BlobTxSidecar; compares its versioned hashes
  against the blob versioned hashes declared by the L1 commitBatch tx
  (tx.BlobHashes()).
- Reuses the shared verifyBatchRoots for state/withdrawal root
  verification (independent of blob).

Touches:
- common/batch: export ParsingTxs / BuildBlockContext (rename from
  package-private). No semantic change for tx-submitter.
- node/derivation/batch_info.go: add BatchInfo.blobHashes field; add
  ParseBatchMetadataOnly (calldata-only parse, no blob).
- node/derivation/config.go + flags: add VerifyMode + flag with
  fail-fast validation on unknown values.
- node/derivation/derivation.go: dispatch in main loop based on
  verifyMode; populate blobHashes in Path A's fetch helper too so
  BatchInfo is consistent across modes.
- node/derivation/verify_path_b.go: fetchBatchInfoPathB +
  verifyBatchContentPathB + fetchLocalLastHeader.
- node/derivation/metrics.go: path_b_triggered_total /
  path_b_failed_total counters.

Verified clean: `go build ./node/... ./common/...`, `go vet`, test
compile.

Refs: morph-l2/morph-specs SPEC-005 §4 / §5 / §6
Resolves the WIP item flagged in PR #951.
--- common/batch/batch_cache.go | 21 +++-- node/derivation/batch_info.go | 58 ++++++++++++ node/derivation/config.go | 23 +++++ node/derivation/derivation.go | 67 +++++++++---- node/derivation/metrics.go | 26 ++++++ node/derivation/verify_path_b.go | 156 +++++++++++++++++++++++++++++++ node/flags/flags.go | 8 ++ 7 files changed, 335 insertions(+), 24 deletions(-) create mode 100644 node/derivation/verify_path_b.go diff --git a/common/batch/batch_cache.go b/common/batch/batch_cache.go index 3b673db9b..f7f40c686 100644 --- a/common/batch/batch_cache.go +++ b/common/batch/batch_cache.go @@ -509,7 +509,7 @@ func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdraw } // Parse transactions, distinguish L1 and L2 transactions - txsPayload, l1TxHashes, newTotalL1MessagePopped, l2TxNum, err := parsingTxs(block.Transactions(), bc.totalL1MessagePopped) + txsPayload, l1TxHashes, newTotalL1MessagePopped, l2TxNum, err := ParsingTxs(block.Transactions(), bc.totalL1MessagePopped) if err != nil { return false, fmt.Errorf("failed to parse transactions: %w", err) } @@ -518,7 +518,7 @@ func (bc *BatchCache) CalculateCapWithProposalBlock(blockNumber uint64, withdraw txsNum := l2TxNum + l1TxNum // Build BlockContext (60 bytes) - blockContext := buildBlockContext(header, txsNum, l1TxNum) + blockContext := BuildBlockContext(header, txsNum, l1TxNum) // Store to current, do not immediately append to batch bc.currentBlockContext = blockContext @@ -840,8 +840,14 @@ func (bc *BatchCache) createBatchHeader(dataHash common.Hash, sidecar *ethtypes. 
return batchHeaderV0.Bytes() } -// parsingTxs parses transactions, distinguishes L1 and L2 transactions -func parsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( +// ParsingTxs encodes a block's transactions into the on-chain payload format +// used by the batch builder: L2 transactions are RLP-marshalled and concatenated +// in order; L1 message transactions are excluded from the payload but their +// hashes and queue indices are tracked separately. +// +// Exported for derivation Path B (SPEC-005), which must rebuild blob bytes from +// local L2 blocks using the same encoding the sequencer applied at seal time. +func ParsingTxs(transactions []*ethtypes.Transaction, totalL1MessagePoppedBefore uint64) ( txsPayload []byte, l1TxHashes []common.Hash, totalL1MessagePopped uint64, @@ -901,9 +907,12 @@ func (bc *BatchCache) effectiveMaxBlobCount(blockTimestamp uint64) int { return 1 } -// buildBlockContext builds BlockContext from block header (60 bytes) +// BuildBlockContext serialises a block header + tx counts into the 60-byte +// BlockContext blob the batch builder writes for each block. // Format: Number(8) || Timestamp(8) || BaseFee(32) || GasLimit(8) || numTxs(2) || numL1Messages(2) -func buildBlockContext(header *ethtypes.Header, txsNum, l1MsgNum int) []byte { +// +// Exported for derivation Path B (SPEC-005); see ParsingTxs. +func BuildBlockContext(header *ethtypes.Header, txsNum, l1MsgNum int) []byte { blsBytes := make([]byte, 60) // Number (8 bytes) diff --git a/node/derivation/batch_info.go b/node/derivation/batch_info.go index add7efe36..1e2261cad 100644 --- a/node/derivation/batch_info.go +++ b/node/derivation/batch_info.go @@ -59,6 +59,11 @@ type BatchInfo struct { root common.Hash withdrawalRoot common.Hash parentTotalL1MessagePopped uint64 + + // blobHashes is the ordered list of EIP-4844 blob versioned hashes + // declared by the L1 commitBatch tx. 
Path B uses this to compare + // against locally-rebuilt versioned hashes (SPEC-005 §4). + blobHashes []common.Hash } func (bi *BatchInfo) FirstBlockNumber() uint64 { @@ -77,6 +82,59 @@ func (bi *BatchInfo) TxNum() uint64 { return bi.txNum } +// ParseBatchMetadataOnly populates BatchInfo using only L1 calldata — +// it does NOT touch the blob sidecar and does NOT decode any transactions. +// +// Used by Path B (SPEC-005), which verifies the batch by rebuilding the +// blob locally rather than downloading and decoding it. Fields populated: +// batchIndex, version, root, withdrawalRoot, parentTotalL1MessagePopped, +// firstBlockNumber, lastBlockNumber. blockContexts / SafeL2Data / blobs +// are intentionally left empty; callers in Path B must not call derive(). +// +// blobHashes is populated separately by the caller from tx.BlobHashes(). +func (bi *BatchInfo) ParseBatchMetadataOnly(batch geth.RPCRollupBatch) error { + parentBatchHeader := commonbatch.BatchHeaderBytes(batch.ParentBatchHeader) + parentBatchIndex, err := parentBatchHeader.BatchIndex() + if err != nil { + return fmt.Errorf("decode batch header index error:%v", err) + } + totalL1MessagePopped, err := parentBatchHeader.TotalL1MessagePopped() + if err != nil { + return fmt.Errorf("decode batch header totalL1MessagePopped error:%v", err) + } + bi.parentTotalL1MessagePopped = totalL1MessagePopped + bi.root = batch.PostStateRoot + bi.batchIndex = parentBatchIndex + 1 + bi.withdrawalRoot = batch.WithdrawRoot + bi.version = uint64(batch.Version) + bi.lastBlockNumber = batch.LastBlockNumber + + // Derive firstBlockNumber from parent batch's LastBlockNumber + 1. + // V0 -> V1 transition leaves parent LastBlockNumber unset; in that + // case fall back to decoding the first BlockContext from calldata. 
+ parentVersion, err := parentBatchHeader.Version() + if err != nil { + return fmt.Errorf("decode parent batch header version error:%v", err) + } + if parentVersion == 0 { + if len(batch.BlockContexts) < 2+60 { + return fmt.Errorf("calldata block contexts too short for first block context: have %d, need %d", len(batch.BlockContexts), 2+60) + } + var firstBlock BlockContext + if err := firstBlock.Decode(batch.BlockContexts[2 : 2+60]); err != nil { + return fmt.Errorf("decode first block context error:%v", err) + } + bi.firstBlockNumber = firstBlock.Number + } else { + parentLast, err := parentBatchHeader.LastBlockNumber() + if err != nil { + return fmt.Errorf("decode parent batch header lastBlockNumber error:%v", err) + } + bi.firstBlockNumber = parentLast + 1 + } + return nil +} + // ParseBatch This method is externally referenced for parsing Batch func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error { if len(batch.Sidecar.Blobs) == 0 { diff --git a/node/derivation/config.go b/node/derivation/config.go index 9d896f0b6..81a581049 100644 --- a/node/derivation/config.go +++ b/node/derivation/config.go @@ -29,6 +29,14 @@ const ( // DefaultLogProgressInterval is the frequency at which we log progress. DefaultLogProgressInterval = time.Second * 10 + + // VerifyMode values (SPEC-005 §4.2). Selected at startup; not switchable + // at runtime. Default is VerifyModePathA which preserves current behaviour. + VerifyModePathA = "pathA" + VerifyModePathB = "pathB" + + // DefaultVerifyMode is Path A (pull beacon blob, decode, derive, verify). 
+ DefaultVerifyMode = VerifyModePathA ) type Config struct { @@ -41,6 +49,7 @@ type Config struct { PollInterval time.Duration `json:"poll_interval"` LogProgressInterval time.Duration `json:"log_progress_interval"` FetchBlockRange uint64 `json:"fetch_block_range"` + VerifyMode string `json:"verify_mode"` MetricsPort uint64 `json:"metrics_port"` MetricsHostname string `json:"metrics_hostname"` MetricsServerEnable bool `json:"metrics_server_enable"` @@ -54,6 +63,7 @@ func DefaultConfig() *Config { PollInterval: DefaultPollInterval, LogProgressInterval: DefaultLogProgressInterval, FetchBlockRange: DefaultFetchBlockRange, + VerifyMode: DefaultVerifyMode, L2: new(types.L2Config), } } @@ -110,6 +120,19 @@ func (c *Config) SetCliContext(ctx *cli.Context) error { } } + if ctx.GlobalIsSet(flags.DerivationVerifyMode.Name) { + c.VerifyMode = ctx.GlobalString(flags.DerivationVerifyMode.Name) + } + switch c.VerifyMode { + case VerifyModePathA, VerifyModePathB: + // ok + case "": + c.VerifyMode = DefaultVerifyMode + default: + return fmt.Errorf("invalid derivation.verify-mode %q (must be %q or %q)", + c.VerifyMode, VerifyModePathA, VerifyModePathB) + } + l2EthAddr := ctx.GlobalString(flags.L2EthAddr.Name) l2EngineAddr := ctx.GlobalString(flags.L2EngineAddr.Name) fileName := ctx.GlobalString(flags.L2EngineJWTSecret.Name) diff --git a/node/derivation/derivation.go b/node/derivation/derivation.go index c6ec0d250..17345dd63 100644 --- a/node/derivation/derivation.go +++ b/node/derivation/derivation.go @@ -60,6 +60,7 @@ type Derivation struct { fetchBlockRange uint64 pollInterval time.Duration logProgressInterval time.Duration + verifyMode string // SPEC-005 §4.2: "pathA" (default) or "pathB"; bound at startup, never switches. 
stop chan struct{} } @@ -135,6 +136,7 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, fetchBlockRange: cfg.FetchBlockRange, pollInterval: cfg.PollInterval, logProgressInterval: cfg.LogProgressInterval, + verifyMode: cfg.VerifyMode, metrics: metrics, l1BeaconClient: l1BeaconClient, L2ToL1MessagePasser: msgPasser, @@ -213,27 +215,55 @@ func (d *Derivation) derivationBlock(ctx context.Context) { d.logger.Info("fetched rollup tx", "txNum", len(logs), "latestBatchIndex", latestBatchIndex) for _, lg := range logs { - batchInfo, err := d.fetchRollupDataByTxHash(lg.TxHash, lg.BlockNumber) - if err != nil { - if errors.Is(err, types.ErrNotCommitBatchTx) { - continue + var ( + batchInfo *BatchInfo + lastHeader *eth.Header + ) + switch d.verifyMode { + case VerifyModePathB: + batchInfo, err = d.fetchBatchInfoPathB(ctx, lg.TxHash, lg.BlockNumber) + if err != nil { + if errors.Is(err, types.ErrNotCommitBatchTx) { + continue + } + d.logger.Error("path B fetch batch info failed", "txHash", lg.TxHash, "blockNumber", lg.BlockNumber, "error", err) + return } - d.logger.Error("fetch batch info failed", "txHash", lg.TxHash, "blockNumber", lg.BlockNumber, "error", err) - return + d.logger.Info("path B fetched batch metadata", "txNonce", batchInfo.nonce, "txHash", batchInfo.txHash, + "l1BlockNumber", batchInfo.l1BlockNumber, "firstL2BlockNumber", batchInfo.firstBlockNumber, "lastL2BlockNumber", batchInfo.lastBlockNumber) + if err := d.verifyBatchContentPathB(ctx, batchInfo); err != nil { + d.metrics.SetBatchStatus(stateException) + d.logger.Error("path B content verification failed", "batchIndex", batchInfo.batchIndex, "error", err) + return + } + lastHeader, err = d.fetchLocalLastHeader(ctx, batchInfo) + if err != nil { + d.logger.Error("path B local last-header fetch failed", "batchIndex", batchInfo.batchIndex, "error", err) + return + } + d.metrics.SetL2DeriveHeight(lastHeader.Number.Uint64()) + d.metrics.SetSyncedBatchIndex(batchInfo.batchIndex) + 
default: // VerifyModePathA + batchInfo, err = d.fetchRollupDataByTxHash(lg.TxHash, lg.BlockNumber) + if err != nil { + if errors.Is(err, types.ErrNotCommitBatchTx) { + continue + } + d.logger.Error("fetch batch info failed", "txHash", lg.TxHash, "blockNumber", lg.BlockNumber, "error", err) + return + } + d.logger.Info("fetch rollup transaction success", "txNonce", batchInfo.nonce, "txHash", batchInfo.txHash, + "l1BlockNumber", batchInfo.l1BlockNumber, "firstL2BlockNumber", batchInfo.firstBlockNumber, "lastL2BlockNumber", batchInfo.lastBlockNumber) + lastHeader, err = d.derive(batchInfo) + if err != nil { + d.logger.Error("derive blocks interrupt", "error", err) + return + } + d.logger.Info("batch derivation complete", "batch_index", batchInfo.batchIndex, "currentBatchEndBlock", lastHeader.Number.Uint64()) + d.metrics.SetL2DeriveHeight(lastHeader.Number.Uint64()) + d.metrics.SetSyncedBatchIndex(batchInfo.batchIndex) } - d.logger.Info("fetch rollup transaction success", "txNonce", batchInfo.nonce, "txHash", batchInfo.txHash, - "l1BlockNumber", batchInfo.l1BlockNumber, "firstL2BlockNumber", batchInfo.firstBlockNumber, "lastL2BlockNumber", batchInfo.lastBlockNumber) - // derivation - lastHeader, err := d.derive(batchInfo) - if err != nil { - d.logger.Error("derive blocks interrupt", "error", err) - return - } - // only last block of batch - d.logger.Info("batch derivation complete", "batch_index", batchInfo.batchIndex, "currentBatchEndBlock", lastHeader.Number.Uint64()) - d.metrics.SetL2DeriveHeight(lastHeader.Number.Uint64()) - d.metrics.SetSyncedBatchIndex(batchInfo.batchIndex) if lastHeader.Number.Uint64() <= d.baseHeight { continue } @@ -390,6 +420,7 @@ func (d *Derivation) fetchRollupDataByTxHash(txHash common.Hash, blockNumber uin rollupData.l1BlockNumber = blockNumber rollupData.txHash = txHash rollupData.nonce = tx.Nonce() + rollupData.blobHashes = tx.BlobHashes() return rollupData, nil } diff --git a/node/derivation/metrics.go b/node/derivation/metrics.go 
index da5e8937d..cd5c4c8db 100644 --- a/node/derivation/metrics.go +++ b/node/derivation/metrics.go @@ -24,6 +24,12 @@ type Metrics struct { BatchStatus metrics.Gauge LatestBatchIndex metrics.Gauge SyncedBatchIndex metrics.Gauge + + // SPEC-005 §4.6 Path B counters. PathBTriggered increments once per batch + // processed under VerifyModePathB; PathBFailed increments on local-block + // missing / encoding error / versioned hash mismatch. + PathBTriggered metrics.Counter + PathBFailed metrics.Counter } func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { @@ -68,6 +74,18 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "synced_batch_index", Help: "", }, labels).With(labelsAndValues...), + PathBTriggered: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "path_b_triggered_total", + Help: "Number of batches verified via SPEC-005 Path B (local-rebuild).", + }, labels).With(labelsAndValues...), + PathBFailed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "path_b_failed_total", + Help: "Path B failures: local block missing, encoding error, or versioned hash mismatch.", + }, labels).With(labelsAndValues...), } } @@ -95,6 +113,14 @@ func (m *Metrics) SetSyncedBatchIndex(batchIndex uint64) { m.SyncedBatchIndex.Set(float64(batchIndex)) } +func (m *Metrics) IncPathBTriggered() { + m.PathBTriggered.Add(1) +} + +func (m *Metrics) IncPathBFailed() { + m.PathBFailed.Add(1) +} + func (m *Metrics) Serve(hostname string, port uint64) (*http.Server, error) { mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) diff --git a/node/derivation/verify_path_b.go b/node/derivation/verify_path_b.go new file mode 100644 index 000000000..99f154d05 --- /dev/null +++ b/node/derivation/verify_path_b.go @@ -0,0 +1,156 @@ +package derivation + +import ( + "context" + "errors" + "fmt" + 
"math/big" + + "github.com/morph-l2/go-ethereum/common" + eth "github.com/morph-l2/go-ethereum/core/types" + + commonbatch "morph-l2/common/batch" + commonblob "morph-l2/common/blob" +) + +// SPEC-005 §4 Path B: blob-independent batch content verification. +// +// In VerifyModePathB the node does not pull blobs from the beacon chain. +// Instead it reads the L2 blocks in the batch range from local storage, +// reapplies the sequencer's encoding to rebuild the blob bytes, and compares +// the resulting versioned hashes against the values declared by the L1 +// commitBatch tx (carried in BatchInfo.blobHashes). +// +// State / withdrawal root verification (verify.go::verifyBatchRoots) is +// independent of this path and runs after success. +// +// Path A and Path B are mutually exclusive: the mode is fixed at startup by +// `--derivation.verify-mode` and cannot change at runtime. Path A failure +// (e.g. blob unavailable) does NOT auto-fall-back to Path B; the operator +// must restart with the alternate mode. + +// fetchBatchInfoPathB pulls the L1 commitBatch tx, decodes its calldata, and +// populates a BatchInfo using only the calldata + tx blob hashes — no beacon +// blob fetch. Returned BatchInfo is sufficient for verifyBatchContentPathB +// and verifyBatchRoots. 
+func (d *Derivation) fetchBatchInfoPathB(ctx context.Context, txHash common.Hash, blockNumber uint64) (*BatchInfo, error) { + tx, pending, err := d.l1Client.TransactionByHash(ctx, txHash) + if err != nil { + return nil, err + } + if pending { + return nil, errors.New("pending transaction") + } + batch, err := d.UnPackData(tx.Data()) + if err != nil { + return nil, err + } + + bi := new(BatchInfo) + if err := bi.ParseBatchMetadataOnly(batch); err != nil { + return nil, fmt.Errorf("parse batch metadata error: %w", err) + } + bi.l1BlockNumber = blockNumber + bi.txHash = txHash + bi.nonce = tx.Nonce() + bi.blobHashes = tx.BlobHashes() + return bi, nil +} + +// verifyBatchContentPathB rebuilds blob versioned hashes from local L2 +// blocks in the [batchInfo.firstBlockNumber, batchInfo.lastBlockNumber] +// range and compares them against batchInfo.blobHashes (taken from the +// L1 commitBatch tx). Returns nil on match. +func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *BatchInfo) error { + d.metrics.IncPathBTriggered() + + if batchInfo.firstBlockNumber == 0 || batchInfo.lastBlockNumber < batchInfo.firstBlockNumber { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: invalid block range [%d, %d]", + batchInfo.firstBlockNumber, batchInfo.lastBlockNumber) + } + if len(batchInfo.blobHashes) == 0 { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: no blob hashes recorded for batch %d", batchInfo.batchIndex) + } + + bd := commonbatch.NewBatchData() + totalL1MessagePopped := batchInfo.parentTotalL1MessagePopped + + for n := batchInfo.firstBlockNumber; n <= batchInfo.lastBlockNumber; n++ { + block, err := d.l2Client.BlockByNumber(ctx, big.NewInt(int64(n))) + if err != nil { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: read local block %d failed: %w", n, err) + } + if block == nil { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: local block %d missing", n) + } + + txsPayload, l1TxHashes, newTotal, l2TxNum, err := 
commonbatch.ParsingTxs(block.Transactions(), totalL1MessagePopped) + if err != nil { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: parsingTxs failed at block %d: %w", n, err) + } + l1MsgNum := int(newTotal - totalL1MessagePopped) + blockCtx := commonbatch.BuildBlockContext(block.Header(), l2TxNum+l1MsgNum, l1MsgNum) + bd.Append(blockCtx, txsPayload, l1TxHashes) + totalL1MessagePopped = newTotal + } + + // Pick V1 or V2 payload format based on batch version. V2 prepends the + // concatenated block contexts to the tx payload; V1 carries only txs. + var payload []byte + if batchInfo.version >= 2 { + payload = bd.TxsPayloadV2() + } else { + payload = bd.TxsPayload() + } + + compressed, err := commonblob.CompressBatchBytes(payload) + if err != nil { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: compress failed: %w", err) + } + + // maxBlobs is only an upper bound for sidecar capacity; the actual + // blob count is determined by the size of `compressed`. We pass + // len(blobHashes) so a payload that would require more blobs than + // L1 declared is rejected up front rather than producing a sidecar + // with the wrong blob count and a confusing hash mismatch later. + sidecar, err := commonblob.MakeBlobTxSidecar(compressed, len(batchInfo.blobHashes)) + if err != nil { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: build sidecar failed: %w", err) + } + + rebuilt := sidecar.BlobHashes() + if len(rebuilt) != len(batchInfo.blobHashes) { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: blob count mismatch (rebuilt=%d, l1=%d)", + len(rebuilt), len(batchInfo.blobHashes)) + } + for i := range rebuilt { + if rebuilt[i] != batchInfo.blobHashes[i] { + d.metrics.IncPathBFailed() + return fmt.Errorf("path B: versioned hash mismatch at index %d (rebuilt=%s, l1=%s)", + i, rebuilt[i].Hex(), batchInfo.blobHashes[i].Hex()) + } + } + return nil +} + +// fetchLocalLastHeader returns the local L2 header at batchInfo.lastBlockNumber. 
+// Used by Path B after content verification succeeds, to feed verifyBatchRoots. +func (d *Derivation) fetchLocalLastHeader(ctx context.Context, batchInfo *BatchInfo) (*eth.Header, error) { + header, err := d.l2Client.HeaderByNumber(ctx, big.NewInt(int64(batchInfo.lastBlockNumber))) + if err != nil { + return nil, fmt.Errorf("path B: read local header at %d failed: %w", batchInfo.lastBlockNumber, err) + } + if header == nil { + return nil, fmt.Errorf("path B: local header at %d missing", batchInfo.lastBlockNumber) + } + return header, nil +} + diff --git a/node/flags/flags.go b/node/flags/flags.go index 5472464b5..2ac941b6c 100644 --- a/node/flags/flags.go +++ b/node/flags/flags.go @@ -252,6 +252,13 @@ var ( Usage: "The number of confirmations needed on L1 for finalization. If not set, the default value is l1.confirmations", EnvVar: prefixEnvVar("DERIVATION_CONFIRMATIONS"), } + + DerivationVerifyMode = cli.StringFlag{ + Name: "derivation.verify-mode", + Usage: `Batch verification mode (SPEC-005): "pathA" pulls beacon blob and decodes (default); "pathB" rebuilds blob from local L2 blocks and compares versioned hashes. 
Mutually exclusive; not switchable at runtime.`, + EnvVar: prefixEnvVar("DERIVATION_VERIFY_MODE"), + Value: "pathA", + } // Logger LogLevel = &cli.StringFlag{ Name: "log.level", @@ -347,6 +354,7 @@ var Flags = []cli.Flag{ DerivationLogProgressInterval, DerivationFetchBlockRange, DerivationConfirmations, + DerivationVerifyMode, L1BeaconAddr, // blocktag options From a73639de345b1551fce8f243c3a6702f5cd5e758 Mon Sep 17 00:00:00 2001 From: corey Date: Wed, 13 May 2026 18:42:01 +0800 Subject: [PATCH 14/17] feat(node): SPEC-005 derivation-driven tag management + validator role cleanup Completes the remaining SPEC-005 work after Path B (commit 9f01c530): Target A residual -- delete validator node role: - Remove ValidatorEnable flag (--validator / MORPH_NODE_VALIDATOR) - Collapse cmd/node/main.go isValidator branch into a single linear assembly path; all non-mock nodes now run consensus + derivation simultaneously, sharing a single store + syncer instance - Remove validator-el / validator_node containers from docker-compose-4nodes.yml and the docker-compose-reth.yml override - Delete node/ops-morph/docker-compose-validator.yml Target C -- delete blocktag, derivation drives safe/finalized heads: - Delete node/blocktag/ package and BlockTagSafeConfirmations flag - Add tag_advance.go: tagAdvancer with advanceSafe / advanceFinalized, finalized<=safe<=unsafe invariant assertion, L1-reorg reset hook, and dedup of redundant SetBlockTags calls - Add finalizer.go: in-process subcomponent that polls L1 finalized -> Rollup.LastCommittedBatchIndex(@finalized) and forwards to tagAdvancer.advanceFinalized; lifecycle joined to derivation.Stop - Wire derivation main loop: advanceSafe after verifyBatchRoots passes; finalizer goroutine started in Start() and joined in Stop() - Add 6 prometheus metrics (safe/finalized advance + l2 number gauges + l1 reorg reset + invariant violation counters) - Add --derivation.finalizer-interval flag (default 30s) Tests: - 7 tag_advance behavioural tests 
(safe/finalized advance, dedup, monotonicity, invariant skip, L1 reorg reset, BlockNumber failure) - 3 static-scan tests guarding against validator/blocktag residue and common/blob package drift in Path B Note on SPEC-005 section 4.3 [must]: tagAdvancer.reset() infrastructure is in place but is not yet invoked from production code; the trigger (derivation main loop's L1 reorg detection) is deferred per tech-design section 8. The blocktag-side independent polling path is fully removed as required. Refs: morph-l2/morph-specs SPEC-005 sections 4.1, 4.3 Co-Authored-By: Claude Opus 4.7 (1M context) --- node/blocktag/config.go | 52 --- node/blocktag/service.go | 457 -------------------- node/cmd/node/main.go | 188 ++++---- node/derivation/batch_info.go | 4 +- node/derivation/config.go | 19 +- node/derivation/derivation.go | 29 +- node/derivation/finalizer.go | 161 +++++++ node/derivation/metrics.go | 71 ++- node/derivation/static_scan_test.go | 144 ++++++ node/derivation/tag_advance.go | 185 ++++++++ node/derivation/tag_advance_test.go | 186 ++++++++ node/derivation/verify.go | 2 +- node/derivation/verify_path_b.go | 4 +- node/flags/flags.go | 26 +- node/ops-morph/docker-compose-validator.yml | 40 -- ops/docker/docker-compose-4nodes.yml | 62 --- ops/docker/docker-compose-reth.yml | 5 - 17 files changed, 878 insertions(+), 757 deletions(-) delete mode 100644 node/blocktag/config.go delete mode 100644 node/blocktag/service.go create mode 100644 node/derivation/finalizer.go create mode 100644 node/derivation/static_scan_test.go create mode 100644 node/derivation/tag_advance.go create mode 100644 node/derivation/tag_advance_test.go delete mode 100644 node/ops-morph/docker-compose-validator.yml diff --git a/node/blocktag/config.go b/node/blocktag/config.go deleted file mode 100644 index 43c282800..000000000 --- a/node/blocktag/config.go +++ /dev/null @@ -1,52 +0,0 @@ -package blocktag - -import ( - "fmt" - "time" - - "github.com/morph-l2/go-ethereum/common" - 
"github.com/urfave/cli" - - node "morph-l2/node/core" - "morph-l2/node/flags" -) - -const ( - // DefaultSafeConfirmations is the default number of L1 blocks to wait before considering a batch as safe - DefaultSafeConfirmations = 10 - // DefaultPollInterval is the default interval to poll L1 for batch status updates - DefaultPollInterval = 10 * time.Second -) - -// Config holds the configuration for BlockTagService -type Config struct { - RollupAddress common.Address - SafeConfirmations uint64 - PollInterval time.Duration -} - -// DefaultConfig returns the default configuration -func DefaultConfig() *Config { - return &Config{ - SafeConfirmations: DefaultSafeConfirmations, - PollInterval: DefaultPollInterval, - } -} - -// SetCliContext sets the configuration from CLI context -func (c *Config) SetCliContext(ctx *cli.Context) error { - // Determine RollupAddress: use explicit flag, or mainnet default, or error - if ctx.GlobalBool(flags.MainnetFlag.Name) { - c.RollupAddress = node.MainnetRollupContractAddress - } else if ctx.GlobalIsSet(flags.RollupContractAddress.Name) { - c.RollupAddress = common.HexToAddress(ctx.GlobalString(flags.RollupContractAddress.Name)) - } else { - return fmt.Errorf("rollup contract address is required: either specify --%s or use --%s for mainnet default", - flags.RollupContractAddress.Name, flags.MainnetFlag.Name) - } - - if ctx.GlobalIsSet(flags.BlockTagSafeConfirmations.Name) { - c.SafeConfirmations = ctx.GlobalUint64(flags.BlockTagSafeConfirmations.Name) - } - return nil -} diff --git a/node/blocktag/service.go b/node/blocktag/service.go deleted file mode 100644 index 45f7ecda3..000000000 --- a/node/blocktag/service.go +++ /dev/null @@ -1,457 +0,0 @@ -package blocktag - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/morph-l2/go-ethereum/accounts/abi/bind" - "github.com/morph-l2/go-ethereum/common" - "github.com/morph-l2/go-ethereum/ethclient" - "github.com/morph-l2/go-ethereum/rpc" - tmlog 
"github.com/tendermint/tendermint/libs/log" - - "morph-l2/bindings/bindings" - "morph-l2/node/types" -) - -// BlockTagType represents the type of block tag (safe or finalized) -type BlockTagType int - -const ( - TagTypeSafe BlockTagType = iota - TagTypeFinalized -) - -// BlockTagService is responsible for tracking and updating safe/finalized block tags -// based on L1 batch commit tx status. -// -// Key logic: -// - Safe: batch tx is committed to L1 with N block confirmations (configurable) -// - Finalized: batch tx's L1 block is finalized (using L1 finalized block tag) -type BlockTagService struct { - ctx context.Context - cancel context.CancelFunc - - // Current safe and finalized L2 block hashes - safeL2BlockHash common.Hash - finalizedL2BlockHash common.Hash - // Last notified hashes (to avoid redundant RPC calls) - lastNotifiedSafeHash common.Hash - lastNotifiedFinalizedHash common.Hash - - // Cached batch index for optimization (avoid full binary search each time) - // Separate caches for safe and finalized since they have different maxBatchIndex - lastKnownSafeBatchIndex uint64 - lastKnownFinalizedBatchIndex uint64 - - // Clients - l1Client *ethclient.Client - l2Client *types.RetryableClient - rollup *bindings.Rollup - - // Configuration - rollupAddress common.Address - safeConfirmations uint64 // Number of L1 blocks to wait before considering a batch as safe - pollInterval time.Duration - - logger tmlog.Logger - stop chan struct{} -} - -// NewBlockTagService creates a new BlockTagService -func NewBlockTagService( - ctx context.Context, - l1Client *ethclient.Client, - l2Client *types.RetryableClient, - config *Config, - logger tmlog.Logger, -) (*BlockTagService, error) { - if l1Client == nil { - return nil, fmt.Errorf("L1 client is required") - } - if config.RollupAddress == (common.Address{}) { - return nil, fmt.Errorf("Rollup contract address is required") - } - - rollup, err := bindings.NewRollup(config.RollupAddress, l1Client) - if err != nil { - return 
nil, fmt.Errorf("failed to create rollup binding: %w", err) - } - - ctx, cancel := context.WithCancel(ctx) - - return &BlockTagService{ - ctx: ctx, - cancel: cancel, - l1Client: l1Client, - l2Client: l2Client, - rollup: rollup, - rollupAddress: config.RollupAddress, - safeConfirmations: config.SafeConfirmations, - pollInterval: config.PollInterval, - logger: logger.With("module", "blocktag"), - stop: make(chan struct{}), - }, nil -} - -// Start starts the BlockTagService -func (s *BlockTagService) Start() error { - s.logger.Info("Starting BlockTagService", - "safeConfirmations", s.safeConfirmations, - "pollInterval", s.pollInterval, - ) - - // Initialize by checking current L1 batch status - if err := s.initialize(); err != nil { - s.logger.Error("Failed to initialize BlockTagService", "error", err) - // Don't return error, let the service start and retry - } - - go s.loop() - return nil -} - -// Stop stops the BlockTagService -func (s *BlockTagService) Stop() { - s.logger.Info("Stopping BlockTagService") - s.cancel() - <-s.stop - s.logger.Info("BlockTagService stopped") -} - -// initialize initializes the service by checking current L1 batch status -func (s *BlockTagService) initialize() error { - s.logger.Info("Initializing BlockTagService") - return s.updateBlockTags() -} - -// loop is the main loop that polls L1 for batch status updates -func (s *BlockTagService) loop() { - defer close(s.stop) - - ticker := time.NewTicker(s.pollInterval) - defer ticker.Stop() - - for { - select { - case <-s.ctx.Done(): - return - case <-ticker.C: - if err := s.updateBlockTags(); err != nil { - s.logger.Error("Failed to update block tags", "error", err) - } - } - } -} - -// updateBlockTags updates the safe and finalized block tags based on L1 batch tx status -func (s *BlockTagService) updateBlockTags() error { - l2Head, err := s.l2Client.BlockNumber(s.ctx) - if err != nil { - return fmt.Errorf("failed to get L2 head: %w", err) - } - - var safeBlockNum uint64 - var safeBlockHash 
common.Hash - - // Update safe block - safeBlockNum, safeBlockHash, err = s.getL2BlockForTag(TagTypeSafe, l2Head) - if err != nil { - s.logger.Error("Failed to get safe L2 block", "error", err) - } else if safeBlockHash != (common.Hash{}) { - s.setSafeL2Block(safeBlockHash) - } - - // Update finalized block - finalizedBlockNum, finalizedBlockHash, err := s.getL2BlockForTag(TagTypeFinalized, l2Head) - if err != nil { - s.logger.Error("Failed to get finalized L2 block", "error", err) - } else if finalizedBlockHash != (common.Hash{}) { - // If finalized > safe, update safe to finalized (finalized is a stronger state) - if finalizedBlockNum > safeBlockNum { - safeBlockHash = finalizedBlockHash - s.setSafeL2Block(safeBlockHash) - } - s.setFinalizedL2Block(finalizedBlockHash) - } - - // Notify geth - if err := s.notifyGeth(); err != nil { - s.logger.Error("Failed to notify geth of block tags", "error", err) - } - - s.logger.Debug("Block tags updated", - "l2Head", l2Head, - "safeL2BlockHash", s.safeL2BlockHash.Hex(), - "finalizedL2BlockHash", s.finalizedL2BlockHash.Hex(), - ) - - return nil -} - -// getL2BlockForTag gets the L2 block number and hash based on the L1 block tag -// Also validates state root matches between L1 batch and L2 block -func (s *BlockTagService) getL2BlockForTag(tagType BlockTagType, l2Head uint64) (uint64, common.Hash, error) { - var l1BlockTag rpc.BlockNumber - - switch tagType { - case TagTypeSafe: - latestL1, err := s.l1Client.BlockNumber(s.ctx) - if err != nil { - return 0, common.Hash{}, fmt.Errorf("failed to get L1 latest block: %w", err) - } - if latestL1 <= s.safeConfirmations { - return 0, common.Hash{}, nil - } - l1BlockTag = rpc.BlockNumber(latestL1 - s.safeConfirmations) - - case TagTypeFinalized: - l1BlockTag = rpc.FinalizedBlockNumber - - default: - return 0, common.Hash{}, fmt.Errorf("unknown tag type: %d", tagType) - } - - // Query rollup contract at specified L1 block - lastCommittedBatchIndex, err := 
s.getLastCommittedBatchAtBlock(l1BlockTag) - if err != nil { - return 0, common.Hash{}, fmt.Errorf("failed to get last committed batch: %w", err) - } - if lastCommittedBatchIndex == 0 { - return 0, common.Hash{}, nil - } - - // Find the largest completed batch (lastL2Block <= l2Head) - // This works for both synced and syncing scenarios - targetBatchIndex, targetBatchLastBlockNum, err := s.findCompletedBatchForL2Block(tagType, l2Head, lastCommittedBatchIndex) - if err != nil { - s.logger.Debug("No completed batch found", "l2Head", l2Head, "error", err) - return 0, common.Hash{}, nil - } - - // Validate state root. - // Skip validation for already finalized batches, as their state roots may have been - // deleted from the L1 contract after finalization - lastFinalizedBatchIndex, err := s.rollup.LastFinalizedBatchIndex(nil) - if err != nil { - s.logger.Info("Failed to get last finalized batch index, skipping state root validation", "error", err) - return 0, common.Hash{}, nil - } - if targetBatchIndex < lastFinalizedBatchIndex.Uint64() { - // Batch data may have been deleted after finalization, cannot validate - // Return error so caller skips this batch and keeps previous safe/finalized value - // TODO: optimize this by using a different approach to get the state root - s.logger.Info("batch already finalized, state root may be deleted", - "batchIndex", targetBatchIndex, - "lastFinalized", lastFinalizedBatchIndex.Uint64()) - return 0, common.Hash{}, nil - } - if err := s.validateBatchStateRoot(targetBatchIndex, targetBatchLastBlockNum); err != nil { - s.logger.Error("State root validation failed", - "tagType", tagType, - "batchIndex", targetBatchIndex, - "l2Block", targetBatchLastBlockNum, - "error", err, - ) - return 0, common.Hash{}, err - } - - // Get L2 block header for hash - l2Header, err := s.l2Client.HeaderByNumber(s.ctx, big.NewInt(int64(targetBatchLastBlockNum))) - if err != nil { - return 0, common.Hash{}, fmt.Errorf("failed to get L2 block header: %w", 
err) - } - - l2BlockHash := l2Header.Hash() - - s.logger.Debug("Got L2 block for tag", - "tagType", tagType, - "l1BlockTag", l1BlockTag, - "batchIndex", targetBatchIndex, - "l2Block", targetBatchLastBlockNum, - "l2BlockHash", l2BlockHash.Hex(), - ) - - return targetBatchLastBlockNum, l2BlockHash, nil -} - -// validateBatchStateRoot validates that the state root of batch's lastL2Block matches L1 -func (s *BlockTagService) validateBatchStateRoot(batchIndex uint64, batchLastBlockNum uint64) error { - // Get L2 block header - l2Header, err := s.l2Client.HeaderByNumber(s.ctx, big.NewInt(int64(batchLastBlockNum))) - if err != nil { - return fmt.Errorf("failed to get L2 block header for block %d: %w", batchLastBlockNum, err) - } - - // Get state root from L1 committed batch - stateRoot, err := s.rollup.CommittedStateRoots(nil, big.NewInt(int64(batchIndex))) - if err != nil { - return fmt.Errorf("failed to get state root from L1: %w", err) - } - - // Compare state roots - l1StateRoot := common.BytesToHash(stateRoot[:]) - if l1StateRoot != l2Header.Root { - return fmt.Errorf("state root mismatch for batch %d: L1=%s, L2=%s", batchIndex, l1StateRoot.Hex(), l2Header.Root.Hex()) - } - - return nil -} - -// findCompletedBatchForL2Block finds the largest batch where lastL2Block <= l2BlockNum. -// Uses cached index for optimization: first call binary search, subsequent calls search forward. -// Separate caches for safe and finalized to avoid conflicts. -func (s *BlockTagService) findCompletedBatchForL2Block(tagType BlockTagType, l2HeaderNum uint64, lastCommittedBatchIndex uint64) (uint64, uint64, error) { - return s.findCompletedBatchForL2BlockWithDepth(tagType, l2HeaderNum, lastCommittedBatchIndex, 0) -} - -// findCompletedBatchForL2BlockWithDepth is the internal implementation with recursion depth limit. -// maxDepth is set to 1 to allow one retry after cache reset. 
-func (s *BlockTagService) findCompletedBatchForL2BlockWithDepth(tagType BlockTagType, l2HeaderNum uint64, lastCommittedBatchIndex uint64, depth int) (uint64, uint64, error) { - const maxDepth = 2 - - if lastCommittedBatchIndex == 0 { - return 0, 0, fmt.Errorf("no batches available") - } - - // Get cached index based on tag type - startIdx := s.getCachedBatchIndex(tagType) - if startIdx == 0 || startIdx > lastCommittedBatchIndex { - // First time or cache invalid: use binary search to find starting point - startIdx = s.binarySearchBatch(l2HeaderNum, lastCommittedBatchIndex) - if startIdx == 0 { - return 0, 0, fmt.Errorf("no completed batch found for L2 block %d", l2HeaderNum) - } - } - - // Search forward from startIdx - var resultIdx, resultLastL2Block uint64 - for idx := startIdx; idx <= lastCommittedBatchIndex; idx++ { - batchData, err := s.rollup.BatchDataStore(nil, big.NewInt(int64(idx))) - if err != nil { - return 0, 0, fmt.Errorf("failed to get batch data for index %d: %w", idx, err) - } - - lastL2Block := batchData.BlockNumber.Uint64() - if lastL2Block <= l2HeaderNum { - resultIdx = idx - resultLastL2Block = lastL2Block - s.setCachedBatchIndex(tagType, idx) - } else { - break - } - } - - // Handle L2 reorg: if cache was too new, reset and use binary search - if resultIdx == 0 { - if depth >= maxDepth { - return 0, 0, fmt.Errorf("no completed batch found for L2 block %d after retry", l2HeaderNum) - } - s.setCachedBatchIndex(tagType, 0) - return s.findCompletedBatchForL2BlockWithDepth(tagType, l2HeaderNum, lastCommittedBatchIndex, depth+1) - } - - return resultIdx, resultLastL2Block, nil -} - -func (s *BlockTagService) getCachedBatchIndex(tagType BlockTagType) uint64 { - if tagType == TagTypeSafe { - return s.lastKnownSafeBatchIndex - } - return s.lastKnownFinalizedBatchIndex -} - -func (s *BlockTagService) setCachedBatchIndex(tagType BlockTagType, idx uint64) { - if tagType == TagTypeSafe { - s.lastKnownSafeBatchIndex = idx - } else { - 
s.lastKnownFinalizedBatchIndex = idx - } -} - -// binarySearchBatch finds the largest batch index where lastL2BlockInBatch <= l2HeaderNum -func (s *BlockTagService) binarySearchBatch(l2HeaderNum uint64, maxBatchIndex uint64) uint64 { - low, high := uint64(1), maxBatchIndex - var result uint64 - - for low <= high { - mid := (low + high) / 2 - batchData, err := s.rollup.BatchDataStore(nil, big.NewInt(int64(mid))) - if err != nil { - return result // Return best result so far on error - } - - if batchData.BlockNumber.Uint64() <= l2HeaderNum { - result = mid - low = mid + 1 - } else { - high = mid - 1 - } - } - - return result -} - -// getLastCommittedBatchAtBlock queries the rollup contract at a specific L1 block -func (s *BlockTagService) getLastCommittedBatchAtBlock(l1BlockTag rpc.BlockNumber) (uint64, error) { - var blockNum *big.Int - if l1BlockTag == rpc.FinalizedBlockNumber { - blockNum = big.NewInt(int64(rpc.FinalizedBlockNumber)) - } else if l1BlockTag >= 0 { - blockNum = big.NewInt(int64(l1BlockTag)) - } - - lastCommitted, err := s.rollup.LastCommittedBatchIndex(&bind.CallOpts{ - BlockNumber: blockNum, - Context: s.ctx, - }) - if err != nil { - return 0, err - } - - return lastCommitted.Uint64(), nil -} - -// setSafeL2Block sets the safe L2 block hash -func (s *BlockTagService) setSafeL2Block(blockHash common.Hash) { - if blockHash != s.safeL2BlockHash { - s.safeL2BlockHash = blockHash - s.logger.Info("Updated safe L2 block", "hash", blockHash.Hex()) - } -} - -// setFinalizedL2Block sets the finalized L2 block hash -func (s *BlockTagService) setFinalizedL2Block(blockHash common.Hash) { - if blockHash != s.finalizedL2BlockHash { - s.finalizedL2BlockHash = blockHash - s.logger.Info("Updated finalized L2 block", "hash", blockHash.Hex()) - } -} - -// notifyGeth notifies geth of the new block tags via RPC -// Only calls RPC if there are changes since last notification -func (s *BlockTagService) notifyGeth() error { - safeBlockHash := s.safeL2BlockHash - 
finalizedBlockHash := s.finalizedL2BlockHash - - // Skip if no changes - if safeBlockHash == s.lastNotifiedSafeHash && finalizedBlockHash == s.lastNotifiedFinalizedHash { - return nil - } - - // Skip if both are empty - if safeBlockHash == (common.Hash{}) && finalizedBlockHash == (common.Hash{}) { - return nil - } - - if err := s.l2Client.SetBlockTags(s.ctx, safeBlockHash, finalizedBlockHash); err != nil { - return err - } - - // Update last notified hashes - s.lastNotifiedSafeHash = safeBlockHash - s.lastNotifiedFinalizedHash = finalizedBlockHash - return nil -} diff --git a/node/cmd/node/main.go b/node/cmd/node/main.go index 4b5d4fc3c..294665461 100644 --- a/node/cmd/node/main.go +++ b/node/cmd/node/main.go @@ -20,7 +20,6 @@ import ( "github.com/urfave/cli" "morph-l2/bindings/bindings" - "morph-l2/node/blocktag" node "morph-l2/node/core" "morph-l2/node/db" "morph-l2/node/derivation" @@ -49,21 +48,18 @@ func main() { func L2NodeMain(ctx *cli.Context) error { var ( - err error - executor *node.Executor - syncer *sync.Syncer - ms *mock.Sequencer - tmNode *tmnode.Node - dvNode *derivation.Derivation - blockTagSvc *blocktag.BlockTagService - tracker *l1sequencer.L1Tracker - verifier *l1sequencer.SequencerVerifier - signer l1sequencer.Signer - + err error + executor *node.Executor + syncer *sync.Syncer + ms *mock.Sequencer + tmNode *tmnode.Node + dvNode *derivation.Derivation + tracker *l1sequencer.L1Tracker + verifier *l1sequencer.SequencerVerifier + signer l1sequencer.Signer nodeConfig = node.DefaultConfig() ) isMockSequencer := ctx.GlobalBool(flags.MockEnabled.Name) - isValidator := ctx.GlobalBool(flags.ValidatorEnable.Name) // Apply consensus switch height if explicitly set via flag if ctx.GlobalIsSet(flags.ConsensusSwitchHeight.Name) { @@ -78,88 +74,82 @@ func L2NodeMain(ctx *cli.Context) error { return err } - if isValidator { - // configure store - dbConfig := db.DefaultConfig() - dbConfig.SetCliContext(ctx) - store, err := db.NewStore(dbConfig, home) - if err 
!= nil { - return err - } - derivationCfg := derivation.DefaultConfig() - if err := derivationCfg.SetCliContext(ctx); err != nil { - return fmt.Errorf("derivation set cli context error: %v", err) - } - syncConfig := sync.DefaultConfig() - if err = syncConfig.SetCliContext(ctx); err != nil { - return err - } - syncer, err = sync.NewSyncer(context.Background(), store, syncConfig, nodeConfig.Logger) - if err != nil { - return fmt.Errorf("failed to create syncer, error: %v", err) - } - l1Client, err := ethclient.Dial(derivationCfg.L1.Addr) - if err != nil { - return fmt.Errorf("dial l1 node error:%v", err) - } - rollup, err := bindings.NewRollup(derivationCfg.RollupContractAddress, l1Client) - if err != nil { - return fmt.Errorf("NewRollup error:%v", err) - } - dvNode, err = derivation.NewDerivationClient(context.Background(), derivationCfg, syncer, store, rollup, nodeConfig.Logger) - if err != nil { - return fmt.Errorf("new derivation client error: %v", err) - } - dvNode.Start() - nodeConfig.Logger.Info("derivation node starting") - } else { - // ========== Create L1 Client ========== - l1RPC := ctx.GlobalString(flags.L1NodeAddr.Name) - l1Client, err := ethclient.Dial(l1RPC) - if err != nil { - return fmt.Errorf("failed to dial L1 node: %w", err) - } + // ========== Shared store + syncer (used by both executor and derivation) ========== + dbConfig := db.DefaultConfig() + dbConfig.SetCliContext(ctx) + store, err := db.NewStore(dbConfig, home) + if err != nil { + return err + } + syncConfig := sync.DefaultConfig() + if err = syncConfig.SetCliContext(ctx); err != nil { + return err + } + syncer, err = sync.NewSyncer(context.Background(), store, syncConfig, nodeConfig.Logger) + if err != nil { + return fmt.Errorf("failed to create syncer, error: %v", err) + } - tracker, verifier, signer, err = initL1SequencerComponents(ctx, l1Client, nodeConfig.Logger) - if err != nil { - return fmt.Errorf("failed to init L1 sequencer components: %w", err) - } + // ========== Derivation 
config + L1 client + rollup binding ========== + // All non-mock nodes self-verify against L1; the L1 client + rollup binding + // is shared by L1 sequencer components and derivation. + derivationCfg := derivation.DefaultConfig() + if err := derivationCfg.SetCliContext(ctx); err != nil { + return fmt.Errorf("derivation set cli context error: %v", err) + } + l1Client, err := ethclient.Dial(derivationCfg.L1.Addr) + if err != nil { + return fmt.Errorf("dial l1 node error: %v", err) + } + rollup, err := bindings.NewRollup(derivationCfg.RollupContractAddress, l1Client) + if err != nil { + return fmt.Errorf("NewRollup error: %v", err) + } + + tracker, verifier, signer, err = initL1SequencerComponents(ctx, l1Client, nodeConfig.Logger) + if err != nil { + return fmt.Errorf("failed to init L1 sequencer components: %w", err) + } + + // ========== Executor + sequencer / mock ========== + tmCfg, err := sequencer.LoadTmConfig(ctx, home) + if err != nil { + return err + } + tmVal := privval.LoadOrGenFilePV(tmCfg.PrivValidatorKeyFile(), tmCfg.PrivValidatorStateFile()) + pubKey, _ := tmVal.GetPubKey() - // ========== Launch Tendermint Node ========== - tmCfg, err := sequencer.LoadTmConfig(ctx, home) + // Reuse the shared syncer instance -- DevSequencer mode is the only path + // that pulls a syncer out of NewExecutor, so we hand back the same one + // rather than letting NewExecutor open a second store + syncer. 
+ newSyncerFunc := func() (*sync.Syncer, error) { return syncer, nil } + executor, err = node.NewExecutor(newSyncerFunc, nodeConfig, pubKey) + if err != nil { + return err + } + if isMockSequencer { + ms, err = mock.NewSequencer(executor) if err != nil { return err } - tmVal := privval.LoadOrGenFilePV(tmCfg.PrivValidatorKeyFile(), tmCfg.PrivValidatorStateFile()) - pubKey, _ := tmVal.GetPubKey() - - newSyncerFunc := func() (*sync.Syncer, error) { return node.NewSyncer(ctx, home, nodeConfig) } - executor, err = node.NewExecutor(newSyncerFunc, nodeConfig, pubKey) + go ms.Start() + } else { + tmNode, err = sequencer.SetupNode(tmCfg, tmVal, executor, nodeConfig.Logger, verifier, signer) if err != nil { - return err + return fmt.Errorf("failed to setup consensus node: %v", err) } - if isMockSequencer { - ms, err = mock.NewSequencer(executor) - if err != nil { - return err - } - go ms.Start() - } else { - tmNode, err = sequencer.SetupNode(tmCfg, tmVal, executor, nodeConfig.Logger, verifier, signer) - if err != nil { - return fmt.Errorf("failed to setup consensus node: %v", err) - } - if err = tmNode.Start(); err != nil { - return fmt.Errorf("failed to start consensus node, error: %v", err) - } + if err = tmNode.Start(); err != nil { + return fmt.Errorf("failed to start consensus node, error: %v", err) } + } - // ========== Initialize BlockTagService ========== - blockTagSvc, err = initBlockTagService(ctx, l1Client, executor, nodeConfig.Logger) - if err != nil { - return fmt.Errorf("failed to init BlockTagService: %w", err) - } + // ========== Derivation (SPEC-005: self-verifies + drives safe/finalized tags) ========== + dvNode, err = derivation.NewDerivationClient(context.Background(), derivationCfg, syncer, store, rollup, nodeConfig.Logger) + if err != nil { + return fmt.Errorf("new derivation client error: %v", err) } + dvNode.Start() + nodeConfig.Logger.Info("derivation started") interruptChannel := make(chan os.Signal, 1) signal.Notify(interruptChannel, []os.Signal{ 
@@ -185,9 +175,6 @@ func L2NodeMain(ctx *cli.Context) error { if dvNode != nil { dvNode.Stop() } - if blockTagSvc != nil { - blockTagSvc.Stop() - } if tracker != nil { tracker.Stop() } @@ -259,31 +246,6 @@ func initL1SequencerComponents( return tracker, verifier, signer, nil } -// initBlockTagService initializes the block tag service -func initBlockTagService( - ctx *cli.Context, - l1Client *ethclient.Client, - executor *node.Executor, - logger tmlog.Logger, -) (*blocktag.BlockTagService, error) { - config := blocktag.DefaultConfig() - if err := config.SetCliContext(ctx); err != nil { - return nil, err - } - - svc, err := blocktag.NewBlockTagService(context.Background(), l1Client, executor.L2Client(), config, logger) - if err != nil { - return nil, err - } - - if err := svc.Start(); err != nil { - return nil, err - } - - logger.Info("BlockTagService started") - return svc, nil -} - func homeDir(ctx *cli.Context) (string, error) { home := ctx.GlobalString(flags.Home.Name) if home == "" { diff --git a/node/derivation/batch_info.go b/node/derivation/batch_info.go index 1e2261cad..90c319d16 100644 --- a/node/derivation/batch_info.go +++ b/node/derivation/batch_info.go @@ -62,7 +62,7 @@ type BatchInfo struct { // blobHashes is the ordered list of EIP-4844 blob versioned hashes // declared by the L1 commitBatch tx. Path B uses this to compare - // against locally-rebuilt versioned hashes (SPEC-005 §4). + // against locally-rebuilt versioned hashes (SPEC-005 section 4). blobHashes []common.Hash } @@ -82,7 +82,7 @@ func (bi *BatchInfo) TxNum() uint64 { return bi.txNum } -// ParseBatchMetadataOnly populates BatchInfo using only L1 calldata — +// ParseBatchMetadataOnly populates BatchInfo using only L1 calldata -- // it does NOT touch the blob sidecar and does NOT decode any transactions. 
// // Used by Path B (SPEC-005), which verifies the batch by rebuilding the diff --git a/node/derivation/config.go b/node/derivation/config.go index 81a581049..47d66cc13 100644 --- a/node/derivation/config.go +++ b/node/derivation/config.go @@ -30,13 +30,18 @@ const ( // DefaultLogProgressInterval is the frequency at which we log progress. DefaultLogProgressInterval = time.Second * 10 - // VerifyMode values (SPEC-005 §4.2). Selected at startup; not switchable + // VerifyMode values (SPEC-005 section 4.2). Selected at startup; not switchable // at runtime. Default is VerifyModePathA which preserves current behaviour. VerifyModePathA = "pathA" VerifyModePathB = "pathB" // DefaultVerifyMode is Path A (pull beacon blob, decode, derive, verify). DefaultVerifyMode = VerifyModePathA + + // DefaultFinalizerInterval is the polling cadence for the SPEC-005 section 4.7.4 + // finalizer subcomponent that walks L1 finalized -> Rollup.LastCommittedBatchIndex. + // 30s is roughly an L1 epoch; cheap relative to derivation's main poll loop. 
+ DefaultFinalizerInterval = 30 * time.Second ) type Config struct { @@ -50,6 +55,7 @@ type Config struct { LogProgressInterval time.Duration `json:"log_progress_interval"` FetchBlockRange uint64 `json:"fetch_block_range"` VerifyMode string `json:"verify_mode"` + FinalizerInterval time.Duration `json:"finalizer_interval"` MetricsPort uint64 `json:"metrics_port"` MetricsHostname string `json:"metrics_hostname"` MetricsServerEnable bool `json:"metrics_server_enable"` @@ -64,6 +70,7 @@ func DefaultConfig() *Config { LogProgressInterval: DefaultLogProgressInterval, FetchBlockRange: DefaultFetchBlockRange, VerifyMode: DefaultVerifyMode, + FinalizerInterval: DefaultFinalizerInterval, L2: new(types.L2Config), } } @@ -133,6 +140,16 @@ func (c *Config) SetCliContext(ctx *cli.Context) error { c.VerifyMode, VerifyModePathA, VerifyModePathB) } + if ctx.GlobalIsSet(flags.DerivationFinalizerInterval.Name) { + c.FinalizerInterval = ctx.GlobalDuration(flags.DerivationFinalizerInterval.Name) + if c.FinalizerInterval <= 0 { + return errors.New("invalid finalizerInterval") + } + } + if c.FinalizerInterval == 0 { + c.FinalizerInterval = DefaultFinalizerInterval + } + l2EthAddr := ctx.GlobalString(flags.L2EthAddr.Name) l2EngineAddr := ctx.GlobalString(flags.L2EngineAddr.Name) fileName := ctx.GlobalString(flags.L2EngineJWTSecret.Name) diff --git a/node/derivation/derivation.go b/node/derivation/derivation.go index 17345dd63..330cc77f8 100644 --- a/node/derivation/derivation.go +++ b/node/derivation/derivation.go @@ -60,8 +60,13 @@ type Derivation struct { fetchBlockRange uint64 pollInterval time.Duration logProgressInterval time.Duration - verifyMode string // SPEC-005 §4.2: "pathA" (default) or "pathB"; bound at startup, never switches. - stop chan struct{} + verifyMode string // SPEC-005 section 4.2: "pathA" (default) or "pathB"; bound at startup, never switches. 
+ finalizerInterval time.Duration + + tagAdvancer *tagAdvancer + finalizer *finalizer + + stop chan struct{} } type DeployContractBackend interface { @@ -116,6 +121,10 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, baseHttp := NewBasicHTTPClient(cfg.BeaconRpc, logger) l1BeaconClient := NewL1BeaconClient(baseHttp) + l2Client := types.NewRetryableClient(aClient, eClient, logger) + tagAdv := newTagAdvancer(l2Client, metrics, logger) + fin := newFinalizer(ctx, cfg.FinalizerInterval, l1Client, l2Client, rollup, tagAdv, logger) + return &Derivation{ ctx: ctx, db: db, @@ -128,7 +137,7 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, logger: logger, RollupContractAddress: cfg.RollupContractAddress, confirmations: cfg.L1.Confirmations, - l2Client: types.NewRetryableClient(aClient, eClient, logger), + l2Client: l2Client, cancel: cancel, stop: make(chan struct{}), startHeight: cfg.StartHeight, @@ -137,6 +146,9 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, pollInterval: cfg.PollInterval, logProgressInterval: cfg.LogProgressInterval, verifyMode: cfg.VerifyMode, + finalizerInterval: cfg.FinalizerInterval, + tagAdvancer: tagAdv, + finalizer: fin, metrics: metrics, l1BeaconClient: l1BeaconClient, L2ToL1MessagePasser: msgPasser, @@ -144,6 +156,11 @@ func NewDerivationClient(ctx context.Context, cfg *Config, syncer *sync.Syncer, } func (d *Derivation) Start() { + // finalizer subcomponent -- SPEC-005 section 4.7.4. Runs in its own goroutine so + // L1-finalized polling does not block the derivation main loop's batch + // verification cadence. 
+ go d.finalizer.run() + // block node startup during initial sync and print some helpful logs go func() { d.syncer.Start() @@ -177,6 +194,9 @@ func (d *Derivation) Stop() { d.cancel() } <-d.stop + if d.finalizer != nil { + <-d.finalizer.stopped // join finalizer per SPEC-005 section 4.7.4 lifecycle contract + } d.logger.Info("derivation service is stopped") } @@ -274,6 +294,9 @@ func (d *Derivation) derivationBlock(ctx context.Context) { } d.metrics.SetBatchStatus(stateNormal) d.metrics.SetL1SyncHeight(lg.BlockNumber) + + // SPEC-005 section 4.7.3: a verified batch (Path A or Path B) advances safe. + d.tagAdvancer.advanceSafe(d.ctx, batchInfo.batchIndex, lastHeader) } d.db.WriteLatestDerivationL1Height(end) diff --git a/node/derivation/finalizer.go b/node/derivation/finalizer.go new file mode 100644 index 000000000..d2dc42841 --- /dev/null +++ b/node/derivation/finalizer.go @@ -0,0 +1,161 @@ +package derivation + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/morph-l2/go-ethereum/accounts/abi/bind" + "github.com/morph-l2/go-ethereum/ethclient" + "github.com/morph-l2/go-ethereum/rpc" + tmlog "github.com/tendermint/tendermint/libs/log" + + "morph-l2/bindings/bindings" + "morph-l2/node/types" +) + +// finalizer is the SPEC-005 section 4.7.4 finalized-head subcomponent. It runs as an +// in-process goroutine inside Derivation (not a standalone service): each +// tick it reads L1 finalized -> Rollup.LastCommittedBatchIndex(@finalized), +// takes min with the highest verified batch index recorded by tagAdvancer, +// resolves the corresponding L2 last-block, and forwards to +// tagAdvancer.advanceFinalized. +// +// Cheap relative to derivation main loop: one L1 header + one contract call +// per tick (default 30s). 
+type finalizer struct { + ctx context.Context + interval time.Duration + logger tmlog.Logger + + l1Client *ethclient.Client + l2Client *types.RetryableClient + rollup *bindings.Rollup + tagAdvancer *tagAdvancer + + stopped chan struct{} +} + +func newFinalizer( + ctx context.Context, + interval time.Duration, + l1Client *ethclient.Client, + l2Client *types.RetryableClient, + rollup *bindings.Rollup, + tagAdv *tagAdvancer, + logger tmlog.Logger, +) *finalizer { + return &finalizer{ + ctx: ctx, + interval: interval, + l1Client: l1Client, + l2Client: l2Client, + rollup: rollup, + tagAdvancer: tagAdv, + logger: logger.With("component", "finalizer"), + stopped: make(chan struct{}), + } +} + +func (f *finalizer) run() { + defer close(f.stopped) + + t := time.NewTicker(f.interval) + defer t.Stop() + + // Run once immediately so the first tag flush doesn't wait a full + // interval after startup; matches blocktag's `initialize()` behaviour. + f.tick() + + for { + select { + case <-f.ctx.Done(): + return + case <-t.C: + f.tick() + } + } +} + +func (f *finalizer) tick() { + // 1. Resolve the L1 finalized header. + finHeader, err := f.l1Client.HeaderByNumber(f.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + f.logger.Info("finalizer: read L1 finalized header failed", "err", err) + return + } + if finHeader == nil { + return + } + + // 2. Query Rollup.LastCommittedBatchIndex pinned at that L1 block. + maxCommittedAtFin, err := f.rollup.LastCommittedBatchIndex(&bind.CallOpts{ + BlockNumber: finHeader.Number, + Context: f.ctx, + }) + if err != nil { + f.logger.Info("finalizer: query LastCommittedBatchIndex@finalized failed", + "l1Block", finHeader.Number.Uint64(), "err", err) + return + } + if maxCommittedAtFin == nil { + return + } + + // 3. Take min with the highest verified batch index recorded by tagAdvancer. 
+ verifiedMax := f.tagAdvancer.SafeMaxBatchIndex() + if verifiedMax == 0 { + // derivation hasn't yet verified any batch this run; nothing to finalize. + return + } + candidate := maxCommittedAtFin.Uint64() + if verifiedMax < candidate { + candidate = verifiedMax + } + if candidate == 0 { + return + } + + // 4. Resolve candidate batch's lastL2Block, then fetch the L2 header. + lastL2Block, err := f.lookupBatchLastL2Block(candidate) + if err != nil { + f.logger.Info("finalizer: lookup batch lastL2Block failed", + "batchIndex", candidate, "err", err) + return + } + // Defensive: a zero BlockNumber means the contract slot is uninitialised + // (BatchDataStore returned the zero value). Advancing finalized to genesis + // would pass the monotonicity check on first call and produce a confusing + // "finalized at block 0" tag -- skip and retry on next tick. + if lastL2Block == 0 { + f.logger.Info("finalizer: batch has zero lastL2Block; skipping", + "batchIndex", candidate) + return + } + header, err := f.l2Client.HeaderByNumber(f.ctx, big.NewInt(int64(lastL2Block))) + if err != nil { + f.logger.Info("finalizer: read L2 header failed", + "batchIndex", candidate, "l2Block", lastL2Block, "err", err) + return + } + if header == nil { + return + } + + f.tagAdvancer.advanceFinalized(f.ctx, candidate, header) +} + +// lookupBatchLastL2Block resolves a batch index to its lastL2Block via the +// rollup contract's BatchDataStore mapping (already populated for any +// committed batch). This is the same data source blocktag.service used. 
+func (f *finalizer) lookupBatchLastL2Block(batchIndex uint64) (uint64, error) { + bd, err := f.rollup.BatchDataStore(&bind.CallOpts{Context: f.ctx}, new(big.Int).SetUint64(batchIndex)) + if err != nil { + return 0, err + } + if bd.BlockNumber == nil { + return 0, fmt.Errorf("batch %d has nil BlockNumber in BatchDataStore", batchIndex) + } + return bd.BlockNumber.Uint64(), nil +} diff --git a/node/derivation/metrics.go b/node/derivation/metrics.go index cd5c4c8db..237341358 100644 --- a/node/derivation/metrics.go +++ b/node/derivation/metrics.go @@ -25,11 +25,20 @@ type Metrics struct { LatestBatchIndex metrics.Gauge SyncedBatchIndex metrics.Gauge - // SPEC-005 §4.6 Path B counters. PathBTriggered increments once per batch + // SPEC-005 section 4.6 Path B counters. PathBTriggered increments once per batch // processed under VerifyModePathB; PathBFailed increments on local-block // missing / encoding error / versioned hash mismatch. PathBTriggered metrics.Counter PathBFailed metrics.Counter + + // SPEC-005 section 4.7 Tag management metrics. Replace the (previously absent) + // blocktag instrumentation; on-call alerts should now key off these. 
+ SafeAdvanceTotal metrics.Counter + FinalizedAdvanceTotal metrics.Counter + SafeL2BlockNumber metrics.Gauge + FinalizedL2BlockNumber metrics.Gauge + L1ReorgResetTotal metrics.Counter + TagInvariantViolationTotal metrics.Counter } func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { @@ -86,6 +95,42 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "path_b_failed_total", Help: "Path B failures: local block missing, encoding error, or versioned hash mismatch.", }, labels).With(labelsAndValues...), + SafeAdvanceTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "safe_advance_total", + Help: "Times derivation advanced the safe L2 head after a verified batch.", + }, labels).With(labelsAndValues...), + FinalizedAdvanceTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "finalized_advance_total", + Help: "Times the finalizer advanced the finalized L2 head from L1 finalized state.", + }, labels).With(labelsAndValues...), + SafeL2BlockNumber: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "safe_l2_block_number", + Help: "Current in-memory safe L2 block number (mirror of derivation tag advancer).", + }, labels).With(labelsAndValues...), + FinalizedL2BlockNumber: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "finalized_l2_block_number", + Help: "Current in-memory finalized L2 block number (mirror of derivation tag advancer).", + }, labels).With(labelsAndValues...), + L1ReorgResetTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "l1_reorg_reset_total", + Help: "Times an L1 reorg triggered a tag advancer reset (safe cleared, refilled by re-derivation).", + }, 
labels).With(labelsAndValues...), + TagInvariantViolationTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: metricsSubsystem, + Name: "tag_invariant_violation_total", + Help: "Times the finalized <= safe <= unsafe invariant failed; SetBlockTags is skipped on each occurrence.", + }, labels).With(labelsAndValues...), } } @@ -121,6 +166,30 @@ func (m *Metrics) IncPathBFailed() { m.PathBFailed.Add(1) } +func (m *Metrics) IncSafeAdvance() { + m.SafeAdvanceTotal.Add(1) +} + +func (m *Metrics) IncFinalizedAdvance() { + m.FinalizedAdvanceTotal.Add(1) +} + +func (m *Metrics) SetSafeL2BlockNumber(n uint64) { + m.SafeL2BlockNumber.Set(float64(n)) +} + +func (m *Metrics) SetFinalizedL2BlockNumber(n uint64) { + m.FinalizedL2BlockNumber.Set(float64(n)) +} + +func (m *Metrics) IncL1ReorgReset() { + m.L1ReorgResetTotal.Add(1) +} + +func (m *Metrics) IncTagInvariantViolation() { + m.TagInvariantViolationTotal.Add(1) +} + func (m *Metrics) Serve(hostname string, port uint64) (*http.Server, error) { mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) diff --git a/node/derivation/static_scan_test.go b/node/derivation/static_scan_test.go new file mode 100644 index 000000000..52b88fc7b --- /dev/null +++ b/node/derivation/static_scan_test.go @@ -0,0 +1,144 @@ +package derivation + +import ( + "io/fs" + "os" + "path/filepath" + "strings" + "testing" +) + +// SPEC-005 section 5.1 static-assertion tests. These guard against regressions where +// someone accidentally re-introduces validator/blocktag references or pulls +// the wrong common package after a refactor. + +// walkNodeRepoSourceFiles walks up from this test file to the morph repo +// root (parent of node/) and yields every .go source file under node/ +// (excluding test files and vendored code). 
+func walkNodeRepoSourceFiles(t *testing.T) (string, []string) { + t.Helper() + + wd, err := os.Getwd() // .../morph/node/derivation + if err != nil { + t.Fatalf("getwd: %v", err) + } + nodeRoot := filepath.Dir(wd) // .../morph/node + + var files []string + err = filepath.WalkDir(nodeRoot, func(path string, d fs.DirEntry, e error) error { + if e != nil { + return e + } + if d.IsDir() { + // Skip vendored / test-fixtures dirs if any; nothing matches today + // but cheap to keep the door closed. + name := d.Name() + if name == "node_modules" || name == "vendor" || name == "ops-morph" { + return filepath.SkipDir + } + return nil + } + if !strings.HasSuffix(path, ".go") { + return nil + } + if strings.HasSuffix(path, "_test.go") { + return nil + } + files = append(files, path) + return nil + }) + if err != nil { + t.Fatalf("walk node tree: %v", err) + } + return nodeRoot, files +} + +func TestNoValidatorReferences(t *testing.T) { + _, files := walkNodeRepoSourceFiles(t) + + // Symbols that the SPEC-005 validator-role removal must keep out of node/. + // We are specifically guarding against accidental re-introduction; the + // patterns are narrow on purpose so legitimate uses (e.g., Tendermint + // consensus validator pubkeys) don't false-positive. + banned := []string{ + "node/validator", // import path + "validator.NewValidator", // factory call + "validator.NewConfig", // config call + "flags.ValidatorEnable", // role flag + "validator.challengeEnable", // legacy flag string + "validator.privateKey", // legacy flag string + "VALIDATOR_PRIVATE_KEY", // legacy envvar + "VALIDATOR_CHALLENGE_ENABLE", // legacy envvar + // We deliberately do NOT ban "ChallengeEnable" / "ChallengeState" + // in source -- they appear in the Rollup contract ABI string in + // node/types/batch.go and are immutable on-chain identifiers we + // must keep in sync with. 
The node-side challenge bypass that + // SPEC-005 removes is keyed by validator.* flags above, which + // uniquely identify the deleted code paths. + } + + for _, f := range files { + b, err := os.ReadFile(f) + if err != nil { + t.Fatalf("read %s: %v", f, err) + } + body := string(b) + for _, p := range banned { + if strings.Contains(body, p) { + t.Errorf("validator residue: %q found in %s", p, f) + } + } + } +} + +func TestNoBlocktagReferences(t *testing.T) { + _, files := walkNodeRepoSourceFiles(t) + + banned := []string{ + "node/blocktag", // import path + "BlockTagService", // service type + "NewBlockTagService", // factory + "BlockTagSafeConfirmations", // flag symbol + "BLOCKTAG_SAFE_CONFIRMATIONS", // envvar + "blocktag.safeConfirmations", // flag name string + "blocktag.DefaultConfig", // config factory + } + + for _, f := range files { + b, err := os.ReadFile(f) + if err != nil { + t.Fatalf("read %s: %v", f, err) + } + body := string(b) + for _, p := range banned { + if strings.Contains(body, p) { + t.Errorf("blocktag residue: %q found in %s", p, f) + } + } + } +} + +// TestPathBUsesCommonBlobPackage guards SPEC-005 section 3.4: Path B must use +// `common/blob` helpers (the same set tx-submitter calls), not the duplicate +// implementations under `common/batch/blob.go`. Codec drift between the two +// would cause permanent versioned hash mismatches. +func TestPathBUsesCommonBlobPackage(t *testing.T) { + body, err := os.ReadFile("verify_path_b.go") + if err != nil { + t.Fatalf("read verify_path_b.go: %v", err) + } + src := string(body) + + if !strings.Contains(src, `"morph-l2/common/blob"`) { + t.Fatalf("verify_path_b.go must import morph-l2/common/blob") + } + // Sanity check the actual call sites -- import is necessary but not + // sufficient; mismatched calls (e.g., commonbatch.CompressBatchBytes) + // would still drift codecs. 
+ required := []string{"commonblob.CompressBatchBytes", "commonblob.MakeBlobTxSidecar"} + for _, sym := range required { + if !strings.Contains(src, sym) { + t.Errorf("verify_path_b.go missing required call %q", sym) + } + } +} diff --git a/node/derivation/tag_advance.go b/node/derivation/tag_advance.go new file mode 100644 index 000000000..69f0b7402 --- /dev/null +++ b/node/derivation/tag_advance.go @@ -0,0 +1,185 @@ +package derivation + +import ( + "context" + "sync" + + "github.com/morph-l2/go-ethereum/common" + eth "github.com/morph-l2/go-ethereum/core/types" + tmlog "github.com/tendermint/tendermint/libs/log" +) + +// tagL2Client narrows the dependency on types.RetryableClient to the two +// methods the tag advancer actually calls. Keeping this local makes +// tagAdvancer trivially mockable from tests without dragging in an authclient +// stack. +type tagL2Client interface { + BlockNumber(ctx context.Context) (uint64, error) + SetBlockTags(ctx context.Context, safe common.Hash, finalized common.Hash) error +} + +// tagAdvancer is the SPEC-005 section 4.7 single source of truth for safe and +// finalized L2 head propagation. It replaces the previous standalone +// polling service: derivation main loop drives `advanceSafe` per +// verified batch; the in-process finalizer subcomponent drives +// `advanceFinalized`. Both paths converge on `flushTags` which enforces the +// `finalized <= safe <= unsafe` invariant before calling the existing +// `RetryableClient.SetBlockTags` engine RPC. +// +// In-memory only by design: SPEC-005 section 4.7.7 -- restart starts from zero and +// derivation refills naturally as it walks its cursor. +type tagAdvancer struct { + mu sync.Mutex + + l2Client tagL2Client + metrics *Metrics + logger tmlog.Logger + + // safe head -- last verified batch's lastL2Block. + safeL2Hash common.Hash + safeL2Number uint64 + safeMaxBatchIndex uint64 + + // finalized head -- L1 finalized derived verified batch's lastL2Block. 
+ finalizedL2Hash common.Hash + finalizedL2Number uint64 + + // Suppress redundant SetBlockTags RPCs (mirrors blocktag's + // lastNotifiedSafeHash / lastNotifiedFinalizedHash semantics). + lastNotifiedSafe common.Hash + lastNotifiedFinalized common.Hash +} + +func newTagAdvancer(l2Client tagL2Client, metrics *Metrics, logger tmlog.Logger) *tagAdvancer { + return &tagAdvancer{ + l2Client: l2Client, + metrics: metrics, + logger: logger.With("component", "tag-advancer"), + } +} + +// advanceSafe is called by the derivation main loop after a batch passes both +// content verification (Path A or Path B) and verifyBatchRoots. It records the +// new safe head and flushes via SetBlockTags. +func (t *tagAdvancer) advanceSafe(ctx context.Context, batchIndex uint64, lastHeader *eth.Header) { + if lastHeader == nil { + return + } + t.mu.Lock() + t.safeL2Hash = lastHeader.Hash() + t.safeL2Number = lastHeader.Number.Uint64() + if batchIndex > t.safeMaxBatchIndex { + t.safeMaxBatchIndex = batchIndex + } + t.metrics.IncSafeAdvance() + t.metrics.SetSafeL2BlockNumber(t.safeL2Number) + t.mu.Unlock() + + t.flushTags(ctx) +} + +// advanceFinalized is called by the finalizer subcomponent each tick if the +// L1 finalized block produces a new finalized batch <= safeMaxBatchIndex. +// finalized never moves backwards; if a lower number is provided we log and +// keep the previous value (SPEC-005 section 4.7.4 monotonicity check). 
+func (t *tagAdvancer) advanceFinalized(ctx context.Context, batchIndex uint64, lastHeader *eth.Header) { + if lastHeader == nil { + return + } + t.mu.Lock() + newNumber := lastHeader.Number.Uint64() + if t.finalizedL2Number != 0 && newNumber < t.finalizedL2Number { + t.logger.Error("finalized monotonicity violated; ignoring", + "prev", t.finalizedL2Number, "next", newNumber) + t.mu.Unlock() + return + } + if newNumber == t.finalizedL2Number && lastHeader.Hash() == t.finalizedL2Hash { + t.mu.Unlock() + return + } + t.finalizedL2Hash = lastHeader.Hash() + t.finalizedL2Number = newNumber + t.metrics.IncFinalizedAdvance() + t.metrics.SetFinalizedL2BlockNumber(t.finalizedL2Number) + t.mu.Unlock() + + _ = batchIndex // currently logged by the finalizer; reserved for future telemetry + t.flushTags(ctx) +} + +// SafeMaxBatchIndex returns the highest verified batch index recorded so far, +// for the finalizer to take min(L1 finalized batch, safe). +func (t *tagAdvancer) SafeMaxBatchIndex() uint64 { + t.mu.Lock() + defer t.mu.Unlock() + return t.safeMaxBatchIndex +} + +// reset clears safe head when the derivation main loop detects an L1 reorg +// and rewinds its cursor. finalized is intentionally NOT reset -- see +// SPEC-005 section 4.7.6: L1 finalized is assumed monotonic, and finalizer.tick will +// re-evaluate on the next iteration. +func (t *tagAdvancer) reset(toBatchIndex uint64) { + t.mu.Lock() + defer t.mu.Unlock() + + t.safeL2Hash = common.Hash{} + t.safeL2Number = 0 + t.safeMaxBatchIndex = toBatchIndex + t.lastNotifiedSafe = common.Hash{} + t.metrics.IncL1ReorgReset() + t.metrics.SetSafeL2BlockNumber(0) + t.logger.Info("tag advancer reset on L1 reorg", "to_batch_index", toBatchIndex) +} + +// flushTags enforces the finalized <= safe <= unsafe invariant and calls +// SetBlockTags exactly once per state change. On invariant violation we log +// error and skip -- no panic, no halt -- matching op-node's +// tryUpdateEngineInternal behaviour. 
+func (t *tagAdvancer) flushTags(ctx context.Context) { + unsafeNum, err := t.l2Client.BlockNumber(ctx) + if err != nil { + t.logger.Info("flushTags: read L2 latest failed", "err", err) + return + } + + t.mu.Lock() + safeHash := t.safeL2Hash + safeNum := t.safeL2Number + finalizedHash := t.finalizedL2Hash + finalizedNum := t.finalizedL2Number + notifiedSafe := t.lastNotifiedSafe + notifiedFinalized := t.lastNotifiedFinalized + t.mu.Unlock() + + if finalizedNum > safeNum { + t.metrics.IncTagInvariantViolation() + t.logger.Error("invariant violation: finalized > safe", + "finalized", finalizedNum, "safe", safeNum) + return + } + if safeNum > unsafeNum { + t.metrics.IncTagInvariantViolation() + t.logger.Error("invariant violation: safe > unsafe", + "safe", safeNum, "unsafe", unsafeNum) + return + } + + if safeHash == notifiedSafe && finalizedHash == notifiedFinalized { + return + } + if safeHash == (common.Hash{}) && finalizedHash == (common.Hash{}) { + return + } + + if err := t.l2Client.SetBlockTags(ctx, safeHash, finalizedHash); err != nil { + t.logger.Error("SetBlockTags failed", "err", err) + return + } + + t.mu.Lock() + t.lastNotifiedSafe = safeHash + t.lastNotifiedFinalized = finalizedHash + t.mu.Unlock() +} diff --git a/node/derivation/tag_advance_test.go b/node/derivation/tag_advance_test.go new file mode 100644 index 000000000..e19ddb131 --- /dev/null +++ b/node/derivation/tag_advance_test.go @@ -0,0 +1,186 @@ +package derivation + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/go-kit/kit/metrics/discard" + "github.com/morph-l2/go-ethereum/common" + eth "github.com/morph-l2/go-ethereum/core/types" + tmlog "github.com/tendermint/tendermint/libs/log" +) + +// fakeTagL2Client implements tagL2Client for unit tests. It records each +// SetBlockTags call so tests can assert on call count and arguments, and +// lets the test set the unsafe upper bound returned by BlockNumber. 
+type fakeTagL2Client struct { + unsafe uint64 + blockNumberErr error + calls []setBlockTagsCall + setErr error +} + +type setBlockTagsCall struct { + safe common.Hash + finalized common.Hash +} + +func (f *fakeTagL2Client) BlockNumber(_ context.Context) (uint64, error) { + if f.blockNumberErr != nil { + return 0, f.blockNumberErr + } + return f.unsafe, nil +} + +func (f *fakeTagL2Client) SetBlockTags(_ context.Context, safe common.Hash, finalized common.Hash) error { + if f.setErr != nil { + return f.setErr + } + f.calls = append(f.calls, setBlockTagsCall{safe: safe, finalized: finalized}) + return nil +} + +// newDiscardMetrics returns a *Metrics whose collectors discard all updates. +// Avoids prometheus default-registry double-registration across multiple +// tests in the same process. +func newDiscardMetrics() *Metrics { + return &Metrics{ + L1SyncHeight: discard.NewGauge(), + RollupL2Height: discard.NewGauge(), + DeriveL2Height: discard.NewGauge(), + BatchStatus: discard.NewGauge(), + LatestBatchIndex: discard.NewGauge(), + SyncedBatchIndex: discard.NewGauge(), + PathBTriggered: discard.NewCounter(), + PathBFailed: discard.NewCounter(), + SafeAdvanceTotal: discard.NewCounter(), + FinalizedAdvanceTotal: discard.NewCounter(), + SafeL2BlockNumber: discard.NewGauge(), + FinalizedL2BlockNumber: discard.NewGauge(), + L1ReorgResetTotal: discard.NewCounter(), + TagInvariantViolationTotal: discard.NewCounter(), + } +} + +func newTestTagAdvancer(t *testing.T, unsafe uint64) (*tagAdvancer, *fakeTagL2Client, *Metrics) { + t.Helper() + fake := &fakeTagL2Client{unsafe: unsafe} + m := newDiscardMetrics() + logger := tmlog.NewNopLogger() + return newTagAdvancer(fake, m, logger), fake, m +} + +func headerAt(num uint64, mark byte) *eth.Header { + h := ð.Header{Number: new(big.Int).SetUint64(num)} + // Mutate ParentHash so different "mark" values produce different block + // hashes -- header.Hash() mixes everything. 
+ h.ParentHash = common.BytesToHash([]byte{mark, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + return h +} + +func TestTagAdvance_Safe_CallsSetBlockTags(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 100) + h := headerAt(50, 'a') + + tagAdv.advanceSafe(context.Background(), 7, h) + + if len(fake.calls) != 1 { + t.Fatalf("expected 1 SetBlockTags call, got %d", len(fake.calls)) + } + if fake.calls[0].safe != h.Hash() { + t.Fatalf("safe hash mismatch") + } + if tagAdv.SafeMaxBatchIndex() != 7 { + t.Fatalf("safeMaxBatchIndex got %d, want 7", tagAdv.SafeMaxBatchIndex()) + } +} + +func TestTagAdvance_DedupSetBlockTags(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 100) + h := headerAt(50, 'a') + + tagAdv.advanceSafe(context.Background(), 7, h) + tagAdv.advanceSafe(context.Background(), 7, h) // identical state + + if len(fake.calls) != 1 { + t.Fatalf("expected dedup to suppress 2nd call; got %d total", len(fake.calls)) + } +} + +func TestTagAdvance_InvariantSafeGtUnsafe_Skips(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 30) // unsafe = 30 + h := headerAt(50, 'a') // safe wants 50 -- invalid + + tagAdv.advanceSafe(context.Background(), 7, h) + + if len(fake.calls) != 0 { + t.Fatalf("expected SetBlockTags skipped on invariant violation, got %d calls", len(fake.calls)) + } +} + +func TestTagAdvance_InvariantFinalizedGtSafe_Skips(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 200) + + // safe at 50, finalized would be 80 -> finalized > safe. + tagAdv.advanceSafe(context.Background(), 5, headerAt(50, 'a')) + // reset the call recorder so we only inspect the finalized call. 
+ fake.calls = nil + + tagAdv.advanceFinalized(context.Background(), 6, headerAt(80, 'b')) + + if len(fake.calls) != 0 { + t.Fatalf("expected SetBlockTags skipped on finalized > safe; got %d calls", len(fake.calls)) + } +} + +func TestTagAdvance_FinalizedMonotonic(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 200) + tagAdv.advanceSafe(context.Background(), 10, headerAt(120, 'a')) + fake.calls = nil + + tagAdv.advanceFinalized(context.Background(), 8, headerAt(100, 'b')) + if got := tagAdv.finalizedL2Number; got != 100 { + t.Fatalf("finalized first advance: got %d, want 100", got) + } + + // Second advance with smaller number should be ignored. + prevHash := tagAdv.finalizedL2Hash + tagAdv.advanceFinalized(context.Background(), 7, headerAt(80, 'c')) + if tagAdv.finalizedL2Number != 100 || tagAdv.finalizedL2Hash != prevHash { + t.Fatalf("finalized regressed: number=%d, hash unchanged=%v", + tagAdv.finalizedL2Number, tagAdv.finalizedL2Hash == prevHash) + } +} + +func TestTagAdvance_L1ReorgReset(t *testing.T) { + tagAdv, _, _ := newTestTagAdvancer(t, 200) + tagAdv.advanceSafe(context.Background(), 10, headerAt(120, 'a')) + + tagAdv.reset(8) + + if tagAdv.safeL2Number != 0 { + t.Fatalf("safeL2Number not cleared after reset: got %d", tagAdv.safeL2Number) + } + if tagAdv.safeL2Hash != (common.Hash{}) { + t.Fatalf("safeL2Hash not cleared after reset") + } + if got := tagAdv.SafeMaxBatchIndex(); got != 8 { + t.Fatalf("safeMaxBatchIndex after reset: got %d, want 8", got) + } + if tagAdv.lastNotifiedSafe != (common.Hash{}) { + t.Fatalf("lastNotifiedSafe not cleared after reset") + } +} + +func TestTagAdvance_BlockNumberError_SkipsFlush(t *testing.T) { + tagAdv, fake, _ := newTestTagAdvancer(t, 100) + fake.blockNumberErr = errors.New("rpc down") + + tagAdv.advanceSafe(context.Background(), 7, headerAt(50, 'a')) + + if len(fake.calls) != 0 { + t.Fatalf("expected SetBlockTags skipped when BlockNumber fails; got %d", len(fake.calls)) + } +} diff --git 
a/node/derivation/verify.go b/node/derivation/verify.go index c69b1b345..af23bc77d 100644 --- a/node/derivation/verify.go +++ b/node/derivation/verify.go @@ -12,7 +12,7 @@ import ( // verifyBatchRoots verifies the local state root and withdrawal root against the // values recorded in the L1 commit batch tx calldata. // -// SPEC-005 §3.4 invariant: this check is independent of blob data — both +// SPEC-005 section 3.4 invariant: this check is independent of blob data -- both // batchInfo.root (postStateRoot) and batchInfo.withdrawalRoot are extracted // from L1 calldata at parse time, so this function runs identically under // Path A (online beacon blob) and Path B (local-rebuild) verification modes. diff --git a/node/derivation/verify_path_b.go b/node/derivation/verify_path_b.go index 99f154d05..f4dca1b9a 100644 --- a/node/derivation/verify_path_b.go +++ b/node/derivation/verify_path_b.go @@ -13,7 +13,7 @@ import ( commonblob "morph-l2/common/blob" ) -// SPEC-005 §4 Path B: blob-independent batch content verification. +// SPEC-005 section 4 Path B: blob-independent batch content verification. // // In VerifyModePathB the node does not pull blobs from the beacon chain. // Instead it reads the L2 blocks in the batch range from local storage, @@ -30,7 +30,7 @@ import ( // must restart with the alternate mode. // fetchBatchInfoPathB pulls the L1 commitBatch tx, decodes its calldata, and -// populates a BatchInfo using only the calldata + tx blob hashes — no beacon +// populates a BatchInfo using only the calldata + tx blob hashes -- no beacon // blob fetch. Returned BatchInfo is sufficient for verifyBatchContentPathB // and verifyBatchRoots. 
func (d *Derivation) fetchBatchInfoPathB(ctx context.Context, txHash common.Hash, blockNumber uint64) (*BatchInfo, error) { diff --git a/node/flags/flags.go b/node/flags/flags.go index 2ac941b6c..b3f63e015 100644 --- a/node/flags/flags.go +++ b/node/flags/flags.go @@ -162,12 +162,6 @@ var ( EnvVar: prefixEnvVar("MOCK_SEQUENCER"), } - ValidatorEnable = cli.BoolFlag{ - Name: "validator", - Usage: "Enable the validator mode", - EnvVar: prefixEnvVar("VALIDATOR"), - } - // derivation RollupContractAddress = cli.StringFlag{ Name: "derivation.rollupAddress", @@ -205,14 +199,6 @@ var ( EnvVar: prefixEnvVar("DERIVATION_FETCH_BLOCK_RANGE"), } - // BlockTag options - BlockTagSafeConfirmations = cli.Uint64Flag{ - Name: "blocktag.safeConfirmations", - Usage: "Number of L1 blocks to wait before considering a batch as safe", - EnvVar: prefixEnvVar("BLOCKTAG_SAFE_CONFIRMATIONS"), - Value: 10, - } - // L1 Sequencer options L1SequencerContractAddr = cli.StringFlag{ Name: "l1.sequencerContract", @@ -259,6 +245,13 @@ var ( EnvVar: prefixEnvVar("DERIVATION_VERIFY_MODE"), Value: "pathA", } + + DerivationFinalizerInterval = cli.DurationFlag{ + Name: "derivation.finalizer-interval", + Usage: "Polling cadence for the SPEC-005 finalizer subcomponent (queries L1 finalized -> Rollup.LastCommittedBatchIndex). 
Default 30s.", + EnvVar: prefixEnvVar("DERIVATION_FINALIZER_INTERVAL"), + Value: 30 * time.Second, + } // Logger LogLevel = &cli.StringFlag{ Name: "log.level", @@ -344,7 +337,6 @@ var Flags = []cli.Flag{ DevSequencer, TendermintConfigPath, MockEnabled, - ValidatorEnable, // derivation RollupContractAddress, @@ -355,11 +347,9 @@ var Flags = []cli.Flag{ DerivationFetchBlockRange, DerivationConfirmations, DerivationVerifyMode, + DerivationFinalizerInterval, L1BeaconAddr, - // blocktag options - BlockTagSafeConfirmations, - // L1 Sequencer options L1SequencerContractAddr, L1SyncLagThreshold, diff --git a/node/ops-morph/docker-compose-validator.yml b/node/ops-morph/docker-compose-validator.yml deleted file mode 100644 index 0b0bc4d63..000000000 --- a/node/ops-morph/docker-compose-validator.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: '3.8' - -volumes: - validator_node_data: - -services: - - validator_node: - build: - context: .. - dockerfile: ./ops-morph/Dockerfile - image: morph-node:latest - ports: - - "26660:26660" - environment: - - EMPTY_BLOCK_DELAY=true - - MORPH_NODE_L2_ETH_RPC=http://host.docker.internal:7545 - - MORPH_NODE_L2_ENGINE_RPC=http://host.docker.internal:7551 - - MORPH_NODE_L2_ENGINE_AUTH=jwt-secret.txt - - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=0xcf7ed3acca5a467e9e704c703e8d87f634fb0fc9 - ## todo need to replace it to a public network - - MORPH_NODE_L1_ETH_RPC=http://host.docker.internal:9545 - - MORPH_NODE_L1_ETH_BEACON_RPC=http://host.docker.internal:3500 - - MORPH_NODE_ROLLUP_ADDRESS=0xa513e6e4b8f2a923d98304ec87f64353c4d5c853 - - MORPH_NODE_DERIVATION_START_HEIGHT=1 - - MORPH_NODE_DERIVATION_FETCH_BLOCK_RANGE=1000 - - MORPH_NODE_L1_CHAIN_ID=900 - - MORPH_NODE_VALIDATOR=true - - MORPH_NODE_MOCK_SEQUENCER=false - - MORPH_NODE_L1_CONFIRMATIONS=1 - - MORPH_NODE_METRICS_SERVER_ENABLE=true - - MORPH_NODE_METRICS_PORT=26660 - - MORPH_NODE_SYNC_START_HEIGHT=1 - volumes: - - "validator_node_data:${NODE_DATA_DIR}" - - 
"${PWD}/jwt-secret.txt:${JWT_SECRET_PATH}" - command: > - morphnode - --validator - --home $NODE_DATA_DIR diff --git a/ops/docker/docker-compose-4nodes.yml b/ops/docker/docker-compose-4nodes.yml index 88480b329..209d9b477 100644 --- a/ops/docker/docker-compose-4nodes.yml +++ b/ops/docker/docker-compose-4nodes.yml @@ -14,8 +14,6 @@ volumes: node_data_2: node_data_3: sentry_node_data: - validator_el_data: - validator_node_data: layer1-el-data: layer1-cl-data: layer1-vc-data: @@ -390,66 +388,6 @@ services: command: > morphnode --home $NODE_DATA_DIR - - - validator-el: - container_name: validator-el - image: morph-geth:latest - depends_on: - tx-submitter-0: - condition: service_started - ports: - - "7545:8545" - - "7546:8546" - - "7551:8551" - healthcheck: - test: ["CMD-SHELL", "wget -qO- --header='Content-Type: application/json' --post-data='{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' http://localhost:8545 | grep -q '\"result\"'"] - interval: 30s - timeout: 5s - retries: 3 - volumes: - - "validator_el_data:${GETH_DATA_DIR}" - - "${PWD}/jwt-secret.txt:${JWT_SECRET_PATH}" - - "${PWD}/../l2-genesis/.devnet/genesis-l2.json:/genesis.json" - entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments - - "/bin/bash" - - "/entrypoint.sh" - - validator_node: - container_name: validator_node - depends_on: - validator-el: - condition: service_started - node-0: - condition: service_started - image: morph-node:latest - ports: - - "26660" - environment: - - MORPH_NODE_L2_ETH_RPC=http://validator-el:8545 - - MORPH_NODE_L2_ENGINE_RPC=http://validator-el:8551 - - MORPH_NODE_L2_ENGINE_AUTH=${JWT_SECRET_PATH} - ## todo need to replace it to a public network - - MORPH_NODE_L1_ETH_RPC=${L1_ETH_RPC} - - MORPH_NODE_L1_ETH_BEACON_RPC=${L1_BEACON_CHAIN_RPC} - - MORPH_NODE_SYNC_DEPOSIT_CONTRACT_ADDRESS=${MORPH_PORTAL:-0x6900000000000000000000000000000000000001} - - 
MORPH_NODE_ROLLUP_ADDRESS=${MORPH_ROLLUP:-0x6900000000000000000000000000000000000010} - - MORPH_NODE_DERIVATION_START_HEIGHT=1 - - MORPH_NODE_SYNC_START_HEIGHT=1 - - MORPH_NODE_DERIVATION_FETCH_BLOCK_RANGE=5000 - - MORPH_NODE_L1_CHAIN_ID=900 - - MORPH_NODE_VALIDATOR=true - - MORPH_NODE_MOCK_SEQUENCER=false - - MORPH_NODE_L1_CONFIRMATIONS=1 - - MORPH_NODE_METRICS_SERVER_ENABLE=true - - MORPH_NODE_METRICS_PORT=26660 - volumes: - - "validator_node_data:${NODE_DATA_DIR}" - - "${PWD}/jwt-secret.txt:${JWT_SECRET_PATH}" - command: > - morphnode - --validator - --home $NODE_DATA_DIR tx-submitter-0: container_name: tx-submitter-0 diff --git a/ops/docker/docker-compose-reth.yml b/ops/docker/docker-compose-reth.yml index fecc42f89..f66b471e8 100644 --- a/ops/docker/docker-compose-reth.yml +++ b/ops/docker/docker-compose-reth.yml @@ -40,8 +40,3 @@ services: sentry-el-0: <<: *reth-service build: !reset null - - validator-el: - <<: *reth-service - healthcheck: - disable: true From b9ad14ec60bae3a70cead94fd6ff185fb78c26b8 Mon Sep 17 00:00:00 2001 From: corey Date: Wed, 13 May 2026 19:29:42 +0800 Subject: [PATCH 15/17] test(derivation): SPEC-005 verify-mode + Path B unit tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add 9 unit-test cases covering SPEC-005 §5.1 entries that don't require the full L1 / beacon / authclient mock stack: - config_test.go (3): TestVerifyMode_DefaultIsPathA / _AcceptsPathB / _RejectsUnknown -- exercise validateAndDefaultVerifyMode and assert DefaultConfig().VerifyMode pins to pathA. 
- verify_path_b_test.go (6): round-trip OK V1 + V2 (using empty L2 blocks against the same encoder pipeline as the L1 oracle), versioned hash mismatch, local block missing, local block read error, three invalid-input branches (firstBlock=0, lastBlock < firstBlock, no blob hashes). --- node/derivation/config.go | 27 ++- node/derivation/config_test.go | 46 +++++ node/derivation/verify_path_b.go | 36 ++-- node/derivation/verify_path_b_test.go | 246 ++++++++++++++++++++++++++ 4 files changed, 336 insertions(+), 19 deletions(-) create mode 100644 node/derivation/config_test.go create mode 100644 node/derivation/verify_path_b_test.go diff --git a/node/derivation/config.go b/node/derivation/config.go index 47d66cc13..a913cb054 100644 --- a/node/derivation/config.go +++ b/node/derivation/config.go @@ -44,6 +44,21 @@ const ( DefaultFinalizerInterval = 30 * time.Second ) +// validateAndDefaultVerifyMode normalises an empty VerifyMode to the default +// and rejects unknown values. Extracted from SetCliContext so the validation +// can be unit-tested without building a cli.Context. 
+func validateAndDefaultVerifyMode(s string) (string, error) { + switch s { + case VerifyModePathA, VerifyModePathB: + return s, nil + case "": + return DefaultVerifyMode, nil + default: + return "", fmt.Errorf("invalid derivation.verify-mode %q (must be %q or %q)", + s, VerifyModePathA, VerifyModePathB) + } +} + type Config struct { L1 *types.L1Config `json:"l1"` L2 *types.L2Config `json:"l2"` @@ -130,15 +145,11 @@ func (c *Config) SetCliContext(ctx *cli.Context) error { if ctx.GlobalIsSet(flags.DerivationVerifyMode.Name) { c.VerifyMode = ctx.GlobalString(flags.DerivationVerifyMode.Name) } - switch c.VerifyMode { - case VerifyModePathA, VerifyModePathB: - // ok - case "": - c.VerifyMode = DefaultVerifyMode - default: - return fmt.Errorf("invalid derivation.verify-mode %q (must be %q or %q)", - c.VerifyMode, VerifyModePathA, VerifyModePathB) + normalized, err := validateAndDefaultVerifyMode(c.VerifyMode) + if err != nil { + return err } + c.VerifyMode = normalized if ctx.GlobalIsSet(flags.DerivationFinalizerInterval.Name) { c.FinalizerInterval = ctx.GlobalDuration(flags.DerivationFinalizerInterval.Name) diff --git a/node/derivation/config_test.go b/node/derivation/config_test.go new file mode 100644 index 000000000..7cbce6b7e --- /dev/null +++ b/node/derivation/config_test.go @@ -0,0 +1,46 @@ +package derivation + +import ( + "strings" + "testing" +) + +// SPEC-005 section 4.2 + 5.1 verify-mode dispatch tests. The mode is bound at +// startup; the validation switch in SetCliContext rejects unknown values +// fail-fast so a typo never reaches the main loop. 
+ +func TestVerifyMode_DefaultIsPathA(t *testing.T) { + if got := DefaultConfig().VerifyMode; got != VerifyModePathA { + t.Fatalf("DefaultConfig().VerifyMode = %q, want %q", got, VerifyModePathA) + } + + got, err := validateAndDefaultVerifyMode("") + if err != nil { + t.Fatalf("empty verify-mode rejected: %v", err) + } + if got != VerifyModePathA { + t.Fatalf("empty verify-mode normalised to %q, want %q", got, VerifyModePathA) + } +} + +func TestVerifyMode_AcceptsPathB(t *testing.T) { + got, err := validateAndDefaultVerifyMode(VerifyModePathB) + if err != nil { + t.Fatalf("pathB rejected: %v", err) + } + if got != VerifyModePathB { + t.Fatalf("pathB normalised to %q, want %q", got, VerifyModePathB) + } +} + +func TestVerifyMode_RejectsUnknown(t *testing.T) { + if _, err := validateAndDefaultVerifyMode("pathC"); err == nil { + t.Fatal("expected error on unknown verify-mode, got nil") + } else if !strings.Contains(err.Error(), "pathC") { + t.Fatalf("error should mention the offending value; got: %v", err) + } + + if _, err := validateAndDefaultVerifyMode("PATHA"); err == nil { + t.Fatal("verify-mode is case-sensitive; uppercase should be rejected") + } +} diff --git a/node/derivation/verify_path_b.go b/node/derivation/verify_path_b.go index f4dca1b9a..cb8fb4856 100644 --- a/node/derivation/verify_path_b.go +++ b/node/derivation/verify_path_b.go @@ -57,20 +57,34 @@ func (d *Derivation) fetchBatchInfoPathB(ctx context.Context, txHash common.Hash return bi, nil } +// pathBBlockReader is the minimal L2 client surface verifyPathBContent +// needs. Narrowed from types.RetryableClient so unit tests can exercise +// the full Path B encoding pipeline without an authclient stack. 
+type pathBBlockReader interface { + BlockByNumber(ctx context.Context, number *big.Int) (*eth.Block, error) +} + // verifyBatchContentPathB rebuilds blob versioned hashes from local L2 // blocks in the [batchInfo.firstBlockNumber, batchInfo.lastBlockNumber] // range and compares them against batchInfo.blobHashes (taken from the // L1 commitBatch tx). Returns nil on match. func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *BatchInfo) error { - d.metrics.IncPathBTriggered() + return verifyPathBContent(ctx, d.l2Client, d.metrics, batchInfo) +} + +// verifyPathBContent is the testable core of Path B verification. It is +// extracted from the Derivation method above so tests can supply a fake +// pathBBlockReader. Behaviour and error messages are unchanged. +func verifyPathBContent(ctx context.Context, reader pathBBlockReader, metrics *Metrics, batchInfo *BatchInfo) error { + metrics.IncPathBTriggered() if batchInfo.firstBlockNumber == 0 || batchInfo.lastBlockNumber < batchInfo.firstBlockNumber { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: invalid block range [%d, %d]", batchInfo.firstBlockNumber, batchInfo.lastBlockNumber) } if len(batchInfo.blobHashes) == 0 { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: no blob hashes recorded for batch %d", batchInfo.batchIndex) } @@ -78,19 +92,19 @@ func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *Bat totalL1MessagePopped := batchInfo.parentTotalL1MessagePopped for n := batchInfo.firstBlockNumber; n <= batchInfo.lastBlockNumber; n++ { - block, err := d.l2Client.BlockByNumber(ctx, big.NewInt(int64(n))) + block, err := reader.BlockByNumber(ctx, big.NewInt(int64(n))) if err != nil { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: read local block %d failed: %w", n, err) } if block == nil { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: 
local block %d missing", n) } txsPayload, l1TxHashes, newTotal, l2TxNum, err := commonbatch.ParsingTxs(block.Transactions(), totalL1MessagePopped) if err != nil { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: parsingTxs failed at block %d: %w", n, err) } l1MsgNum := int(newTotal - totalL1MessagePopped) @@ -110,7 +124,7 @@ func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *Bat compressed, err := commonblob.CompressBatchBytes(payload) if err != nil { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: compress failed: %w", err) } @@ -121,19 +135,19 @@ func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *Bat // with the wrong blob count and a confusing hash mismatch later. sidecar, err := commonblob.MakeBlobTxSidecar(compressed, len(batchInfo.blobHashes)) if err != nil { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: build sidecar failed: %w", err) } rebuilt := sidecar.BlobHashes() if len(rebuilt) != len(batchInfo.blobHashes) { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: blob count mismatch (rebuilt=%d, l1=%d)", len(rebuilt), len(batchInfo.blobHashes)) } for i := range rebuilt { if rebuilt[i] != batchInfo.blobHashes[i] { - d.metrics.IncPathBFailed() + metrics.IncPathBFailed() return fmt.Errorf("path B: versioned hash mismatch at index %d (rebuilt=%s, l1=%s)", i, rebuilt[i].Hex(), batchInfo.blobHashes[i].Hex()) } diff --git a/node/derivation/verify_path_b_test.go b/node/derivation/verify_path_b_test.go new file mode 100644 index 000000000..0272e24d8 --- /dev/null +++ b/node/derivation/verify_path_b_test.go @@ -0,0 +1,246 @@ +package derivation + +import ( + "context" + "errors" + "math/big" + "strings" + "testing" + + "github.com/morph-l2/go-ethereum/common" + eth "github.com/morph-l2/go-ethereum/core/types" + + commonbatch "morph-l2/common/batch" + commonblob 
"morph-l2/common/blob" +) + +// SPEC-005 section 4.3 + 5.1 Path B core encoding tests. These cover the +// verify_path_b.go logic via the extracted verifyPathBContent free function +// and a fake pathBBlockReader, avoiding the full L1 / beacon / authclient +// stack required by Derivation construction. +// +// The round-trip tests use makeEmptyL2Block (zero L2 txs / zero L1 messages) +// so ParsingTxs returns an empty payload. The codec is still exercised end +// to end -- BatchData.Append, V1/V2 payload selection, CompressBatchBytes, +// MakeBlobTxSidecar, BlobHashes() -- and the resulting hashes round-trip +// against the same code path. Tx-bearing blocks add no Path B coverage that +// the existing common/batch unit tests don't already provide. + +type fakePathBBlockReader struct { + blocks map[uint64]*eth.Block + errs map[uint64]error +} + +func (f *fakePathBBlockReader) BlockByNumber(_ context.Context, n *big.Int) (*eth.Block, error) { + num := n.Uint64() + if e, ok := f.errs[num]; ok && e != nil { + return nil, e + } + return f.blocks[num], nil // nil block when not registered -- exercises the "missing" branch +} + +// makeEmptyL2Block builds a header-only block. ParsingTxs / BuildBlockContext +// only read fields verifyPathBContent already owns; no signer / state / receipts +// machinery is required. +func makeEmptyL2Block(num uint64) *eth.Block { + h := ð.Header{ + Number: new(big.Int).SetUint64(num), + Time: 1700000000 + num, + GasLimit: 30_000_000, + BaseFee: big.NewInt(0), + } + return eth.NewBlockWithHeader(h) +} + +// rebuildExpectedBlobHashes runs the same encoding pipeline as +// verifyPathBContent against the supplied blocks and returns the versioned +// hashes a real L1 commitBatch tx would have recorded for that batch. The +// round-trip tests use this as the L1-side oracle. 
+func rebuildExpectedBlobHashes(t *testing.T, blocks []*eth.Block, version, parentTotalL1Popped uint64, blobCount int) []common.Hash { + t.Helper() + + bd := commonbatch.NewBatchData() + total := parentTotalL1Popped + for _, b := range blocks { + txsPayload, l1Hashes, newTotal, l2TxNum, err := commonbatch.ParsingTxs(b.Transactions(), total) + if err != nil { + t.Fatalf("ParsingTxs(block %d): %v", b.NumberU64(), err) + } + l1MsgNum := int(newTotal - total) + bd.Append(commonbatch.BuildBlockContext(b.Header(), l2TxNum+l1MsgNum, l1MsgNum), txsPayload, l1Hashes) + total = newTotal + } + + var payload []byte + if version >= 2 { + payload = bd.TxsPayloadV2() + } else { + payload = bd.TxsPayload() + } + + compressed, err := commonblob.CompressBatchBytes(payload) + if err != nil { + t.Fatalf("CompressBatchBytes: %v", err) + } + sidecar, err := commonblob.MakeBlobTxSidecar(compressed, blobCount) + if err != nil { + t.Fatalf("MakeBlobTxSidecar: %v", err) + } + return sidecar.BlobHashes() +} + +func TestPathB_RoundTripOK_V1(t *testing.T) { + blocks := []*eth.Block{makeEmptyL2Block(10), makeEmptyL2Block(11), makeEmptyL2Block(12)} + hashes := rebuildExpectedBlobHashes(t, blocks, 1, 0, 1) + + reader := &fakePathBBlockReader{blocks: map[uint64]*eth.Block{ + 10: blocks[0], 11: blocks[1], 12: blocks[2], + }} + bi := &BatchInfo{ + batchIndex: 7, + version: 1, + firstBlockNumber: 10, + lastBlockNumber: 12, + parentTotalL1MessagePopped: 0, + blobHashes: hashes, + } + + if err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), bi); err != nil { + t.Fatalf("V1 round-trip failed: %v", err) + } +} + +func TestPathB_RoundTripOK_V2(t *testing.T) { + blocks := []*eth.Block{makeEmptyL2Block(20), makeEmptyL2Block(21)} + hashes := rebuildExpectedBlobHashes(t, blocks, 2, 5, 1) + + reader := &fakePathBBlockReader{blocks: map[uint64]*eth.Block{ + 20: blocks[0], 21: blocks[1], + }} + bi := &BatchInfo{ + batchIndex: 8, + version: 2, + firstBlockNumber: 20, + lastBlockNumber: 
21, + parentTotalL1MessagePopped: 5, + blobHashes: hashes, + } + + if err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), bi); err != nil { + t.Fatalf("V2 round-trip failed: %v", err) + } +} + +func TestPathB_VersionedHashMismatch(t *testing.T) { + blocks := []*eth.Block{makeEmptyL2Block(10)} + hashes := rebuildExpectedBlobHashes(t, blocks, 1, 0, 1) + // Flip a single byte so the rebuilt hash cannot possibly match. + tampered := make([]common.Hash, len(hashes)) + copy(tampered, hashes) + tampered[0][0] ^= 0xff + + reader := &fakePathBBlockReader{blocks: map[uint64]*eth.Block{10: blocks[0]}} + bi := &BatchInfo{ + batchIndex: 9, + version: 1, + firstBlockNumber: 10, + lastBlockNumber: 10, + parentTotalL1MessagePopped: 0, + blobHashes: tampered, + } + + err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), bi) + if err == nil { + t.Fatal("expected versioned hash mismatch error, got nil") + } + if !strings.Contains(err.Error(), "versioned hash mismatch") { + t.Fatalf("error should mention 'versioned hash mismatch'; got: %v", err) + } +} + +func TestPathB_LocalBlockMissing(t *testing.T) { + // Pre-build hashes that match a 2-block batch, then deliberately omit + // block 11 from the reader so verifyPathBContent observes it as nil. 
+ blocks := []*eth.Block{makeEmptyL2Block(10), makeEmptyL2Block(11)} + hashes := rebuildExpectedBlobHashes(t, blocks, 1, 0, 1) + + reader := &fakePathBBlockReader{blocks: map[uint64]*eth.Block{10: blocks[0]}} + bi := &BatchInfo{ + batchIndex: 11, + version: 1, + firstBlockNumber: 10, + lastBlockNumber: 11, + parentTotalL1MessagePopped: 0, + blobHashes: hashes, + } + + err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), bi) + if err == nil { + t.Fatal("expected local block missing error, got nil") + } + if !strings.Contains(err.Error(), "missing") { + t.Fatalf("error should mention 'missing'; got: %v", err) + } +} + +func TestPathB_LocalBlockReadError(t *testing.T) { + blocks := []*eth.Block{makeEmptyL2Block(10)} + hashes := rebuildExpectedBlobHashes(t, blocks, 1, 0, 1) + + reader := &fakePathBBlockReader{ + blocks: map[uint64]*eth.Block{10: blocks[0]}, + errs: map[uint64]error{10: errors.New("rpc down")}, + } + bi := &BatchInfo{ + batchIndex: 12, + version: 1, + firstBlockNumber: 10, + lastBlockNumber: 10, + blobHashes: hashes, + } + + err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), bi) + if err == nil { + t.Fatal("expected wrapped read error, got nil") + } + if !strings.Contains(err.Error(), "read local block") { + t.Fatalf("error should mention 'read local block'; got: %v", err) + } +} + +func TestPathB_RejectsInvalidInputs(t *testing.T) { + cases := []struct { + name string + bi *BatchInfo + wantSub string + }{ + { + name: "firstBlockNumber zero", + bi: &BatchInfo{firstBlockNumber: 0, lastBlockNumber: 5, blobHashes: []common.Hash{{}}}, + wantSub: "invalid block range", + }, + { + name: "last < first", + bi: &BatchInfo{firstBlockNumber: 10, lastBlockNumber: 9, blobHashes: []common.Hash{{}}}, + wantSub: "invalid block range", + }, + { + name: "empty blobHashes", + bi: &BatchInfo{firstBlockNumber: 5, lastBlockNumber: 5, blobHashes: nil}, + wantSub: "no blob hashes", + }, + } + + for _, tc := range cases { + 
t.Run(tc.name, func(t *testing.T) { + reader := &fakePathBBlockReader{} + err := verifyPathBContent(context.Background(), reader, newDiscardMetrics(), tc.bi) + if err == nil { + t.Fatal("expected validation error, got nil") + } + if !strings.Contains(err.Error(), tc.wantSub) { + t.Fatalf("error should mention %q; got: %v", tc.wantSub, err) + } + }) + } +} From 6b096f8e0e1cd606b2aba1017b1b8111cf2b9edb Mon Sep 17 00:00:00 2001 From: corey Date: Wed, 13 May 2026 20:08:18 +0800 Subject: [PATCH 16/17] style(derivation): fix gofmt + misspell lint failures CI golangci-lint flagged two categories on the SPEC-005 derivation files: - gofmt: metrics.go, static_scan_test.go, tag_advance_test.go, verify_path_b.go -- alignment / spacing drift. Auto-fixed with gofmt -w; no semantic changes. - misspell (US English): four "behaviour" -> "behavior" sites in config.go (added in b9ad14ec), finalizer.go (a73639de), tag_advance.go (a73639de), and verify_path_b.go (b9ad14ec). go test ./node/derivation/... still passes 22 cases (incl. the 9 added in b9ad14ec). No symbols renamed; comments only. Co-Authored-By: Claude Opus 4.7 (1M context) --- node/derivation/config.go | 2 +- node/derivation/finalizer.go | 2 +- node/derivation/metrics.go | 12 ++++++------ node/derivation/static_scan_test.go | 30 ++++++++++++++--------------- node/derivation/tag_advance.go | 2 +- node/derivation/tag_advance_test.go | 8 ++++---- node/derivation/verify_path_b.go | 3 +-- 7 files changed, 29 insertions(+), 30 deletions(-) diff --git a/node/derivation/config.go b/node/derivation/config.go index a913cb054..6896f7315 100644 --- a/node/derivation/config.go +++ b/node/derivation/config.go @@ -31,7 +31,7 @@ const ( DefaultLogProgressInterval = time.Second * 10 // VerifyMode values (SPEC-005 section 4.2). Selected at startup; not switchable - // at runtime. Default is VerifyModePathA which preserves current behaviour. + // at runtime. Default is VerifyModePathA which preserves current behavior. 
VerifyModePathA = "pathA" VerifyModePathB = "pathB" diff --git a/node/derivation/finalizer.go b/node/derivation/finalizer.go index d2dc42841..89b9b9656 100644 --- a/node/derivation/finalizer.go +++ b/node/derivation/finalizer.go @@ -65,7 +65,7 @@ func (f *finalizer) run() { defer t.Stop() // Run once immediately so the first tag flush doesn't wait a full - // interval after startup; matches blocktag's `initialize()` behaviour. + // interval after startup; matches blocktag's `initialize()` behavior. f.tick() for { diff --git a/node/derivation/metrics.go b/node/derivation/metrics.go index 237341358..3659da1c2 100644 --- a/node/derivation/metrics.go +++ b/node/derivation/metrics.go @@ -33,12 +33,12 @@ type Metrics struct { // SPEC-005 section 4.7 Tag management metrics. Replace the (previously absent) // blocktag instrumentation; on-call alerts should now key off these. - SafeAdvanceTotal metrics.Counter - FinalizedAdvanceTotal metrics.Counter - SafeL2BlockNumber metrics.Gauge - FinalizedL2BlockNumber metrics.Gauge - L1ReorgResetTotal metrics.Counter - TagInvariantViolationTotal metrics.Counter + SafeAdvanceTotal metrics.Counter + FinalizedAdvanceTotal metrics.Counter + SafeL2BlockNumber metrics.Gauge + FinalizedL2BlockNumber metrics.Gauge + L1ReorgResetTotal metrics.Counter + TagInvariantViolationTotal metrics.Counter } func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { diff --git a/node/derivation/static_scan_test.go b/node/derivation/static_scan_test.go index 52b88fc7b..d075fe137 100644 --- a/node/derivation/static_scan_test.go +++ b/node/derivation/static_scan_test.go @@ -61,14 +61,14 @@ func TestNoValidatorReferences(t *testing.T) { // patterns are narrow on purpose so legitimate uses (e.g., Tendermint // consensus validator pubkeys) don't false-positive. 
banned := []string{ - "node/validator", // import path - "validator.NewValidator", // factory call - "validator.NewConfig", // config call - "flags.ValidatorEnable", // role flag - "validator.challengeEnable", // legacy flag string - "validator.privateKey", // legacy flag string - "VALIDATOR_PRIVATE_KEY", // legacy envvar - "VALIDATOR_CHALLENGE_ENABLE", // legacy envvar + "node/validator", // import path + "validator.NewValidator", // factory call + "validator.NewConfig", // config call + "flags.ValidatorEnable", // role flag + "validator.challengeEnable", // legacy flag string + "validator.privateKey", // legacy flag string + "VALIDATOR_PRIVATE_KEY", // legacy envvar + "VALIDATOR_CHALLENGE_ENABLE", // legacy envvar // We deliberately do NOT ban "ChallengeEnable" / "ChallengeState" // in source -- they appear in the Rollup contract ABI string in // node/types/batch.go and are immutable on-chain identifiers we @@ -95,13 +95,13 @@ func TestNoBlocktagReferences(t *testing.T) { _, files := walkNodeRepoSourceFiles(t) banned := []string{ - "node/blocktag", // import path - "BlockTagService", // service type - "NewBlockTagService", // factory - "BlockTagSafeConfirmations", // flag symbol - "BLOCKTAG_SAFE_CONFIRMATIONS", // envvar - "blocktag.safeConfirmations", // flag name string - "blocktag.DefaultConfig", // config factory + "node/blocktag", // import path + "BlockTagService", // service type + "NewBlockTagService", // factory + "BlockTagSafeConfirmations", // flag symbol + "BLOCKTAG_SAFE_CONFIRMATIONS", // envvar + "blocktag.safeConfirmations", // flag name string + "blocktag.DefaultConfig", // config factory } for _, f := range files { diff --git a/node/derivation/tag_advance.go b/node/derivation/tag_advance.go index 69f0b7402..991421e1f 100644 --- a/node/derivation/tag_advance.go +++ b/node/derivation/tag_advance.go @@ -136,7 +136,7 @@ func (t *tagAdvancer) reset(toBatchIndex uint64) { // flushTags enforces the finalized <= safe <= unsafe invariant and calls // 
SetBlockTags exactly once per state change. On invariant violation we log // error and skip -- no panic, no halt -- matching op-node's -// tryUpdateEngineInternal behaviour. +// tryUpdateEngineInternal behavior. func (t *tagAdvancer) flushTags(ctx context.Context) { unsafeNum, err := t.l2Client.BlockNumber(ctx) if err != nil { diff --git a/node/derivation/tag_advance_test.go b/node/derivation/tag_advance_test.go index e19ddb131..9b0d6a75d 100644 --- a/node/derivation/tag_advance_test.go +++ b/node/derivation/tag_advance_test.go @@ -16,10 +16,10 @@ import ( // SetBlockTags call so tests can assert on call count and arguments, and // lets the test set the unsafe upper bound returned by BlockNumber. type fakeTagL2Client struct { - unsafe uint64 + unsafe uint64 blockNumberErr error - calls []setBlockTagsCall - setErr error + calls []setBlockTagsCall + setErr error } type setBlockTagsCall struct { @@ -111,7 +111,7 @@ func TestTagAdvance_DedupSetBlockTags(t *testing.T) { func TestTagAdvance_InvariantSafeGtUnsafe_Skips(t *testing.T) { tagAdv, fake, _ := newTestTagAdvancer(t, 30) // unsafe = 30 - h := headerAt(50, 'a') // safe wants 50 -- invalid + h := headerAt(50, 'a') // safe wants 50 -- invalid tagAdv.advanceSafe(context.Background(), 7, h) diff --git a/node/derivation/verify_path_b.go b/node/derivation/verify_path_b.go index cb8fb4856..bb70b3652 100644 --- a/node/derivation/verify_path_b.go +++ b/node/derivation/verify_path_b.go @@ -74,7 +74,7 @@ func (d *Derivation) verifyBatchContentPathB(ctx context.Context, batchInfo *Bat // verifyPathBContent is the testable core of Path B verification. It is // extracted from the Derivation method above so tests can supply a fake -// pathBBlockReader. Behaviour and error messages are unchanged. +// pathBBlockReader. Behavior and error messages are unchanged. 
func verifyPathBContent(ctx context.Context, reader pathBBlockReader, metrics *Metrics, batchInfo *BatchInfo) error { metrics.IncPathBTriggered() @@ -167,4 +167,3 @@ func (d *Derivation) fetchLocalLastHeader(ctx context.Context, batchInfo *BatchI } return header, nil } - From d27d088ce62bba0e1f340493f179b3d63b9f30f1 Mon Sep 17 00:00:00 2001 From: corey Date: Wed, 13 May 2026 20:20:16 +0800 Subject: [PATCH 17/17] fix(sync): make Syncer.Start idempotent via sync.Once The shared *Syncer is wired through node/cmd/node/main.go to two independent start sites: Path 1: Derivation.Start spawns a goroutine that calls d.syncer.Start (node/derivation/derivation.go:166, since the very first commit). Path 2: Executor.updateSequencerSet calls e.syncer.Start (or `go e.syncer.Start`) on the !isSequencer -> isSequencer transition (node/core/sequencers.go:86,88). On a real Tendermint sequencer node, e.isSequencer starts at false (zero value), so the first updateSequencerSet call always fires this branch -- producing a second Start on the syncer Derivation already started. The previous Start was not idempotent: it (a) re-ran the blocking initial fetchL1Messages, (b) spawned a duplicate poller racing on s.latestSynced and s.metrics, and (c) double-closed s.stop on shutdown (panic: close of closed channel). On verifier-only nodes only Path 1 fires, masking the issue; the mock-sequencer devnet path avoids Tendermint and also masks it; real Tendermint sequencer nodes hit the race. This bug predates SPEC-005 (Path 1 was added in 7f2f8874a, the initial commit, 2024-01-12), but the SPEC-005 PR consolidates how derivation drives shared services and is the natural place to retire the duplicate-load path. Wrapping Start in s.startOnce.Do() keeps both call sites correct without changing ownership semantics: the second caller returns immediately, the first poller continues, shutdown closes s.stop exactly once. go build ./node/sync/ ./node/derivation/ ./node/core/ -- clean. 
go test ./node/derivation/... -- 22 tests PASS. Co-Authored-By: Claude Opus 4.7 (1M context) --- node/sync/syncer.go | 59 +++++++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/node/sync/syncer.go b/node/sync/syncer.go index c9948983a..38b88c782 100644 --- a/node/sync/syncer.go +++ b/node/sync/syncer.go @@ -3,6 +3,7 @@ package sync import ( "context" "errors" + "sync" "time" "github.com/morph-l2/go-ethereum/common" @@ -13,6 +14,8 @@ import ( ) type Syncer struct { + startOnce sync.Once + ctx context.Context cancel context.CancelFunc bridgeClient *BridgeClient @@ -75,32 +78,40 @@ func NewSyncer(ctx context.Context, db Database, config *Config, logger tmlog.Lo }, nil } +// Start begins the L1 message sync loop. Safe to call multiple times: the +// shared *Syncer is wired through main.go to both Derivation.Start (which +// always invokes it) and Executor.updateSequencerSet (which invokes it on +// sequencer-role transitions). Without the once-guard the second caller +// would spawn a duplicate poller racing on s.latestSynced and double-close +// s.stop on shutdown. 
func (s *Syncer) Start() { - if s.isFake { - return - } - // block node startup during initial sync and print some helpful logs - s.logger.Info("initial sync start", "msg", "Running initial sync of L1 messages before starting sequencer, this might take a while...") - s.fetchL1Messages() - s.logger.Info("initial sync completed", "latestSyncedBlock", s.latestSynced) - - go func() { - t := time.NewTicker(s.pollInterval) - defer t.Stop() - - for { - // don't wait for ticker during startup - s.fetchL1Messages() - - select { - case <-s.ctx.Done(): - close(s.stop) - return - case <-t.C: - continue - } + s.startOnce.Do(func() { + if s.isFake { + return } - }() + // block node startup during initial sync and print some helpful logs + s.logger.Info("initial sync start", "msg", "Running initial sync of L1 messages before starting sequencer, this might take a while...") + s.fetchL1Messages() + s.logger.Info("initial sync completed", "latestSyncedBlock", s.latestSynced) + + go func() { + t := time.NewTicker(s.pollInterval) + defer t.Stop() + + for { + // don't wait for ticker during startup + s.fetchL1Messages() + + select { + case <-s.ctx.Done(): + close(s.stop) + return + case <-t.C: + continue + } + } + }() + }) } func (s *Syncer) Stop() {