Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion node/db/keys.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@ var (
L1MessagePrefix = []byte("l1")
BatchBlockNumberPrefix = []byte("batch")

derivationL1HeightKey = []byte("LastDerivationL1Height")
derivationL1HeightKey = []byte("LastDerivationL1Height")
derivationL1BlockPrefix = []byte("derivL1Block")
Comment on lines +10 to +11
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical | ⚡ Quick win

Fix formatting to pass gofmt check.

The pipeline reports a gofmt failure at line 10. Please run gofmt -w node/db/keys.go to resolve the formatting issue.

🧰 Tools
🪛 GitHub Actions: Node / 0_check.txt

[error] 10-10: golangci-lint (gofmt): File is not properly formatted. Run gofmt on this file.

🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@node/db/keys.go` around lines 10 - 11, The file fails gofmt due to formatting
around the constant/variable declarations for derivationL1HeightKey and
derivationL1BlockPrefix; run gofmt -w on the file (or adjust spacing so the
declarations for derivationL1HeightKey and derivationL1BlockPrefix are formatted
according to gofmt) and save the file so the pipeline passes.

)

// encodeBlockNumber encodes an L1 enqueue index as big endian uint64
Expand All @@ -26,3 +27,8 @@ func L1MessageKey(enqueueIndex uint64) []byte {
// BatchBlockNumberKey builds the DB key for a batch record:
// BatchBlockNumberPrefix followed by the batch index as big-endian uint64.
func BatchBlockNumberKey(batchIndex uint64) []byte {
	key := make([]byte, 0, len(BatchBlockNumberPrefix)+8)
	key = append(key, BatchBlockNumberPrefix...)
	return append(key, encodeEnqueueIndex(batchIndex)...)
}

// DerivationL1BlockKey builds the DB key for an L1 block record:
// derivationL1BlockPrefix followed by l1Height as big-endian uint64.
func DerivationL1BlockKey(l1Height uint64) []byte {
	key := make([]byte, 0, len(derivationL1BlockPrefix)+8)
	key = append(key, derivationL1BlockPrefix...)
	return append(key, encodeEnqueueIndex(l1Height)...)
}
59 changes: 59 additions & 0 deletions node/db/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,65 @@ func (s *Store) WriteSyncedL1Messages(messages []types.L1Message, latestSynced u
return batch.Write()
}

// DerivationL1Block stores L1 block info for reorg detection.
// One record is written per processed L1 block; on a detected reorg the
// stored Hash no longer matches the chain and records from that height
// onward are deleted (see DeleteDerivationL1BlocksFrom).
type DerivationL1Block struct {
	Number uint64   // L1 block height; also used as the DB key suffix
	Hash   [32]byte // L1 block hash observed at Number
}

// WriteDerivationL1Block RLP-encodes block and stores it under the key
// derived from block.Number. Panics on encode or backend failure, matching
// the store's other write helpers.
func (s *Store) WriteDerivationL1Block(block *DerivationL1Block) {
	encoded, encErr := rlp.EncodeToBytes(block)
	if encErr != nil {
		panic(fmt.Sprintf("failed to RLP encode DerivationL1Block, err: %v", encErr))
	}
	if putErr := s.db.Put(DerivationL1BlockKey(block.Number), encoded); putErr != nil {
		panic(fmt.Sprintf("failed to write DerivationL1Block, err: %v", putErr))
	}
}

// ReadDerivationL1Block loads the stored L1 block record at l1Height.
// Returns nil when no record exists; panics on any other backend error or
// on corrupt RLP.
func (s *Store) ReadDerivationL1Block(l1Height uint64) *DerivationL1Block {
	raw, getErr := s.db.Get(DerivationL1BlockKey(l1Height))
	if getErr != nil && !isNotFoundErr(getErr) {
		panic(fmt.Sprintf("failed to read DerivationL1Block, err: %v", getErr))
	}
	if len(raw) == 0 {
		return nil
	}
	block := new(DerivationL1Block)
	if decErr := rlp.DecodeBytes(raw, block); decErr != nil {
		panic(fmt.Sprintf("invalid DerivationL1Block RLP, err: %v", decErr))
	}
	return block
}

// ReadDerivationL1BlockRange returns the stored records for heights in
// [from, to] inclusive, skipping missing heights. Returns nil when nothing
// is found (including when from > to).
// NOTE(review): if to == math.MaxUint64 the loop cannot terminate — callers
// are presumably bounded by real L1 heights; confirm at call sites.
func (s *Store) ReadDerivationL1BlockRange(from, to uint64) []*DerivationL1Block {
	var found []*DerivationL1Block
	for height := from; height <= to; height++ {
		if block := s.ReadDerivationL1Block(height); block != nil {
			found = append(found, block)
		}
	}
	return found
}

// DeleteDerivationL1BlocksFrom removes every stored DerivationL1Block at
// heights >= height, scanning upward until the first missing key, then
// commits the deletions in a single batch.
//
// NOTE: the scan stops at the first gap in the height sequence, so records
// above a hole would survive; callers are expected to write heights
// contiguously.
func (s *Store) DeleteDerivationL1BlocksFrom(height uint64) {
	batch := s.db.NewBatch()
	for h := height; ; h++ {
		key := DerivationL1BlockKey(h)
		has, err := s.db.Has(key)
		if err != nil {
			// Previously a Has() error was silently conflated with "key absent"
			// and ended the scan early, which could leave stale reorg records
			// behind. Fail loudly, consistent with the other store accessors.
			panic(fmt.Sprintf("failed to check DerivationL1Block at %d, err: %v", h, err))
		}
		if !has {
			break
		}
		if err := batch.Delete(key); err != nil {
			panic(fmt.Sprintf("failed to delete DerivationL1Block at %d, err: %v", h, err))
		}
	}
	if err := batch.Write(); err != nil {
		panic(fmt.Sprintf("failed to write batch delete for DerivationL1Blocks, err: %v", err))
	}
}

// isNotFoundErr reports whether err is a "key not found" error from either
// the leveldb backend or the in-memory backend. Comparison is by message
// because the two backends return distinct sentinel values that are not
// guaranteed to be wrapped (so errors.Is would miss cross-backend matches).
func isNotFoundErr(err error) bool {
	// Guard nil: the original called err.Error() unconditionally, which
	// panics with a nil error. Current callers pre-check err != nil, but a
	// predicate named isNotFoundErr should be safe to call with nil.
	if err == nil {
		return false
	}
	msg := err.Error()
	return msg == leveldb.ErrNotFound.Error() || msg == types.ErrMemoryDBNotFound.Error()
}
163 changes: 163 additions & 0 deletions node/derivation/DERIVATION_REFACTOR.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
# Derivation Refactor: Batch Verification & L1 Reorg Detection

## Background

The derivation module is the core component that syncs L2 state from L1 batch data. Previously it only ran on validator nodes and used a challenge mechanism when state mismatches were detected. This refactor makes two fundamental changes:

1. **L1 batch data is the source of truth** — when local L2 blocks don't match L1 batch data, roll back and re-derive from L1 instead of issuing a challenge.
2. **Support `latest` mode** for fetching L1 batches (instead of only `finalized`), with L1 reorg detection to handle the reduced confirmation window.

## Design Principles

- **L2 rollback is only triggered by batch data mismatch**, never by L1 reorg alone.
- L1 reorg → clean up DB → re-derive from reorg point → batch comparison decides if L2 needs rollback.
- Most L1 reorgs just re-include the same batch tx in a different block — L2 stays valid.
- **Derivation can run as a verification thread** — when blocks already exist locally (e.g. produced by sequencer), derivation compares them against L1 batch data instead of skipping.

## What Changed

### Removed

| Item | Reason |
|------|--------|
| `validator` field in `Derivation` struct | Challenge mechanism removed |
| `validator.Validator` parameter in `NewDerivationClient()` | No longer needed |
| `ChallengeState` / `ChallengeEnable` logic in `derivationBlock()` | Replaced by rollback + re-derive |
| `validator` import in `node/cmd/node/main.go` | No longer referenced |

### Added — L1 Reorg Detection

When `confirmations` is not `finalized` (i.e. using `latest` or `safe`), each derivation loop checks recent L1 blocks for hash changes before processing new batches.

**New DB layer** (`node/db/`):

- `DerivationL1Block` struct — stores `{Number, Hash}` per L1 block
- `WriteDerivationL1Block` / `ReadDerivationL1Block` / `ReadDerivationL1BlockRange` / `DeleteDerivationL1BlocksFrom`
- DB key prefix: `derivL1Block` + uint64 big-endian height

**New config** (`node/derivation/config.go`):

- `ReorgCheckDepth uint64` — how many recent L1 blocks to verify each loop (default: 64)
- CLI flag: `--derivation.reorgCheckDepth` / env `MORPH_NODE_DERIVATION_REORG_CHECK_DEPTH`

**New methods** (`node/derivation/derivation.go`):

| Method | Purpose |
|--------|---------|
| `detectReorg(ctx)` | Iterates recent L1 block hashes from DB, compares against current L1 chain. Returns the height where a mismatch is found, or nil. |
| `handleL1Reorg(height)` | Cleans DB records from the reorg point and resets `latestDerivationL1Height`. Does NOT rollback L2 — the next derivation loop re-fetches batches and the normal comparison logic decides. |
| `recordL1Blocks(ctx, from, to)` | After each derivation round, records L1 block hashes for the processed range. |

**Flow**:

```text
derivationBlock() loop start
├─ [if not finalized] detectReorg()
│ ├─ no reorg → continue
│ └─ reorg at height X → handleL1Reorg(X)
│ ├─ DeleteDerivationL1BlocksFrom(X)
│ ├─ WriteLatestDerivationL1Height(X-1)
│ └─ return (next loop re-processes from X)
├─ fetch CommitBatch logs from L1
├─ process each batch → derive() + verifyBatchRoots()
├─ recordL1Blocks(start, end)
└─ WriteLatestDerivationL1Height(end)
```

### Added — Batch Data Verification

When `derive()` encounters an L2 block that already exists locally, it now **compares** the block against the L1 batch data instead of blindly skipping it.

**New methods**:

| Method | Purpose |
|--------|---------|
| `verifyBlockContext(localHeader, blockData)` | Compares timestamp, gasLimit, baseFee between local L2 block header and batch block context. |
| `verifyBatchRoots(batchInfo, lastHeader)` | Compares stateRoot and withdrawalRoot between L1 batch and last derived L2 block. Extracted from the old inline logic. |
| `rollbackLocalChain(targetBlockNumber)` | **TODO stub** — will call geth `SetHead` API to rewind L2 chain. |

**`derive()` new flow for each block in batch**:

```text
block.Number <= latestBlockNumber?
├─ YES (block exists)
│ ├─ verifyBlockContext() passes → skip, continue
│ └─ verifyBlockContext() fails
│ ├─ IncBlockMismatchCount()
│ ├─ rollbackLocalChain(block.Number - 1)
│ └─ fall through to NewSafeL2Block (re-execute)
└─ NO (new block)
└─ NewSafeL2Block (execute normally)
```

**`derivationBlock()` batch-level verification**:

```text
After derive(batchInfo) completes:
├─ verifyBatchRoots() passes → normal
└─ verifyBatchRoots() fails
├─ IncRollbackCount()
├─ rollbackLocalChain(firstBlockNumber - 1)
├─ re-derive(batchInfo)
├─ verifyBatchRoots() again
│ ├─ passes → recovered
│ └─ fails → CRITICAL error, stop (manual intervention needed)
```

### Added — Metrics

| Metric | Type | Description |
|--------|------|-------------|
| `morphnode_derivation_l1_reorg_detected_total` | Counter | L1 reorg detection count |
| `morphnode_derivation_l2_rollback_total` | Counter | L2 rollbacks triggered by batch mismatch |
| `morphnode_derivation_block_mismatch_total` | Counter | Block-level context mismatches |
| `morphnode_derivation_halted` | Gauge | Set to 1 when derivation halts due to unrecoverable batch mismatch (alert on this) |

## Modified Files

| File | Changes |
|------|---------|
| `node/derivation/derivation.go` | Core refactor: removed validator/challenge, added reorg detection, batch verification, rollback flow |
| `node/derivation/database.go` | Extended `Reader`/`Writer` interfaces for L1 block hash tracking |
| `node/derivation/config.go` | Added `ReorgCheckDepth` config field |
| `node/derivation/metrics.go` | Added 3 counter metrics and 1 gauge (`morphnode_derivation_halted`) |
| `node/db/keys.go` | Added `derivationL1BlockPrefix` and `DerivationL1BlockKey()` |
| `node/db/store.go` | Added `DerivationL1Block` struct and 4 CRUD methods |
| `node/flags/flags.go` | Added `DerivationReorgCheckDepth` CLI flag |
| `node/cmd/node/main.go` | Removed `validator` dependency from `NewDerivationClient` call |

## TODO (follow-up work)

### `rollbackLocalChain()` — geth SetHead integration

Currently a stub that returns an error. Any batch mismatch will be detected and logged, but the
actual L2 chain rollback cannot proceed until this is implemented:

1. Expose `SetL2Head(number uint64)` in `go-ethereum/eth/catalyst/l2_api.go`
2. Add `SetHead` method to `go-ethereum/ethclient/authclient`
3. Add `SetHead` method to `node/types/retryable_client.go`
4. Call `d.l2Client.SetHead(d.ctx, targetBlockNumber)` in `rollbackLocalChain()`

Note: geth already has `BlockChain.SetHead(head uint64) error` — we just need to expose it through the engine API chain.

### Transaction-level verification

`verifyBlockContext` currently checks timestamp, gasLimit, baseFee, and batch-internal tx count
consistency. Full transaction hash comparison against local blocks requires `BlockByNumber` RPC
on `RetryableClient`, which is not yet exposed. State root verification in `verifyBatchRoots`
covers transaction execution correctness as an indirect check.

### Concurrency safety

When running as a verification thread alongside a sequencer, concurrent access between block production and rollback needs locking. This will be handled separately.

## How to Test

1. **Existing behavior preserved**: Set `--derivation.confirmations` to finalized (default) — reorg detection and L1 block hash recording are both skipped, batch verification still runs.
2. **Latest mode**: Set `--derivation.confirmations` to `-2` (latest) — reorg detection activates, L1 block hashes are tracked.
3. **Reorg detection**: Simulate by modifying a saved L1 block hash in DB — next loop should detect and clean up.
4. **Batch verification**: When an existing L2 block matches L1 batch data, it logs "block verified" and skips. When mismatched, it logs the error and returns (rollback stub returns error, preventing silent continuation).
8 changes: 8 additions & 0 deletions node/derivation/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ const (

// DefaultLogProgressInterval is the frequency at which we log progress.
DefaultLogProgressInterval = time.Second * 10

// DefaultReorgCheckDepth is the number of recent L1 blocks to check for reorgs.
DefaultReorgCheckDepth = uint64(64)
)

type Config struct {
Expand All @@ -41,6 +44,7 @@ type Config struct {
PollInterval time.Duration `json:"poll_interval"`
LogProgressInterval time.Duration `json:"log_progress_interval"`
FetchBlockRange uint64 `json:"fetch_block_range"`
ReorgCheckDepth uint64 `json:"reorg_check_depth"`
MetricsPort uint64 `json:"metrics_port"`
MetricsHostname string `json:"metrics_hostname"`
MetricsServerEnable bool `json:"metrics_server_enable"`
Expand All @@ -54,6 +58,7 @@ func DefaultConfig() *Config {
PollInterval: DefaultPollInterval,
LogProgressInterval: DefaultLogProgressInterval,
FetchBlockRange: DefaultFetchBlockRange,
ReorgCheckDepth: DefaultReorgCheckDepth,
L2: new(types.L2Config),
}
}
Expand Down Expand Up @@ -109,6 +114,9 @@ func (c *Config) SetCliContext(ctx *cli.Context) error {
return errors.New("invalid fetchBlockRange")
}
}
if ctx.GlobalIsSet(flags.DerivationReorgCheckDepth.Name) {
c.ReorgCheckDepth = ctx.GlobalUint64(flags.DerivationReorgCheckDepth.Name)
}

l2EthAddr := ctx.GlobalString(flags.L2EthAddr.Name)
l2EngineAddr := ctx.GlobalString(flags.L2EngineAddr.Name)
Expand Down
5 changes: 5 additions & 0 deletions node/derivation/database.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package derivation

import (
"morph-l2/node/db"
"morph-l2/node/sync"
)

Expand All @@ -12,8 +13,12 @@ type Database interface {

// Reader is the read-side view of the derivation store: the last processed
// L1 height plus the per-height L1 block records used for reorg detection.
type Reader interface {
	// ReadLatestDerivationL1Height returns the last L1 height processed by
	// derivation, or nil when none has been recorded yet.
	ReadLatestDerivationL1Height() *uint64
	// ReadDerivationL1Block returns the stored record for l1Height, or nil
	// if absent.
	ReadDerivationL1Block(l1Height uint64) *db.DerivationL1Block
	// ReadDerivationL1BlockRange returns stored records for heights in
	// [from, to], skipping missing heights.
	ReadDerivationL1BlockRange(from, to uint64) []*db.DerivationL1Block
}

// Writer is the write-side view of the derivation store: progress tracking
// plus maintenance of the L1 block records used for reorg detection.
type Writer interface {
	// WriteLatestDerivationL1Height records the last L1 height processed.
	WriteLatestDerivationL1Height(latest uint64)
	// WriteDerivationL1Block persists one L1 block record keyed by height.
	WriteDerivationL1Block(block *db.DerivationL1Block)
	// DeleteDerivationL1BlocksFrom removes records at heights >= height
	// (used to clean up after a detected L1 reorg).
	DeleteDerivationL1BlocksFrom(height uint64)
}
Loading
Loading