Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 6 additions & 8 deletions node/derivation/batch_info.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,9 @@ func (bi *BatchInfo) TxNum() uint64 {

// ParseBatch This method is externally referenced for parsing Batch
func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
if len(batch.Sidecar.Blobs) == 0 {
return fmt.Errorf("blobs length can not be zero")
}
parentBatchHeader := types.BatchHeaderBytes(batch.ParentBatchHeader)
parentBatchIndex, err := parentBatchHeader.BatchIndex()
if err != nil {
Expand All @@ -104,9 +107,6 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
return fmt.Errorf("decode batch header version error:%v", err)
}
if parentVersion == 0 {
if len(batch.Sidecar.Blobs) == 0 {
return fmt.Errorf("blobs length can not be zero")
}
blobData, err := types.RetrieveBlobBytes(&batch.Sidecar.Blobs[0])
if err != nil {
return err
Expand Down Expand Up @@ -200,11 +200,9 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error {
}
var txs []*eth.Transaction
var err error
if len(batch.Sidecar.Blobs) != 0 {
txs, err = tq.dequeue(int(block.txsNum) - int(block.l1MsgNum))
if err != nil {
return fmt.Errorf("decode txsPayload error:%v", err)
}
txs, err = tq.dequeue(int(block.txsNum) - int(block.l1MsgNum))
if err != nil {
return fmt.Errorf("decode txsPayload error:%v", err)
}
txsNum += uint64(block.txsNum)
l1MsgNum += uint64(block.l1MsgNum)
Expand Down
32 changes: 32 additions & 0 deletions node/derivation/beacon.go
Original file line number Diff line number Diff line change
Expand Up @@ -241,3 +241,35 @@ func dataAndHashesFromTxs(txs types.Transactions, targetTx *types.Transaction) [
}
return hashes
}

// Note: ForceGetAllBlobs is defined in derivation.go in the same package

// GetBlobSidecarsEnhanced is an enhanced version of GetBlobSidecars method, combining two approaches to fetch blob data
// If the first method fails or returns no blobs, it will try the second method
// GetBlobSidecarsEnhanced fetches blob sidecars for the L1 block referenced by
// ref. It first tries the hash-filtered GetBlobSidecars lookup; if that fails
// or yields no sidecars, it falls back to requesting every sidecar in the
// block's beacon slot directly via the beacon API.
//
// NOTE(review): the fallback path ignores the hashes filter and returns all
// sidecars found in the slot — callers are expected to match the returned
// blobs against their expected versioned hashes themselves.
func (cl *L1BeaconClient) GetBlobSidecarsEnhanced(ctx context.Context, ref L1BlockRef, hashes []IndexedBlobHash) ([]*BlobSidecar, error) {
	// Primary path: the original hash-filtered lookup.
	blobSidecars, primaryErr := cl.GetBlobSidecars(ctx, ref, hashes)
	if primaryErr == nil && len(blobSidecars) > 0 {
		return blobSidecars, nil
	}

	// Fallback path. Keep primaryErr in every error below so the root cause of
	// the first failure is not silently discarded if the fallback also fails.
	slotFn, err := cl.GetTimeToSlotFn(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get timeToSlotFn (primary lookup error: %v): %w", primaryErr, err)
	}

	// Convert the L1 block timestamp into its beacon-chain slot number.
	slot, err := slotFn(ref.Time)
	if err != nil {
		return nil, fmt.Errorf("failed to calculate slot (primary lookup error: %v): %w", primaryErr, err)
	}

	// Request all blob sidecars for the slot directly from the beacon API.
	method := fmt.Sprintf("%s%d", sidecarsMethodPrefix, slot)
	var blobResp APIGetBlobSidecarsResponse
	if err := cl.apiReq(ctx, &blobResp, method); err != nil {
		return nil, fmt.Errorf("failed to request blob sidecars (primary lookup error: %v): %w", primaryErr, err)
	}

	return blobResp.Data, nil
}
1 change: 1 addition & 0 deletions node/derivation/beacon_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
package derivation
88 changes: 77 additions & 11 deletions node/derivation/derivation.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,10 @@ import (
"github.com/morph-l2/go-ethereum/accounts/abi"
"github.com/morph-l2/go-ethereum/accounts/abi/bind"
"github.com/morph-l2/go-ethereum/common"
"github.com/morph-l2/go-ethereum/common/hexutil"
eth "github.com/morph-l2/go-ethereum/core/types"
"github.com/morph-l2/go-ethereum/crypto"
"github.com/morph-l2/go-ethereum/crypto/kzg4844"
geth "github.com/morph-l2/go-ethereum/eth"
"github.com/morph-l2/go-ethereum/ethclient"
"github.com/morph-l2/go-ethereum/ethclient/authclient"
Expand Down Expand Up @@ -297,26 +299,90 @@ func (d *Derivation) fetchRollupDataByTxHash(txHash common.Hash, blockNumber uin
if err != nil {
return nil, err
}
// query blob
block, err := d.l1Client.BlockByNumber(d.ctx, big.NewInt(int64(blockNumber)))
if err != nil {
return nil, err
}
indexedBlobHashes := dataAndHashesFromTxs(block.Transactions(), tx)

// Get block header to retrieve timestamp
header, err := d.l1Client.HeaderByNumber(d.ctx, big.NewInt(int64(blockNumber)))
if err != nil {
return nil, err
}
var bts eth.BlobTxSidecar
if len(indexedBlobHashes) != 0 {
bts, err = d.l1BeaconClient.GetBlobSidecar(context.Background(), L1BlockRef{

// Get transaction blob hashes
blobHashes := tx.BlobHashes()
if len(blobHashes) > 0 {
d.logger.Info("Transaction contains blobs", "txHash", txHash, "blobCount", len(blobHashes))

// Initialize indexedBlobHashes as nil
var indexedBlobHashes []IndexedBlobHash

// Only try to build IndexedBlobHash array if not forcing get all blobs
// Try to get the block to build IndexedBlobHash array
block, err := d.l1Client.BlockByNumber(d.ctx, big.NewInt(int64(blockNumber)))
if err == nil {
// Successfully got the block, now build IndexedBlobHash array
d.logger.Info("Building IndexedBlobHash array from block", "blockNumber", blockNumber)
indexedBlobHashes = dataAndHashesFromTxs(block.Transactions(), tx)
d.logger.Info("Built IndexedBlobHash array", "count", len(indexedBlobHashes))
} else {
d.logger.Info("Failed to get block, will try fetching all blobs", "blockNumber", blockNumber, "error", err)
}

// Get all blobs corresponding to this timestamp
blobSidecars, err := d.l1BeaconClient.GetBlobSidecarsEnhanced(d.ctx, L1BlockRef{
Time: header.Time,
}, indexedBlobHashes)
if err != nil {
return nil, fmt.Errorf("getBlockSidecar error:%v", err)
return nil, fmt.Errorf("failed to get blobs, continuing processing:%v", err)
}
if len(blobSidecars) > 0 {
// Create blob sidecar
var blobTxSidecar eth.BlobTxSidecar
matchedCount := 0

// Match blobs
for _, sidecar := range blobSidecars {
var commitment kzg4844.Commitment
copy(commitment[:], sidecar.KZGCommitment[:])
versionedHash := KZGToVersionedHash(commitment)

for _, expectedHash := range blobHashes {
if bytes.Equal(versionedHash[:], expectedHash[:]) {
matchedCount++
d.logger.Info("Found matching blob", "index", sidecar.Index, "hash", versionedHash.Hex())

// Decode and process blob data
var blob Blob
b, err := hexutil.Decode(sidecar.Blob)
if err != nil {
d.logger.Error("Failed to decode blob data", "error", err)
continue
}
copy(blob[:], b)

// Verify blob
if err := VerifyBlobProof(&blob, commitment, kzg4844.Proof(sidecar.KZGProof)); err != nil {
d.logger.Error("Blob verification failed", "error", err)
continue
}

// Add to sidecar
blobTxSidecar.Blobs = append(blobTxSidecar.Blobs, *blob.KZGBlob())
blobTxSidecar.Commitments = append(blobTxSidecar.Commitments, commitment)
blobTxSidecar.Proofs = append(blobTxSidecar.Proofs, kzg4844.Proof(sidecar.KZGProof))
break
}
}
}

d.logger.Info("Blob matching results", "matched", matchedCount, "expected", len(blobHashes))
if matchedCount > 0 {
batch.Sidecar = blobTxSidecar
}
} else {
return nil, fmt.Errorf("not matched blob,txHash:%v,blockNumber:%v", txHash, blockNumber)
}
}
batch.Sidecar = bts

// Get L2 height
l2Height, err := d.l2Client.BlockNumber(d.ctx)
if err != nil {
return nil, fmt.Errorf("query l2 block number error:%v", err)
Expand Down
10 changes: 2 additions & 8 deletions node/derivation/derivation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,16 +34,10 @@ func TestUnPackData(t *testing.T) {
require.Error(t, err)
legacyTxData, err := hexutil.Decode(legacyData)
require.NoError(t, err)
legacyBatch, err := d.UnPackData(legacyTxData)
require.NoError(t, err)
LegacyBatchInfo := new(BatchInfo)
err = LegacyBatchInfo.ParseBatch(legacyBatch)
_, err = d.UnPackData(legacyTxData)
require.NoError(t, err)
beforeMoveBctxTxData, err := hexutil.Decode(beforeMoveBctxData)
require.NoError(t, err)
beforeMoveBctxBatch, err := d.UnPackData(beforeMoveBctxTxData)
require.NoError(t, err)
beforeMoveBctxBatchInfo := new(BatchInfo)
err = beforeMoveBctxBatchInfo.ParseBatch(beforeMoveBctxBatch)
_, err = d.UnPackData(beforeMoveBctxTxData)
require.NoError(t, err)
}
Loading