diff --git a/node/derivation/batch_info.go b/node/derivation/batch_info.go index 4c1470aa..d795092b 100644 --- a/node/derivation/batch_info.go +++ b/node/derivation/batch_info.go @@ -80,6 +80,9 @@ func (bi *BatchInfo) TxNum() uint64 { // ParseBatch This method is externally referenced for parsing Batch func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error { + if len(batch.Sidecar.Blobs) == 0 { + return fmt.Errorf("blobs length can not be zero") + } parentBatchHeader := types.BatchHeaderBytes(batch.ParentBatchHeader) parentBatchIndex, err := parentBatchHeader.BatchIndex() if err != nil { @@ -104,9 +107,6 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error { return fmt.Errorf("decode batch header version error:%v", err) } if parentVersion == 0 { - if len(batch.Sidecar.Blobs) == 0 { - return fmt.Errorf("blobs length can not be zero") - } blobData, err := types.RetrieveBlobBytes(&batch.Sidecar.Blobs[0]) if err != nil { return err @@ -200,11 +200,9 @@ func (bi *BatchInfo) ParseBatch(batch geth.RPCRollupBatch) error { } var txs []*eth.Transaction var err error - if len(batch.Sidecar.Blobs) != 0 { - txs, err = tq.dequeue(int(block.txsNum) - int(block.l1MsgNum)) - if err != nil { - return fmt.Errorf("decode txsPayload error:%v", err) - } + txs, err = tq.dequeue(int(block.txsNum) - int(block.l1MsgNum)) + if err != nil { + return fmt.Errorf("decode txsPayload error:%v", err) } txsNum += uint64(block.txsNum) l1MsgNum += uint64(block.l1MsgNum) diff --git a/node/derivation/beacon.go b/node/derivation/beacon.go index ab35a373..d94101d6 100644 --- a/node/derivation/beacon.go +++ b/node/derivation/beacon.go @@ -241,3 +241,35 @@ func dataAndHashesFromTxs(txs types.Transactions, targetTx *types.Transaction) [ } return hashes } + +// Note: ForceGetAllBlobs is defined in derivation.go in the same package + +// GetBlobSidecarsEnhanced is an enhanced version of GetBlobSidecars method, combining two approaches to fetch blob data +// If the first method fails or 
returns no blobs, it will try the second method +func (cl *L1BeaconClient) GetBlobSidecarsEnhanced(ctx context.Context, ref L1BlockRef, hashes []IndexedBlobHash) ([]*BlobSidecar, error) { + // First try using the original GetBlobSidecars method + blobSidecars, err := cl.GetBlobSidecars(ctx, ref, hashes) + if err != nil || len(blobSidecars) == 0 { + // If failed or no blobs retrieved, try the second method + slotFn, err := cl.GetTimeToSlotFn(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get timeToSlotFn: %w", err) + } + + slot, err := slotFn(ref.Time) + if err != nil { + return nil, fmt.Errorf("failed to calculate slot: %w", err) + } + + // Build request URL and use apiReq method directly + method := fmt.Sprintf("%s%d", sidecarsMethodPrefix, slot) + var blobResp APIGetBlobSidecarsResponse + if err := cl.apiReq(ctx, &blobResp, method); err != nil { + return nil, fmt.Errorf("failed to request blob sidecars: %w", err) + } + + return blobResp.Data, nil + } + + return blobSidecars, nil +} diff --git a/node/derivation/beacon_test.go b/node/derivation/beacon_test.go new file mode 100644 index 00000000..2f79aa0c --- /dev/null +++ b/node/derivation/beacon_test.go @@ -0,0 +1 @@ +package derivation diff --git a/node/derivation/derivation.go b/node/derivation/derivation.go index a3fc43fb..95edf84c 100644 --- a/node/derivation/derivation.go +++ b/node/derivation/derivation.go @@ -13,8 +13,10 @@ import ( "github.com/morph-l2/go-ethereum/accounts/abi" "github.com/morph-l2/go-ethereum/accounts/abi/bind" "github.com/morph-l2/go-ethereum/common" + "github.com/morph-l2/go-ethereum/common/hexutil" eth "github.com/morph-l2/go-ethereum/core/types" "github.com/morph-l2/go-ethereum/crypto" + "github.com/morph-l2/go-ethereum/crypto/kzg4844" geth "github.com/morph-l2/go-ethereum/eth" "github.com/morph-l2/go-ethereum/ethclient" "github.com/morph-l2/go-ethereum/ethclient/authclient" @@ -297,26 +299,90 @@ func (d *Derivation) fetchRollupDataByTxHash(txHash common.Hash, blockNumber 
uin if err != nil { return nil, err } - // query blob - block, err := d.l1Client.BlockByNumber(d.ctx, big.NewInt(int64(blockNumber))) - if err != nil { - return nil, err - } - indexedBlobHashes := dataAndHashesFromTxs(block.Transactions(), tx) + + // Get block header to retrieve timestamp header, err := d.l1Client.HeaderByNumber(d.ctx, big.NewInt(int64(blockNumber))) if err != nil { return nil, err } - var bts eth.BlobTxSidecar - if len(indexedBlobHashes) != 0 { - bts, err = d.l1BeaconClient.GetBlobSidecar(context.Background(), L1BlockRef{ + + // Get transaction blob hashes + blobHashes := tx.BlobHashes() + if len(blobHashes) > 0 { + d.logger.Info("Transaction contains blobs", "txHash", txHash, "blobCount", len(blobHashes)) + + // Initialize indexedBlobHashes as nil + var indexedBlobHashes []IndexedBlobHash + + // If the block cannot be fetched, fall back to fetching all blobs for the slot + // Try to get the block to build IndexedBlobHash array + block, err := d.l1Client.BlockByNumber(d.ctx, big.NewInt(int64(blockNumber))) + if err == nil { + // Successfully got the block, now build IndexedBlobHash array + d.logger.Info("Building IndexedBlobHash array from block", "blockNumber", blockNumber) + indexedBlobHashes = dataAndHashesFromTxs(block.Transactions(), tx) + d.logger.Info("Built IndexedBlobHash array", "count", len(indexedBlobHashes)) + } else { + d.logger.Info("Failed to get block, will try fetching all blobs", "blockNumber", blockNumber, "error", err) + } + + // Get all blobs corresponding to this timestamp + blobSidecars, err := d.l1BeaconClient.GetBlobSidecarsEnhanced(d.ctx, L1BlockRef{ Time: header.Time, }, indexedBlobHashes) if err != nil { - return nil, fmt.Errorf("getBlockSidecar error:%v", err) + return nil, fmt.Errorf("failed to get blob sidecars:%v", err) + } + if len(blobSidecars) > 0 { + // Create blob sidecar + var blobTxSidecar eth.BlobTxSidecar + matchedCount := 0 + + // Match blobs + for _, sidecar := range blobSidecars { + var 
commitment kzg4844.Commitment + copy(commitment[:], sidecar.KZGCommitment[:]) + versionedHash := KZGToVersionedHash(commitment) + + for _, expectedHash := range blobHashes { + if bytes.Equal(versionedHash[:], expectedHash[:]) { + matchedCount++ + d.logger.Info("Found matching blob", "index", sidecar.Index, "hash", versionedHash.Hex()) + + // Decode and process blob data + var blob Blob + b, err := hexutil.Decode(sidecar.Blob) + if err != nil { + d.logger.Error("Failed to decode blob data", "error", err) + continue + } + copy(blob[:], b) + + // Verify blob + if err := VerifyBlobProof(&blob, commitment, kzg4844.Proof(sidecar.KZGProof)); err != nil { + d.logger.Error("Blob verification failed", "error", err) + continue + } + + // Add to sidecar + blobTxSidecar.Blobs = append(blobTxSidecar.Blobs, *blob.KZGBlob()) + blobTxSidecar.Commitments = append(blobTxSidecar.Commitments, commitment) + blobTxSidecar.Proofs = append(blobTxSidecar.Proofs, kzg4844.Proof(sidecar.KZGProof)) + break + } + } + } + + d.logger.Info("Blob matching results", "matched", matchedCount, "expected", len(blobHashes)) + if matchedCount > 0 { + batch.Sidecar = blobTxSidecar + } + } else { + return nil, fmt.Errorf("not matched blob,txHash:%v,blockNumber:%v", txHash, blockNumber) } } - batch.Sidecar = bts + + // Get L2 height l2Height, err := d.l2Client.BlockNumber(d.ctx) if err != nil { return nil, fmt.Errorf("query l2 block number error:%v", err) diff --git a/node/derivation/derivation_test.go b/node/derivation/derivation_test.go index 653ce96c..69eb750d 100644 --- a/node/derivation/derivation_test.go +++ b/node/derivation/derivation_test.go @@ -34,16 +34,10 @@ func TestUnPackData(t *testing.T) { require.Error(t, err) legacyTxData, err := hexutil.Decode(legacyData) require.NoError(t, err) - legacyBatch, err := d.UnPackData(legacyTxData) - require.NoError(t, err) - LegacyBatchInfo := new(BatchInfo) - err = LegacyBatchInfo.ParseBatch(legacyBatch) + _, err = d.UnPackData(legacyTxData) require.NoError(t, 
err) beforeMoveBctxTxData, err := hexutil.Decode(beforeMoveBctxData) require.NoError(t, err) - beforeMoveBctxBatch, err := d.UnPackData(beforeMoveBctxTxData) - require.NoError(t, err) - beforeMoveBctxBatchInfo := new(BatchInfo) - err = beforeMoveBctxBatchInfo.ParseBatch(beforeMoveBctxBatch) + _, err = d.UnPackData(beforeMoveBctxTxData) require.NoError(t, err) }