Merge pull request #1398 from trezor/utxo-reorg-fix

Utxo reorg detection fix
pragmaxim
2026-02-10 10:45:55 +01:00
committed by GitHub
21 changed files with 175 additions and 13 deletions

View File

@@ -297,6 +297,7 @@ func (p *BitcoinLikeParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: w.Header.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: w.Header.Timestamp.Unix(),
},
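Every parser touched by this PR makes the same one-line change: populating BlockHeader.Prev from the raw block header. A minimal sketch of why that matters, assuming a hypothetical isForked helper and storedHashAt lookup (neither is part of this PR, and blockbook's actual sync logic lives elsewhere):

package sketch

import "github.com/trezor/blockbook/bchain"

// isForked reports whether a freshly parsed raw block disagrees with the
// locally indexed chain: if the parent hash embedded in the block header
// does not match the hash stored at height-1, the local tip sits on an
// orphaned branch and a reorg rollback is needed.
func isForked(b *bchain.Block, storedHashAt func(height uint32) (string, error)) (bool, error) {
	if b.Height == 0 {
		return false, nil // the genesis block has no parent
	}
	stored, err := storedHashAt(b.Height - 1)
	if err != nil {
		return false, err
	}
	return b.Prev != stored, nil
}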

View File

@@ -0,0 +1,111 @@
//go:build integration
package btc
import (
"encoding/json"
"testing"
"github.com/trezor/blockbook/bchain"
)
const blockHeightLag = 100
func newTestBitcoinRPC(t *testing.T) *BitcoinRPC {
t.Helper()
cfg := bchain.LoadBlockchainCfg(t, "bitcoin")
config := Configuration{
RPCURL: cfg.RpcUrl,
RPCUser: cfg.RpcUser,
RPCPass: cfg.RpcPass,
RPCTimeout: cfg.RpcTimeout,
Parse: cfg.Parse,
}
raw, err := json.Marshal(config)
if err != nil {
t.Fatalf("marshal config: %v", err)
}
chain, err := NewBitcoinRPC(raw, nil)
if err != nil {
t.Fatalf("new bitcoin rpc: %v", err)
}
rpcClient, ok := chain.(*BitcoinRPC)
if !ok {
t.Fatalf("unexpected rpc client type %T", chain)
}
if err := rpcClient.Initialize(); err != nil {
t.Skipf("skipping: cannot connect to RPC at %s: %v", cfg.RpcUrl, err)
return nil // unreachable: t.Skipf stops the test via runtime.Goexit, but the compiler still requires a return value
}
return rpcClient
}
func assertBlockBasics(t *testing.T, block *bchain.Block, hash string, height uint32) {
t.Helper()
if block.Hash != hash {
t.Fatalf("hash mismatch: got %s want %s", block.Hash, hash)
}
if block.Height != height {
t.Fatalf("height mismatch: got %d want %d", block.Height, height)
}
if block.Time <= 0 {
t.Fatalf("expected block time > 0, got %d", block.Time)
}
}
// TestBitcoinRPCGetBlockIntegration validates GetBlock by hash/height and checks
// previous hash availability for fork detection.
func TestBitcoinRPCGetBlockIntegration(t *testing.T) {
rpcClient := newTestBitcoinRPC(t)
if rpcClient == nil {
return
}
best, err := rpcClient.GetBestBlockHeight()
if err != nil {
t.Fatalf("GetBestBlockHeight: %v", err)
}
if best <= blockHeightLag {
t.Skipf("best height %d too low for lag %d", best, blockHeightLag)
return
}
height := best - blockHeightLag
if height == 0 {
t.Skip("block height is zero, cannot validate previous hash")
return
}
hash, err := rpcClient.GetBlockHash(height)
if err != nil {
t.Fatalf("GetBlockHash height %d: %v", height, err)
}
prevHash, err := rpcClient.GetBlockHash(height - 1)
if err != nil {
t.Fatalf("GetBlockHash height %d: %v", height-1, err)
}
blockByHash, err := rpcClient.GetBlock(hash, 0)
if err != nil {
t.Fatalf("GetBlock by hash: %v", err)
}
assertBlockBasics(t, blockByHash, hash, height)
if blockByHash.Confirmations <= 0 {
t.Fatalf("expected confirmations > 0, got %d", blockByHash.Confirmations)
}
if blockByHash.Prev != prevHash {
t.Fatalf("previous hash mismatch: got %s want %s", blockByHash.Prev, prevHash)
}
blockByHeight, err := rpcClient.GetBlock("", height)
if err != nil {
t.Fatalf("GetBlock by height: %v", err)
}
assertBlockBasics(t, blockByHeight, hash, height)
if blockByHeight.Prev != prevHash {
t.Fatalf("previous hash mismatch by height: got %s want %s", blockByHeight.Prev, prevHash)
}
if len(blockByHeight.Txs) != len(blockByHash.Txs) {
t.Fatalf("tx count mismatch: by hash %d vs by height %d", len(blockByHash.Txs), len(blockByHeight.Txs))
}
}
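The //go:build integration constraint keeps this file out of ordinary test runs. An invocation like the following should exercise it, assuming the file sits in blockbook's bchain/coins/btc package:

go test -tags integration -run TestBitcoinRPCGetBlockIntegration ./bchain/coins/btc/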

View File

@@ -83,12 +83,18 @@ func GetChainParams(chain string) *chaincfg.Params {
// headerFixedLength is the length of fixed fields of a block (i.e. without solution)
// see https://github.com/BTCGPU/BTCGPU/wiki/Technical-Spec#block-header
const headerFixedLength = 44 + (chainhash.HashSize * 3)
const prevHashOffset = 4
const timestampOffset = 100
const timestampLength = 4
// ParseBlock parses raw block to our Block struct
func (p *BGoldParser) ParseBlock(b []byte) (*bchain.Block, error) {
r := bytes.NewReader(b)
prev, err := readPrevBlockHash(r)
if err != nil {
return nil, err
}
time, err := getTimestampAndSkipHeader(r, 0)
if err != nil {
return nil, err
@@ -107,6 +113,7 @@ func (p *BGoldParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: prev, // needed for fork detection when parsing raw blocks
Size: len(b),
Time: time,
},
@@ -114,6 +121,21 @@ func (p *BGoldParser) ParseBlock(b []byte) (*bchain.Block, error) {
}, nil
}
func readPrevBlockHash(r io.ReadSeeker) (string, error) {
// Read prev hash directly so fork detection still works with raw parsing.
if _, err := r.Seek(prevHashOffset, io.SeekStart); err != nil {
// Return the seek error when the header layout can't be accessed.
return "", err
}
var prevHash chainhash.Hash
if _, err := io.ReadFull(r, prevHash[:]); err != nil {
// Return read errors for truncated or malformed headers.
return "", err
}
// Return the canonical display string for comparison in sync logic.
return prevHash.String(), nil
}
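Per the BTCGPU header layout linked above, a 4-byte version field precedes the 32-byte previous-block hash, which is why prevHashOffset is 4; version + prev hash + merkle root + height + reserved = 4 + 32 + 32 + 4 + 28 = 100 likewise gives timestampOffset. A hypothetical round-trip test, not part of this PR, checking readPrevBlockHash against a synthetic header (imports of bytes, testing, and chainhash assumed from the surrounding package):

func TestReadPrevBlockHashSketch(t *testing.T) {
	var want chainhash.Hash
	want[31] = 0x7f // arbitrary non-zero byte so the hash is distinguishable
	header := make([]byte, 0, prevHashOffset+chainhash.HashSize)
	header = append(header, 0x01, 0x00, 0x00, 0x00) // little-endian version 1
	header = append(header, want[:]...)
	got, err := readPrevBlockHash(bytes.NewReader(header))
	if err != nil {
		t.Fatalf("readPrevBlockHash: %v", err)
	}
	if got != want.String() {
		t.Fatalf("prev hash: got %s, want %s", got, want.String())
	}
}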
func getTimestampAndSkipHeader(r io.ReadSeeker, pver uint32) (int64, error) {
_, err := r.Seek(timestampOffset, io.SeekStart)
if err != nil {

View File

@@ -25,12 +25,14 @@ type testBlock struct {
size int
time int64
txs []string
prev string
}
var testParseBlockTxs = map[int]testBlock{
104000: {
size: 15776,
time: 1295705889,
prev: "00000000000138de0496607bfc85ec4bfcebb6de0ff30048dd4bc4b12da48997",
txs: []string{
"331d4ef64118e9e5be75f0f51f1a4c5057550c3320e22ff7206f3e1101f113d0",
"1f4817d8e91c21d8c8d163dabccdd1875f760fd2dc34a1c2b7b8fa204e103597",
@@ -84,6 +86,7 @@ var testParseBlockTxs = map[int]testBlock{
532144: {
size: 12198,
time: 1528372417,
prev: "0000000048de525aea2af2ac305a7b196222fc327a34298f45110e378f838dce",
txs: []string{
"574348e23301cc89535408b6927bf75f2ac88fadf8fdfb181c17941a5de02fe0",
"9f048446401e7fac84963964df045b1f3992eda330a87b02871e422ff0a3fd28",
@@ -143,6 +146,10 @@ func TestParseBlock(t *testing.T) {
t.Errorf("ParseBlock() block time: got %d, want %d", blk.Time, tb.time)
}
if blk.Prev != tb.prev {
t.Errorf("ParseBlock() prev hash: got %s, want %s", blk.Prev, tb.prev)
}
if len(blk.Txs) != len(tb.txs) {
t.Errorf("ParseBlock() number of transactions: got %d, want %d", len(blk.Txs), len(tb.txs))
}

View File

@@ -119,6 +119,7 @@ func (p *DecredParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -99,6 +99,7 @@ func (p *DivicoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -92,6 +92,7 @@ func (p *DogecoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -271,6 +271,7 @@ func (p *FiroParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: header.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: header.Timestamp.Unix(),
},

View File

@@ -731,6 +731,7 @@ func TestParseBlock(t *testing.T) {
},
want: &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: "a3b419a943bdc31aba65d40fc71f12ceb4ef2edcf1c8bd6d83b839261387e0d9",
Size: 200286,
Time: 1547120622,
},
@@ -746,6 +747,7 @@ func TestParseBlock(t *testing.T) {
},
want: &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: "0fb6e382a25a9e298a533237f359cb6cd86a99afb8d98e3d981e650fd5012c00",
Size: 25298,
Time: 1482107572,
},
@@ -761,6 +763,7 @@ func TestParseBlock(t *testing.T) {
},
want: &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: "12c117c25e52f71e8863eadd0ccc7cd7d45e7ef907cfadf99ca4b4d390cb1a0a",
Size: 200062,
Time: 1591752749,
},

View File

@@ -105,6 +105,7 @@ func (p *MonetaryUnitParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -85,6 +85,7 @@ func (p *MyriadParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -82,6 +82,7 @@ func (p *NamecoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -112,6 +112,7 @@ func (p *OmotenashiCoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -118,6 +118,7 @@ func (p *PivXParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -124,6 +124,7 @@ func (p *QtumParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -85,6 +85,7 @@ func (p *RitocoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -82,6 +82,7 @@ func (p *UnobtaniumParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -99,6 +99,7 @@ func (p *ViacoinParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -120,6 +120,7 @@ func (p *VIPSTARCOINParser) ParseBlock(b []byte) (*bchain.Block, error) {
return &bchain.Block{
BlockHeader: bchain.BlockHeader{
Prev: h.PrevBlock.String(), // needed for fork detection when parsing raw blocks
Size: len(b),
Time: h.Timestamp.Unix(),
},

View File

@@ -17,8 +17,12 @@ import (
// BlockchainCfg contains fields read from blockbook's blockchaincfg.json after being rendered from templates.
type BlockchainCfg struct {
// more fields can be added later as needed
RpcUrl string `json:"rpc_url"`
RpcUrlWs string `json:"rpc_url_ws"`
RpcUrl string `json:"rpc_url"`
RpcUrlWs string `json:"rpc_url_ws"`
RpcUser string `json:"rpc_user"`
RpcPass string `json:"rpc_pass"`
RpcTimeout int `json:"rpc_timeout"`
Parse bool `json:"parse"`
}
// LoadBlockchainCfg returns the resolved blockchaincfg.json (env overrides are honored in tests)
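The new json tags map one-to-one onto keys in the rendered blockchaincfg.json. A minimal decoding sketch with illustrative values (the example function and its values are hypothetical; encoding/json and fmt imports assumed):

func ExampleBlockchainCfg() {
	raw := []byte(`{"rpc_url": "http://127.0.0.1:8030", "rpc_user": "rpc",
		"rpc_pass": "rpc", "rpc_timeout": 25, "parse": true}`)
	var cfg BlockchainCfg
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.RpcUrl, cfg.RpcTimeout, cfg.Parse)
	// Output: http://127.0.0.1:8030 25 true
}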

View File

@@ -456,26 +456,26 @@ func (m *MempoolBitcoinType) Resync() (count int, err error) {
if mempoolSize > 0 {
avgPerTx = totalDuration / time.Duration(mempoolSize)
}
var cacheHits uint64
var cacheMisses uint64
var cacheHitRate float64
if cache := m.getResyncOutpointCache(); cache != nil {
outpointCacheEntries = cache.len()
cacheHits, cacheMisses = cache.stats()
total := cacheHits + cacheMisses
if total > 0 {
cacheHitRate = float64(cacheHits) / float64(total)
}
}
listDurationRounded := roundDuration(listDuration, time.Millisecond)
processDurationRounded := roundDuration(processDuration, time.Millisecond)
totalDurationRounded := roundDuration(totalDuration, time.Millisecond)
avgPerTxRounded := roundDuration(avgPerTx, time.Microsecond)
hitRateText := fmt.Sprintf("%.3f", cacheHitRate)
if err != nil {
glog.Warning("mempool: resync failed size=", mempoolSize, " missing=", missingCount, " outpoint_cache_entries=", outpointCacheEntries, " batch_size=", batchSize, " batch_workers=", batchWorkers, " list_duration=", listDurationRounded, " process_duration=", processDurationRounded, " duration=", totalDurationRounded, " avg_per_tx=", avgPerTxRounded, " err=", err)
glog.Warning("mempool: resync failed size=", mempoolSize, " missing=", missingCount, " outpoint_cache_entries=", outpointCacheEntries, " outpoint_cache_hits=", cacheHits, " outpoint_cache_misses=", cacheMisses, " outpoint_cache_hit_rate=", hitRateText, " batch_size=", batchSize, " batch_workers=", batchWorkers, " list_duration=", listDurationRounded, " process_duration=", processDurationRounded, " duration=", totalDurationRounded, " avg_per_tx=", avgPerTxRounded, " err=", err)
} else {
glog.Info("mempool: resync finished size=", mempoolSize, " missing=", missingCount, " outpoint_cache_entries=", outpointCacheEntries, " batch_size=", batchSize, " batch_workers=", batchWorkers, " list_duration=", listDurationRounded, " process_duration=", processDurationRounded, " duration=", totalDurationRounded, " avg_per_tx=", avgPerTxRounded)
}
if cache := m.getResyncOutpointCache(); cache != nil {
hits, misses := cache.stats()
total := hits + misses
hitRate := 0.0
if total > 0 {
hitRate = float64(hits) / float64(total)
}
glog.Info("mempool: resync outpoint cache hits=", hits, " misses=", misses, " hit_rate=", fmt.Sprintf("%.3f", hitRate))
glog.Info("mempool: resync finished size=", mempoolSize, " missing=", missingCount, " outpoint_cache_entries=", outpointCacheEntries, " outpoint_cache_hits=", cacheHits, " outpoint_cache_misses=", cacheMisses, " outpoint_cache_hit_rate=", hitRateText, " batch_size=", batchSize, " batch_workers=", batchWorkers, " list_duration=", listDurationRounded, " process_duration=", processDurationRounded, " duration=", totalDurationRounded, " avg_per_tx=", avgPerTxRounded)
}
m.resyncOutpoints.Store((*resyncOutpointCache)(nil))
}()
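This hunk folds the cache statistics into the existing resync finished/failed log lines instead of emitting them separately. The cache internals are outside the hunk; a minimal sketch of the len()/stats() surface the logging assumes, using atomic counters (the type and its fields are hypothetical, not blockbook's actual resyncOutpointCache):

package sketch

import (
	"sync"
	"sync/atomic"
)

type outpointCache struct {
	mu      sync.RWMutex
	entries map[string][]string // txid -> outpoints, simplified for the sketch
	hits    atomic.Uint64
	misses  atomic.Uint64
}

// get counts a hit or a miss so the resync summary can report a hit rate.
func (c *outpointCache) get(txid string) ([]string, bool) {
	c.mu.RLock()
	outpoints, ok := c.entries[txid]
	c.mu.RUnlock()
	if ok {
		c.hits.Add(1)
	} else {
		c.misses.Add(1)
	}
	return outpoints, ok
}

func (c *outpointCache) len() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return len(c.entries)
}

func (c *outpointCache) stats() (hits, misses uint64) {
	return c.hits.Load(), c.misses.Load()
}

The logged hit rate is then hits / (hits + misses), guarded against a zero denominator exactly as in the hunk above.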