Merge branch 'master' into verifier

This commit is contained in:
Martin Boehm
2018-05-04 12:40:20 +02:00
67 changed files with 2583 additions and 469 deletions

8
Gopkg.lock generated
View File

@@ -31,6 +31,12 @@
packages = [".","base58","bech32"]
revision = "501929d3d046174c3d39f0ea54ece471aa17238c"
[[projects]]
branch = "master"
name = "github.com/cpacia/bchutil"
packages = ["."]
revision = "12e86f41eb040d3b85b5d8e3a3a4bed035517c52"
[[projects]]
name = "github.com/ethereum/go-ethereum"
packages = [".","common","common/hexutil","common/math","core/types","crypto","crypto/secp256k1","crypto/sha3","ethclient","ethdb","log","metrics","params","rlp","rpc","trie"]
@@ -184,6 +190,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "47ed30394b51a48d76267e128d24a1b110e0c018cf7dfb41881511a4f2a1a866"
inputs-digest = "3e3bcaeb80d40bd8073342d32dbc57e4266fba7b8dfa00fc90bc6184e03ab96f"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -72,3 +72,7 @@
[[constraint]]
name = "github.com/golang/protobuf"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/cpacia/bchutil"

View File

@@ -14,6 +14,9 @@ build-debug: .bin-image
test: .bin-image
docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src $(BIN_IMAGE) make test
test-all: .bin-image
docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src $(BIN_IMAGE) make test-all
deb: .deb-image
docker run -t --rm -e PACKAGER=$(PACKAGER) -v $(CURDIR):/src -v $(CURDIR)/build:/out $(DEB_IMAGE)

View File

@@ -2,6 +2,7 @@ package bchain
import (
"encoding/hex"
"fmt"
"github.com/gogo/protobuf/proto"
"github.com/juju/errors"
@@ -35,8 +36,16 @@ func (p *BaseParser) PackedTxidLen() int {
return 32
}
// KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column
// 100 is the default depth used for fork handling in UTXO chains
func (p *BaseParser) KeepBlockAddresses() int {
	return 100
}
// PackTxid packs txid to byte array
func (p *BaseParser) PackTxid(txid string) ([]byte, error) {
	// an empty txid cannot be packed; report it explicitly
	if len(txid) == 0 {
		return nil, ErrTxidMissing
	}
	return hex.DecodeString(txid)
}
@@ -149,6 +158,9 @@ func (p *BaseParser) UnpackTx(buf []byte) (*Tx, uint32, error) {
},
Value: pto.Value,
}
if len(pto.Addresses) == 1 {
vout[i].Address = NewBaseAddress(pto.Addresses[0])
}
}
tx := Tx{
Blocktime: int64(pt.Blocktime),
@@ -161,3 +173,43 @@ func (p *BaseParser) UnpackTx(buf []byte) (*Tx, uint32, error) {
}
return &tx, pt.Height, nil
}
// baseAddress is the default Address implementation; it holds a single
// already-encoded address string and supports no alternative encodings.
type baseAddress struct {
	addr string
}

// NewBaseAddress wraps an encoded address string in an Address.
func NewBaseAddress(addr string) Address {
	return &baseAddress{addr: addr}
}

// String returns the address in its original encoding.
func (a baseAddress) String() string {
	return a.addr
}

// EncodeAddress returns the address in the requested format; the base
// implementation supports only DefaultAddress.
func (a baseAddress) EncodeAddress(format AddressFormat) (string, error) {
	if format != DefaultAddress {
		return "", fmt.Errorf("Unknown address format: %d", format)
	}
	return a.addr, nil
}

// AreEqual compares the address to addr in the default encoding.
func (a baseAddress) AreEqual(addr string) (bool, error) {
	// use the named constant instead of the magic literal 0
	ea, err := a.EncodeAddress(DefaultAddress)
	if err != nil {
		return false, err
	}
	return ea == addr, nil
}

// InSlice returns true if the address equals any element of addrs.
func (a baseAddress) InSlice(addrs []string) (bool, error) {
	for _, addr := range addrs {
		eq, err := a.AreEqual(addr)
		if err != nil {
			return false, err
		}
		if eq {
			return true, nil
		}
	}
	return false, nil
}

View File

@@ -0,0 +1,165 @@
package bch
import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"fmt"
"strings"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcutil"
"github.com/cpacia/bchutil"
)
var prefixes = []string{"bitcoincash", "bchtest", "bchreg"}
// BCashParser handle
type BCashParser struct {
*btc.BitcoinParser
}
// GetChainParams contains network parameters for the main Bitcoin Cash network,
// the regression test Bitcoin Cash network, the test Bitcoin Cash network and
// the simulation test Bitcoin Cash network, in this order
// NOTE(review): this mutates the Net field of the shared chaincfg package-level
// params in place — any other code using chaincfg.MainNetParams etc. will see
// the BCH magic; confirm this is intended before reuse in the same process.
func GetChainParams(chain string) *chaincfg.Params {
	var params *chaincfg.Params
	switch chain {
	case "test":
		params = &chaincfg.TestNet3Params
		params.Net = bchutil.TestnetMagic
	case "regtest":
		params = &chaincfg.RegressionNetParams
		params.Net = bchutil.Regtestmagic
	default:
		// any other value (including "main") selects mainnet
		params = &chaincfg.MainNetParams
		params.Net = bchutil.MainnetMagic
	}
	return params
}
// GetAddrIDFromAddress returns internal address representation of given address
// the internal representation is the ScriptPubKey of the address
func (p *BCashParser) GetAddrIDFromAddress(address string) ([]byte, error) {
	return p.AddressToOutputScript(address)
}
// AddressToOutputScript converts bitcoin address to ScriptPubKey.
// Cashaddr-formatted addresses are decoded with bchutil, legacy addresses
// with btcutil; both yield the same output script for the same hash.
func (p *BCashParser) AddressToOutputScript(address string) ([]byte, error) {
	// early return removes the else-after-return chain (Go idiom)
	if isCashAddr(address) {
		da, err := bchutil.DecodeAddress(address, p.Params)
		if err != nil {
			return nil, err
		}
		return bchutil.PayToAddrScript(da)
	}
	da, err := btcutil.DecodeAddress(address, p.Params)
	if err != nil {
		return nil, err
	}
	return txscript.PayToAddrScript(da)
}
// isCashAddr reports whether addr looks like a cashaddr-formatted address,
// i.e. it has exactly one ":" and a known network prefix before it.
func isCashAddr(addr string) bool {
	sep := strings.IndexByte(addr, ':')
	if sep < 0 {
		// no prefix separator at all
		return false
	}
	if strings.IndexByte(addr[sep+1:], ':') >= 0 {
		// more than one separator is not a valid cashaddr
		return false
	}
	candidate := addr[:sep]
	for _, prefix := range prefixes {
		if candidate == prefix {
			return true
		}
	}
	return false
}
// UnpackTx unpacks transaction from byte array and attaches a bcashAddress
// to every output that has exactly one address.
func (p *BCashParser) UnpackTx(buf []byte) (tx *bchain.Tx, height uint32, err error) {
	tx, height, err = p.BitcoinParser.UnpackTx(buf)
	if err != nil {
		return
	}
	for i := range tx.Vout {
		addrs := tx.Vout[i].ScriptPubKey.Addresses
		if len(addrs) == 1 {
			tx.Vout[i].Address = &bcashAddress{
				addr: addrs[0],
				net:  p.Params,
			}
		}
	}
	return
}
// bcashAddress wraps a legacy-encoded address together with the network
// parameters needed to re-encode it in the cashaddr format.
type bcashAddress struct {
	addr string
	net  *chaincfg.Params
}

// String returns the address in its original (legacy) encoding.
func (a *bcashAddress) String() string {
	return a.addr
}

// EncodeAddress returns the address in the requested format;
// BCashAddress re-encodes the stored legacy address as a cashaddr.
func (a *bcashAddress) EncodeAddress(format bchain.AddressFormat) (string, error) {
	switch format {
	case bchain.DefaultAddress:
		return a.String(), nil
	case bchain.BCashAddress:
		// decode the legacy form first to recover the hash and address type
		da, err := btcutil.DecodeAddress(a.addr, a.net)
		if err != nil {
			return "", err
		}
		var ca btcutil.Address
		// only P2PKH and P2SH addresses can be converted to cashaddr
		switch da := da.(type) {
		case *btcutil.AddressPubKeyHash:
			ca, err = bchutil.NewCashAddressPubKeyHash(da.Hash160()[:], a.net)
		case *btcutil.AddressScriptHash:
			ca, err = bchutil.NewCashAddressScriptHash(da.Hash160()[:], a.net)
		default:
			err = fmt.Errorf("Unknown address type: %T", da)
		}
		if err != nil {
			return "", err
		}
		return ca.String(), nil
	default:
		return "", fmt.Errorf("Unknown address format: %d", format)
	}
}

// AreEqual compares the address with addr; the encoding used for the
// comparison is chosen by the format of addr (cashaddr vs legacy).
func (a *bcashAddress) AreEqual(addr string) (bool, error) {
	var format bchain.AddressFormat
	if isCashAddr(addr) {
		format = bchain.BCashAddress
	} else {
		format = bchain.DefaultAddress
	}
	ea, err := a.EncodeAddress(format)
	if err != nil {
		return false, err
	}
	return ea == addr, nil
}

// InSlice returns true if the address equals any element of addrs.
func (a *bcashAddress) InSlice(addrs []string) (bool, error) {
	for _, addr := range addrs {
		eq, err := a.AreEqual(addr)
		if err != nil {
			return false, err
		}
		if eq {
			return true, nil
		}
	}
	return false, nil
}

View File

@@ -0,0 +1,250 @@
package bch
import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"bytes"
"encoding/hex"
"reflect"
"testing"
)
// TestBcashAddressEncodeAddress checks that a legacy mainnet address is
// returned unchanged for DefaultAddress and re-encoded for BCashAddress.
func TestBcashAddressEncodeAddress(t *testing.T) {
	addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")}
	got1, err := addr.EncodeAddress(bchain.DefaultAddress)
	if err != nil {
		t.Errorf("EncodeAddress() error = %v", err)
		return
	}
	if got1 != "13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji" {
		t.Errorf("EncodeAddress() got1 = %v, want %v", got1, "13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji")
	}
	got2, err := addr.EncodeAddress(bchain.BCashAddress)
	if err != nil {
		t.Errorf("EncodeAddress() error = %v", err)
		return
	}
	if got2 != "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf" {
		t.Errorf("EncodeAddress() got2 = %v, want %v", got2, "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf")
	}
}
// TestBcashAddressAreEqual checks AreEqual against matching and
// non-matching addresses in both legacy and cashaddr encodings.
// Rewritten from four copy-pasted got1..got4 blocks into the idiomatic
// table-driven form.
func TestBcashAddressAreEqual(t *testing.T) {
	addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")}
	tests := []struct {
		addr string
		want bool
	}{
		{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", true},
		{"bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf", true},
		{"1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w", false},
		{"bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch", false},
	}
	for i, tt := range tests {
		got, err := addr.AreEqual(tt.addr)
		if err != nil {
			t.Errorf("AreEqual() error = %v", err)
			return
		}
		if got != tt.want {
			t.Errorf("AreEqual() got%d = %v, want %v", i+1, got, tt.want)
		}
	}
}
// TestBcashAddressInSlice checks InSlice with slices that do and do not
// contain the address, mixing legacy and cashaddr encodings.
// Rewritten from four copy-pasted got1..got4 blocks into the idiomatic
// table-driven form.
func TestBcashAddressInSlice(t *testing.T) {
	addr := bcashAddress{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", GetChainParams("main")}
	tests := []struct {
		addrs []string
		want  bool
	}{
		{[]string{"13zMwGC5bxRn9ckJ1mgxf7UR8qbbNe2iji", "bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch"}, true},
		{[]string{"1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w", "bitcoincash:qqsvjuqqwgyzvz7zz9xcvxent0ul2xjs6y4d9qvsrf"}, true},
		{[]string{"1HoKgKQh7ZNomWURmS9Tk3z8JM2MWm7S1w", "1E6Np6dUPYpBSdLMLuwBF8sRQ3cngdaRRY"}, false},
		{[]string{"bitcoincash:qzuyf0gpqj7q5wfck3nyghhklju7r0k3ksmq6d0vch", "bitcoincash:qz8emmpenqgeg7et8xsz8prvhy6cqcalyyjcamt7e9"}, false},
	}
	for i, tt := range tests {
		got, err := addr.InSlice(tt.addrs)
		if err != nil {
			t.Errorf("InSlice() error = %v", err)
			return
		}
		if got != tt.want {
			t.Errorf("InSlice() got%d = %v, want %v", i+1, got, tt.want)
		}
	}
}
// TestAddressToOutputScript checks that the legacy and cashaddr forms of
// the same testnet address produce the identical P2PKH output script.
func TestAddressToOutputScript(t *testing.T) {
	parser := BCashParser{&btc.BitcoinParser{Params: GetChainParams("test")}}
	// expected P2PKH script for the hash160 shared by both encodings below
	want, err := hex.DecodeString("76a9144fa927fd3bcf57d4e3c582c3d2eb2bd3df8df47c88ac")
	if err != nil {
		panic(err)
	}
	got1, err := parser.AddressToOutputScript("mnnAKPTSrWjgoi3uEYaQkHA1QEC5btFeBr")
	if err != nil {
		t.Errorf("AddressToOutputScript() error = %v", err)
		return
	}
	if !bytes.Equal(got1, want) {
		t.Errorf("AddressToOutputScript() got1 = %v, want %v", got1, want)
	}
	got2, err := parser.AddressToOutputScript("bchtest:qp86jfla8084048rckpv85ht90falr050s03ejaesm")
	if err != nil {
		t.Errorf("AddressToOutputScript() error = %v", err)
		return
	}
	if !bytes.Equal(got2, want) {
		t.Errorf("AddressToOutputScript() got2 = %v, want %v", got2, want)
	}
}
// testTx1 is a mainnet transaction with a single P2SH output, used as an
// UnpackTx fixture.
var testTx1 = bchain.Tx{
	Hex:       "01000000017f9a22c9cbf54bd902400df746f138f37bcf5b4d93eb755820e974ba43ed5f42040000006a4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80feffffff019c9700000000000017a9146144d57c8aff48492c9dfb914e120b20bad72d6f8773d00700",
	Blocktime: 1519053802,
	Txid:      "056e3d82e5ffd0e915fb9b62797d76263508c34fe3e5dbed30dd3e943930f204",
	LockTime:  512115,
	Vin: []bchain.Vin{
		{
			ScriptSig: bchain.ScriptSig{
				Hex: "4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80",
			},
			Txid:     "425fed43ba74e9205875eb934d5bcf7bf338f146f70d4002d94bf5cbc9229a7f",
			Vout:     4,
			Sequence: 4294967294,
		},
	},
	Vout: []bchain.Vout{
		{
			Value: 0.00038812,
			N:     0,
			ScriptPubKey: bchain.ScriptPubKey{
				Hex: "a9146144d57c8aff48492c9dfb914e120b20bad72d6f87",
				Addresses: []string{
					"3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK",
				},
			},
			Address: &bcashAddress{"3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK", GetChainParams("main")},
		},
	},
}

// testTxPacked1 is testTx1 packed at height 123456 (prefix 0001e240).
var testTxPacked1 = "0001e2408ba8d7af5401000000017f9a22c9cbf54bd902400df746f138f37bcf5b4d93eb755820e974ba43ed5f42040000006a4730440220037f4ed5427cde81d55b9b6a2fd08c8a25090c2c2fff3a75c1a57625ca8a7118022076c702fe55969fa08137f71afd4851c48e31082dd3c40c919c92cdbc826758d30121029f6da5623c9f9b68a9baf9c1bc7511df88fa34c6c2f71f7c62f2f03ff48dca80feffffff019c9700000000000017a9146144d57c8aff48492c9dfb914e120b20bad72d6f8773d00700"
// testTx2 is a testnet segwit transaction with two P2SH outputs, used as an
// UnpackTx fixture.
var testTx2 = bchain.Tx{
	Hex:       "010000000001019d64f0c72a0d206001decbffaa722eb1044534c74eee7a5df8318e42a4323ec10000000017160014550da1f5d25a9dae2eafd6902b4194c4c6500af6ffffffff02809698000000000017a914cd668d781ece600efa4b2404dc91fd26b8b8aed8870553d7360000000017a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a8702473044022076aba4ad559616905fa51d4ddd357fc1fdb428d40cb388e042cdd1da4a1b7357022011916f90c712ead9a66d5f058252efd280439ad8956a967e95d437d246710bc9012102a80a5964c5612bb769ef73147b2cf3c149bc0fd4ecb02f8097629c94ab013ffd00000000",
	Blocktime: 1235678901,
	Txid:      "474e6795760ebe81cb4023dc227e5a0efe340e1771c89a0035276361ed733de7",
	LockTime:  0,
	Vin: []bchain.Vin{
		{
			ScriptSig: bchain.ScriptSig{
				Hex: "160014550da1f5d25a9dae2eafd6902b4194c4c6500af6",
			},
			Txid:     "c13e32a4428e31f85d7aee4ec7344504b12e72aaffcbde0160200d2ac7f0649d",
			Vout:     0,
			Sequence: 4294967295,
		},
	},
	Vout: []bchain.Vout{
		{
			Value: .1,
			N:     0,
			ScriptPubKey: bchain.ScriptPubKey{
				Hex: "a914cd668d781ece600efa4b2404dc91fd26b8b8aed887",
				Addresses: []string{
					"2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu",
				},
			},
			Address: &bcashAddress{"2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu", GetChainParams("test")},
		},
		{
			Value: 9.20081157,
			N:     1,
			ScriptPubKey: bchain.ScriptPubKey{
				Hex: "a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a87",
				Addresses: []string{
					"2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D",
				},
			},
			Address: &bcashAddress{"2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D", GetChainParams("test")},
		},
	},
}

// testTxPacked2 is testTx2 packed at height 510234 (prefix 0007c91a).
var testTxPacked2 = "0007c91a899ab7da6a010000000001019d64f0c72a0d206001decbffaa722eb1044534c74eee7a5df8318e42a4323ec10000000017160014550da1f5d25a9dae2eafd6902b4194c4c6500af6ffffffff02809698000000000017a914cd668d781ece600efa4b2404dc91fd26b8b8aed8870553d7360000000017a914246655bdbd54c7e477d0ea2375e86e0db2b8f80a8702473044022076aba4ad559616905fa51d4ddd357fc1fdb428d40cb388e042cdd1da4a1b7357022011916f90c712ead9a66d5f058252efd280439ad8956a967e95d437d246710bc9012102a80a5964c5612bb769ef73147b2cf3c149bc0fd4ecb02f8097629c94ab013ffd00000000"
// Test_UnpackTx checks that BCashParser.UnpackTx reconstructs the full Tx
// (including the attached bcashAddress values) from the packed fixtures.
func Test_UnpackTx(t *testing.T) {
	type args struct {
		packedTx string
		parser   *BCashParser
	}
	tests := []struct {
		name    string
		args    args
		want    *bchain.Tx
		want1   uint32
		wantErr bool
	}{
		{
			name: "btc-1",
			args: args{
				packedTx: testTxPacked1,
				parser:   &BCashParser{&btc.BitcoinParser{Params: GetChainParams("main")}},
			},
			want:    &testTx1,
			want1:   123456,
			wantErr: false,
		},
		{
			name: "testnet-1",
			args: args{
				packedTx: testTxPacked2,
				parser:   &BCashParser{&btc.BitcoinParser{Params: GetChainParams("test")}},
			},
			want:    &testTx2,
			want1:   510234,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b, _ := hex.DecodeString(tt.args.packedTx)
			got, got1, err := tt.args.parser.UnpackTx(b)
			if (err != nil) != tt.wantErr {
				t.Errorf("unpackTx() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// deep compare includes the Address fields set by UnpackTx
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("unpackTx() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("unpackTx() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}

View File

@@ -0,0 +1,216 @@
package bch
import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"encoding/hex"
"encoding/json"
"github.com/cpacia/bchutil"
"github.com/golang/glog"
"github.com/juju/errors"
)
// BCashRPC is an interface to JSON-RPC bitcoind service.
type BCashRPC struct {
*btc.BitcoinRPC
}
// NewBCashRPC returns new BCashRPC instance.
// It reuses the generic bitcoind RPC client; only the parser and network
// parameters are BCH specific (set later in Initialize).
func NewBCashRPC(config json.RawMessage, pushHandler func(bchain.NotificationType)) (bchain.BlockChain, error) {
	b, err := btc.NewBitcoinRPC(config, pushHandler)
	if err != nil {
		return nil, err
	}
	s := &BCashRPC{
		b.(*btc.BitcoinRPC),
	}
	return s, nil
}
// Initialize sets up the mempool, detects the chain from the running daemon
// and installs the BCH-specific parser and network flags.
func (b *BCashRPC) Initialize() error {
	b.Mempool = bchain.NewUTXOMempool(b)
	chainName, err := b.GetBlockChainInfo()
	if err != nil {
		return err
	}
	params := GetChainParams(chainName)
	// always create parser
	b.Parser = &BCashParser{
		&btc.BitcoinParser{
			Params: params,
		},
	}
	// parameters for getInfo request
	if params.Net == bchutil.MainnetMagic {
		b.Testnet = false
		b.Network = "livenet"
	} else {
		// regtest is also reported as testnet here
		b.Testnet = true
		b.Network = "testnet"
	}
	glog.Info("rpc: block chain ", params.Name)
	return nil
}
// getblock

// cmdGetBlock is the JSON-RPC request for the getblock method.
type cmdGetBlock struct {
	Method string `json:"method"`
	Params struct {
		BlockHash string `json:"blockhash"`
		Verbose   bool   `json:"verbose"`
	} `json:"params"`
}

// resGetBlockRaw is the getblock response with verbose=false (hex string).
type resGetBlockRaw struct {
	Error  *bchain.RPCError `json:"error"`
	Result string           `json:"result"`
}

// resGetBlockThin is the getblock response with verbose=true (header + txids).
type resGetBlockThin struct {
	Error  *bchain.RPCError `json:"error"`
	Result bchain.ThinBlock `json:"result"`
}

// estimatesmartfee

// cmdEstimateSmartFee is the JSON-RPC request for estimatesmartfee.
type cmdEstimateSmartFee struct {
	Method string `json:"method"`
	Params struct {
		Blocks int `json:"nblocks"`
	} `json:"params"`
}

// resEstimateSmartFee is the estimatesmartfee response.
type resEstimateSmartFee struct {
	Error  *bchain.RPCError `json:"error"`
	Result struct {
		Feerate float64 `json:"feerate"`
		Blocks  int     `json:"blocks"`
	} `json:"result"`
}
// GetBlock returns block with given hash.
// If hash is empty and height is positive, the hash is looked up first;
// the block is then fetched raw and parsed locally.
func (b *BCashRPC) GetBlock(hash string, height uint32) (*bchain.Block, error) {
	var err error
	if hash == "" && height > 0 {
		hash, err = b.GetBlockHash(height)
		if err != nil {
			return nil, err
		}
	}
	// XXX
	// // optimization
	// if height > 0 {
	// 	return b.getBlockWithoutHeader(hash, height)
	// }
	header, err := b.GetBlockHeader(hash)
	if err != nil {
		return nil, err
	}
	data, err := b.GetBlockRaw(hash)
	if err != nil {
		return nil, err
	}
	block, err := b.Parser.ParseBlock(data)
	if err != nil {
		return nil, errors.Annotatef(err, "hash %v", hash)
	}
	// ParseBlock does not fill the header; attach the one fetched above
	block.BlockHeader = *header
	return block, nil
}
// GetBlockRaw returns block with given hash as bytes.
// It calls getblock with verbose=false and hex-decodes the result.
func (b *BCashRPC) GetBlockRaw(hash string) ([]byte, error) {
	glog.V(1).Info("rpc: getblock (verbose=0) ", hash)
	res := resGetBlockRaw{}
	req := cmdGetBlock{Method: "getblock"}
	req.Params.BlockHash = hash
	req.Params.Verbose = false
	err := b.Call(&req, &res)
	if err != nil {
		return nil, errors.Annotatef(err, "hash %v", hash)
	}
	if res.Error != nil {
		// map daemon-specific messages to the generic not-found error
		if isErrBlockNotFound(res.Error) {
			return nil, bchain.ErrBlockNotFound
		}
		return nil, errors.Annotatef(res.Error, "hash %v", hash)
	}
	return hex.DecodeString(res.Result)
}
// GetBlockList returns block with given hash by downloading block
// transactions one by one.
// Each transaction is fetched with a separate GetTransaction call, so this
// is slower than GetBlock but does not need a local block parser.
func (b *BCashRPC) GetBlockList(hash string) (*bchain.Block, error) {
	glog.V(1).Info("rpc: getblock (verbose=1) ", hash)
	res := resGetBlockThin{}
	req := cmdGetBlock{Method: "getblock"}
	req.Params.BlockHash = hash
	req.Params.Verbose = true
	err := b.Call(&req, &res)
	if err != nil {
		return nil, errors.Annotatef(err, "hash %v", hash)
	}
	if res.Error != nil {
		if isErrBlockNotFound(res.Error) {
			return nil, bchain.ErrBlockNotFound
		}
		return nil, errors.Annotatef(res.Error, "hash %v", hash)
	}
	txs := make([]bchain.Tx, len(res.Result.Txids))
	for i, txid := range res.Result.Txids {
		tx, err := b.GetTransaction(txid)
		if err != nil {
			// any single failed tx aborts the whole block download
			return nil, err
		}
		txs[i] = *tx
	}
	block := &bchain.Block{
		BlockHeader: res.Result.BlockHeader,
		Txs:         txs,
	}
	return block, nil
}
// GetBlockFull returns block with given hash.
// Not supported by the BCH backend; callers must use GetBlock/GetBlockList.
func (b *BCashRPC) GetBlockFull(hash string) (*bchain.Block, error) {
	return nil, errors.New("Not implemented")
}
// EstimateSmartFee returns fee estimation.
// The conservative parameter is accepted for interface compatibility but is
// not passed to the BCH daemon (its estimatesmartfee does not support it).
func (b *BCashRPC) EstimateSmartFee(blocks int, conservative bool) (float64, error) {
	glog.V(1).Info("rpc: estimatesmartfee ", blocks)
	res := resEstimateSmartFee{}
	req := cmdEstimateSmartFee{Method: "estimatesmartfee"}
	req.Params.Blocks = blocks
	// conservative param is omitted
	err := b.Call(&req, &res)
	if err != nil {
		return 0, err
	}
	if res.Error != nil {
		return 0, res.Error
	}
	return res.Result.Feerate, nil
}
// isErrBlockNotFound reports whether the RPC error message denotes a
// missing block (the daemon uses two different texts for this).
func isErrBlockNotFound(err *bchain.RPCError) bool {
	switch err.Message {
	case "Block not found", "Block height out of range":
		return true
	}
	return false
}

View File

@@ -2,6 +2,7 @@ package coins
import (
"blockbook/bchain"
"blockbook/bchain/coins/bch"
"blockbook/bchain/coins/btc"
"blockbook/bchain/coins/eth"
"blockbook/bchain/coins/zec"
@@ -25,6 +26,8 @@ func init() {
blockChainFactories["zec"] = zec.NewZCashRPC
blockChainFactories["eth"] = eth.NewEthereumRPC
blockChainFactories["eth-testnet"] = eth.NewEthereumRPC
blockChainFactories["bch"] = bch.NewBCashRPC
blockChainFactories["bch-testnet"] = bch.NewBCashRPC
}
// NewBlockChain creates bchain.BlockChain of type defined by parameter coin
@@ -82,6 +85,10 @@ func (c *blockChainWithMetrics) GetNetworkName() string {
return c.b.GetNetworkName()
}
func (c *blockChainWithMetrics) GetSubversion() string {
return c.b.GetSubversion()
}
func (c *blockChainWithMetrics) GetBestBlockHash() (v string, err error) {
defer func(s time.Time) { c.observeRPCLatency("GetBestBlockHash", s, err) }(time.Now())
return c.b.GetBestBlockHash()
@@ -142,10 +149,6 @@ func (c *blockChainWithMetrics) GetMempoolTransactions(address string) (v []stri
return c.b.GetMempoolTransactions(address)
}
func (c *blockChainWithMetrics) GetMempoolSpentOutput(outputTxid string, vout uint32) (v string) {
return c.b.GetMempoolSpentOutput(outputTxid, vout)
}
func (c *blockChainWithMetrics) GetMempoolEntry(txid string) (v *bchain.MempoolEntry, err error) {
defer func(s time.Time) { c.observeRPCLatency("GetMempoolEntry", s, err) }(time.Now())
return c.b.GetMempoolEntry(txid)

View File

@@ -169,5 +169,12 @@ func (p *BitcoinParser) UnpackTx(buf []byte) (*bchain.Tx, uint32, error) {
return nil, 0, err
}
tx.Blocktime = bt
for i, vout := range tx.Vout {
if len(vout.ScriptPubKey.Addresses) == 1 {
tx.Vout[i].Address = bchain.NewBaseAddress(vout.ScriptPubKey.Addresses[0])
}
}
return tx, height, nil
}

View File

@@ -135,6 +135,7 @@ var testTx1 = bchain.Tx{
"3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK",
},
},
Address: bchain.NewBaseAddress("3AZKvpKhSh1o8t1QrX3UeXG9d2BhCRnbcK"),
},
},
}
@@ -165,6 +166,7 @@ var testTx2 = bchain.Tx{
"2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu",
},
},
Address: bchain.NewBaseAddress("2NByHN6A8QYkBATzxf4pRGbCSHD5CEN2TRu"),
},
{
Value: 9.20081157,
@@ -175,6 +177,7 @@ var testTx2 = bchain.Tx{
"2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D",
},
},
Address: bchain.NewBaseAddress("2MvZguYaGjM7JihBgNqgLF2Ca2Enb76Hj9D"),
},
},
}

View File

@@ -29,6 +29,7 @@ type BitcoinRPC struct {
Mempool *bchain.UTXOMempool
ParseBlocks bool
mq *bchain.MQ
Subversion string
}
type configuration struct {
@@ -38,6 +39,7 @@ type configuration struct {
RPCTimeout int `json:"rpcTimeout"`
Parse bool `json:"parse"`
ZeroMQBinding string `json:"zeroMQBinding"`
Subversion string `json:"subversion"`
}
// NewBitcoinRPC returns new BitcoinRPC instance.
@@ -60,6 +62,7 @@ func NewBitcoinRPC(config json.RawMessage, pushHandler func(bchain.NotificationT
user: c.RPCUser,
password: c.RPCPass,
ParseBlocks: c.Parse,
Subversion: c.Subversion,
}
mq, err := bchain.NewMQ(c.ZeroMQBinding, pushHandler)
@@ -119,6 +122,10 @@ func (b *BitcoinRPC) GetNetworkName() string {
return b.Network
}
// GetSubversion returns the subversion string taken from the configuration.
func (b *BitcoinRPC) GetSubversion() string {
	return b.Subversion
}
// getblockhash
type cmdGetBlockHash struct {
@@ -568,11 +575,6 @@ func (b *BitcoinRPC) GetMempoolTransactions(address string) ([]string, error) {
return b.Mempool.GetTransactions(address)
}
// GetMempoolSpentOutput returns transaction in mempool which spends given outpoint
func (b *BitcoinRPC) GetMempoolSpentOutput(outputTxid string, vout uint32) string {
return b.Mempool.GetSpentOutput(outputTxid, vout)
}
// EstimateSmartFee returns fee estimation.
func (b *BitcoinRPC) EstimateSmartFee(blocks int, conservative bool) (float64, error) {
glog.V(1).Info("rpc: estimatesmartfee ", blocks)

View File

@@ -272,3 +272,9 @@ func (p *EthereumParser) UnpackBlockHash(buf []byte) (string, error) {
func (p *EthereumParser) IsUTXOChain() bool {
return false
}
// KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column
// do not use the blockaddresses for eth
func (p *EthereumParser) KeepBlockAddresses() int {
	return 0
}

View File

@@ -240,6 +240,10 @@ func (b *EthereumRPC) GetNetworkName() string {
return b.Network
}
// GetSubversion returns empty string, ethereum does not report a subversion.
func (b *EthereumRPC) GetSubversion() string {
	return ""
}
func (b *EthereumRPC) getBestHeader() (*ethtypes.Header, error) {
b.bestHeaderMu.Lock()
defer b.bestHeaderMu.Unlock()
@@ -482,10 +486,6 @@ func (b *EthereumRPC) GetMempoolTransactions(address string) ([]string, error) {
return b.Mempool.GetTransactions(address)
}
func (b *EthereumRPC) GetMempoolSpentOutput(outputTxid string, vout uint32) string {
return ""
}
func (b *EthereumRPC) GetMempoolEntry(txid string) (*bchain.MempoolEntry, error) {
return nil, errors.New("GetMempoolEntry: not implemented")
}

View File

@@ -33,6 +33,7 @@ var testTx1 = bchain.Tx{
"t1Y4yL14ACHaAbjemkdpW7nYNHWnv1yQbDA",
},
},
Address: bchain.NewBaseAddress("t1Y4yL14ACHaAbjemkdpW7nYNHWnv1yQbDA"),
},
},
}
@@ -65,6 +66,7 @@ var testTx2 = bchain.Tx{
"t1VmHTTwpEtwvojxodN2CSQqLYi1hzY3cAq",
},
},
Address: bchain.NewBaseAddress("t1VmHTTwpEtwvojxodN2CSQqLYi1hzY3cAq"),
},
{
Value: .1,
@@ -75,6 +77,7 @@ var testTx2 = bchain.Tx{
"t1ecxMXpphUTRQXGLXnVhJ6ucqD3DZipddg",
},
},
Address: bchain.NewBaseAddress("t1ecxMXpphUTRQXGLXnVhJ6ucqD3DZipddg"),
},
},
}

View File

@@ -22,13 +22,13 @@ func NewNonUTXOMempool(chain BlockChain) *NonUTXOMempool {
// GetTransactions returns slice of mempool transactions for given address
func (m *NonUTXOMempool) GetTransactions(address string) ([]string, error) {
m.mux.Lock()
defer m.mux.Unlock()
parser := m.chain.GetChainParser()
addrID, err := parser.GetAddrIDFromAddress(address)
if err != nil {
return nil, err
}
m.mux.Lock()
defer m.mux.Unlock()
outpoints := m.addrIDToTx[string(addrID)]
txs := make([]string, 0, len(outpoints))
for _, o := range outpoints {
@@ -55,8 +55,9 @@ func (m *NonUTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) erro
return err
}
parser := m.chain.GetChainParser()
newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+1)
newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+1)
// allocate slightly larger capacity of the maps
newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+5)
newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+5)
for _, txid := range txs {
io, exists := m.txToInputOutput[txid]
if !exists {

View File

@@ -7,7 +7,7 @@ import (
"github.com/golang/glog"
)
// addrIndex and outpoint are used also in nonutxo mempool
// addrIndex and outpoint are used also in non utxo mempool
type addrIndex struct {
addrID string
n int32
@@ -18,58 +18,41 @@ type outpoint struct {
vout int32
}
type inputOutput struct {
outputs []addrIndex
inputs []outpoint
}
// UTXOMempool is mempool handle.
type UTXOMempool struct {
chain BlockChain
mux sync.Mutex
txToInputOutput map[string]inputOutput
txToInputOutput map[string][]addrIndex
addrIDToTx map[string][]outpoint
inputs map[outpoint]string
}
// NewMempool creates new mempool handler.
// NewUTXOMempool creates new mempool handler.
func NewUTXOMempool(chain BlockChain) *UTXOMempool {
return &UTXOMempool{chain: chain}
}
// GetTransactions returns slice of mempool transactions for given address
func (m *UTXOMempool) GetTransactions(address string) ([]string, error) {
m.mux.Lock()
defer m.mux.Unlock()
parser := m.chain.GetChainParser()
addrID, err := parser.GetAddrIDFromAddress(address)
if err != nil {
return nil, err
}
m.mux.Lock()
defer m.mux.Unlock()
outpoints := m.addrIDToTx[string(addrID)]
txs := make([]string, 0, len(outpoints)+len(outpoints)/2)
txs := make([]string, 0, len(outpoints))
for _, o := range outpoints {
txs = append(txs, o.txid)
i := m.inputs[o]
if i != "" {
txs = append(txs, i)
}
}
return txs, nil
}
// GetSpentOutput returns transaction which spends given outpoint
func (m *UTXOMempool) GetSpentOutput(outputTxid string, vout uint32) string {
o := outpoint{txid: outputTxid, vout: int32(vout)}
return m.inputs[o]
}
func (m *UTXOMempool) updateMappings(newTxToInputOutput map[string]inputOutput, newAddrIDToTx map[string][]outpoint, newInputs map[outpoint]string) {
func (m *UTXOMempool) updateMappings(newTxToInputOutput map[string][]addrIndex, newAddrIDToTx map[string][]outpoint) {
m.mux.Lock()
defer m.mux.Unlock()
m.txToInputOutput = newTxToInputOutput
m.addrIDToTx = newAddrIDToTx
m.inputs = newInputs
}
// Resync gets mempool transactions and maps outputs to transactions.
@@ -83,9 +66,9 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error {
return err
}
parser := m.chain.GetChainParser()
newTxToInputOutput := make(map[string]inputOutput, len(m.txToInputOutput)+1)
newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+1)
newInputs := make(map[outpoint]string, len(m.inputs)+1)
// allocate slightly larger capacity of the maps
newTxToInputOutput := make(map[string][]addrIndex, len(m.txToInputOutput)+5)
newAddrIDToTx := make(map[string][]outpoint, len(m.addrIDToTx)+5)
for _, txid := range txs {
io, exists := m.txToInputOutput[txid]
if !exists {
@@ -94,7 +77,7 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error {
glog.Error("cannot get transaction ", txid, ": ", err)
continue
}
io.outputs = make([]addrIndex, 0, len(tx.Vout))
io = make([]addrIndex, 0, len(tx.Vout)+len(tx.Vin))
for _, output := range tx.Vout {
addrID, err := parser.GetAddrIDFromVout(&output)
if err != nil {
@@ -102,29 +85,40 @@ func (m *UTXOMempool) Resync(onNewTxAddr func(txid string, addr string)) error {
continue
}
if len(addrID) > 0 {
io.outputs = append(io.outputs, addrIndex{string(addrID), int32(output.N)})
io = append(io, addrIndex{string(addrID), int32(output.N)})
}
if onNewTxAddr != nil && len(output.ScriptPubKey.Addresses) == 1 {
onNewTxAddr(tx.Txid, output.ScriptPubKey.Addresses[0])
}
}
io.inputs = make([]outpoint, 0, len(tx.Vin))
for _, input := range tx.Vin {
if input.Coinbase != "" {
continue
}
io.inputs = append(io.inputs, outpoint{input.Txid, int32(input.Vout)})
// TODO - possibly get from DB unspenttxs - however some output txs can be in mempool only
itx, err := m.chain.GetTransaction(input.Txid)
if err != nil {
glog.Error("cannot get transaction ", input.Txid, ": ", err)
continue
}
if int(input.Vout) >= len(itx.Vout) {
glog.Error("Vout len in transaction ", input.Txid, " ", len(itx.Vout), " input.Vout=", input.Vout)
continue
}
addrID, err := parser.GetAddrIDFromVout(&itx.Vout[input.Vout])
if err != nil {
glog.Error("error in addrID in ", input.Txid, " ", input.Vout, ": ", err)
continue
}
io = append(io, addrIndex{string(addrID), int32(^input.Vout)})
}
}
newTxToInputOutput[txid] = io
for _, si := range io.outputs {
for _, si := range io {
newAddrIDToTx[si.addrID] = append(newAddrIDToTx[si.addrID], outpoint{txid, si.n})
}
for _, i := range io.inputs {
newInputs[i] = txid
}
}
m.updateMappings(newTxToInputOutput, newAddrIDToTx, newInputs)
m.updateMappings(newTxToInputOutput, newAddrIDToTx)
glog.Info("Mempool: resync finished in ", time.Since(start), ", ", len(m.txToInputOutput), " transactions in mempool")
return nil
}

View File

@@ -62,13 +62,19 @@ func NewMQ(binding string, callback func(NotificationType)) (*MQ, error) {
}
func (mq *MQ) run(callback func(NotificationType)) {
defer func() {
if r := recover(); r != nil {
glog.Error("MQ loop recovered from ", r)
}
mq.isRunning = false
close(mq.finished)
glog.Info("MQ loop terminated")
}()
mq.isRunning = true
for {
msg, err := mq.socket.RecvMessageBytes(0)
if err != nil {
if zmq.AsErrno(err) == zmq.Errno(zmq.ETERM) || err.Error() == "Socket is closed" {
close(mq.finished)
glog.Info("MQ loop terminated")
break
}
glog.Error("MQ RecvMessageBytes error ", err, ", ", zmq.AsErrno(err))
@@ -96,7 +102,6 @@ func (mq *MQ) run(callback func(NotificationType)) {
callback(nt)
}
}
mq.isRunning = false
}
// Shutdown stops listening to the ZeroMQ and closes the connection

View File

@@ -14,6 +14,9 @@ var (
// ErrAddressMissing is returned if address is not specified
// for example To address in ethereum can be missing in case of contract transaction
ErrAddressMissing = errors.New("Address missing")
// ErrTxidMissing is returned if txid is not specified
// for example coinbase transactions in Bitcoin
ErrTxidMissing = errors.New("Txid missing")
)
type ScriptSig struct {
@@ -37,10 +40,25 @@ type ScriptPubKey struct {
Addresses []string `json:"addresses,omitempty"`
}
type AddressFormat = uint8
const (
DefaultAddress AddressFormat = iota
BCashAddress
)
type Address interface {
String() string
EncodeAddress(format AddressFormat) (string, error)
AreEqual(addr string) (bool, error)
InSlice(addrs []string) (bool, error)
}
type Vout struct {
Value float64 `json:"value"`
N uint32 `json:"n"`
ScriptPubKey ScriptPubKey `json:"scriptPubKey"`
Address Address
}
// Tx is blockchain transaction
@@ -108,6 +126,7 @@ type BlockChain interface {
// chain info
IsTestnet() bool
GetNetworkName() string
GetSubversion() string
// requests
GetBestBlockHash() (string, error)
GetBestBlockHeight() (uint32, error)
@@ -122,7 +141,6 @@ type BlockChain interface {
// mempool
ResyncMempool(onNewTxAddr func(txid string, addr string)) error
GetMempoolTransactions(address string) ([]string, error)
GetMempoolSpentOutput(outputTxid string, vout uint32) string
GetMempoolEntry(txid string) (*MempoolEntry, error)
// parser
GetChainParser() BlockChainParser
@@ -134,6 +152,10 @@ type BlockChainParser interface {
// UTXO chains need "inputs" column in db, that map transactions to transactions that spend them
// non UTXO chains have mapping of address to input and output transactions directly in "outputs" column in db
IsUTXOChain() bool
// KeepBlockAddresses returns number of blocks which are to be kept in blockaddresses column
// and used in case of fork
// if 0 the blockaddresses column is not used at all (usually non UTXO chains)
KeepBlockAddresses() int
// address id conversions
GetAddrIDFromVout(output *Vout) ([]byte, error)
GetAddrIDFromAddress(address string) ([]byte, error)

View File

@@ -62,7 +62,7 @@ var (
explorerURL = flag.String("explorer", "", "address of blockchain explorer")
coin = flag.String("coin", "btc", "coin name (default btc)")
coin = flag.String("coin", "btc", "coin name")
)
var (
@@ -235,8 +235,8 @@ func main() {
return
}
} else if !*synchronize {
if err = syncWorker.ConnectBlocksParallelInChunks(height, until); err != nil {
glog.Error("connectBlocksParallelInChunks ", err)
if err = syncWorker.ConnectBlocksParallel(height, until); err != nil {
glog.Error("connectBlocksParallel ", err)
return
}
}

View File

@@ -9,9 +9,15 @@ build-debug: prepare-sources
chown $(PACKAGER) /out/blockbook
test: prepare-sources
cd $(GOPATH)/src/blockbook && go test -v ./...
#cd $(GOPATH)/src/blockbook && go test -short ./... # FIXME
cd $(GOPATH)/src/blockbook && go test -short ./bchain/coins/btc ./bchain/coins/bch ./bchain/coins/eth ./bchain/coins/zec
test-all: prepare-sources
# cd $(GOPATH)/src/blockbook && go test ./... # FIXME
cd $(GOPATH)/src/blockbook && go test ./bchain/coins/btc ./bchain/coins/bch ./bchain/coins/eth ./bchain/coins/zec
prepare-sources:
@ [ -n "`ls /src 2> /dev/null`" ] || (echo "/src doesn't exist or is empty" 1>&2 && exit 1)
cp -r /src $(GOPATH)/src/blockbook
rm -rf $(GOPATH)/src/blockbook/vendor
cd $(GOPATH)/src/blockbook && dep ensure -vendor-only

View File

@@ -3,7 +3,8 @@ set -e
cp -r /src/build/deb/debian .
cp -r /src/configs .
mkdir server && cp -r /src/server/testcert.* /src/server/static server
cp -r /src/static static
mkdir cert && cp /src/server/testcert.* cert
dpkg-buildpackage -us -uc
mv ../*.deb /out

View File

@@ -0,0 +1 @@
/opt/blockbook/bch-testnet/config/blockchaincfg.json

View File

@@ -0,0 +1,2 @@
#!/bin/sh
find /opt/blockbook/bch-testnet/logs -mtime +6 -type f -delete

View File

@@ -0,0 +1,2 @@
/data/bch-testnet/blockbook
/opt/blockbook/bch-testnet/logs

View File

@@ -0,0 +1,5 @@
#!/usr/bin/dh-exec
blockbook /opt/blockbook/bch-testnet/bin
cert /opt/blockbook/bch-testnet
static /opt/blockbook/bch-testnet
configs/bch-testnet.json => /opt/blockbook/bch-testnet/config/blockchaincfg.json

View File

@@ -0,0 +1,2 @@
/opt/blockbook/bch-testnet/cert/testcert.crt /opt/blockbook/bch-testnet/cert/blockbook.crt
/opt/blockbook/bch-testnet/cert/testcert.key /opt/blockbook/bch-testnet/cert/blockbook.key

View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
case "$1" in
configure)
if ! id -u blockbook &> /dev/null
then
useradd --system -M -U blockbook
fi
for dir in /data/bch-testnet/blockbook /opt/blockbook/bch-testnet/logs
do
if [ "$(stat -c '%U' $dir)" != "blockbook" ]
then
chown -R blockbook:blockbook $dir
fi
done
;;
esac
#DEBHELPER#

View File

@@ -0,0 +1,39 @@
# It is not recommended to modify this file in-place, because it will
# be overwritten during package upgrades. If you want to add further
# options or overwrite existing ones then use
# $ systemctl edit blockbook-bch-testnet.service
# See "man systemd.service" for details.
[Unit]
Description=Blockbook daemon (BCH testnet)
After=network.target
[Service]
ExecStart=/opt/blockbook/bch-testnet/bin/blockbook -coin=bch-testnet -blockchaincfg=/opt/blockbook/bch-testnet/config/blockchaincfg.json -datadir=/data/bch-testnet/blockbook/db -sync -httpserver=:18435 -socketio=:18436 -certfile=/opt/blockbook/bch-testnet/cert/blockbook -explorer=https://bch-bitcore1.trezor.io/ -log_dir=/opt/blockbook/bch-testnet/logs
User=blockbook
Type=simple
Restart=on-failure
WorkingDirectory=/opt/blockbook/bch-testnet
# Hardening measures
####################
# Provide a private /tmp and /var/tmp.
PrivateTmp=true
# Mount /usr, /boot/ and /etc read-only for the process.
ProtectSystem=full
# Disallow the process and all of its children to gain
# new privileges through execve().
NoNewPrivileges=true
# Use a new /dev namespace only populated with API pseudo devices
# such as /dev/null, /dev/zero and /dev/random.
PrivateDevices=true
# Deny the creation of writable and executable memory mappings.
MemoryDenyWriteExecute=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1 @@
/opt/blockbook/bch/config/blockchaincfg.json

View File

@@ -0,0 +1,2 @@
#!/bin/sh
find /opt/blockbook/bch/logs -mtime +6 -type f -delete

View File

@@ -0,0 +1,2 @@
/data/bch/blockbook
/opt/blockbook/bch/logs

View File

@@ -0,0 +1,5 @@
#!/usr/bin/dh-exec
blockbook /opt/blockbook/bch/bin
cert /opt/blockbook/bch
static /opt/blockbook/bch
configs/bch.json => /opt/blockbook/bch/config/blockchaincfg.json

View File

@@ -0,0 +1,2 @@
/opt/blockbook/bch/cert/testcert.crt /opt/blockbook/bch/cert/blockbook.crt
/opt/blockbook/bch/cert/testcert.key /opt/blockbook/bch/cert/blockbook.key

View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
case "$1" in
configure)
if ! id -u blockbook &> /dev/null
then
useradd --system -M -U blockbook
fi
for dir in /data/bch/blockbook /opt/blockbook/bch/logs
do
if [ "$(stat -c '%U' $dir)" != "blockbook" ]
then
chown -R blockbook:blockbook $dir
fi
done
;;
esac
#DEBHELPER#

View File

@@ -0,0 +1,39 @@
# It is not recommended to modify this file in-place, because it will
# be overwritten during package upgrades. If you want to add further
# options or overwrite existing ones then use
# $ systemctl edit blockbook-bch.service
# See "man systemd.service" for details.
[Unit]
Description=Blockbook daemon (BCH mainnet)
After=network.target
[Service]
ExecStart=/opt/blockbook/bch/bin/blockbook -coin=bch -blockchaincfg=/opt/blockbook/bch/config/blockchaincfg.json -datadir=/data/bch/blockbook/db -sync -httpserver=:8435 -socketio=:8436 -certfile=/opt/blockbook/bch/cert/blockbook -explorer=https://bitcore1.trezor.io/ -log_dir=/opt/blockbook/bch/logs
User=blockbook
Type=simple
Restart=on-failure
WorkingDirectory=/opt/blockbook/bch
# Hardening measures
####################
# Provide a private /tmp and /var/tmp.
PrivateTmp=true
# Mount /usr, /boot/ and /etc read-only for the process.
ProtectSystem=full
# Disallow the process and all of its children to gain
# new privileges through execve().
NoNewPrivileges=true
# Use a new /dev namespace only populated with API pseudo devices
# such as /dev/null, /dev/zero and /dev/random.
PrivateDevices=true
# Deny the creation of writable and executable memory mappings.
MemoryDenyWriteExecute=true
[Install]
WantedBy=multi-user.target

View File

@@ -1,5 +1,5 @@
#!/usr/bin/dh-exec
blockbook /opt/blockbook/btc-testnet/bin
server/testcert.* /opt/blockbook/btc-testnet/cert
server/static /opt/blockbook/btc-testnet
cert /opt/blockbook/btc-testnet
static /opt/blockbook/btc-testnet
configs/btc-testnet.json => /opt/blockbook/btc-testnet/config/blockchaincfg.json

View File

@@ -1,5 +1,5 @@
#!/usr/bin/dh-exec
blockbook /opt/blockbook/btc/bin
server/testcert.* /opt/blockbook/btc/cert
server/static /opt/blockbook/btc
cert /opt/blockbook/btc
static /opt/blockbook/btc
configs/btc.json => /opt/blockbook/btc/config/blockchaincfg.json

View File

@@ -1,5 +1,5 @@
#!/usr/bin/dh-exec --with=install
blockbook /opt/blockbook/zec/bin
server/testcert.* /opt/blockbook/zec/cert
server/static /opt/blockbook/zec
cert /opt/blockbook/zec
static /opt/blockbook/zec
configs/zec.json => /opt/blockbook/zec/config/blockchaincfg.json

View File

@@ -8,14 +8,24 @@ Standards-Version: 3.9.5
Package: blockbook-btc
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils
Description: Satoshilabs blockbook server
Description: Satoshilabs blockbook server (Bitcoin mainnet)
Package: blockbook-btc-testnet
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils
Description: Satoshilabs blockbook server
Description: Satoshilabs blockbook server (Bitcoin testnet)
Package: blockbook-zec
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils
Description: Satoshilabs blockbook server
Description: Satoshilabs blockbook server (ZCash mainnet)
Package: blockbook-bch
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils
Description: Satoshilabs blockbook server (Bitcoin Cash mainnet)
Package: blockbook-bch-testnet
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, coreutils, passwd, findutils
Description: Satoshilabs blockbook server (Bitcoin Cash testnet)

9
configs/bch-testnet.json Normal file
View File

@@ -0,0 +1,9 @@
{
"rpcURL": "http://localhost:18432",
"rpcUser": "rpc",
"rpcPass": "rpc",
"rpcTimeout": 25,
"parse": true,
"zeroMQBinding": "tcp://127.0.0.1:18434",
"subversion": "/Bitcoin ABC:0.17.0(EB8.0; bitcore-sl)/"
}

9
configs/bch.json Normal file
View File

@@ -0,0 +1,9 @@
{
"rpcURL": "http://127.0.0.1:8432",
"rpcUser": "rpc",
"rpcPass": "rpc",
"rpcTimeout": 25,
"parse": true,
"zeroMQBinding": "tcp://127.0.0.1:8434",
"subversion": "/Bitcoin ABC:0.17.0(EB8.0; bitcore-sl)/"
}

View File

@@ -1,4 +1,4 @@
TARGETS = bitcoin zcash
TARGETS = bitcoin zcash bcash
IMAGE = blockbook-backend-build-deb
NO_CACHE = false

View File

@@ -0,0 +1,14 @@
BITCOINABC_VERSION := 0.17.0
all:
wget https://download.bitcoinabc.org/0.17.0/linux/bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz
tar -xf bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz
mv bitcoin-abc-${BITCOINABC_VERSION} bitcoin-abc
rm bitcoin-abc/bin/bitcoin-qt
rm bitcoin-abc/bin/bitcoin-tx
rm bitcoin-abc/bin/bitcoin-seeder
rm bitcoin-abc/bin/test_bitcoin
clean:
rm -rf bitcoin-abc
rm -f bitcoin-abc-${BITCOINABC_VERSION}-x86_64-linux-gnu.tar.gz*

View File

@@ -0,0 +1,13 @@
daemon=1
server=1
testnet=1
nolisten=1
rpcuser=rpc
rpcpassword=rpc
rpcport=18432
txindex=1
rpcworkqueue=32
zmqpubhashtx=tcp://127.0.0.1:18434
zmqpubhashblock=tcp://127.0.0.1:18434
zmqpubrawblock=tcp://127.0.0.1:18434
zmqpubrawtx=tcp://127.0.0.1:18434

View File

@@ -0,0 +1,12 @@
daemon=1
server=1
nolisten=1
rpcuser=rpc
rpcpassword=rpc
rpcport=8432
txindex=1
rpcworkqueue=32
zmqpubhashtx=tcp://127.0.0.1:8434
zmqpubhashblock=tcp://127.0.0.1:8434
zmqpubrawblock=tcp://127.0.0.1:8434
zmqpubrawtx=tcp://127.0.0.1:8434

View File

@@ -0,0 +1 @@
/opt/bitcoin/bch/bch.conf

View File

@@ -0,0 +1 @@
/data/bch/bitcoin

View File

@@ -0,0 +1,2 @@
bitcoin-abc/* /opt/bitcoin/bch
bch.conf /opt/bitcoin/bch

View File

@@ -0,0 +1,10 @@
/data/bch/bitcoin/debug.log
/data/bch/bitcoin/db.log
{
rotate 7
daily
compress
missingok
notifempty
copytruncate
}

View File

@@ -0,0 +1,20 @@
#!/bin/bash
set -e
case "$1" in
configure)
if ! id -u bitcoin &> /dev/null
then
useradd --system -M -U bitcoin
fi
if [ "$(stat -c '%U' /data/bch/bitcoin)" != "bitcoin" ]
then
chown bitcoin:bitcoin /data/bch/bitcoin
fi
;;
esac
#DEBHELPER#

View File

@@ -0,0 +1,44 @@
# It is not recommended to modify this file in-place, because it will
# be overwritten during package upgrades. If you want to add further
# options or overwrite existing ones then use
# $ systemctl edit bcash-bch.service
# See "man systemd.service" for details.
# Note that almost all daemon options could be specified in
# /opt/bitcoin/bch/bch.conf
[Unit]
Description=Bitcoin Cash daemon (mainnet)
After=network.target
[Service]
ExecStart=/opt/bitcoin/bch/bin/bitcoind -datadir=/data/bch/bitcoin -conf=/opt/bitcoin/bch/bch.conf -pid=/run/bitcoind/bch.pid
# Creates /run/bitcoind owned by bitcoin
RuntimeDirectory=bitcoind
User=bitcoin
Type=forking
PIDFile=/run/bitcoind/bch.pid
Restart=on-failure
# Hardening measures
####################
# Provide a private /tmp and /var/tmp.
PrivateTmp=true
# Mount /usr, /boot/ and /etc read-only for the process.
ProtectSystem=full
# Disallow the process and all of its children to gain
# new privileges through execve().
NoNewPrivileges=true
# Use a new /dev namespace only populated with API pseudo devices
# such as /dev/null, /dev/zero and /dev/random.
PrivateDevices=true
# Deny the creation of writable and executable memory mappings.
MemoryDenyWriteExecute=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1 @@
/opt/bitcoin/bch-testnet/bch-testnet.conf

View File

@@ -0,0 +1 @@
/data/bch-testnet/bitcoin

View File

@@ -0,0 +1,2 @@
bitcoin-abc/* /opt/bitcoin/bch-testnet
bch-testnet.conf /opt/bitcoin/bch-testnet

View File

@@ -0,0 +1,10 @@
/data/bch-testnet/bitcoin/testnet3/debug.log
/data/bch-testnet/bitcoin/testnet3/db.log
{
rotate 7
daily
compress
missingok
notifempty
copytruncate
}

View File

@@ -0,0 +1,20 @@
#!/bin/bash
set -e
case "$1" in
configure)
if ! id -u bitcoin &> /dev/null
then
useradd --system -M -U bitcoin
fi
if [ "$(stat -c '%U' /data/bch-testnet/bitcoin)" != "bitcoin" ]
then
chown bitcoin:bitcoin /data/bch-testnet/bitcoin
fi
;;
esac
#DEBHELPER#

View File

@@ -0,0 +1,44 @@
# It is not recommended to modify this file in-place, because it will
# be overwritten during package upgrades. If you want to add further
# options or overwrite existing ones then use
# $ systemctl edit bcash-testnet.service
# See "man systemd.service" for details.
# Note that almost all daemon options could be specified in
# /opt/bitcoin/bch-testnet/bch-testnet.conf
[Unit]
Description=Bitcoin Cash daemon (testnet)
After=network.target
[Service]
ExecStart=/opt/bitcoin/bch-testnet/bin/bitcoind -datadir=/data/bch-testnet/bitcoin -conf=/opt/bitcoin/bch-testnet/bch-testnet.conf -pid=/run/bitcoind/bch-testnet.pid
# Creates /run/bitcoind owned by bitcoin
RuntimeDirectory=bitcoind
User=bitcoin
Type=forking
PIDFile=/run/bitcoind/bch-testnet.pid
Restart=on-failure
# Hardening measures
####################
# Provide a private /tmp and /var/tmp.
PrivateTmp=true
# Mount /usr, /boot/ and /etc read-only for the process.
ProtectSystem=full
# Disallow the process and all of its children to gain
# new privileges through execve().
NoNewPrivileges=true
# Use a new /dev namespace only populated with API pseudo devices
# such as /dev/null, /dev/zero and /dev/random.
PrivateDevices=true
# Deny the creation of writable and executable memory mappings.
MemoryDenyWriteExecute=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,5 @@
bcash (0.17.0-satoshilabs1) unstable; urgency=medium
* Initial build
-- Jakub Matys <jakub.matys@satoshilabs.com> Fri, 13 Apr 2018 11:31:01 +0200

View File

@@ -0,0 +1 @@
9

View File

@@ -0,0 +1,16 @@
Source: bcash
Section: satoshilabs
Priority: optional
Maintainer: jakub.matys@satoshilabs.com
Build-Depends: debhelper, wget, tar, gzip, make, dh-systemd, dh-exec
Standards-Version: 3.9.5
Package: bcash-bch
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, logrotate
Description: Satoshilabs packaged bitcoin-cash server
Package: bcash-testnet
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, logrotate
Description: Satoshilabs packaged bitcoin-cash server

View File

@@ -0,0 +1,6 @@
#!/usr/bin/make -f
DH_VERBOSE = 1
%:
dh $@ --with=systemd

View File

@@ -5,14 +5,12 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
"math"
"os"
"path/filepath"
"github.com/juju/errors"
"github.com/bsm/go-vlq"
"github.com/golang/glog"
"github.com/juju/errors"
"github.com/tecbot/gorocksdb"
)
@@ -20,7 +18,9 @@ import (
// iterator creates snapshot, which takes lots of resources
// when doing huge scan, it is better to close it and reopen from time to time to free the resources
const disconnectBlocksRefreshIterator = uint64(1000000)
const packedHeightBytes = 4
// RepairRocksDB calls RocksDb db repair function
func RepairRocksDB(name string) error {
glog.Infof("rocksdb: repair")
opts := gorocksdb.NewDefaultOptions()
@@ -40,12 +40,13 @@ type RocksDB struct {
const (
cfDefault = iota
cfHeight
cfOutputs
cfInputs
cfAddresses
cfUnspentTxs
cfTransactions
cfBlockAddresses
)
var cfNames = []string{"default", "height", "outputs", "inputs", "transactions"}
var cfNames = []string{"default", "height", "addresses", "unspenttxs", "transactions", "blockaddresses"}
func openDB(path string) (*gorocksdb.DB, []*gorocksdb.ColumnFamilyHandle, error) {
c := gorocksdb.NewLRUCache(8 << 30) // 8GB
@@ -83,7 +84,7 @@ func openDB(path string) (*gorocksdb.DB, []*gorocksdb.ColumnFamilyHandle, error)
optsOutputs.SetMaxOpenFiles(25000)
optsOutputs.SetCompression(gorocksdb.NoCompression)
fcOptions := []*gorocksdb.Options{opts, opts, optsOutputs, opts, opts}
fcOptions := []*gorocksdb.Options{opts, opts, optsOutputs, opts, opts, opts}
db, cfh, err := gorocksdb.OpenDbColumnFamilies(opts, path, cfNames, fcOptions)
if err != nil {
@@ -147,27 +148,19 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f
return err
}
kstart, err := packOutputKey(addrID, lower)
if err != nil {
return err
}
kstop, err := packOutputKey(addrID, higher)
if err != nil {
return err
}
kstart := packAddressKey(addrID, lower)
kstop := packAddressKey(addrID, higher)
it := d.db.NewIteratorCF(d.ro, d.cfh[cfOutputs])
it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses])
defer it.Close()
isUTXO := d.chainParser.IsUTXOChain()
for it.Seek(kstart); it.Valid(); it.Next() {
key := it.Key().Data()
val := it.Value().Data()
if bytes.Compare(key, kstop) > 0 {
break
}
outpoints, err := d.unpackOutputValue(val)
outpoints, err := d.unpackOutpoints(val)
if err != nil {
return err
}
@@ -184,22 +177,12 @@ func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, f
vout = uint32(o.vout)
isOutput = true
}
if err := fn(o.txid, vout, isOutput); err != nil {
tx, err := d.chainParser.UnpackTxid(o.btxID)
if err != nil {
return err
}
if isUTXO {
stxid, so, err := d.GetSpentOutput(o.txid, o.vout)
if err != nil {
return err
}
if stxid != "" {
if glog.V(2) {
glog.Infof("rocksdb: input %s/%d: %s/%d", o.txid, o.vout, stxid, so)
}
if err := fn(stxid, uint32(so), false); err != nil {
return err
}
}
if err := fn(tx, vout, isOutput); err != nil {
return err
}
}
}
@@ -211,10 +194,12 @@ const (
opDelete = 1
)
// ConnectBlock indexes addresses in the block and stores them in db
func (d *RocksDB) ConnectBlock(block *bchain.Block) error {
return d.writeBlock(block, opInsert)
}
// DisconnectBlock removes addresses in the block from the db
func (d *RocksDB) DisconnectBlock(block *bchain.Block) error {
return d.writeBlock(block, opDelete)
}
@@ -237,11 +222,12 @@ func (d *RocksDB) writeBlock(block *bchain.Block, op int) error {
if err := d.writeHeight(wb, block, op); err != nil {
return err
}
if err := d.writeOutputs(wb, block, op, isUTXO); err != nil {
return err
}
if isUTXO {
if err := d.writeInputs(wb, block, op); err != nil {
if err := d.writeAddressesUTXO(wb, block, op); err != nil {
return err
}
} else {
if err := d.writeAddressesNonUTXO(wb, block, op); err != nil {
return err
}
}
@@ -249,40 +235,245 @@ func (d *RocksDB) writeBlock(block *bchain.Block, op int) error {
return d.db.Write(d.wo, wb)
}
// Output Index
// Addresses index
type outpoint struct {
txid string
vout int32
btxID []byte
vout int32
}
func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records map[string][]outpoint, addrID []byte, txid string, vout int32, bh uint32) error {
if len(addrID) > 0 {
if len(addrID) > 1024 {
glog.Infof("block %d, skipping addrID of length %d", bh, len(addrID))
} else {
strAddrID := string(addrID)
records[strAddrID] = append(records[strAddrID], outpoint{
txid: txid,
vout: vout,
})
if op == opDelete {
// remove transactions from cache
b, err := d.chainParser.PackTxid(txid)
func (d *RocksDB) packBlockAddress(addrID []byte, spentTxs map[string][]outpoint) []byte {
vBuf := make([]byte, vlq.MaxLen32)
vl := packVarint(int32(len(addrID)), vBuf)
blockAddress := append([]byte(nil), vBuf[:vl]...)
blockAddress = append(blockAddress, addrID...)
if spentTxs == nil {
} else {
addrUnspentTxs := spentTxs[string(addrID)]
vl = packVarint(int32(len(addrUnspentTxs)), vBuf)
blockAddress = append(blockAddress, vBuf[:vl]...)
buf := d.packOutpoints(addrUnspentTxs)
blockAddress = append(blockAddress, buf...)
}
return blockAddress
}
func (d *RocksDB) writeAddressRecords(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, addresses map[string][]outpoint, spentTxs map[string][]outpoint) error {
keep := d.chainParser.KeepBlockAddresses()
blockAddresses := make([]byte, 0)
for addrID, outpoints := range addresses {
baddrID := []byte(addrID)
key := packAddressKey(baddrID, block.Height)
switch op {
case opInsert:
val := d.packOutpoints(outpoints)
wb.PutCF(d.cfh[cfAddresses], key, val)
if keep > 0 {
// collect all addresses be stored in blockaddresses
// they are used in disconnect blocks
blockAddress := d.packBlockAddress(baddrID, spentTxs)
blockAddresses = append(blockAddresses, blockAddress...)
}
case opDelete:
wb.DeleteCF(d.cfh[cfAddresses], key)
}
}
if keep > 0 && op == opInsert {
// write new block address and txs spent in this block
key := packUint(block.Height)
wb.PutCF(d.cfh[cfBlockAddresses], key, blockAddresses)
// cleanup old block address
if block.Height > uint32(keep) {
for rh := block.Height - uint32(keep); rh < block.Height; rh-- {
key = packUint(rh)
val, err := d.db.GetCF(d.ro, d.cfh[cfBlockAddresses], key)
if err != nil {
return err
}
wb.DeleteCF(d.cfh[cfTransactions], b)
if val.Size() == 0 {
break
}
val.Free()
d.db.DeleteCF(d.wo, d.cfh[cfBlockAddresses], key)
}
}
}
return nil
}
func (d *RocksDB) writeOutputs(wb *gorocksdb.WriteBatch, block *bchain.Block, op int, isUTXO bool) error {
records := make(map[string][]outpoint)
func (d *RocksDB) addAddrIDToRecords(op int, wb *gorocksdb.WriteBatch, records map[string][]outpoint, addrID []byte, btxid []byte, vout int32, bh uint32) error {
if len(addrID) > 0 {
if len(addrID) > 1024 {
glog.Infof("rocksdb: block %d, skipping addrID of length %d", bh, len(addrID))
} else {
strAddrID := string(addrID)
records[strAddrID] = append(records[strAddrID], outpoint{
btxID: btxid,
vout: vout,
})
if op == opDelete {
// remove transactions from cache
wb.DeleteCF(d.cfh[cfTransactions], btxid)
}
}
}
return nil
}
func (d *RocksDB) getUnspentTx(btxID []byte) ([]byte, error) {
// find it in db, in the column cfUnspentTxs
val, err := d.db.GetCF(d.ro, d.cfh[cfUnspentTxs], btxID)
if err != nil {
return nil, err
}
defer val.Free()
data := append([]byte(nil), val.Data()...)
return data, nil
}
func appendPackedAddrID(txAddrs []byte, addrID []byte, n uint32, remaining int) []byte {
// resize the addr buffer if necessary by a new estimate
if cap(txAddrs)-len(txAddrs) < 2*vlq.MaxLen32+len(addrID) {
txAddrs = append(txAddrs, make([]byte, vlq.MaxLen32+len(addrID)+remaining*32)...)[:len(txAddrs)]
}
// addrID is packed as number of bytes of the addrID + bytes of addrID + vout
lv := packVarint(int32(len(addrID)), txAddrs[len(txAddrs):len(txAddrs)+vlq.MaxLen32])
txAddrs = txAddrs[:len(txAddrs)+lv]
txAddrs = append(txAddrs, addrID...)
lv = packVarint(int32(n), txAddrs[len(txAddrs):len(txAddrs)+vlq.MaxLen32])
txAddrs = txAddrs[:len(txAddrs)+lv]
return txAddrs
}
func findAndRemoveUnspentAddr(unspentAddrs []byte, vout uint32) ([]byte, []byte) {
// the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints
for i := 0; i < len(unspentAddrs); {
l, lv1 := unpackVarint(unspentAddrs[i:])
// index of vout of address in unspentAddrs
j := i + int(l) + lv1
if j >= len(unspentAddrs) {
glog.Error("rocksdb: Inconsistent data in unspentAddrs ", hex.EncodeToString(unspentAddrs), ", ", vout)
return nil, unspentAddrs
}
n, lv2 := unpackVarint(unspentAddrs[j:])
if uint32(n) == vout {
addrID := append([]byte(nil), unspentAddrs[i+lv1:j]...)
unspentAddrs = append(unspentAddrs[:i], unspentAddrs[j+lv2:]...)
return addrID, unspentAddrs
}
i = j + lv2
}
return nil, unspentAddrs
}
func (d *RocksDB) writeAddressesUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error {
if op == opDelete {
// block does not contain mapping tx-> input address, which is necessary to recreate
// unspentTxs; therefore it is not possible to DisconnectBlocks this way
return errors.New("DisconnectBlock is not supported for UTXO chains")
}
addresses := make(map[string][]outpoint)
unspentTxs := make(map[string][]byte)
thisBlockTxs := make(map[string]struct{})
btxIDs := make([][]byte, len(block.Txs))
// first process all outputs, build mapping of addresses to outpoints and mappings of unspent txs to addresses
for txi, tx := range block.Txs {
btxID, err := d.chainParser.PackTxid(tx.Txid)
if err != nil {
return err
}
btxIDs[txi] = btxID
// preallocate estimated size of addresses (32 bytes is 1 byte length of addrID, 25 bytes addrID, 1-2 bytes vout and reserve)
txAddrs := make([]byte, 0, len(tx.Vout)*32)
for i, output := range tx.Vout {
addrID, err := d.chainParser.GetAddrIDFromVout(&output)
if err != nil {
// do not log ErrAddressMissing, transactions can be without to address (for example eth contracts)
if err != bchain.ErrAddressMissing {
glog.Warningf("rocksdb: addrID: %v - height %d, tx %v, output %v", err, block.Height, tx.Txid, output)
}
continue
}
err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(output.N), block.Height)
if err != nil {
return err
}
txAddrs = appendPackedAddrID(txAddrs, addrID, output.N, len(tx.Vout)-i)
}
stxID := string(btxID)
unspentTxs[stxID] = txAddrs
thisBlockTxs[stxID] = struct{}{}
}
// locate addresses spent by this tx and remove them from unspent addresses
// keep them so that they be stored for DisconnectBlock functionality
spentTxs := make(map[string][]outpoint)
for txi, tx := range block.Txs {
spendingTxid := btxIDs[txi]
for i, input := range tx.Vin {
btxID, err := d.chainParser.PackTxid(input.Txid)
if err != nil {
// do not process inputs without input txid
if err == bchain.ErrTxidMissing {
continue
}
return err
}
// find the tx in current block or already processed
stxID := string(btxID)
unspentAddrs, exists := unspentTxs[stxID]
if !exists {
// else find it in previous blocks
unspentAddrs, err = d.getUnspentTx(btxID)
if err != nil {
return err
}
if unspentAddrs == nil {
glog.Warningf("rocksdb: height %d, tx %v, input tx %v vin %v %v missing in unspentTxs", block.Height, tx.Txid, input.Txid, input.Vout, i)
continue
}
}
var addrID []byte
addrID, unspentAddrs = findAndRemoveUnspentAddr(unspentAddrs, input.Vout)
if addrID == nil {
glog.Warningf("rocksdb: height %d, tx %v, input tx %v vin %v %v not found in unspentAddrs", block.Height, tx.Txid, input.Txid, input.Vout, i)
continue
}
// record what was spent in this tx
// skip transactions that were created in this block
if _, exists := thisBlockTxs[stxID]; !exists {
saddrID := string(addrID)
rut := spentTxs[saddrID]
rut = append(rut, outpoint{btxID, int32(input.Vout)})
spentTxs[saddrID] = rut
}
err = d.addAddrIDToRecords(op, wb, addresses, addrID, spendingTxid, int32(^i), block.Height)
if err != nil {
return err
}
unspentTxs[stxID] = unspentAddrs
}
}
if err := d.writeAddressRecords(wb, block, op, addresses, spentTxs); err != nil {
return err
}
// save unspent txs from current block
for tx, val := range unspentTxs {
if len(val) == 0 {
wb.DeleteCF(d.cfh[cfUnspentTxs], []byte(tx))
} else {
wb.PutCF(d.cfh[cfUnspentTxs], []byte(tx), val)
}
}
return nil
}
func (d *RocksDB) writeAddressesNonUTXO(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error {
addresses := make(map[string][]outpoint)
for _, tx := range block.Txs {
btxID, err := d.chainParser.PackTxid(tx.Txid)
if err != nil {
return err
}
for _, output := range tx.Vout {
addrID, err := d.chainParser.GetAddrIDFromVout(&output)
if err != nil {
@@ -292,122 +483,98 @@ func (d *RocksDB) writeOutputs(wb *gorocksdb.WriteBatch, block *bchain.Block, op
}
continue
}
err = d.addAddrIDToRecords(op, wb, records, addrID, tx.Txid, int32(output.N), block.Height)
err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(output.N), block.Height)
if err != nil {
return err
}
}
if !isUTXO {
// store inputs in output column in format txid ^index
for _, input := range tx.Vin {
for i, a := range input.Addresses {
addrID, err := d.chainParser.GetAddrIDFromAddress(a)
if err != nil {
glog.Warningf("rocksdb: addrID: %v - %d %s", err, block.Height, addrID)
continue
}
err = d.addAddrIDToRecords(op, wb, records, addrID, tx.Txid, int32(^i), block.Height)
if err != nil {
return err
}
// store inputs in format txid ^index
for _, input := range tx.Vin {
for i, a := range input.Addresses {
addrID, err := d.chainParser.GetAddrIDFromAddress(a)
if err != nil {
glog.Warningf("rocksdb: addrID: %v - %d %s", err, block.Height, addrID)
continue
}
err = d.addAddrIDToRecords(op, wb, addresses, addrID, btxID, int32(^i), block.Height)
if err != nil {
return err
}
}
}
}
return d.writeAddressRecords(wb, block, op, addresses, nil)
}
for addrID, outpoints := range records {
key, err := packOutputKey([]byte(addrID), block.Height)
func (d *RocksDB) unpackBlockAddresses(buf []byte) ([][]byte, [][]outpoint, error) {
addresses := make([][]byte, 0)
outpointsArray := make([][]outpoint, 0)
// the addresses are packed as lenaddrID addrID vout, where lenaddrID and vout are varints
for i := 0; i < len(buf); {
l, lv := unpackVarint(buf[i:])
j := i + int(l) + lv
if j > len(buf) {
glog.Error("rocksdb: Inconsistent data in blockAddresses ", hex.EncodeToString(buf))
return nil, nil, errors.New("Inconsistent data in blockAddresses")
}
addrID := append([]byte(nil), buf[i+lv:j]...)
outpoints, ol, err := d.unpackNOutpoints(buf[j:])
if err != nil {
glog.Warningf("rocksdb: packOutputKey: %v - %d %s", err, block.Height, addrID)
continue
}
switch op {
case opInsert:
val, err := d.packOutputValue(outpoints)
if err != nil {
glog.Warningf("rocksdb: packOutputValue: %v", err)
continue
}
wb.PutCF(d.cfh[cfOutputs], key, val)
case opDelete:
wb.DeleteCF(d.cfh[cfOutputs], key)
glog.Error("rocksdb: Inconsistent data in blockAddresses ", hex.EncodeToString(buf))
return nil, nil, errors.New("Inconsistent data in blockAddresses")
}
addresses = append(addresses, addrID)
outpointsArray = append(outpointsArray, outpoints)
i = j + ol
}
return nil
return addresses, outpointsArray, nil
}
func packOutputKey(outputScript []byte, height uint32) ([]byte, error) {
bheight := packUint(height)
buf := make([]byte, 0, len(outputScript)+len(bheight))
buf = append(buf, outputScript...)
buf = append(buf, bheight...)
return buf, nil
}
func (d *RocksDB) packOutputValue(outpoints []outpoint) ([]byte, error) {
func (d *RocksDB) packOutpoints(outpoints []outpoint) []byte {
buf := make([]byte, 0)
bvout := make([]byte, vlq.MaxLen32)
for _, o := range outpoints {
btxid, err := d.chainParser.PackTxid(o.txid)
if err != nil {
return nil, err
}
bvout := packVarint(o.vout)
buf = append(buf, btxid...)
buf = append(buf, bvout...)
l := packVarint(o.vout, bvout)
buf = append(buf, []byte(o.btxID)...)
buf = append(buf, bvout[:l]...)
}
return buf, nil
return buf
}
func (d *RocksDB) unpackOutputValue(buf []byte) ([]outpoint, error) {
func (d *RocksDB) unpackOutpoints(buf []byte) ([]outpoint, error) {
txidUnpackedLen := d.chainParser.PackedTxidLen()
outpoints := make([]outpoint, 0)
for i := 0; i < len(buf); {
txid, err := d.chainParser.UnpackTxid(buf[i : i+txidUnpackedLen])
if err != nil {
return nil, err
}
btxID := append([]byte(nil), buf[i:i+txidUnpackedLen]...)
i += txidUnpackedLen
vout, voutLen := unpackVarint(buf[i:])
i += voutLen
outpoints = append(outpoints, outpoint{
txid: txid,
vout: vout,
btxID: btxID,
vout: vout,
})
}
return outpoints, nil
}
// Input index
func (d *RocksDB) writeInputs(
wb *gorocksdb.WriteBatch,
block *bchain.Block,
op int,
) error {
for _, tx := range block.Txs {
for i, input := range tx.Vin {
if input.Coinbase != "" {
continue
}
key, err := d.packOutpoint(input.Txid, int32(input.Vout))
if err != nil {
return err
}
val, err := d.packOutpoint(tx.Txid, int32(i))
if err != nil {
return err
}
switch op {
case opInsert:
wb.PutCF(d.cfh[cfInputs], key, val)
case opDelete:
wb.DeleteCF(d.cfh[cfInputs], key)
}
func (d *RocksDB) unpackNOutpoints(buf []byte) ([]outpoint, int, error) {
txidUnpackedLen := d.chainParser.PackedTxidLen()
n, p := unpackVarint(buf)
outpoints := make([]outpoint, n)
for i := int32(0); i < n; i++ {
if p+txidUnpackedLen >= len(buf) {
return nil, 0, errors.New("Inconsistent data in unpackNOutpoints")
}
btxID := append([]byte(nil), buf[p:p+txidUnpackedLen]...)
p += txidUnpackedLen
vout, voutLen := unpackVarint(buf[p:])
p += voutLen
outpoints[i] = outpoint{
btxID: btxID,
vout: vout,
}
}
return nil
return outpoints, p, nil
}
func (d *RocksDB) packOutpoint(txid string, vout int32) ([]byte, error) {
@@ -415,10 +582,11 @@ func (d *RocksDB) packOutpoint(txid string, vout int32) ([]byte, error) {
if err != nil {
return nil, err
}
bvout := packVarint(vout)
buf := make([]byte, 0, len(btxid)+len(bvout))
bv := make([]byte, vlq.MaxLen32)
l := packVarint(vout, bv)
buf := make([]byte, 0, l+len(btxid))
buf = append(buf, btxid...)
buf = append(buf, bvout...)
buf = append(buf, bv[:l]...)
return buf, nil
}
@@ -457,30 +625,6 @@ func (d *RocksDB) GetBlockHash(height uint32) (string, error) {
return d.chainParser.UnpackBlockHash(val.Data())
}
// GetSpentOutput returns output which is spent by input tx
func (d *RocksDB) GetSpentOutput(txid string, i int32) (string, int32, error) {
	// build the packed outpoint (txid + varint vout) used as key in the inputs column
	b, err := d.packOutpoint(txid, i)
	if err != nil {
		return "", 0, err
	}
	val, err := d.db.GetCF(d.ro, d.cfh[cfInputs], b)
	if err != nil {
		return "", 0, err
	}
	defer val.Free()
	// the stored value has the same encoding as an outputs column value
	p, err := d.unpackOutputValue(val.Data())
	if err != nil {
		return "", 0, err
	}
	var otxid string
	var oi int32
	// only the first outpoint is taken - an output is spent by at most one input;
	// if the key was not found, p is empty and zero values are returned
	for _, i := range p {
		otxid, oi = i.txid, i.vout
		break
	}
	return otxid, oi, nil
}
func (d *RocksDB) writeHeight(
wb *gorocksdb.WriteBatch,
block *bchain.Block,
@@ -502,17 +646,28 @@ func (d *RocksDB) writeHeight(
return nil
}
// DisconnectBlocksFullScan removes all data belonging to blocks in range lower-higher
// it finds the data by doing full scan of outputs column, therefore it is quite slow
func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error {
glog.Infof("db: disconnecting blocks %d-%d using full scan", lower, higher)
outputKeys := [][]byte{}
outputValues := [][]byte{}
// getBlockAddresses loads and unpacks the blockAddresses record stored under
// key (the packed block height). It returns an error when the record is not
// present in the DB.
func (d *RocksDB) getBlockAddresses(key []byte) ([][]byte, [][]outpoint, error) {
	val, err := d.db.GetCF(d.ro, d.cfh[cfBlockAddresses], key)
	if err != nil {
		return nil, nil, err
	}
	defer val.Free()
	data := val.Data()
	// a nil slice means the block is missing in DB
	if data == nil {
		return nil, nil, errors.New("Block addresses missing")
	}
	return d.unpackBlockAddresses(data)
}
func (d *RocksDB) allAddressesScan(lower uint32, higher uint32) ([][]byte, [][]byte, error) {
glog.Infof("db: doing full scan of addresses column")
addrKeys := [][]byte{}
addrValues := [][]byte{}
var totalOutputs, count uint64
var seekKey []byte
for {
var key []byte
it := d.db.NewIteratorCF(d.ro, d.cfh[cfOutputs])
it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses])
if totalOutputs == 0 {
it.SeekToFirst()
} else {
@@ -524,16 +679,16 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error {
count++
key = it.Key().Data()
l := len(key)
if l > 4 {
height := unpackUint(key[l-4 : l])
if l > packedHeightBytes {
height := unpackUint(key[l-packedHeightBytes : l])
if height >= lower && height <= higher {
outputKey := make([]byte, len(key))
copy(outputKey, key)
outputKeys = append(outputKeys, outputKey)
addrKey := make([]byte, len(key))
copy(addrKey, key)
addrKeys = append(addrKeys, addrKey)
value := it.Value().Data()
outputValue := make([]byte, len(value))
copy(outputValue, value)
outputValues = append(outputValues, outputValue)
addrValue := make([]byte, len(value))
copy(addrValue, value)
addrValues = append(addrValues, addrValue)
}
}
}
@@ -545,43 +700,99 @@ func (d *RocksDB) DisconnectBlocksFullScan(lower uint32, higher uint32) error {
break
}
}
glog.Infof("rocksdb: about to disconnect %d outputs from %d", len(outputKeys), totalOutputs)
glog.Infof("rocksdb: scanned %d addresses, found %d to disconnect", totalOutputs, len(addrKeys))
return addrKeys, addrValues, nil
}
// DisconnectBlockRange removes all data belonging to blocks in range lower-higher
// it finds the data in blockaddresses column if available,
// otherwise by doing quite slow full scan of addresses column
func (d *RocksDB) DisconnectBlockRange(lower uint32, higher uint32) error {
glog.Infof("db: disconnecting blocks %d-%d", lower, higher)
addrKeys := [][]byte{}
addrOutpoints := [][]byte{}
addrUnspentOutpoints := [][]outpoint{}
keep := d.chainParser.KeepBlockAddresses()
var err error
if keep > 0 {
for height := lower; height <= higher; height++ {
addresses, unspentOutpoints, err := d.getBlockAddresses(packUint(height))
if err != nil {
glog.Error(err)
return err
}
for i, addrID := range addresses {
addrKey := packAddressKey(addrID, height)
val, err := d.db.GetCF(d.ro, d.cfh[cfAddresses], addrKey)
if err != nil {
glog.Error(err)
return err
}
addrKeys = append(addrKeys, addrKey)
av := append([]byte(nil), val.Data()...)
val.Free()
addrOutpoints = append(addrOutpoints, av)
addrUnspentOutpoints = append(addrUnspentOutpoints, unspentOutpoints[i])
}
}
} else {
addrKeys, addrOutpoints, err = d.allAddressesScan(lower, higher)
if err != nil {
return err
}
}
glog.Infof("rocksdb: about to disconnect %d addresses ", len(addrKeys))
wb := gorocksdb.NewWriteBatch()
defer wb.Destroy()
for i := 0; i < len(outputKeys); i++ {
unspentTxs := make(map[string][]byte)
for addrIndex, addrKey := range addrKeys {
if glog.V(2) {
glog.Info("output ", hex.EncodeToString(outputKeys[i]))
glog.Info("address ", hex.EncodeToString(addrKey))
}
wb.DeleteCF(d.cfh[cfOutputs], outputKeys[i])
outpoints, err := d.unpackOutputValue(outputValues[i])
// delete address:height from the index
wb.DeleteCF(d.cfh[cfAddresses], addrKey)
addrID, _, err := unpackAddressKey(addrKey)
if err != nil {
return err
}
// recreate unspentTxs, which were spent by this block (that is being disconnected)
for _, o := range addrUnspentOutpoints[addrIndex] {
stxID := string(o.btxID)
txAddrs, exists := unspentTxs[stxID]
if !exists {
txAddrs, err = d.getUnspentTx(o.btxID)
if err != nil {
return err
}
}
txAddrs = appendPackedAddrID(txAddrs, addrID, uint32(o.vout), 1)
unspentTxs[stxID] = txAddrs
}
// delete unspentTxs from this block
outpoints, err := d.unpackOutpoints(addrOutpoints[addrIndex])
if err != nil {
return err
}
for _, o := range outpoints {
// delete from inputs
boutpoint, err := d.packOutpoint(o.txid, o.vout)
if err != nil {
return err
}
if glog.V(2) {
glog.Info("input ", hex.EncodeToString(boutpoint))
}
wb.DeleteCF(d.cfh[cfInputs], boutpoint)
// delete from txCache
b, err := d.chainParser.PackTxid(o.txid)
if err != nil {
return err
}
wb.DeleteCF(d.cfh[cfTransactions], b)
wb.DeleteCF(d.cfh[cfUnspentTxs], o.btxID)
wb.DeleteCF(d.cfh[cfTransactions], o.btxID)
}
}
for key, val := range unspentTxs {
wb.PutCF(d.cfh[cfUnspentTxs], []byte(key), val)
}
for height := lower; height <= higher; height++ {
if glog.V(2) {
glog.Info("height ", height)
}
wb.DeleteCF(d.cfh[cfHeight], packUint(height))
key := packUint(height)
if keep > 0 {
wb.DeleteCF(d.cfh[cfBlockAddresses], key)
}
wb.DeleteCF(d.cfh[cfHeight], key)
}
err := d.db.Write(d.wo, wb)
err = d.db.Write(d.wo, wb)
if err == nil {
glog.Infof("rocksdb: blocks %d-%d disconnected", lower, higher)
}
@@ -651,7 +862,21 @@ func (d *RocksDB) DeleteTx(txid string) error {
// Helpers
var ErrInvalidAddress = errors.New("invalid address")
// packAddressKey builds a key for the addresses column by concatenating the
// address identifier with the big-endian packed block height.
func packAddressKey(addrID []byte, height uint32) []byte {
	packedHeight := packUint(height)
	key := make([]byte, 0, len(addrID)+len(packedHeight))
	key = append(key, addrID...)
	return append(key, packedHeight...)
}
// unpackAddressKey splits an addresses column key into the address identifier
// and the block height. It fails when the key is too short to contain a
// packed height after a non-empty address part.
func unpackAddressKey(key []byte) ([]byte, uint32, error) {
	split := len(key) - packedHeightBytes
	if split <= 0 {
		return nil, 0, errors.New("Invalid address key")
	}
	height := unpackUint(key[split : split+packedHeightBytes])
	return key[:split], height, nil
}
func packUint(i uint32) []byte {
buf := make([]byte, 4)
@@ -663,34 +888,11 @@ func unpackUint(buf []byte) uint32 {
return binary.BigEndian.Uint32(buf)
}
// packFloat64 serializes a float64 into 8 bytes, big-endian, using its IEEE
// 754 bit pattern.
func packFloat64(f float64) []byte {
	bits := math.Float64bits(f)
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, bits)
	return out
}
// unpackFloat64 deserializes a big-endian 8-byte IEEE 754 bit pattern back
// into a float64; inverse of packFloat64.
func unpackFloat64(buf []byte) float64 {
	bits := binary.BigEndian.Uint64(buf)
	return math.Float64frombits(bits)
}
func packVarint(i int32) []byte {
buf := make([]byte, vlq.MaxLen32)
ofs := vlq.PutInt(buf, int64(i))
return buf[:ofs]
func packVarint(i int32, buf []byte) int {
return vlq.PutInt(buf, int64(i))
}
func unpackVarint(buf []byte) (int32, int) {
i, ofs := vlq.Uint(buf)
i, ofs := vlq.Int(buf)
return int32(i), ofs
}
// packVarint64 encodes an int64 as a signed (zig-zag) varint and returns
// a new slice trimmed to the bytes actually used.
func packVarint64(i int64) []byte {
	buf := make([]byte, vlq.MaxLen64)
	ofs := vlq.PutInt(buf, i)
	return buf[:ofs]
}
// unpackVarint64 decodes a signed (zig-zag) varint from the start of buf,
// returning the value and the number of bytes consumed.
func unpackVarint64(buf []byte) (int64, int) {
	i, ofs := vlq.Int(buf)
	return i, ofs
}

718
db/rocksdb_test.go Normal file
View File

@@ -0,0 +1,718 @@
package db
import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"github.com/juju/errors"
)
// simplified explanation of signed varint packing, used in many index data structures
// for number n, the packing is: 2*n if n>=0 else 2*(-n)-1
// take only 1 byte if abs(n)<127
// setupRocksDB opens a RocksDB instance backed by a fresh temporary
// directory; the test is aborted if the database cannot be created.
func setupRocksDB(t *testing.T, p bchain.BlockChainParser) *RocksDB {
	path, err := ioutil.TempDir("", "testdb")
	if err != nil {
		t.Fatal(err)
	}
	db, err := NewRocksDB(path, p)
	if err != nil {
		t.Fatal(err)
	}
	return db
}
// closeAnddestroyRocksDB closes the database and deletes its files on disk.
func closeAnddestroyRocksDB(t *testing.T, d *RocksDB) {
	err := d.Close()
	if err != nil {
		t.Fatal(err)
	}
	// best effort cleanup of the temporary directory
	os.RemoveAll(d.path)
}
// addressToPubKeyHex converts addr to its output script using the chain
// parser and returns the script hex encoded.
func addressToPubKeyHex(addr string, t *testing.T, d *RocksDB) string {
	script, err := d.chainParser.AddressToOutputScript(addr)
	if err != nil {
		t.Fatal(err)
	}
	return hex.EncodeToString(script)
}
// addressToPubKeyHexWithLength returns the hex encoded output script of
// addr, prefixed by the length of the script hex encoded in base 16.
func addressToPubKeyHexWithLength(addr string, t *testing.T, d *RocksDB) string {
	scriptHex := addressToPubKeyHex(addr, t, d)
	lengthPrefix := strconv.FormatInt(int64(len(scriptHex)), 16)
	return lengthPrefix + scriptHex
}
// keyPair is used to compare given key value in DB with expected
// for more complicated compares it is possible to specify CompareFunc
type keyPair struct {
	// Key and Value are the hex encoded expected contents of a DB row
	Key, Value string
	// CompareFunc, when non-nil, overrides the plain equality check on Value
	CompareFunc func(string) bool
}
// compareFuncBlockAddresses checks that v contains every string in expected
// exactly once and nothing else; the order of the expected substrings in v
// does not matter. It reports mismatches via t.Error and returns true only
// when v is fully consumed by the expected parts.
func compareFuncBlockAddresses(t *testing.T, v string, expected []string) bool {
	for _, want := range expected {
		before := len(v)
		// remove the first occurrence; an unchanged length means it was absent
		v = strings.Replace(v, want, "", 1)
		if len(v) == before {
			t.Error(want, " not found in ", v)
			return false
		}
	}
	// anything left over was not expected
	if v != "" {
		t.Error("not expected content ", v)
		return false
	}
	return true
}
// checkColumn verifies that column col contains exactly the rows described
// by kp and nothing more; kp is sorted in place by key because the iterator
// returns rows in ascending key order. Keys and values are compared hex
// encoded; a keyPair.CompareFunc, when present, replaces the value equality
// check.
func checkColumn(d *RocksDB, col int, kp []keyPair) error {
	sort.Slice(kp, func(i, j int) bool {
		return kp[i].Key < kp[j].Key
	})
	it := d.db.NewIteratorCF(d.ro, d.cfh[col])
	defer it.Close()
	row := 0
	for it.SeekToFirst(); it.Valid(); it.Next() {
		if row >= len(kp) {
			return errors.Errorf("Expected less rows in column %v", cfNames[col])
		}
		gotKey := hex.EncodeToString(it.Key().Data())
		if gotKey != kp[row].Key {
			return errors.Errorf("Incorrect key %v found in column %v row %v, expecting %v", gotKey, cfNames[col], row, kp[row].Key)
		}
		gotVal := hex.EncodeToString(it.Value().Data())
		ok := false
		if kp[row].CompareFunc != nil {
			ok = kp[row].CompareFunc(gotVal)
		} else {
			ok = gotVal == kp[row].Value
		}
		if !ok {
			return errors.Errorf("Incorrect value %v found in column %v row %v, expecting %v", gotVal, cfNames[col], row, kp[row].Value)
		}
		row++
	}
	if row != len(kp) {
		return errors.Errorf("Expected more rows in column %v: got %v, expected %v", cfNames[col], row, len(kp))
	}
	return nil
}
// getTestUTXOBlock1 builds the first test block (height 225493) containing
// two transactions with outputs only - no inputs - so connecting it creates
// fresh unspent outputs for five distinct addresses.
func getTestUTXOBlock1(t *testing.T, d *RocksDB) *bchain.Block {
	return &bchain.Block{
		BlockHeader: bchain.BlockHeader{
			Height: 225493,
			Hash:   "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997",
		},
		Txs: []bchain.Tx{
			// tx with two outputs to two different addresses
			bchain.Tx{
				Txid: "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840",
				Vout: []bchain.Vout{
					bchain.Vout{
						N: 0,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d),
						},
					},
					bchain.Vout{
						N: 1,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d),
						},
					},
				},
				Blocktime: 22549300000,
				Time:      22549300000,
			},
			// tx with three outputs, including P2SH addresses
			bchain.Tx{
				Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75",
				Vout: []bchain.Vout{
					bchain.Vout{
						N: 0,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d),
						},
					},
					bchain.Vout{
						N: 1,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d),
						},
					},
					bchain.Vout{
						N: 2,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d),
						},
					},
				},
				Blocktime: 22549300001,
				Time:      22549300001,
			},
		},
	}
}
// getTestUTXOBlock2 builds the second test block (height 225494); its inputs
// spend outputs from the first test block as well as an output created in
// this same block, exercising intra-block spending in the index.
func getTestUTXOBlock2(t *testing.T, d *RocksDB) *bchain.Block {
	return &bchain.Block{
		BlockHeader: bchain.BlockHeader{
			Height: 225494,
			Hash:   "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6",
		},
		Txs: []bchain.Tx{
			bchain.Tx{
				Txid: "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25",
				Vin: []bchain.Vin{
					// both inputs spend outputs of block 1
					bchain.Vin{
						Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75",
						Vout: 0,
					},
					bchain.Vin{
						Txid: "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840",
						Vout: 1,
					},
				},
				Vout: []bchain.Vout{
					bchain.Vout{
						N: 0,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d),
						},
					},
					bchain.Vout{
						N: 1,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d),
						},
					},
				},
				Blocktime: 22549400000,
				Time:      22549400000,
			},
			bchain.Tx{
				Txid: "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71",
				Vin: []bchain.Vin{
					// spending an output in the same block
					bchain.Vin{
						Txid: "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25",
						Vout: 0,
					},
					// spending an output in the previous block
					bchain.Vin{
						Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75",
						Vout: 1,
					},
				},
				Vout: []bchain.Vout{
					bchain.Vout{
						N: 0,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d),
						},
					},
					bchain.Vout{
						N: 1,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d),
						},
					},
				},
				Blocktime: 22549400001,
				Time:      22549400001,
			},
			// transaction from the same address in the previous block
			bchain.Tx{
				Txid: "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07",
				Vin: []bchain.Vin{
					bchain.Vin{
						Txid: "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75",
						Vout: 2,
					},
				},
				Vout: []bchain.Vout{
					bchain.Vout{
						N: 0,
						ScriptPubKey: bchain.ScriptPubKey{
							Hex: addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d),
						},
					},
				},
				Blocktime: 22549400002,
				Time:      22549400002,
			},
		},
	}
}
// verifyAfterUTXOBlock1 checks the DB state after connecting only block 1
// (or after disconnecting block 2). When noBlockAddresses is true the
// cfBlockAddresses column is expected to be empty - that is the state after
// a disconnect, because the blockaddresses record of a disconnected range
// is deleted.
func verifyAfterUTXOBlock1(t *testing.T, d *RocksDB, noBlockAddresses bool) {
	if err := checkColumn(d, cfHeight, []keyPair{
		keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", nil},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// the vout is encoded as signed varint, i.e. value * 2 for non negative values
	if err := checkColumn(d, cfAddresses, []keyPair{
		keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00", nil},
		keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil},
		keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil},
		keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil},
		keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04", nil},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// unspent txs hold the addresses of their (still unspent) outputs;
	// the order inside a value is not deterministic, hence CompareFunc
	if err := checkColumn(d, cfUnspentTxs, []keyPair{
		keyPair{
			"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", "",
			func(v string) bool {
				return compareFuncBlockAddresses(t, v, []string{
					addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00",
					addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02",
				})
			},
		},
		keyPair{
			"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", "",
			func(v string) bool {
				return compareFuncBlockAddresses(t, v, []string{
					addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00",
					addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02",
					addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "04",
				})
			},
		},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// after disconnect there are no blockaddresses for the previous block
	var blockAddressesKp []keyPair
	if noBlockAddresses {
		blockAddressesKp = []keyPair{}
	} else {
		// the values in cfBlockAddresses are in random order, must use CompareFunc
		blockAddressesKp = []keyPair{
			keyPair{"000370d5", "",
				func(v string) bool {
					return compareFuncBlockAddresses(t, v, []string{
						addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00",
						addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "00",
						addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "00",
						addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "00",
						addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "00",
					})
				},
			},
		}
	}
	if err := checkColumn(d, cfBlockAddresses, blockAddressesKp); err != nil {
		{
			t.Fatal(err)
		}
	}
}
// verifyAfterUTXOBlock2 checks the DB state after both test blocks are
// connected: both heights are indexed, addresses carry entries for outputs
// and for spending inputs, and unspent txs reflect the outputs consumed by
// block 2.
func verifyAfterUTXOBlock2(t *testing.T, d *RocksDB) {
	if err := checkColumn(d, cfHeight, []keyPair{
		keyPair{"000370d5", "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997", nil},
		keyPair{"000370d6", "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6", nil},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// vouts are signed varints: even values are outputs (n*2),
	// odd values mark spending inputs (n*2+1)
	if err := checkColumn(d, cfAddresses, []keyPair{
		keyPair{addressToPubKeyHex("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "00", nil},
		keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d5", "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02", nil},
		keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00", nil},
		keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02", nil},
		keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d5", "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04", nil},
		keyPair{addressToPubKeyHex("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "00" + "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "01", nil},
		keyPair{addressToPubKeyHex("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "02", nil},
		keyPair{addressToPubKeyHex("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "00", nil},
		keyPair{addressToPubKeyHex("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "02", nil},
		keyPair{addressToPubKeyHex("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "01", nil},
		keyPair{addressToPubKeyHex("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "000370d6", "7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25" + "03", nil},
		keyPair{addressToPubKeyHex("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "000370d6", "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07" + "00" + "05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07" + "01", nil},
		keyPair{addressToPubKeyHex("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "000370d6", "3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71" + "03", nil},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	if err := checkColumn(d, cfUnspentTxs, []keyPair{
		keyPair{
			"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840",
			addressToPubKeyHexWithLength("mfcWp7DB6NuaZsExybTTXpVgWz559Np4Ti", t, d) + "00",
			nil,
		},
		keyPair{
			"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25",
			addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "02",
			nil,
		},
		keyPair{
			"3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", "",
			func(v string) bool {
				return compareFuncBlockAddresses(t, v, []string{
					addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00",
					addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "02",
				})
			},
		},
		keyPair{
			"05e2e48aeabdd9b75def7b48d756ba304713c2aba7b522bf9dbc893fc4231b07",
			addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "00",
			nil,
		},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// blockaddresses of block 2 also record the outpoints it spent,
	// needed to restore unspent txs on disconnect
	if err := checkColumn(d, cfBlockAddresses, []keyPair{
		keyPair{"000370d6", "",
			func(v string) bool {
				return compareFuncBlockAddresses(t, v, []string{
					addressToPubKeyHexWithLength("mzB8cYrfRwFRFAGTDzV8LkUQy5BQicxGhX", t, d) + "00",
					addressToPubKeyHexWithLength("mtR97eM2HPWVM6c8FGLGcukgaHHQv7THoL", t, d) + "00",
					addressToPubKeyHexWithLength("mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", t, d) + "00",
					addressToPubKeyHexWithLength("mmJx9Y8ayz9h14yd9fgCW1bUKoEpkBAquP", t, d) + "00",
					addressToPubKeyHexWithLength("mv9uLThosiEnGRbVPS7Vhyw6VssbVRsiAw", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "00",
					addressToPubKeyHexWithLength("mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", t, d) + "02" + "00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840" + "02",
					addressToPubKeyHexWithLength("2Mz1CYoppGGsLNUGF2YDhTif6J661JitALS", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "02",
					addressToPubKeyHexWithLength("2NEVv9LJmAnY99W1pFoc5UJjVdypBqdnvu1", t, d) + "02" + "effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75" + "04",
				})
			},
		},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
}
// txidVoutOutput describes one entry returned by GetTransactions:
// the transaction id, the vout index, and whether the address appears
// as an output (true) or as a spending input (false).
type txidVoutOutput struct {
	txid     string
	vout     uint32
	isOutput bool
}
// verifyGetTransactions runs GetTransactions for addr over the block range
// low-high and compares the collected results with wantTxids. An error from
// GetTransactions is tolerated only when it equals wantErr.
func verifyGetTransactions(t *testing.T, d *RocksDB, addr string, low, high uint32, wantTxids []txidVoutOutput, wantErr error) {
	collected := make([]txidVoutOutput, 0)
	record := func(txid string, vout uint32, isOutput bool) error {
		collected = append(collected, txidVoutOutput{txid, vout, isOutput})
		return nil
	}
	err := d.GetTransactions(addr, low, high, record)
	if err != nil {
		if wantErr == nil || wantErr.Error() != err.Error() {
			t.Fatal(err)
		}
	}
	if !reflect.DeepEqual(collected, wantTxids) {
		t.Errorf("GetTransactions() = %v, want %v", collected, wantTxids)
	}
}
// testBitcoinParser embeds BitcoinParser so that selected methods can be
// overridden for tests while inheriting the rest of the parser behavior.
type testBitcoinParser struct {
	*btc.BitcoinParser
}
// override btc.KeepBlockAddresses to keep only one blockaddress
// so that the disconnect-using-blockaddresses path is exercised with
// the smallest possible retained history
func (p *testBitcoinParser) KeepBlockAddresses() int {
	return 1
}
// override PackTx and UnpackTx to default BaseParser functionality
// BitcoinParser uses tx hex which is not available for the test transactions
func (p *testBitcoinParser) PackTx(tx *bchain.Tx, height uint32, blockTime int64) ([]byte, error) {
	return p.BaseParser.PackTx(tx, height, blockTime)
}
// UnpackTx delegates to BaseParser for the same reason as PackTx above:
// the test transactions have no raw hex for the Bitcoin-specific codec.
func (p *testBitcoinParser) UnpackTx(buf []byte) (*bchain.Tx, uint32, error) {
	return p.BaseParser.UnpackTx(buf)
}
// testTxCache stores tx in the transactions cache, reads it back and checks
// both the content and the stored height, then deletes it again.
func testTxCache(t *testing.T, d *RocksDB, b *bchain.Block, tx *bchain.Tx) {
	if err := d.PutTx(tx, b.Height, tx.Blocktime); err != nil {
		t.Fatal(err)
	}
	cachedTx, cachedHeight, err := d.GetTx(tx.Txid)
	if err != nil {
		t.Fatal(err)
	}
	if cachedHeight != b.Height {
		t.Fatalf("GetTx: got height %v, expected %v", cachedHeight, b.Height)
	}
	// compare via fmt.Sprint - the structs contain slices, so == is not usable
	if fmt.Sprint(cachedTx) != fmt.Sprint(tx) {
		t.Errorf("GetTx: %v, want %v", cachedTx, tx)
	}
	if err := d.DeleteTx(tx.Txid); err != nil {
		t.Fatal(err)
	}
}
// TestRocksDB_Index_UTXO is an integration test probing the whole indexing functionality for UTXO chains
// It does the following:
// 1) Connect two blocks (inputs from 2nd block are spending some outputs from the 1st block)
// 2) GetTransactions for various addresses / low-high ranges
// 3) GetBestBlock, GetBlockHash
// 4) Test tx caching functionality
// 5) Disconnect block 2 - expect error
// 6) Disconnect the block 2 using blockaddresses column
// 7) Reconnect block 2 and disconnect blocks 1 and 2 using full scan - expect error
// After each step, the content of DB is examined and any difference against expected state is regarded as failure
func TestRocksDB_Index_UTXO(t *testing.T) {
	// testBitcoinParser keeps only 1 blockaddresses record (KeepBlockAddresses == 1)
	d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}})
	defer closeAnddestroyRocksDB(t, d)
	// connect 1st block - will log warnings about missing UTXO transactions in cfUnspentTxs column
	block1 := getTestUTXOBlock1(t, d)
	if err := d.ConnectBlock(block1); err != nil {
		t.Fatal(err)
	}
	verifyAfterUTXOBlock1(t, d, false)
	// connect 2nd block - use some outputs from the 1st block as the inputs and 1 input uses tx from the same block
	block2 := getTestUTXOBlock2(t, d)
	if err := d.ConnectBlock(block2); err != nil {
		t.Fatal(err)
	}
	verifyAfterUTXOBlock2(t, d)
	// get transactions for various addresses / low-high ranges
	verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 0, 1000000, []txidVoutOutput{
		txidVoutOutput{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1, true},
		txidVoutOutput{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 1, false},
	}, nil)
	verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 225493, 225493, []txidVoutOutput{
		txidVoutOutput{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1, true},
	}, nil)
	verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 225494, 1000000, []txidVoutOutput{
		txidVoutOutput{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 1, false},
	}, nil)
	verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eEtz", 500000, 1000000, []txidVoutOutput{}, nil)
	verifyGetTransactions(t, d, "mwwoKQE5Lb1G4picHSHDQKg8jw424PF9SC", 0, 1000000, []txidVoutOutput{
		txidVoutOutput{"3d90d15ed026dc45e19ffb52875ed18fa9e8012ad123d7f7212176e2b0ebdb71", 0, true},
	}, nil)
	// address with an invalid checksum must produce an error
	verifyGetTransactions(t, d, "mtGXQvBowMkBpnhLckhxhbwYK44Gs9eBad", 500000, 1000000, []txidVoutOutput{}, errors.New("checksum mismatch"))
	// GetBestBlock
	height, hash, err := d.GetBestBlock()
	if err != nil {
		t.Fatal(err)
	}
	if height != 225494 {
		t.Fatalf("GetBestBlock: got height %v, expected %v", height, 225494)
	}
	if hash != "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6" {
		t.Fatalf("GetBestBlock: got hash %v, expected %v", hash, "00000000eb0443fd7dc4a1ed5c686a8e995057805f9a161d9a5a77a95e72b7b6")
	}
	// GetBlockHash
	hash, err = d.GetBlockHash(225493)
	if err != nil {
		t.Fatal(err)
	}
	if hash != "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997" {
		t.Fatalf("GetBlockHash: got hash %v, expected %v", hash, "0000000076fbbed90fd75b0e18856aa35baa984e9c9d444cf746ad85e94e2997")
	}
	// Test tx caching functionality, leave one tx in db to test cleanup in DisconnectBlock
	testTxCache(t, d, block1, &block1.Txs[0])
	testTxCache(t, d, block2, &block2.Txs[0])
	if err = d.PutTx(&block2.Txs[1], block2.Height, block2.Txs[1].Blocktime); err != nil {
		t.Fatal(err)
	}
	// check that there is only the last tx in the cache
	// NOTE(review): the err returned by PackTx below is shadowed by the
	// following if statement and never checked - confirm this is intended
	packedTx, err := d.chainParser.PackTx(&block2.Txs[1], block2.Height, block2.Txs[1].Blocktime)
	if err := checkColumn(d, cfTransactions, []keyPair{
		keyPair{block2.Txs[1].Txid, hex.EncodeToString(packedTx), nil},
	}); err != nil {
		{
			t.Fatal(err)
		}
	}
	// DisconnectBlock for UTXO chains is not possible
	err = d.DisconnectBlock(block2)
	if err == nil || err.Error() != "DisconnectBlock is not supported for UTXO chains" {
		t.Fatal(err)
	}
	verifyAfterUTXOBlock2(t, d)
	// disconnect the 2nd block, verify that the db contains only data from the 1st block with restored unspentTxs
	// and that the cached tx is removed
	err = d.DisconnectBlockRange(225494, 225494)
	if err != nil {
		t.Fatal(err)
	}
	verifyAfterUTXOBlock1(t, d, true)
	if err := checkColumn(d, cfTransactions, []keyPair{}); err != nil {
		{
			t.Fatal(err)
		}
	}
}
// Test_findAndRemoveUnspentAddr is a table test of findAndRemoveUnspentAddr:
// given a packed unspent-addresses buffer and a vout, it expects the removed
// address (want) and the remaining buffer (want2); when the vout is not
// present, the removed part is empty and the buffer stays unchanged.
func Test_findAndRemoveUnspentAddr(t *testing.T) {
	type args struct {
		// unspentAddrs is the hex encoded packed buffer of vout+address pairs
		unspentAddrs string
		vout         uint32
	}
	tests := []struct {
		name  string
		args  args
		want  string
		want2 string
	}{
		{
			name: "3",
			args: args{
				unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114",
				vout:         3,
			},
			want:  "64635167006868",
			want2: "029c0010517a0115887452870212709393588893935687040e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114",
		},
		{
			name: "10",
			args: args{
				unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114",
				vout:         10,
			},
			want:  "61",
			want2: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112",
		},
		{
			// vout 11 does not exist - nothing is removed
			name: "not there",
			args: args{
				unspentAddrs: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114",
				vout:         11,
			},
			want:  "",
			want2: "029c0010517a0115887452870212709393588893935687040e64635167006868060e76519351880087080a7b7b0115870a3276a9144150837fb91d9461d6b95059842ab85262c2923f88ac0c08636751680e04578710029112026114",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b, err := hex.DecodeString(tt.args.unspentAddrs)
			if err != nil {
				panic(err)
			}
			got, got2 := findAndRemoveUnspentAddr(b, tt.args.vout)
			h := hex.EncodeToString(got)
			if !reflect.DeepEqual(h, tt.want) {
				t.Errorf("findAndRemoveUnspentAddr() got = %v, want %v", h, tt.want)
			}
			h2 := hex.EncodeToString(got2)
			if !reflect.DeepEqual(h2, tt.want2) {
				t.Errorf("findAndRemoveUnspentAddr() got2 = %v, want %v", h2, tt.want2)
			}
		})
	}
}
// hexoutpoint is the hex encoded counterpart of outpoint, used to express
// expected values in tests.
type hexoutpoint struct {
	txID string
	vout int32
}
// Test_unpackBlockAddresses verifies that unpackBlockAddresses decodes a
// packed blockaddresses column value into the list of address scripts and,
// per address, the outpoints spent by the block. Expected values are given
// as hex strings / hexoutpoint so that failures are readable.
// Fixes: the second table entry was also named "1" (duplicate subtest name),
// and hex decode failures used panic instead of t.Fatal.
func Test_unpackBlockAddresses(t *testing.T) {
	d := setupRocksDB(t, &testBitcoinParser{BitcoinParser: &btc.BitcoinParser{Params: btc.GetChainParams("test")}})
	defer closeAnddestroyRocksDB(t, d)
	type args struct {
		buf string // hex-encoded packed blockaddresses value
	}
	tests := []struct {
		name    string
		args    args
		want    []string        // expected address scripts, hex-encoded
		want2   [][]hexoutpoint // expected spent outpoints per address
		wantErr bool
	}{
		{
			name: "1",
			args: args{"029c0010517a011588745287047c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d250000b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa38400612709393588893935687000e64635167006868000e7651935188008702effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac7502"},
			want: []string{"9c", "517a011588745287", "709393588893935687", "64635167006868", "76519351880087"},
			want2: [][]hexoutpoint{
				[]hexoutpoint{},
				[]hexoutpoint{
					hexoutpoint{"7c3be24063f268aaa1ed81b64776798f56088757641a34fb156c4f51ed2e9d25", 0},
					hexoutpoint{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 3},
				},
				[]hexoutpoint{},
				[]hexoutpoint{},
				[]hexoutpoint{
					hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 1},
				},
			},
		},
		{
			name: "2",
			args: args{"3276A914B434EB0C1A3B7A02E8A29CC616E791EF1E0BF51F88AC003276A9143F8BA3FDA3BA7B69F5818086E12223C6DD25E3C888AC003276A914A08EAE93007F22668AB5E4A9C83C8CD1C325E3E088AC02EFFD9EF509383D536B1C8AF5BF434C8EFBF521A4F2BEFD4022BBD68694B4AC75003276A9148BDF0AA3C567AA5975C2E61321B8BEBBE7293DF688AC0200B2C06055E5E90E9C82BD4181FDE310104391A7FA4F289B1704E5D90CAA3840022EA9144A21DB08FB6882CB152E1FF06780A430740F77048702EFFD9EF509383D536B1C8AF5BF434C8EFBF521A4F2BEFD4022BBD68694B4AC75023276A914CCAAAF374E1B06CB83118453D102587B4273D09588AC003276A9148D802C045445DF49613F6A70DDD2E48526F3701F88AC00"},
			want: []string{"76a914b434eb0c1a3b7a02e8a29cc616e791ef1e0bf51f88ac", "76a9143f8ba3fda3ba7b69f5818086e12223c6dd25e3c888ac", "76a914a08eae93007f22668ab5e4a9c83c8cd1c325e3e088ac", "76a9148bdf0aa3c567aa5975c2e61321b8bebbe7293df688ac", "a9144a21db08fb6882cb152e1ff06780a430740f770487", "76a914ccaaaf374e1b06cb83118453d102587b4273d09588ac", "76a9148d802c045445df49613f6a70ddd2e48526f3701f88ac"},
			want2: [][]hexoutpoint{
				[]hexoutpoint{},
				[]hexoutpoint{},
				[]hexoutpoint{
					hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 0},
				},
				[]hexoutpoint{
					hexoutpoint{"00b2c06055e5e90e9c82bd4181fde310104391a7fa4f289b1704e5d90caa3840", 1},
				},
				[]hexoutpoint{
					hexoutpoint{"effd9ef509383d536b1c8af5bf434c8efbf521a4f2befd4022bbd68694b4ac75", 1},
				},
				[]hexoutpoint{},
				[]hexoutpoint{},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			b, err := hex.DecodeString(tt.args.buf)
			if err != nil {
				// malformed test fixture, not a failure of the tested code
				t.Fatal(err)
			}
			got, got2, err := d.unpackBlockAddresses(b)
			if (err != nil) != tt.wantErr {
				t.Errorf("unpackBlockAddresses() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// compare address scripts as hex strings for readable diffs
			h := make([]string, len(got))
			for i, g := range got {
				h[i] = hex.EncodeToString(g)
			}
			if !reflect.DeepEqual(h, tt.want) {
				t.Errorf("unpackBlockAddresses() = %v, want %v", h, tt.want)
			}
			// convert unpacked outpoints to their hex representation before comparing
			h2 := make([][]hexoutpoint, len(got2))
			for i, g := range got2 {
				ho := make([]hexoutpoint, len(g))
				for j, o := range g {
					ho[j] = hexoutpoint{hex.EncodeToString(o.btxID), o.vout}
				}
				h2[i] = ho
			}
			if !reflect.DeepEqual(h2, tt.want2) {
				t.Errorf("unpackBlockAddresses() = %v, want %v", h2, tt.want2)
			}
		})
	}
}

View File

@@ -115,7 +115,7 @@ func (w *SyncWorker) resyncIndex(onNewBlock func(hash string)) error {
}
if remoteBestHeight-w.startHeight > uint32(w.syncChunk) {
glog.Infof("resync: parallel sync of blocks %d-%d, using %d workers", w.startHeight, remoteBestHeight, w.syncWorkers)
err = w.connectBlocksParallel(w.startHeight, remoteBestHeight)
err = w.ConnectBlocksParallel(w.startHeight, remoteBestHeight)
if err != nil {
return err
}
@@ -175,6 +175,9 @@ func (w *SyncWorker) connectBlocks(onNewBlock func(hash string)) error {
if onNewBlock != nil {
onNewBlock(res.block.Hash)
}
if res.block.Height > 0 && res.block.Height%1000 == 0 {
glog.Info("connected block ", res.block.Height, " ", res.block.Hash)
}
}
if lastRes.block != nil {
@@ -184,17 +187,38 @@ func (w *SyncWorker) connectBlocks(onNewBlock func(hash string)) error {
return nil
}
func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error {
// ConnectBlocksParallel uses parallel goroutines to get data from blockchain daemon
func (w *SyncWorker) ConnectBlocksParallel(lower, higher uint32) error {
type hashHeight struct {
hash string
height uint32
}
var err error
var wg sync.WaitGroup
bch := make(chan *bchain.Block, w.syncWorkers)
hch := make(chan hashHeight, w.syncWorkers)
hchClosed := atomic.Value{}
hchClosed.Store(false)
work := func(i int) {
var getBlockMux sync.Mutex
getBlockCond := sync.NewCond(&getBlockMux)
lastConnectedBlock := lower - 1
writeBlockDone := make(chan struct{})
writeBlockWorker := func() {
defer close(writeBlockDone)
lastBlock := lower - 1
for b := range bch {
if lastBlock+1 != b.Height {
glog.Error("writeBlockWorker skipped block, last connected block", lastBlock, ", new block ", b.Height)
}
err := w.db.ConnectBlock(b)
if err != nil {
glog.Error("writeBlockWorker ", b.Height, " ", b.Hash, " error ", err)
}
lastBlock = b.Height
}
glog.Info("WriteBlock exiting...")
}
getBlockWorker := func(i int) {
defer wg.Done()
var err error
var block *bchain.Block
@@ -204,10 +228,10 @@ func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error {
if err != nil {
// signal came while looping in the error loop
if hchClosed.Load() == true {
glog.Error("Worker ", i, " connect block error ", err, ". Exiting...")
glog.Error("getBlockWorker ", i, " connect block error ", err, ". Exiting...")
return
}
glog.Error("Worker ", i, " connect block error ", err, ". Retrying...")
glog.Error("getBlockWorker ", i, " connect block error ", err, ". Retrying...")
w.metrics.IndexResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
time.Sleep(time.Millisecond * 500)
} else {
@@ -217,18 +241,32 @@ func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error {
if w.dryRun {
continue
}
err = w.db.ConnectBlock(block)
if err != nil {
glog.Error("Worker ", i, " connect block ", hh.height, " ", hh.hash, " error ", err)
getBlockMux.Lock()
for {
// we must make sure that the blocks are written to db in the correct order
if lastConnectedBlock+1 == hh.height {
// we have the right block, pass it to the writeBlockWorker
lastConnectedBlock = hh.height
bch <- block
getBlockCond.Broadcast()
break
}
// break the endless loop on OS signal
if hchClosed.Load() == true {
break
}
// wait until it is this block's turn to be passed to the writeBlockWorker
getBlockCond.Wait()
}
getBlockMux.Unlock()
}
glog.Info("Worker ", i, " exiting...")
glog.Info("getBlockWorker ", i, " exiting...")
}
for i := 0; i < w.syncWorkers; i++ {
wg.Add(1)
go work(i)
go getBlockWorker(i)
}
go writeBlockWorker()
var hash string
ConnectLoop:
for h := lower; h <= higher; {
@@ -252,96 +290,18 @@ ConnectLoop:
}
}
close(hch)
// signal stop to workers that are in w.chain.GetBlockWithoutHeader error loop
// signal stop to workers that are in a loop
hchClosed.Store(true)
wg.Wait()
return err
}
// connectBlockChunk indexes blocks in the range [lower, higher].
// If the topmost block of the range is already present in the index, the
// whole chunk is treated as done and nil is returned.
func (w *SyncWorker) connectBlockChunk(lower, higher uint32) error {
	done, err := w.isBlockConnected(higher)
	if done || err != nil {
		// ErrBlockNotFound means higher is above the current best block;
		// in that case continue from lower, any other error is fatal
		// (err is nil when the chunk is already connected - return nil)
		if err != bchain.ErrBlockNotFound {
			return err
		}
	}
	hash, err := w.chain.GetBlockHash(lower)
	if err != nil {
		return err
	}
	for height := lower; height <= higher; {
		block, err := w.chain.GetBlock(hash, height)
		if err != nil {
			return err
		}
		// follow the chain using the block's Next pointer
		hash = block.Next
		height = block.Height + 1
		if w.dryRun {
			continue
		}
		if err = w.db.ConnectBlock(block); err != nil {
			return err
		}
		// log progress and refresh the db size metric every 1000 blocks
		if block.Height%1000 == 0 {
			glog.Info("connected block ", block.Height, " ", block.Hash)
			go w.metrics.IndexDBSize.Set(float64(w.db.DatabaseSizeOnDisk()))
		}
	}
	return nil
}
// ConnectBlocksParallelInChunks connect blocks in chunks
func (w *SyncWorker) ConnectBlocksParallelInChunks(lower, higher uint32) error {
var wg sync.WaitGroup
work := func(i int) {
defer wg.Done()
offset := uint32(w.syncChunk * i)
stride := uint32(w.syncChunk * w.syncWorkers)
for low := lower + offset; low <= higher; low += stride {
high := low + uint32(w.syncChunk-1)
if high > higher {
high = higher
}
err := w.connectBlockChunk(low, high)
if err != nil {
if err == bchain.ErrBlockNotFound {
break
}
glog.Fatalf("connectBlocksParallel %d-%d %v", low, high, err)
}
}
}
// broadcast syncWorkers times to unstuck all waiting getBlockWorkers
for i := 0; i < w.syncWorkers; i++ {
wg.Add(1)
go work(i)
getBlockCond.Broadcast()
}
// first wait for the getBlockWorkers to finish and then close bch channel
// so that the getBlockWorkers do not write to the closed channel
wg.Wait()
return nil
}
func (w *SyncWorker) isBlockConnected(height uint32) (bool, error) {
local, err := w.db.GetBlockHash(height)
if err != nil {
return false, err
}
remote, err := w.chain.GetBlockHash(height)
if err != nil {
return false, err
}
if local != remote {
return false, nil
}
return true, nil
close(bch)
<-writeBlockDone
return err
}
type blockResult struct {
@@ -382,14 +342,18 @@ func (w *SyncWorker) getBlockChain(out chan blockResult, done chan struct{}) {
// otherwise doing full scan
func (w *SyncWorker) DisconnectBlocks(lower uint32, higher uint32, hashes []string) error {
glog.Infof("sync: disconnecting blocks %d-%d", lower, higher)
// if the chain uses Block to Addresses mapping, always use DisconnectBlockRange
if w.chain.GetChainParser().KeepBlockAddresses() > 0 {
return w.db.DisconnectBlockRange(lower, higher)
}
blocks := make([]*bchain.Block, len(hashes))
var err error
// get all blocks first to see if we can avoid full scan
for i, hash := range hashes {
blocks[i], err = w.chain.GetBlock(hash, 0)
if err != nil {
// cannot get block, do full range scan
return w.db.DisconnectBlocksFullScan(lower, higher)
// cannot get a block, we must do full range scan
return w.db.DisconnectBlockRange(lower, higher)
}
}
// then disconnect one after another

View File

@@ -196,12 +196,6 @@ func (s *HTTPServer) transactions(w http.ResponseWriter, r *http.Request) {
txList := transactionList{}
err = s.db.GetTransactions(address, lower, higher, func(txid string, vout uint32, isOutput bool) error {
txList.Txid = append(txList.Txid, txid)
if isOutput {
input := s.chain.GetMempoolSpentOutput(txid, vout)
if input != "" {
txList.Txid = append(txList.Txid, txid)
}
}
return nil
})
if err != nil {

View File

@@ -124,27 +124,31 @@ func (s *SocketIoServer) txRedirect(w http.ResponseWriter, r *http.Request) {
}
}
type reqRange struct {
Start int `json:"start"`
End int `json:"end"`
QueryMempol bool `json:"queryMempol"`
QueryMempoolOnly bool `json:"queryMempoolOnly"`
From int `json:"from"`
To int `json:"to"`
type addrOpts struct {
Start int `json:"start"`
End int `json:"end"`
QueryMempoolOnly bool `json:"queryMempoolOnly"`
From int `json:"from"`
To int `json:"to"`
AddressFormat uint8 `json:"addressFormat"`
}
type txOpts struct {
AddressFormat uint8 `json:"addressFormat"`
}
var onMessageHandlers = map[string]func(*SocketIoServer, json.RawMessage) (interface{}, error){
"getAddressTxids": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) {
addr, rr, err := unmarshalGetAddressRequest(params)
addr, opts, err := unmarshalGetAddressRequest(params)
if err == nil {
rv, err = s.getAddressTxids(addr, &rr)
rv, err = s.getAddressTxids(addr, &opts)
}
return
},
"getAddressHistory": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) {
addr, rr, err := unmarshalGetAddressRequest(params)
addr, opts, err := unmarshalGetAddressRequest(params)
if err == nil {
rv, err = s.getAddressHistory(addr, &rr)
rv, err = s.getAddressHistory(addr, &opts)
}
return
},
@@ -173,9 +177,9 @@ var onMessageHandlers = map[string]func(*SocketIoServer, json.RawMessage) (inter
return s.getInfo()
},
"getDetailedTransaction": func(s *SocketIoServer, params json.RawMessage) (rv interface{}, err error) {
txid, err := unmarshalStringParameter(params)
txid, opts, err := unmarshalGetDetailedTransaction(params)
if err == nil {
rv, err = s.getDetailedTransaction(txid)
rv, err = s.getDetailedTransaction(txid, opts)
}
return
},
@@ -226,7 +230,7 @@ func (s *SocketIoServer) onMessage(c *gosocketio.Channel, req map[string]json.Ra
return e
}
func unmarshalGetAddressRequest(params []byte) (addr []string, rr reqRange, err error) {
func unmarshalGetAddressRequest(params []byte) (addr []string, opts addrOpts, err error) {
var p []json.RawMessage
err = json.Unmarshal(params, &p)
if err != nil {
@@ -240,58 +244,51 @@ func unmarshalGetAddressRequest(params []byte) (addr []string, rr reqRange, err
if err != nil {
return
}
err = json.Unmarshal(p[1], &rr)
err = json.Unmarshal(p[1], &opts)
return
}
func uniqueTxids(txids []string) []string {
uniqueTxids := make([]string, 0, len(txids))
// bitcore returns txids from the newest to the oldest, we have to reverse the order
func uniqueTxidsInReverse(txids []string) []string {
i := len(txids)
ut := make([]string, i)
txidsMap := make(map[string]struct{})
for _, txid := range txids {
_, e := txidsMap[txid]
if !e {
uniqueTxids = append(uniqueTxids, txid)
i--
ut[i] = txid
txidsMap[txid] = struct{}{}
}
}
return uniqueTxids
return ut[i:]
}
type resultAddressTxids struct {
Result []string `json:"result"`
}
func (s *SocketIoServer) getAddressTxids(addr []string, rr *reqRange) (res resultAddressTxids, err error) {
func (s *SocketIoServer) getAddressTxids(addr []string, opts *addrOpts) (res resultAddressTxids, err error) {
txids := make([]string, 0)
lower, higher := uint32(rr.To), uint32(rr.Start)
lower, higher := uint32(opts.To), uint32(opts.Start)
for _, address := range addr {
if !rr.QueryMempoolOnly {
if !opts.QueryMempoolOnly {
err = s.db.GetTransactions(address, lower, higher, func(txid string, vout uint32, isOutput bool) error {
txids = append(txids, txid)
if isOutput && rr.QueryMempol {
input := s.chain.GetMempoolSpentOutput(txid, vout)
if input != "" {
txids = append(txids, txid)
}
}
return nil
})
if err != nil {
return res, err
}
}
if rr.QueryMempoolOnly || rr.QueryMempol {
mtxids, err := s.chain.GetMempoolTransactions(address)
} else {
m, err := s.chain.GetMempoolTransactions(address)
if err != nil {
return res, err
}
txids = append(txids, mtxids...)
}
if err != nil {
return res, err
txids = append(txids, m...)
}
}
res.Result = uniqueTxids(txids)
res.Result = uniqueTxidsInReverse(txids)
return res, nil
}
@@ -375,8 +372,8 @@ func txToResTx(tx *bchain.Tx, height int, hi []txInputs, ho []txOutputs) resTx {
}
}
func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res resultGetAddressHistory, err error) {
txr, err := s.getAddressTxids(addr, rr)
func (s *SocketIoServer) getAddressHistory(addr []string, opts *addrOpts) (res resultGetAddressHistory, err error) {
txr, err := s.getAddressTxids(addr, opts)
if err != nil {
return
}
@@ -388,7 +385,7 @@ func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res res
res.Result.TotalCount = len(txids)
res.Result.Items = make([]addressHistoryItem, 0)
for i, txid := range txids {
if i >= rr.From && i < rr.To {
if i >= opts.From && i < opts.To {
tx, height, err := s.txCache.GetTransaction(txid, bestheight)
if err != nil {
return res, err
@@ -402,10 +399,17 @@ func (s *SocketIoServer) getAddressHistory(addr []string, rr *reqRange) (res res
Script: &vout.ScriptPubKey.Hex,
SpentIndex: int(vout.N),
}
if len(vout.ScriptPubKey.Addresses) == 1 {
a := vout.ScriptPubKey.Addresses[0]
if vout.Address != nil {
a, err := vout.Address.EncodeAddress(opts.AddressFormat)
if err != nil {
return res, err
}
ao.Address = &a
if stringInSlice(a, addr) {
found, err := vout.Address.InSlice(addr)
if err != nil {
return res, err
}
if found {
hi, ok := ads[a]
if ok {
hi.OutputIndexes = append(hi.OutputIndexes, int(vout.N))
@@ -587,6 +591,7 @@ func (s *SocketIoServer) getInfo() (res resultGetInfo, err error) {
res.Result.Blocks = int(height)
res.Result.Testnet = s.chain.IsTestnet()
res.Result.Network = s.chain.GetNetworkName()
res.Result.Subversion = s.chain.GetSubversion()
return
}
@@ -603,11 +608,31 @@ func unmarshalStringParameter(params []byte) (s string, err error) {
return
}
// unmarshalGetDetailedTransaction parses the JSON parameter array of the
// getDetailedTransaction request: a txid string optionally followed by a
// txOpts object.
func unmarshalGetDetailedTransaction(params []byte) (txid string, opts txOpts, err error) {
	var parts []json.RawMessage
	if err = json.Unmarshal(params, &parts); err != nil {
		return
	}
	if len(parts) < 1 || len(parts) > 2 {
		err = errors.New("incorrect number of parameters")
		return
	}
	if err = json.Unmarshal(parts[0], &txid); err != nil {
		return
	}
	// options are optional - decode them only when present
	if len(parts) == 2 {
		err = json.Unmarshal(parts[1], &opts)
	}
	return
}
type resultGetDetailedTransaction struct {
Result resTx `json:"result"`
}
func (s *SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetailedTransaction, err error) {
func (s *SocketIoServer) getDetailedTransaction(txid string, opts txOpts) (res resultGetDetailedTransaction, err error) {
bestheight, _, err := s.db.GetBestBlock()
if err != nil {
return
@@ -631,8 +656,12 @@ func (s *SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetai
}
if len(otx.Vout) > int(vin.Vout) {
vout := otx.Vout[vin.Vout]
if len(vout.ScriptPubKey.Addresses) == 1 {
ai.Address = &vout.ScriptPubKey.Addresses[0]
if vout.Address != nil {
a, err := vout.Address.EncodeAddress(opts.AddressFormat)
if err != nil {
return res, err
}
ai.Address = &a
}
ai.Satoshis = int64(vout.Value * 1E8)
}
@@ -641,12 +670,16 @@ func (s *SocketIoServer) getDetailedTransaction(txid string) (res resultGetDetai
}
for _, vout := range tx.Vout {
ao := txOutputs{
Satoshis: int64(vout.Value * 1E8),
Script: &vout.ScriptPubKey.Hex,
SpentIndex: int(vout.N),
Satoshis: int64(vout.Value * 1E8),
Script: &vout.ScriptPubKey.Hex,
// SpentIndex: int(vout.N),
}
if len(vout.ScriptPubKey.Addresses) == 1 {
ao.Address = &vout.ScriptPubKey.Addresses[0]
if vout.Address != nil {
a, err := vout.Address.EncodeAddress(opts.AddressFormat)
if err != nil {
return res, err
}
ao.Address = &a
}
ho = append(ho, ao)
}

View File

@@ -56,16 +56,17 @@
var addresses = document.getElementById('getAddressHistoryAddresses').value.split(",");
addresses = addresses.map(s => s.trim());
var mempool = document.getElementById("getAddressHistoryMempool").checked;
lookupAddressHistories(addresses, 0, 5, mempool, 20000000, 0, function (result) {
var format = document.getElementById("getAddressHistoryFormat").value;
lookupAddressHistories(addresses, 0, 5, mempool, 20000000, 0, format, function (result) {
console.log('getAddressHistory sent successfully');
console.log(result);
document.getElementById('getAddressHistoryResult').innerText = JSON.stringify(result).replace(/,/g, ", ");
});
}
function lookupAddressHistories(addresses, from, to, mempool, start, end, f) {
function lookupAddressHistories(addresses, from, to, mempool, start, end, format, f) {
const method = 'getAddressHistory';
const rangeParam = mempool ? {
const opts = mempool ? {
start, // needed for older bitcores (so we don't load all history if bitcore-node < 3.1.3)
end,
queryMempoolOnly: true,
@@ -77,9 +78,10 @@
const params = [
addresses,
{
...rangeParam,
...opts,
from,
to,
addressFormat: parseInt(format),
},
];
return socket.send({ method, params }, f);
@@ -87,7 +89,7 @@
function lookupTransactionsIdsMempool(addresses, mempool, start, end, f) {
const method = 'getAddressTxids';
const rangeParam = mempool ? {
const opts = mempool ? {
start,
end,
queryMempoolOnly: true,
@@ -98,7 +100,7 @@
};
const params = [
addresses,
rangeParam,
opts,
];
return socket.send({ method, params }, f);
}
@@ -139,6 +141,21 @@
return socket.send({ method, params }, f);
}
// Reads the block count from the estimateFee input, sends an estimateFee
// request and renders the result into the page.
function estimateFee() {
    const blocks = parseInt(document.getElementById('estimateFeeBlocks').value.trim());
    estimateTxFee(blocks, function (result) {
        console.log('estimateFee sent successfully');
        console.log(result);
        document.getElementById('estimateFeeResult').innerText = JSON.stringify(result).replace(/,/g, ", ");
    });
}
// Sends an estimateFee request for the given number of blocks; f receives the reply.
function estimateTxFee(blocks, f) {
    return socket.send({ method: 'estimateFee', params: [blocks] }, f);
}
function getInfo() {
lookupSyncStatus(function (result) {
console.log('getInfo sent successfully');
@@ -155,18 +172,28 @@
function getDetailedTransaction() {
var hash = document.getElementById('getDetailedTransactionHash').value.trim();
lookupDetailedTransaction(hash, function (result) {
var format = document.getElementById("getDetailedTransactionFormat").value;
lookupDetailedTransaction(hash, format, function (result) {
console.log('getDetailedTransaction sent successfully');
console.log(result);
document.getElementById('getDetailedTransactionResult').innerText = JSON.stringify(result).replace(/,/g, ", ");
});
}
function lookupDetailedTransaction(hash, f) {
function lookupDetailedTransaction(hash, format, f) {
const method = 'getDetailedTransaction';
const params = [
const af = parseInt(format)
var params = [
hash,
];
if (af !== 0) {
params = [
hash,
{
addressFormat: af,
},
];
}
return socket.send({ method, params }, f);
}
@@ -275,6 +302,14 @@
<input type="checkbox" id="getAddressHistoryMempool">&nbsp;
<label>only mempool</label>
</div>
<div class="col-9"></div>
<div class="col form-inline">
<label>address format</label>&nbsp;
<select id="getAddressHistoryFormat" value="0">
<option value="0">default</option>
<option value="1">bitcoincash</option>
</select>
</div>
</div>
<div class="row">
<div class="col" id="getAddressHistoryResult">
@@ -310,6 +345,19 @@
<div class="col" id="estimateSmartFeeResult">
</div>
</div>
<div class="row">
<div class="col">
<input class="btn btn-secondary" type="button" value="estimateFee" onclick="estimateFee()">
</div>
<div class="col-8">
<input type="text" class="form-control" id="estimateFeeBlocks" value="20">
</div>
<div class="col"></div>
</div>
<div class="row">
<div class="col" id="estimateFeeResult">
</div>
</div>
<div class="row">
<div class="col">
<input class="btn btn-secondary" type="button" value="getInfo" onclick="getInfo()">
@@ -324,7 +372,14 @@
<div class="col-8">
<input type="text" class="form-control" id="getDetailedTransactionHash" value="474e6795760ebe81cb4023dc227e5a0efe340e1771c89a0035276361ed733de7">
</div>
<div class="col">
<div class="col"></div>
<div class="col-9"></div>
<div class="col form-inline">
<label>address format</label>&nbsp;
<select id="getDetailedTransactionFormat" value="0">
<option value="0">default</option>
<option value="1">bitcoincash</option>
</select>
</div>
</div>
<div class="row">
@@ -388,4 +443,4 @@
document.getElementById('serverAddress').value = window.location.protocol.replace("http", "ws") + "//" + window.location.host;
</script>
</html>
</html>