2016-01-15 06:56:25 +03:00
|
|
|
package uspv
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"log"
|
|
|
|
"net"
|
|
|
|
"os"
|
|
|
|
|
|
|
|
"github.com/btcsuite/btcd/chaincfg"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil/bloom"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// keyFileName is the file holding the hex-encoded wallet seed.
	keyFileName = "testseed.hex"
	// headerFileName is the flat file of concatenated 80-byte block headers.
	headerFileName = "headers.bin"

	// Except hash-160s, those aren't backwards. But anything that's 32 bytes is.
	// because, cmon, 32? Gotta reverse that. But 20? 20 is OK.

	// VERSION is the p2p protocol version this node reports to peers.
	// version hardcoded for now, probably ok...?
	VERSION = 70011
)
|
|
|
|
|
|
|
|
// SPVCon is an SPV connection to a single remote full node.  It owns the
// TCP connection, the on-disk header chain, and the in/out message queues
// serviced by the handler goroutines started in OpenSPV.
type SPVCon struct {
	con        net.Conn // the (probably tcp) connection to the node
	headerFile *os.File // file for SPV headers

	//[doesn't work without fancy mutexes, nevermind, just use header file]
	// localHeight int32 // block height we're on
	remoteHeight int32 // block height they're on

	localVersion  uint32 // version we report
	remoteVersion uint32 // version remote node

	// what's the point of the input queue? remove? leave for now...
	inMsgQueue  chan wire.Message // Messages coming in from remote node
	outMsgQueue chan wire.Message // Messages going out to remote node

	WBytes uint64 // total bytes written
	RBytes uint64 // total bytes read

	TS    *TxStore         // transaction store to write to
	param *chaincfg.Params // network parameters (testnet3, testnetL)

	// mBlockQueue is for keeping track of what height we've requested.
	mBlockQueue chan HashAndHeight

	// fPositives is a channel to keep track of bloom filter false positives.
	fPositives chan int32
}
|
|
|
|
|
2016-01-21 08:08:05 +03:00
|
|
|
func OpenSPV(remoteNode string, hfn, tsfn string,
|
2016-01-19 10:43:41 +03:00
|
|
|
inTs *TxStore, p *chaincfg.Params) (SPVCon, error) {
|
|
|
|
// create new SPVCon
|
|
|
|
var s SPVCon
|
|
|
|
|
|
|
|
// assign network parameters to SPVCon
|
|
|
|
s.param = p
|
|
|
|
|
2016-01-15 06:56:25 +03:00
|
|
|
// open header file
|
|
|
|
err := s.openHeaderFile(headerFileName)
|
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// open TCP connection
|
|
|
|
s.con, err = net.Dial("tcp", remoteNode)
|
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
|
2016-01-19 10:43:41 +03:00
|
|
|
// assign version bits for local node
|
2016-01-15 06:56:25 +03:00
|
|
|
s.localVersion = VERSION
|
|
|
|
|
2016-01-19 10:43:41 +03:00
|
|
|
// transaction store for this SPV connection
|
2016-01-21 08:08:05 +03:00
|
|
|
err = inTs.OpenDB(tsfn)
|
2016-01-20 07:02:18 +03:00
|
|
|
if err != nil {
|
|
|
|
return s, err
|
|
|
|
}
|
2016-01-21 12:04:45 +03:00
|
|
|
inTs.Param = p
|
|
|
|
s.TS = inTs // copy pointer of txstore into spvcon
|
2016-01-15 13:40:56 +03:00
|
|
|
|
2016-01-15 06:56:25 +03:00
|
|
|
myMsgVer, err := wire.NewMsgVersionFromConn(s.con, 0, 0)
|
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
err = myMsgVer.AddUserAgent("test", "zero")
|
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
// must set this to enable SPV stuff
|
|
|
|
myMsgVer.AddService(wire.SFNodeBloom)
|
|
|
|
|
|
|
|
// this actually sends
|
2016-01-19 10:43:41 +03:00
|
|
|
n, err := wire.WriteMessageN(s.con, myMsgVer, s.localVersion, s.param.Net)
|
2016-01-15 06:56:25 +03:00
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
s.WBytes += uint64(n)
|
|
|
|
log.Printf("wrote %d byte version message to %s\n",
|
|
|
|
n, s.con.RemoteAddr().String())
|
|
|
|
|
2016-01-19 10:43:41 +03:00
|
|
|
n, m, b, err := wire.ReadMessageN(s.con, s.localVersion, s.param.Net)
|
2016-01-15 06:56:25 +03:00
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
s.RBytes += uint64(n)
|
|
|
|
log.Printf("got %d byte response %x\n command: %s\n", n, b, m.Command())
|
|
|
|
|
|
|
|
mv, ok := m.(*wire.MsgVersion)
|
|
|
|
if ok {
|
|
|
|
log.Printf("connected to %s", mv.UserAgent)
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Printf("remote reports version %x (dec %d)\n",
|
|
|
|
mv.ProtocolVersion, mv.ProtocolVersion)
|
|
|
|
|
2016-01-20 10:40:04 +03:00
|
|
|
// set remote height
|
|
|
|
s.remoteHeight = mv.LastBlock
|
|
|
|
|
2016-01-15 06:56:25 +03:00
|
|
|
mva := wire.NewMsgVerAck()
|
2016-01-19 10:43:41 +03:00
|
|
|
n, err = wire.WriteMessageN(s.con, mva, s.localVersion, s.param.Net)
|
2016-01-15 06:56:25 +03:00
|
|
|
if err != nil {
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
s.WBytes += uint64(n)
|
|
|
|
|
2016-01-29 06:35:49 +03:00
|
|
|
s.inMsgQueue = make(chan wire.Message)
|
2016-01-15 06:56:25 +03:00
|
|
|
go s.incomingMessageHandler()
|
2016-01-29 06:35:49 +03:00
|
|
|
s.outMsgQueue = make(chan wire.Message)
|
2016-01-15 06:56:25 +03:00
|
|
|
go s.outgoingMessageHandler()
|
2016-01-23 03:04:27 +03:00
|
|
|
s.mBlockQueue = make(chan HashAndHeight, 32) // queue depth 32 is a thing
|
2016-01-29 06:35:49 +03:00
|
|
|
s.fPositives = make(chan int32, 4000) // a block full, approx
|
|
|
|
go s.fPositiveHandler()
|
2016-01-20 07:02:18 +03:00
|
|
|
|
2016-01-19 10:43:41 +03:00
|
|
|
return s, nil
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SPVCon) openHeaderFile(hfn string) error {
|
|
|
|
_, err := os.Stat(hfn)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
var b bytes.Buffer
|
2016-01-19 10:43:41 +03:00
|
|
|
err = s.param.GenesisBlock.Header.Serialize(&b)
|
2016-01-15 06:56:25 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = ioutil.WriteFile(hfn, b.Bytes(), 0600)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Printf("created hardcoded genesis header at %s\n",
|
|
|
|
hfn)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.headerFile, err = os.OpenFile(hfn, os.O_RDWR, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
log.Printf("opened header file %s\n", s.headerFile.Name())
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SPVCon) PongBack(nonce uint64) {
|
|
|
|
mpong := wire.NewMsgPong(nonce)
|
|
|
|
|
|
|
|
s.outMsgQueue <- mpong
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *SPVCon) SendFilter(f *bloom.Filter) {
|
|
|
|
s.outMsgQueue <- f.MsgFilterLoad()
|
2016-01-22 08:50:42 +03:00
|
|
|
|
2016-01-15 06:56:25 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-01-19 12:33:58 +03:00
|
|
|
// HeightFromHeader gives you the block height given a 80 byte block header
// seems like looking for the merkle root is the best way to do this
//
// It scans the on-disk header file backward from the tip, comparing merkle
// roots, and returns the height of the first match.  Note that this moves
// the headerFile offset as a side effect.
// NOTE(review): the loop condition is height > 0, so the genesis header at
// height 0 is never compared — confirm that's intended.
func (s *SPVCon) HeightFromHeader(query wire.BlockHeader) (uint32, error) {
	// start from the most recent and work back in time; even though that's
	// kind of annoying it's probably a lot faster since things tend to have
	// happened recently.

	// seek to last header
	lastPos, err := s.headerFile.Seek(-80, os.SEEK_END)
	if err != nil {
		return 0, err
	}
	// height of the tip header: offset of the last header / 80 bytes each
	height := lastPos / 80

	var current wire.BlockHeader

	for height > 0 {
		// grab header from disk
		err = current.Deserialize(s.headerFile)
		if err != nil {
			return 0, err
		}
		// check if merkle roots match
		if current.MerkleRoot.IsEqual(&query.MerkleRoot) {
			// if they do, great, return height
			return uint32(height), nil
		}
		// skip back one header (2 because we just read one)
		_, err = s.headerFile.Seek(-160, os.SEEK_CUR)
		if err != nil {
			return 0, err
		}
		// decrement height
		height--
	}
	// finished for loop without finding match
	return 0, fmt.Errorf("Header not found on disk")
}
|
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
// AskForTx requests a tx we heard about from an inv message.
|
|
|
|
// It's one at a time but should be fast enough.
|
|
|
|
func (s *SPVCon) AskForTx(txid wire.ShaHash) {
|
|
|
|
gdata := wire.NewMsgGetData()
|
|
|
|
inv := wire.NewInvVect(wire.InvTypeTx, &txid)
|
|
|
|
gdata.AddInvVect(inv)
|
|
|
|
s.outMsgQueue <- gdata
|
|
|
|
}
|
|
|
|
|
|
|
|
// AskForBlock requests a merkle block we heard about from an inv message.
|
|
|
|
// We don't have it in our header file so when we get it we do both operations:
|
|
|
|
// appending and checking the header, and checking spv proofs
|
|
|
|
func (s *SPVCon) AskForBlock(hsh wire.ShaHash) {
|
2016-01-27 12:24:16 +03:00
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
gdata := wire.NewMsgGetData()
|
|
|
|
inv := wire.NewInvVect(wire.InvTypeFilteredBlock, &hsh)
|
|
|
|
gdata.AddInvVect(inv)
|
|
|
|
|
|
|
|
info, err := s.headerFile.Stat() // get
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal(err) // crash if header file disappears
|
|
|
|
}
|
|
|
|
nextHeight := int32(info.Size() / 80)
|
|
|
|
|
|
|
|
hah := NewRootAndHeight(hsh, nextHeight)
|
2016-01-29 06:35:49 +03:00
|
|
|
fmt.Printf("AskForBlock - %s height %d\n", hsh.String(), nextHeight)
|
2016-01-23 03:04:27 +03:00
|
|
|
s.mBlockQueue <- hah // push height and mroot of requested block on queue
|
|
|
|
s.outMsgQueue <- gdata // push request to outbox
|
|
|
|
}
|
|
|
|
|
2016-01-15 10:08:37 +03:00
|
|
|
func (s *SPVCon) AskForHeaders() error {
|
2016-01-15 06:56:25 +03:00
|
|
|
var hdr wire.BlockHeader
|
|
|
|
ghdr := wire.NewMsgGetHeaders()
|
|
|
|
ghdr.ProtocolVersion = s.localVersion
|
|
|
|
|
|
|
|
info, err := s.headerFile.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
headerFileSize := info.Size()
|
|
|
|
if headerFileSize == 0 || headerFileSize%80 != 0 { // header file broken
|
|
|
|
return fmt.Errorf("Header file not a multiple of 80 bytes")
|
|
|
|
}
|
|
|
|
|
|
|
|
// seek to 80 bytes from end of file
|
|
|
|
ns, err := s.headerFile.Seek(-80, os.SEEK_END)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("can't seek\n")
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Printf("suk to offset %d (should be near the end\n", ns)
|
2016-01-23 03:04:27 +03:00
|
|
|
|
2016-01-15 06:56:25 +03:00
|
|
|
// get header from last 80 bytes of file
|
|
|
|
err = hdr.Deserialize(s.headerFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("can't Deserialize")
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
cHash := hdr.BlockSha()
|
|
|
|
err = ghdr.AddBlockLocatorHash(&cHash)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("get headers message has %d header hashes, first one is %s\n",
|
|
|
|
len(ghdr.BlockLocatorHashes), ghdr.BlockLocatorHashes[0].String())
|
|
|
|
|
|
|
|
s.outMsgQueue <- ghdr
|
|
|
|
|
|
|
|
return nil
|
2016-01-15 10:08:37 +03:00
|
|
|
}
|
2016-01-15 06:56:25 +03:00
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
// IngestMerkleBlock checks an incoming merkle block against the request
// queue, stores all txids the block proves, and advances the store's sync
// height.  Merkle blocks are expected back in exactly the order requested
// (see mBlockQueue); an out-of-order block is an error.
func (s *SPVCon) IngestMerkleBlock(m *wire.MsgMerkleBlock) error {
	txids, err := checkMBlock(m) // check self-consistency
	if err != nil {
		return err
	}
	hah := <-s.mBlockQueue // pop height off mblock queue
	// this verifies order, and also that the returned header fits
	// into our SPV header file
	newMerkBlockSha := m.Header.BlockSha()
	if !hah.blockhash.IsEqual(&newMerkBlockSha) {
		return fmt.Errorf("merkle block out of order error")
	}

	// record every txid the merkle proof committed to, at this block height
	for _, txid := range txids {
		err := s.TS.AddTxid(txid, hah.height)
		if err != nil {
			return fmt.Errorf("Txid store error: %s\n", err.Error())
		}
	}
	// persist that we've synced through this height
	err = s.TS.SetDBSyncHeight(hah.height)
	if err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
|
|
|
// IngestHeaders takes in a bunch of headers and appends them to the
|
|
|
|
// local header file, checking that they fit. If there's no headers,
|
|
|
|
// it assumes we're done and returns false. If it worked it assumes there's
|
2016-01-29 06:35:49 +03:00
|
|
|
// more to request and returns true.
|
2016-01-15 10:08:37 +03:00
|
|
|
func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {
|
|
|
|
var err error
|
2016-01-19 12:33:58 +03:00
|
|
|
// seek to last header
|
2016-01-15 06:56:25 +03:00
|
|
|
_, err = s.headerFile.Seek(-80, os.SEEK_END)
|
|
|
|
if err != nil {
|
2016-01-15 10:08:37 +03:00
|
|
|
return false, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
var last wire.BlockHeader
|
|
|
|
err = last.Deserialize(s.headerFile)
|
|
|
|
if err != nil {
|
2016-01-15 10:08:37 +03:00
|
|
|
return false, err
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
prevHash := last.BlockSha()
|
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
endPos, err := s.headerFile.Seek(0, os.SEEK_END)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
tip := int32(endPos/80) - 1 // move back 1 header length to read
|
|
|
|
|
2016-01-15 10:08:37 +03:00
|
|
|
gotNum := int64(len(m.Headers))
|
2016-01-15 06:56:25 +03:00
|
|
|
if gotNum > 0 {
|
|
|
|
fmt.Printf("got %d headers. Range:\n%s - %s\n",
|
2016-01-15 10:08:37 +03:00
|
|
|
gotNum, m.Headers[0].BlockSha().String(),
|
|
|
|
m.Headers[len(m.Headers)-1].BlockSha().String())
|
|
|
|
} else {
|
|
|
|
log.Printf("got 0 headers, we're probably synced up")
|
|
|
|
return false, nil
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
2016-01-15 10:08:37 +03:00
|
|
|
// check first header returned to make sure it fits on the end
|
|
|
|
// of our header file
|
|
|
|
if !m.Headers[0].PrevBlock.IsEqual(&prevHash) {
|
|
|
|
// delete 100 headers if this happens! Dumb reorg.
|
2016-01-23 03:04:27 +03:00
|
|
|
log.Printf("reorg? header msg doesn't fit. points to %s, expect %s",
|
2016-01-15 10:08:37 +03:00
|
|
|
m.Headers[0].PrevBlock.String(), prevHash.String())
|
|
|
|
if endPos < 8080 {
|
|
|
|
// jeez I give up, back to genesis
|
|
|
|
s.headerFile.Truncate(80)
|
|
|
|
} else {
|
|
|
|
err = s.headerFile.Truncate(endPos - 8000)
|
|
|
|
if err != nil {
|
|
|
|
return false, fmt.Errorf("couldn't truncate header file")
|
|
|
|
}
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
2016-01-29 06:35:49 +03:00
|
|
|
return true, fmt.Errorf("Truncated header file to try again")
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
|
2016-01-15 10:08:37 +03:00
|
|
|
for _, resphdr := range m.Headers {
|
|
|
|
// write to end of file
|
|
|
|
err = resphdr.Serialize(s.headerFile)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
2016-01-15 06:56:25 +03:00
|
|
|
|
2016-01-15 10:08:37 +03:00
|
|
|
// advance chain tip
|
|
|
|
tip++
|
|
|
|
// check last header
|
2016-01-19 10:43:41 +03:00
|
|
|
worked := CheckHeader(s.headerFile, tip, s.param)
|
2016-01-15 10:08:37 +03:00
|
|
|
if !worked {
|
|
|
|
if endPos < 8080 {
|
|
|
|
// jeez I give up, back to genesis
|
|
|
|
s.headerFile.Truncate(80)
|
|
|
|
} else {
|
|
|
|
err = s.headerFile.Truncate(endPos - 8000)
|
|
|
|
if err != nil {
|
|
|
|
return false, fmt.Errorf("couldn't truncate header file")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// probably should disconnect from spv node at this point,
|
|
|
|
// since they're giving us invalid headers.
|
|
|
|
return false, fmt.Errorf(
|
|
|
|
"Header %d - %s doesn't fit, dropping 100 headers.",
|
|
|
|
resphdr.BlockSha().String(), tip)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
log.Printf("Headers to height %d OK.", tip)
|
2016-01-29 06:35:49 +03:00
|
|
|
// if we got post DB syncheight headers, get merkleblocks for them
|
|
|
|
// this is always true except for first pre-birthday sync
|
2016-01-23 03:04:27 +03:00
|
|
|
|
2016-01-29 06:35:49 +03:00
|
|
|
syncTip, err := s.TS.GetDBSyncHeight()
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if syncTip < tip {
|
|
|
|
err = s.AskForMerkBlocks(syncTip, tip)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
}
|
2016-01-15 10:08:37 +03:00
|
|
|
return true, nil
|
2016-01-15 06:56:25 +03:00
|
|
|
}
|
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
// HashAndHeight is needed instead of just height in case a fullnode
// responds abnormally (?) by sending out of order merkleblocks.
// we cache a merkleroot:height pair in the queue so we don't have to
// look them up from the disk.
// Also used when inv messages indicate blocks so we can add the header
// and parse the txs in one request instead of requesting headers first.
type HashAndHeight struct {
	blockhash wire.ShaHash // block hash of the requested merkle block
	height    int32        // height we expect that block to land at
}
|
|
|
|
|
|
|
|
// NewRootAndHeight saves like 2 lines.
|
2016-01-23 03:04:27 +03:00
|
|
|
func NewRootAndHeight(b wire.ShaHash, h int32) (hah HashAndHeight) {
|
|
|
|
hah.blockhash = b
|
|
|
|
hah.height = h
|
2016-01-20 01:23:18 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-01-22 04:59:50 +03:00
|
|
|
func (s *SPVCon) PushTx(tx *wire.MsgTx) error {
|
|
|
|
txid := tx.TxSha()
|
|
|
|
err := s.TS.AddTxid(&txid, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-01-29 06:35:49 +03:00
|
|
|
_, err = s.TS.AckTx(tx) // our own tx so don't need to track relevance
|
2016-01-22 04:59:50 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
s.outMsgQueue <- tx
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-01-29 06:35:49 +03:00
|
|
|
func (s *SPVCon) GetNextHeaderHeight() (int32, error) {
|
|
|
|
info, err := s.headerFile.Stat() // get
|
|
|
|
if err != nil {
|
|
|
|
return 0, err // crash if header file disappears
|
|
|
|
}
|
|
|
|
nextHeight := int32(info.Size() / 80)
|
|
|
|
return nextHeight, nil
|
|
|
|
}
|
|
|
|
|
2016-01-20 01:23:18 +03:00
|
|
|
// AskForMerkBlocks requests blocks from current to last
|
|
|
|
// right now this asks for 1 block per getData message.
|
|
|
|
// Maybe it's faster to ask for many in a each message?
|
2016-01-20 10:40:04 +03:00
|
|
|
func (s *SPVCon) AskForMerkBlocks(current, last int32) error {
|
2016-01-15 13:40:56 +03:00
|
|
|
var hdr wire.BlockHeader
|
2016-01-29 06:35:49 +03:00
|
|
|
|
|
|
|
nextHeight, err := s.GetNextHeaderHeight()
|
2016-01-23 03:04:27 +03:00
|
|
|
if err != nil {
|
2016-01-29 06:35:49 +03:00
|
|
|
return err
|
2016-01-23 03:04:27 +03:00
|
|
|
}
|
2016-01-29 06:35:49 +03:00
|
|
|
|
2016-01-23 03:04:27 +03:00
|
|
|
fmt.Printf("have headers up to height %d\n", nextHeight-1)
|
2016-01-20 10:40:04 +03:00
|
|
|
// if last is 0, that means go as far as we can
|
|
|
|
if last == 0 {
|
2016-01-23 03:04:27 +03:00
|
|
|
last = nextHeight - 1
|
2016-01-20 10:40:04 +03:00
|
|
|
}
|
2016-01-23 03:04:27 +03:00
|
|
|
fmt.Printf("will request merkleblocks %d to %d\n", current, last)
|
2016-01-27 12:24:16 +03:00
|
|
|
|
2016-01-22 08:50:42 +03:00
|
|
|
// create initial filter
|
|
|
|
filt, err := s.TS.GimmeFilter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// send filter
|
|
|
|
s.SendFilter(filt)
|
|
|
|
fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter)
|
|
|
|
|
|
|
|
_, err = s.headerFile.Seek(int64(current*80), os.SEEK_SET)
|
2016-01-15 13:40:56 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-01-20 01:23:18 +03:00
|
|
|
// loop through all heights where we want merkleblocks.
|
2016-01-15 13:40:56 +03:00
|
|
|
for current < last {
|
2016-01-22 12:41:08 +03:00
|
|
|
// check if we need to update filter... diff of 5 utxos...?
|
2016-01-22 08:50:42 +03:00
|
|
|
|
2016-01-20 01:23:18 +03:00
|
|
|
// load header from file
|
2016-01-15 13:40:56 +03:00
|
|
|
err = hdr.Deserialize(s.headerFile)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
bHash := hdr.BlockSha()
|
2016-01-20 01:23:18 +03:00
|
|
|
// create inventory we're asking for
|
2016-01-15 13:40:56 +03:00
|
|
|
iv1 := wire.NewInvVect(wire.InvTypeFilteredBlock, &bHash)
|
|
|
|
gdataMsg := wire.NewMsgGetData()
|
2016-01-20 01:23:18 +03:00
|
|
|
// add inventory
|
2016-01-15 13:40:56 +03:00
|
|
|
err = gdataMsg.AddInvVect(iv1)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-01-23 03:04:27 +03:00
|
|
|
hah := NewRootAndHeight(hdr.BlockSha(), current)
|
2016-01-15 13:40:56 +03:00
|
|
|
s.outMsgQueue <- gdataMsg
|
2016-01-23 03:04:27 +03:00
|
|
|
s.mBlockQueue <- hah // push height and mroot of requested block on queue
|
2016-01-20 01:23:18 +03:00
|
|
|
current++
|
2016-01-15 13:40:56 +03:00
|
|
|
}
|
2016-01-29 06:35:49 +03:00
|
|
|
// done syncing blocks known in header file, ask for new headers we missed
|
|
|
|
s.AskForHeaders()
|
|
|
|
|
2016-01-15 13:40:56 +03:00
|
|
|
return nil
|
|
|
|
}
|