Fix problems with concurrent disk access to the header file.

Possible follow-up: add mutexes around header-file reads/writes.
This commit is contained in:
Tadge Dryja 2016-01-29 00:40:52 -08:00
parent 5c2bbff3eb
commit 4cd9087f9f
2 changed files with 36 additions and 20 deletions

@ -394,21 +394,7 @@ func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {
} }
} }
log.Printf("Headers to height %d OK.", tip) log.Printf("Headers to height %d OK.", tip)
// if we got post DB syncheight headers, get merkleblocks for them
// this is always true except for first pre-birthday sync
syncTip, err := s.TS.GetDBSyncHeight()
if err != nil {
return false, err
}
if syncTip < tip {
fmt.Printf("syncTip %d headerTip %d\n", syncTip, tip)
err = s.AskForMerkBlocks(syncTip, tip)
if err != nil {
return false, err
}
}
return true, nil return true, nil
} }
@ -492,7 +478,7 @@ func (s *SPVCon) AskForMerkBlocks(current, last int32) error {
s.SendFilter(filt) s.SendFilter(filt)
fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter) fmt.Printf("sent filter %x\n", filt.MsgFilterLoad().Filter)
_, err = s.headerFile.Seek(int64(current*80), os.SEEK_SET) _, err = s.headerFile.Seek(int64((current-1)*80), os.SEEK_SET)
if err != nil { if err != nil {
return err return err
} }
@ -501,6 +487,7 @@ func (s *SPVCon) AskForMerkBlocks(current, last int32) error {
// load header from file // load header from file
err = hdr.Deserialize(s.headerFile) err = hdr.Deserialize(s.headerFile)
if err != nil { if err != nil {
log.Printf("Deserialize err\n")
return err return err
} }
@ -518,10 +505,8 @@ func (s *SPVCon) AskForMerkBlocks(current, last int32) error {
s.mBlockQueue <- hah // push height and mroot of requested block on queue s.mBlockQueue <- hah // push height and mroot of requested block on queue
current++ current++
} }
fmt.Printf("mblock reqs done, more headers\n")
// done syncing blocks known in header file, ask for new headers we missed // done syncing blocks known in header file, ask for new headers we missed
s.AskForHeaders() // s.AskForHeaders()
// don't need this -- will sync to end regardless
return nil return nil
} }

@ -3,6 +3,7 @@ package uspv
import ( import (
"fmt" "fmt"
"log" "log"
"os"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
) )
@ -106,9 +107,33 @@ func (s *SPVCon) HeaderHandler(m *wire.MsgHeaders) {
log.Printf("Header error: %s\n", err.Error()) log.Printf("Header error: %s\n", err.Error())
return return
} }
// if we got post DB syncheight headers, get merkleblocks for them
// this is always true except for first pre-birthday sync
syncTip, err := s.TS.GetDBSyncHeight()
if err != nil {
log.Printf("Header error: %s", err.Error())
return
}
endPos, err := s.headerFile.Seek(0, os.SEEK_END)
if err != nil {
log.Printf("Header error: %s", err.Error())
return
}
tip := int32(endPos/80) - 1 // move back 1 header length to read
	// checked header length, start req for more if needed
if moar { if moar {
s.AskForHeaders() s.AskForHeaders()
} }
if syncTip < tip {
fmt.Printf("syncTip %d headerTip %d\n", syncTip, tip)
err = s.AskForMerkBlocks(syncTip, tip)
if err != nil {
log.Printf("AskForMerkBlocks error: %s", err.Error())
return
}
}
} }
func (s *SPVCon) TxHandler(m *wire.MsgTx) { func (s *SPVCon) TxHandler(m *wire.MsgTx) {
@ -120,6 +145,9 @@ func (s *SPVCon) TxHandler(m *wire.MsgTx) {
log.Printf("tx %s had no hits, filter false positive.", log.Printf("tx %s had no hits, filter false positive.",
m.TxSha().String()) m.TxSha().String())
s.fPositives <- 1 // add one false positive to chan s.fPositives <- 1 // add one false positive to chan
} else {
log.Printf("tx %s ingested and matches utxo/adrs.",
m.TxSha().String())
} }
} }
@ -135,7 +163,10 @@ func (s *SPVCon) InvHandler(m *wire.MsgInv) {
if thing.Type == wire.InvTypeBlock { // new block, ingest if thing.Type == wire.InvTypeBlock { // new block, ingest
if len(s.mBlockQueue) == 0 { if len(s.mBlockQueue) == 0 {
// don't ask directly; instead ask for header // don't ask directly; instead ask for header
s.AskForHeaders() fmt.Printf("asking for headers due to inv block\n")
// s.AskForHeaders()
} else {
fmt.Printf("inv block but ignoring, not synched\n")
} }
} }
} }