db optimizations

Tadge Dryja 2016-02-25 12:08:35 -08:00
parent f9a740d392
commit caf56aaff0
3 changed files with 31 additions and 31 deletions

@@ -141,22 +141,22 @@ func (s *SPVCon) TxHandler(m *wire.MsgTx) {
 	}
 	// check for double spends
-	allTxs, err := s.TS.GetAllTxs()
-	if err != nil {
-		log.Printf("Can't get txs from db: %s", err.Error())
-		return
-	}
-	dubs, err := CheckDoubleSpends(m, allTxs)
-	if err != nil {
-		log.Printf("CheckDoubleSpends error: %s", err.Error())
-		return
-	}
-	if len(dubs) > 0 {
-		for i, dub := range dubs {
-			fmt.Printf("dub %d known tx %s and new tx %s are exclusive!!!\n",
-				i, dub.String(), m.TxSha().String())
-		}
-	}
+	// allTxs, err := s.TS.GetAllTxs()
+	// if err != nil {
+	// 	log.Printf("Can't get txs from db: %s", err.Error())
+	// 	return
+	// }
+	// dubs, err := CheckDoubleSpends(m, allTxs)
+	// if err != nil {
+	// 	log.Printf("CheckDoubleSpends error: %s", err.Error())
+	// 	return
+	// }
+	// if len(dubs) > 0 {
+	// 	for i, dub := range dubs {
+	// 		fmt.Printf("dub %d known tx %s and new tx %s are exclusive!!!\n",
+	// 			i, dub.String(), m.TxSha().String())
+	// 	}
+	// }
 	hits, err := s.TS.Ingest(m, height)
 	if err != nil {
 		log.Printf("Incoming Tx error: %s\n", err.Error())

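The hunk above takes the double-spend scan out of the TxHandler hot path: GetAllTxs read every stored transaction out of the database, and CheckDoubleSpends then compared the new transaction's inputs against all of them, for every incoming tx. The sketch below illustrates why that cost grows with the size of the tx store; it is an illustration only, not the repo's CheckDoubleSpends, and uses btcd's wire types.

package sketch

import "github.com/btcsuite/btcd/wire"

// doubleSpendSketch stands in for the disabled check: it compares every
// input of the new tx against every input of every known tx, so each
// incoming tx costs a full read of the tx store plus a nested scan.
func doubleSpendSketch(newTx *wire.MsgTx, known []*wire.MsgTx) int {
	// index the new tx's spent outpoints
	spent := make(map[wire.OutPoint]struct{}, len(newTx.TxIn))
	for _, in := range newTx.TxIn {
		spent[in.PreviousOutPoint] = struct{}{}
	}
	conflicts := 0
	for _, tx := range known { // "known" is everything GetAllTxs loaded
		for _, in := range tx.TxIn {
			if _, ok := spent[in.PreviousOutPoint]; ok {
				conflicts++ // two txs spending the same outpoint are exclusive
			}
		}
	}
	return conflicts
}
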
@@ -76,10 +76,10 @@ func (s SortableUtxoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 // height 0 means your lesser
 func (s SortableUtxoSlice) Less(i, j int) bool {
-	if s[i].AtHeight == 0 {
+	if s[i].AtHeight == 0 && s[j].AtHeight > 0 {
 		return true
 	}
-	if s[j].AtHeight == 0 {
+	if s[j].AtHeight == 0 && s[i].AtHeight > 0 {
 		return false
 	}
 	return s[i].Value < s[j].Value

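Besides sitting in the coin-selection path, the old Less was subtly wrong: when both utxos are unconfirmed (AtHeight == 0), Less(i, j) and Less(j, i) both returned true, which violates the strict weak ordering sort.Sort expects. The added guards only prefer an unconfirmed utxo over a confirmed one and otherwise fall through to the value comparison. A minimal sketch of the fixed ordering, with the Utxo fields pared down for illustration:

package sketch

type utxo struct {
	Value    int64 // satoshis
	AtHeight int32 // 0 means unconfirmed
}

type byHeightThenValue []utxo

func (s byHeightThenValue) Len() int      { return len(s) }
func (s byHeightThenValue) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Unconfirmed sorts before confirmed; everything else falls through to the
// value comparison.  Without the added "&& ... > 0" guards, two unconfirmed
// utxos would each report Less == true against the other.
func (s byHeightThenValue) Less(i, j int) bool {
	if s[i].AtHeight == 0 && s[j].AtHeight > 0 {
		return true
	}
	if s[j].AtHeight == 0 && s[i].AtHeight > 0 {
		return false
	}
	return s[i].Value < s[j].Value
}
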
@@ -380,11 +380,11 @@ func (ts *TxStore) Ingest(tx *wire.MsgTx, height int32) (uint32, error) {
 	wPKscripts := make([][]byte, len(ts.Adrs))
 	aPKscripts := make([][]byte, len(ts.Adrs))
-	for i, adr := range ts.Adrs {
+	for i, _ := range ts.Adrs {
 		// iterate through all our addresses
 		// convert regular address to witness address. (split adrs later)
 		wa, err := btcutil.NewAddressWitnessPubKeyHash(
-			adr.PkhAdr.ScriptAddress(), ts.Param)
+			ts.Adrs[i].PkhAdr.ScriptAddress(), ts.Param)
 		if err != nil {
 			return hits, err
 		}
@@ -393,7 +393,7 @@ func (ts *TxStore) Ingest(tx *wire.MsgTx, height int32) (uint32, error) {
 		if err != nil {
 			return hits, err
 		}
-		aPKscripts[i], err = txscript.PayToAddrScript(adr.PkhAdr)
+		aPKscripts[i], err = txscript.PayToAddrScript(ts.Adrs[i].PkhAdr)
 		if err != nil {
 			return hits, err
 		}
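
The two hunks above drop the adr range variable in favor of indexing ts.Adrs directly. One plausible reading, in line with the commit's optimization theme, is that this avoids copying each address struct on every iteration; the sketch below shows the general difference, with a placeholder struct rather than the repo's address type.

package sketch

type addr struct {
	pkh [20]byte // placeholder payload
}

func withCopy(a []addr) {
	for _, v := range a { // v is a fresh copy of a[i] each iteration
		_ = v.pkh
	}
}

func inPlace(a []addr) {
	for i := range a { // reads the element in place, no copy
		_ = a[i].pkh
	}
}
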
@@ -440,10 +440,8 @@ func (ts *TxStore) Ingest(tx *wire.MsgTx, height int32) (uint32, error) {
 			return fmt.Errorf("error: db not initialized")
 		}
-		// first see if we lose utxos
 		// iterate through duffel bag and look for matches
 		// this makes us lose money, which is regrettable, but we need to know.
-		// var delOPs [][]byte
 		for _, nOP := range spentOPs {
 			v := duf.Get(nOP)
 			if v != nil {
@@ -478,18 +476,20 @@ func (ts *TxStore) Ingest(tx *wire.MsgTx, height int32) (uint32, error) {
 				if err != nil {
 					return err
 				}
-				// stash for deletion
-				// delOPs = append(delOPs, nOP)
+				err = duf.Delete(nOP)
+				if err != nil {
+					return err
+				}
 			}
 		}
 		//delete everything even if it doesn't exist!
-		for _, dOP := range spentOPs {
-			err = duf.Delete(dOP)
-			if err != nil {
-				return err
-			}
-		}
+		// for _, dOP := range spentOPs {
+		// 	err = duf.Delete(dOP)
+		// 	if err != nil {
+		// 		return err
+		// 	}
+		// }
 		// done losing utxos, next gain utxos
 		// next add all new utxos to db, this is quick as the work is above
 		for _, ub := range nUtxoBytes {
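
The last two hunks merge the delete into the lookup: a spent outpoint is now removed from the duffel-bag bucket as soon as it is found to be ours, and the old second pass, which called Delete on every spent outpoint whether or not it was in the bucket, is commented out. A minimal sketch of the resulting pattern against a bolt bucket; the bucket name and key encoding here are assumptions for illustration, not the repo's schema.

package sketch

import "github.com/boltdb/bolt"

// markSpent deletes matching outpoints in the same pass that finds them,
// instead of doing a blanket Delete over all spent outpoints afterwards.
func markSpent(db *bolt.DB, spentOPs [][]byte) error {
	return db.Update(func(btx *bolt.Tx) error {
		duf := btx.Bucket([]byte("DuffelBag")) // assumed bucket name
		if duf == nil {
			return nil // nothing stored yet
		}
		for _, nOP := range spentOPs {
			v := duf.Get(nOP)
			if v == nil {
				continue // not our utxo, nothing to delete
			}
			// ... decode v and record the lost utxo here ...
			if err := duf.Delete(nOP); err != nil {
				return err
			}
		}
		return nil
	})
}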