diff --git a/block/block.go b/block/block.go index 65f7efd..48f4342 100644 --- a/block/block.go +++ b/block/block.go @@ -20,6 +20,7 @@ import "fmt" import "time" import "bytes" +import "strings" import "runtime/debug" import "encoding/hex" import "encoding/binary" @@ -50,7 +51,6 @@ type Complete_Block struct { Txs []*transaction.Transaction } -// see spec here https://cryptonote.org/cns/cns003.txt // this function gets the block identifier hash // this has been simplified and varint length has been removed // keccak hash of entire block including miniblocks, gives the block id @@ -67,6 +67,25 @@ func (bl *Block) GetTimestamp() time.Time { return time.Unix(0, int64(bl.Timestamp*uint64(time.Millisecond))) } +// stringifier +func (bl Block) String() string { + r := new(strings.Builder) + fmt.Fprintf(r, "BLID:%s\n", bl.GetHash()) + fmt.Fprintf(r, "Major version:%d Minor version: %d\n", bl.Major_Version, bl.Minor_Version) + fmt.Fprintf(r, "Height:%d\n", bl.Height) + fmt.Fprintf(r, "Timestamp:%d (%s)\n", bl.Timestamp, bl.GetTimestamp()) + for i := range bl.Tips { + fmt.Fprintf(r, "Past %d:%s\n", i, bl.Tips[i]) + } + for i, mbl := range bl.MiniBlocks { + fmt.Fprintf(r, "Mini %d:%s\n", i, mbl) + } + for i, txid := range bl.Tx_hashes { + fmt.Fprintf(r, "tx %d:%s\n", i, txid) + } + return r.String() +} + // this function serializes a block and skips miniblocks if requested func (bl *Block) serialize(includeminiblocks bool) []byte { diff --git a/block/miniblock.go b/block/miniblock.go index 3e3e0d9..6a6f498 100644 --- a/block/miniblock.go +++ b/block/miniblock.go @@ -55,7 +55,7 @@ type MiniBlock struct { func (mbl MiniBlock) String() string { r := new(strings.Builder) - fmt.Fprintf(r, "%d ", mbl.Version) + fmt.Fprintf(r, "%08x %d ", mbl.GetMiniID(), mbl.Version) if mbl.Genesis { fmt.Fprintf(r, "GENESIS height %d", int64(binary.BigEndian.Uint64(mbl.Check[:]))) } else { @@ -63,11 +63,11 @@ func (mbl MiniBlock) String() string { if mbl.PastCount == 1 { - fmt.Fprintf(r, "Past [%08x]", mbl.Past[0]) + fmt.Fprintf(r, " Past [%08x]", mbl.Past[0]) } else { - fmt.Fprintf(r, "Past [%08x %08x]", mbl.Past[0], mbl.Past[1]) + fmt.Fprintf(r, " Past [%08x %08x]", mbl.Past[0], mbl.Past[1]) } - fmt.Fprintf(r, "time %d", mbl.Timestamp) + fmt.Fprintf(r, " time %d", mbl.Timestamp) return r.String() } diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go index ffc4425..ee448cc 100644 --- a/blockchain/blockchain.go +++ b/blockchain/blockchain.go @@ -26,7 +26,6 @@ import "sync" import "time" import "bytes" import "runtime/debug" -import "math/big" import "strings" //import "runtime" @@ -365,7 +364,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro // the block timestamp cannot be less than any of the parents for i := range bl.Tips { if chain.Load_Block_Timestamp(bl.Tips[i]) > bl.Timestamp { - fmt.Printf("timestamp prev %d cur timestamp %d\n", chain.Load_Block_Timestamp(bl.Tips[i]), bl.Timestamp) + //fmt.Printf("timestamp prev %d cur timestamp %d\n", chain.Load_Block_Timestamp(bl.Tips[i]), bl.Timestamp) block_logger.Error(fmt.Errorf("Block timestamp is less than its parent."), "rejecting block") return errormsg.ErrInvalidTimestamp, false @@ -462,8 +461,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro return err, false } } - - // TODO we need to verify address whether they are valid points on curve or not } // now we need to verify each and every tx in detail @@ -526,7 +523,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl 
*block.Complete_Block) (err erro { nonce_map := map[crypto.Hash]bool{} for i := 0; i < len(cbl.Txs); i++ { - if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX { for j := range cbl.Txs[i].Payloads { if _, ok := nonce_map[cbl.Txs[i].Payloads[j].Proof.Nonce()]; ok { @@ -535,7 +531,53 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro } nonce_map[cbl.Txs[i].Payloads[j].Proof.Nonce()] = true } + } + } + } + // all blocks except genesis will have history + // so make sure txs are connected + if bl.Height >= 1 && len(cbl.Txs) > 0 { + history := map[crypto.Hash]bool{} + + var history_array []crypto.Hash + for i := range bl.Tips { + history_array = append(history_array, chain.get_ordered_past(bl.Tips[i], 26)...) + } + for _, h := range history_array { + history[h] = true + } + + block_height = chain.Calculate_Height_At_Tips(bl.Tips) + for i, tx := range cbl.Txs { + if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX { + if !history[cbl.Txs[i].BLID] { + block_logger.Error(fmt.Errorf("Double Spend TX within block"), "unreferable history", "txid", cbl.Txs[i].GetHash()) + return errormsg.ErrTXDoubleSpend, false + } + if tx.Height != uint64(chain.Load_Height_for_BL_ID(cbl.Txs[i].BLID)) { + block_logger.Error(fmt.Errorf("Double Spend TX within block"), "blid/height mismatch", "txid", cbl.Txs[i].GetHash()) + return errormsg.ErrTXDoubleSpend, false + } + + if block_height-int64(tx.Height) >= TX_VALIDITY_HEIGHT { + block_logger.Error(fmt.Errorf("Double Spend TX within block"), "long distance tx not supported", "txid", cbl.Txs[i].GetHash()) + return errormsg.ErrTXDoubleSpend, false + } + + if tx.TransactionType == transaction.SC_TX { + if tx.SCDATA.Has(rpc.SCACTION, rpc.DataUint64) { + if rpc.SC_INSTALL == rpc.SC_ACTION(tx.SCDATA.Value(rpc.SCACTION, rpc.DataUint64).(uint64)) { + txid := tx.GetHash() + if txid[31] < 0x80 { // last byte must be >= 0x80 + block_logger.Error(fmt.Errorf("Invalid SCID"), "SCID installing tx must end with >0x80 byte", "txid", cbl.Txs[i].GetHash()) + return errormsg.ErrTXDoubleSpend, false + } + } + } + } } } } @@ -549,7 +591,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro hf_version := chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(bl.Tips)) for i := 0; i < len(cbl.Txs); i++ { go func(j int) { - if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, cbl.Txs[j], bl.Tips, false); err != nil { // transaction verification failed + if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, cbl.Txs[j], bl.Tips); err != nil { // transaction verification failed atomic.AddInt32(&fail_count, 1) // increase fail count by 1 block_logger.Error(err, "tx nonce verification failed", "txid", cbl.Txs[j].GetHash()) } @@ -570,10 +612,9 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro wg := sync.WaitGroup{} wg.Add(len(cbl.Txs)) // add total number of tx as work - hf_version := chain.Get_Current_Version_at_Height(chain.Calculate_Height_At_Tips(bl.Tips)) for i := 0; i < len(cbl.Txs); i++ { go func(j int) { - if err := chain.Verify_Transaction_NonCoinbase(hf_version, cbl.Txs[j]); err != nil { // 
transaction verification failed atomic.AddInt32(&fail_count, 1) // increase fail count by 1 block_logger.Error(err, "tx verification failed", "txid", cbl.Txs[j].GetHash()) } @@ -593,20 +634,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro var check_data cbl_verify // used to verify sanity of new block for i := 0; i < len(cbl.Txs); i++ { if !(cbl.Txs[i].IsCoinbase() || cbl.Txs[i].IsRegistration()) { // all other tx must go through this check - - for _, p := range cbl.Txs[i].Payloads { // make sure tx is expanded - if p.Statement.RingSize != uint64(len(p.Statement.Publickeylist_compressed)) { - if err = chain.Transaction_NonCoinbase_Expand(cbl.Txs[i]); err != nil { - return err, false - } - } - // if still the tx is not expanded, give err - if p.Statement.RingSize != uint64(len(p.Statement.Publickeylist_compressed)) { - err = fmt.Errorf("TXB is not expanded. cannot cbl_verify expected %d Actual %d", p.Statement.RingSize, len(p.Statement.Publickeylist_compressed)) - block_logger.Error(err, "Invalid TX within block", "txid", cbl.Txs[i].GetHash()) - return - } - } if err = check_data.check(cbl.Txs[i], false); err == nil { check_data.check(cbl.Txs[i], true) // keep in record for future tx } else { @@ -630,7 +657,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro } } - chain.StoreBlock(bl) + chain.StoreBlock(bl, 0) // if the block is on a lower height tip, the block will not increase chain height height := chain.Load_Height_for_BL_ID(block_hash) @@ -645,8 +672,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro } - // process tips only if increases the the height - if height_changed { + { var full_order []crypto.Hash var base_topo_index int64 // new topo id will start from here @@ -668,6 +694,8 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro current_topo_block := i + base_topo_index previous_topo_block := current_topo_block - 1 + _ = previous_topo_block + if current_topo_block == chain.Load_Block_Topological_order(full_order[i]) { // skip if same order continue } @@ -712,14 +740,9 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro } } else { // we already have a block before us, use it - record_version := uint64(0) - if previous_topo_block >= 0 { - toporecord, err := chain.Store.Topo_store.Read(previous_topo_block) - - if err != nil { - panic(err) - } - record_version = toporecord.State_Version + record_version, err := chain.ReadBlockSnapshotVersion(full_order[i-1]) + if err != nil { + panic(err) } ss, err = chain.Store.Balance_store.LoadSnapshot(record_version) @@ -828,12 +851,10 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro } //fmt.Printf("committed trees version %d at topo %d\n", commit_version, current_topo_block) - - chain.Store.Topo_store.Write(current_topo_block, full_order[i], commit_version, chain.Load_Block_Height(full_order[i])) - - //rlog.Debugf("%d %s topo_index %d base topo %d", i, full_order[i], current_topo_block, base_topo_index) - - // this tx must be stored, linked with this block + chain.StoreBlock(bl, commit_version) + if height_changed { + chain.Store.Topo_store.Write(current_topo_block, full_order[i], commit_version, chain.Load_Block_Height(full_order[i])) + } } } @@ -1134,17 +1155,17 @@ func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) error { provided_fee := tx.Fees() // get fee from tx //logger.WithFields(log.Fields{"txid": txhash}).Warnf("TX 
fees check disabled provided fee %d calculated fee %d", provided_fee, calculated_fee) - if calculated_fee > provided_fee { + if !chain.simulator && calculated_fee > provided_fee { err = fmt.Errorf("TX %s rejected due to low fees provided fee %d calculated fee %d", txhash, provided_fee, calculated_fee) return err } - if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, chain.Get_TIPS(), true); err != nil { // transaction verification failed + if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, chain.Get_TIPS()); err != nil { // transaction verification failed logger.V(1).Error(err, "Incoming TX nonce verification failed", "txid", txhash) return fmt.Errorf("Incoming TX %s nonce verification failed, err %s", txhash, err) } - if err := chain.Verify_Transaction_NonCoinbase(hf_version, tx); err != nil { + if err := chain.Verify_Transaction_NonCoinbase(tx); err != nil { logger.V(1).Error(err, "Incoming TX could not be verified", "txid", txhash) return fmt.Errorf("Incoming TX %s could not be verified, err %s", txhash, err) } @@ -1361,27 +1382,6 @@ func (chain *Blockchain) Is_TX_Orphan(hash crypto.Hash) (result bool) { return !result } -// verifies whether we are lagging -// return true if we need resync -// returns false if we are good and resync is not required -func (chain *Blockchain) IsLagging(peer_cdiff *big.Int) bool { - - our_diff := new(big.Int).SetInt64(0) - - high_block, err := chain.Load_Block_Topological_order_at_index(chain.Load_TOPO_HEIGHT()) - if err != nil { - return false - } else { - our_diff = chain.Load_Block_Cumulative_Difficulty(high_block) - } - //fmt.Printf("P_cdiff %s cdiff %s our top block %s", peer_cdiff.String(), our_diff.String(), high_block) - - if our_diff.Cmp(peer_cdiff) < 0 { - return true // peer's cumulative difficulty is more than ours , active resync - } - return false -} - // this function will rewind the chain from the topo height one block at a time // this function also runs the client protocol in reverse and also deletes the block from the storage func (chain *Blockchain) Rewind_Chain(rewind_count int) (result bool) { @@ -1535,9 +1535,10 @@ func (chain *Blockchain) IsBlockSyncBlockHeightSpecific(blid crypto.Hash, chain_ // blocks are ordered recursively, till we find a find a block which is already in the chain func (chain *Blockchain) Generate_Full_Order_New(current_tip crypto.Hash, new_tip crypto.Hash) (order []crypto.Hash, topo int64) { - if chain.Load_Height_for_BL_ID(new_tip) != chain.Load_Height_for_BL_ID(current_tip)+1 { + /*if !(chain.Load_Height_for_BL_ID(new_tip) == chain.Load_Height_for_BL_ID(current_tip)+1 || + chain.Load_Height_for_BL_ID(new_tip) == chain.Load_Height_for_BL_ID(current_tip)) { panic("dag can only grow one height at a time") - } + }*/ depth := 20 for ; ; depth += 20 { @@ -1565,8 +1566,8 @@ func (chain *Blockchain) Generate_Full_Order_New(current_tip crypto.Hash, new_ti } if !found { // we have a contention point - topo = chain.Load_Block_Topological_order(new_history_rev[j-1]) + 1 - order = append(order, new_history_rev[j:]...) // order is already stored and store + topo = chain.Load_Block_Topological_order(new_history_rev[j-1]) + order = append(order, new_history_rev[j-1:]...) 
// order is already stored and store return } } diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index 68ae3cd..8ff7eb1 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -96,25 +96,6 @@ func CheckPowHashBig(pow_hash crypto.Hash, big_difficulty_integer *big.Int) bool return false } -// this function finds a common base which can be used to compare tips based on cumulative difficulty -func (chain *Blockchain) find_best_tip_cumulative_difficulty(tips []crypto.Hash) (best crypto.Hash) { - - tips_scores := make([]BlockScore, len(tips), len(tips)) - - for i := range tips { - tips_scores[i].BLID = tips[i] // we should chose the lowest weight - tips_scores[i].Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(tips[i]) - } - - sort_descending_by_cumulative_difficulty(tips_scores) - - best = tips_scores[0].BLID - // base_height = scores[0].Weight - - return best - -} - // confirms whether the actual tip difficulty is withing 9% deviation with reference // actual tip cannot be less than 91% of main tip // if yes tip is okay, else tip should be declared stale @@ -218,13 +199,13 @@ func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int { } // take the time from the most heavy block - biggest_tip := chain.find_best_tip_cumulative_difficulty(tips) - biggest_difficulty := chain.Load_Block_Difficulty(biggest_tip) - parent_highest_time := chain.Load_Block_Timestamp(biggest_tip) + + biggest_difficulty := chain.Load_Block_Difficulty(tips[0]) + parent_highest_time := chain.Load_Block_Timestamp(tips[0]) // find parents parents tip from the most heavy block's parent - parent_past := chain.Get_Block_Past(biggest_tip) - past_biggest_tip := chain.find_best_tip_cumulative_difficulty(parent_past) + parent_past := chain.Get_Block_Past(tips[0]) + past_biggest_tip := parent_past[0] parent_parent_highest_time := chain.Load_Block_Timestamp(past_biggest_tip) if biggest_difficulty.Cmp(MinimumDifficulty) < 0 { diff --git a/blockchain/miner_block.go b/blockchain/miner_block.go index 0fbc21b..f790856 100644 --- a/blockchain/miner_block.go +++ b/blockchain/miner_block.go @@ -20,7 +20,6 @@ import "fmt" import "bytes" import "sort" import "sync" -import "math/big" import "math/rand" import "runtime/debug" import "encoding/binary" @@ -47,24 +46,20 @@ const TX_VALIDITY_HEIGHT = 11 type BlockScore struct { BLID crypto.Hash //MiniCount int - Height int64 // block height - Cumulative_Difficulty *big.Int // used to score blocks on cumulative difficulty + Height int64 // block height } -// Heighest node weight is ordered first, the condition is reverted see eg. at https://golang.org/pkg/sort/#Slice -// if weights are equal, nodes are sorted by their block ids which will never collide , hopefullly +// Heighest height is ordered first, the condition is reverted see eg. 
at https://golang.org/pkg/sort/#Slice +// if heights are equal, nodes are sorted by their block ids which will never collide , hopefullly // block ids are sorted by lowest byte first diff -func sort_descending_by_cumulative_difficulty(tips_scores []BlockScore) { - +func sort_descending_by_height_blid(tips_scores []BlockScore) { sort.Slice(tips_scores, func(i, j int) bool { - if tips_scores[i].Cumulative_Difficulty.Cmp(tips_scores[j].Cumulative_Difficulty) != 0 { // if diffculty mismatch use them - - if tips_scores[i].Cumulative_Difficulty.Cmp(tips_scores[j].Cumulative_Difficulty) > 0 { // if i diff > j diff + if tips_scores[i].Height != tips_scores[j].Height { // if height mismatch use them + if tips_scores[i].Height > tips_scores[j].Height { return true } else { return false } - } else { // cumulative difficulty is same, we must check minerblocks return bytes.Compare(tips_scores[i].BLID[:], tips_scores[j].BLID[:]) == -1 } @@ -72,9 +67,7 @@ func sort_descending_by_cumulative_difficulty(tips_scores []BlockScore) { } func sort_ascending_by_height(tips_scores []BlockScore) { - // base is the lowest height sort.Slice(tips_scores, func(i, j int) bool { return tips_scores[i].Height < tips_scores[j].Height }) - } // this will sort the tips based on cumulative difficulty and/or block ids @@ -91,10 +84,9 @@ func (chain *Blockchain) SortTips(tips []crypto.Hash) (sorted []crypto.Hash) { tips_scores := make([]BlockScore, len(tips), len(tips)) for i := range tips { tips_scores[i].BLID = tips[i] - tips_scores[i].Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(tips[i]) + tips_scores[i].Height = chain.Load_Block_Height(tips[i]) } - - sort_descending_by_cumulative_difficulty(tips_scores) + sort_descending_by_height_blid(tips_scores) for i := range tips_scores { sorted = append(sorted, tips_scores[i].BLID) @@ -232,7 +224,7 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl if int64(tx.Height) < height { // fmt.Printf("sanity back %d(%d) nonce check %s\n", height - int64(tx.Height), TX_VALIDITY_HEIGHT, chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version,tx,bl.Tips) ) if height-int64(tx.Height) < TX_VALIDITY_HEIGHT { - if nil == chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, bl.Tips, false) { + if nil == chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, bl.Tips) { if nil == pre_check.check(tx, false) { pre_check.check(tx, true) //rlog.Tracef(1, "Adding Top Sorted tx %s to Complete_Block current size %.2f KB max possible %.2f KB\n", tx_hash_list_sorted[i].Hash, float32(sizeoftxs+tx_hash_list_sorted[i].Size)/1024.0, float32(config.STARGATE_HE_MAX_BLOCK_SIZE)/1024.0) @@ -277,7 +269,7 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl if tx != nil { if int64(tx.Height) < height { if height-int64(tx.Height) < TX_VALIDITY_HEIGHT { - if nil == chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, bl.Tips, false) { + if nil == chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, bl.Tips) { if nil == pre_check.check(tx, false) { pre_check.check(tx, true) diff --git a/blockchain/sc.go b/blockchain/sc.go index bb46425..325584a 100644 --- a/blockchain/sc.go +++ b/blockchain/sc.go @@ -153,7 +153,8 @@ func (chain *Blockchain) execute_sc_function(w_sc_tree *Tree_Wrapper, data_tree //fmt.Printf("tx store %v\n", tx_store) - if err = chain.Transaction_NonCoinbase_Expand(&tx); err != nil { + // we can skip proof check, here + if err = 
chain.Expand_Transaction_NonCoinbase(&tx); err != nil { return } signer, err := extract_signer(&tx) diff --git a/blockchain/store.go b/blockchain/store.go index e11b615..c6a280d 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -84,33 +84,24 @@ func (s *storage) IsBalancesIntialized() bool { return true } -func (chain *Blockchain) StoreBlock(bl *block.Block) { +func (chain *Blockchain) StoreBlock(bl *block.Block, snapshot_version uint64) { hash := bl.GetHash() serialized_bytes := bl.Serialize() // we are storing the miner transactions within - // calculate cumulative difficulty at last block + + difficulty_of_current_block := new(big.Int) if len(bl.Tips) == 0 { // genesis block has no parent - difficulty_of_current_block := new(big.Int).SetUint64(1) // this is never used, as genesis block is a sync block, only its cumulative difficulty is used - cumulative_difficulty := new(big.Int).SetUint64(1) // genesis block cumulative difficulty is 1 - - err := chain.Store.Block_tx_store.WriteBlock(hash, serialized_bytes, difficulty_of_current_block, cumulative_difficulty) - if err != nil { - panic(fmt.Sprintf("error while writing block")) - } + difficulty_of_current_block.SetUint64(1) // this is never used, as genesis block is a sync block, only its cumulative difficulty is used } else { - - difficulty_of_current_block := chain.Get_Difficulty_At_Tips(bl.Tips) - - cumulative_difficulty := chain.Load_Block_Cumulative_Difficulty(bl.Tips[0]) - cumulative_difficulty.Add(cumulative_difficulty, difficulty_of_current_block) - - err := chain.Store.Block_tx_store.WriteBlock(hash, serialized_bytes, difficulty_of_current_block, cumulative_difficulty) - if err != nil { - panic(fmt.Sprintf("error while writing block")) - } - + difficulty_of_current_block = chain.Get_Difficulty_At_Tips(bl.Tips) } + chain.Store.Block_tx_store.DeleteBlock(hash) // what should we do on error + + err := chain.Store.Block_tx_store.WriteBlock(hash, serialized_bytes, difficulty_of_current_block, snapshot_version) + if err != nil { + panic(fmt.Sprintf("error while writing block")) + } } // loads a block from disk, deserializes it @@ -245,14 +236,6 @@ func (chain *Blockchain) Load_Block_Difficulty(h crypto.Hash) *big.Int { } } -func (chain *Blockchain) Load_Block_Cumulative_Difficulty(h crypto.Hash) *big.Int { - if cdiff, err := chain.Store.Block_tx_store.ReadBlockCDifficulty(h); err != nil { - panic(err) - } else { - return cdiff - } -} - func (chain *Blockchain) Get_Top_ID() crypto.Hash { var h crypto.Hash @@ -326,18 +309,9 @@ func (chain *Blockchain) Load_Block_Topological_order_at_index(index_pos int64) } //load store hash from 2 tree -func (chain *Blockchain) Load_Merkle_Hash(index_pos int64) (hash crypto.Hash, err error) { +func (chain *Blockchain) Load_Merkle_Hash(version uint64) (hash crypto.Hash, err error) { - toporecord, err := chain.Store.Topo_store.Read(index_pos) - if err != nil { - return hash, err - } - if toporecord.IsClean() { - err = fmt.Errorf("cannot query clean block") - return - } - - ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version) + ss, err := chain.Store.Balance_store.LoadSnapshot(version) if err != nil { return } diff --git a/blockchain/storefs.go b/blockchain/storefs.go index 7ac163f..3e467b9 100644 --- a/blockchain/storefs.go +++ b/blockchain/storefs.go @@ -106,39 +106,42 @@ func (s *storefs) ReadBlockDifficulty(h [32]byte) (*big.Int, error) { return nil, os.ErrNotExist } -func (s *storefs) ReadBlockCDifficulty(h [32]byte) (*big.Int, error) { +func (chain *Blockchain) 
ReadBlockSnapshotVersion(h [32]byte) (uint64, error) { + return chain.Store.Block_tx_store.ReadBlockSnapshotVersion(h) +} +func (s *storefs) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) { dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2])) files, err := ioutil.ReadDir(dir) if err != nil { - return nil, err + return 0, err } filename_start := fmt.Sprintf("%x.block", h[:]) for _, file := range files { if strings.HasPrefix(file.Name(), filename_start) { - diff := new(big.Int) + var diff uint64 parts := strings.Split(file.Name(), "_") if len(parts) != 3 { panic("such filename cannot occur") } - _, err := fmt.Sscan(parts[2], diff) + _, err := fmt.Sscan(parts[2], &diff) if err != nil { - return nil, err + return 0, err } return diff, nil } } - return nil, os.ErrNotExist + return 0, os.ErrNotExist } -func (s *storefs) WriteBlock(h [32]byte, data []byte, difficulty *big.Int, cdiff *big.Int) (err error) { +func (s *storefs) WriteBlock(h [32]byte, data []byte, difficulty *big.Int, ss_version uint64) (err error) { dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2])) - file := filepath.Join(dir, fmt.Sprintf("%x.block_%s_%s", h[:], difficulty.String(), cdiff.String())) + file := filepath.Join(dir, fmt.Sprintf("%x.block_%s_%d", h[:], difficulty.String(), ss_version)) if err = os.MkdirAll(dir, 0700); err != nil { return err } diff --git a/blockchain/transaction_execute.go b/blockchain/transaction_execute.go index 8b5c02b..858aa75 100644 --- a/blockchain/transaction_execute.go +++ b/blockchain/transaction_execute.go @@ -390,31 +390,37 @@ func (chain *Blockchain) process_transaction_sc(cache map[crypto.Hash]*graviton. 
// an SCID can generate it's token infinitely if transfer.Asset != scid && total_per_asset[transfer.Asset]+transfer.Amount <= total_per_asset[transfer.Asset] { err = fmt.Errorf("Balance calculation overflow") + break } else { total_per_asset[transfer.Asset] = total_per_asset[transfer.Asset] + transfer.Amount } } - for asset, value := range total_per_asset { - stored_value, _ := chain.LoadSCAssetValue(w_sc_data_tree, scid, asset) - // an SCID can generate it's token infinitely - if asset != scid && stored_value-value > stored_value { - err = fmt.Errorf("Balance calculation underflow") - } + if err == nil { + for asset, value := range total_per_asset { + stored_value, _ := chain.LoadSCAssetValue(w_sc_data_tree, scid, asset) + // an SCID can generate it's token infinitely + if asset != scid && stored_value-value > stored_value { + err = fmt.Errorf("Balance calculation underflow stored_value %d transferring %d\n", stored_value, value) + break + } - var new_value [8]byte - binary.BigEndian.PutUint64(new_value[:], stored_value-value) - chain.StoreSCValue(w_sc_data_tree, scid, asset[:], new_value[:]) + var new_value [8]byte + binary.BigEndian.PutUint64(new_value[:], stored_value-value) + chain.StoreSCValue(w_sc_data_tree, scid, asset[:], new_value[:]) + } } //also check whether all destinations are registered - for _, transfer := range w_sc_data_tree.transfere { - if _, err = balance_tree.Get([]byte(transfer.Address)); err == nil || xerrors.Is(err, graviton.ErrNotFound) { - // everything is okay - } else { - err = fmt.Errorf("account is unregistered") - logger.V(1).Error(err, "account is unregistered", "txhash", txhash, "scid", scid, "address", transfer.Address) - break + if err == nil { + for _, transfer := range w_sc_data_tree.transfere { + if _, err = balance_tree.Get([]byte(transfer.Address)); err == nil || xerrors.Is(err, graviton.ErrNotFound) { + // everything is okay + } else { + err = fmt.Errorf("account is unregistered") + logger.V(1).Error(err, "account is unregistered", "txhash", txhash, "scid", scid, "address", transfer.Address) + break + } } } } @@ -456,6 +462,9 @@ func (chain *Blockchain) process_transaction_sc(cache map[crypto.Hash]*graviton. } for k, v := range w_sc_data_tree.entries { // commit entire data to tree + if _, ok := globals.Arguments["--debug"]; ok && globals.Arguments["--debug"] != nil && chain.simulator { + logger.V(1).Info("Writing", "txid", txhash, "scid", scid, "key", fmt.Sprintf("%x", k), "value", fmt.Sprintf("%x", v)) + } if err = w_sc_data_tree.tree.Put([]byte(k), v); err != nil { return } @@ -491,7 +500,7 @@ func (chain *Blockchain) process_transaction_sc(cache map[crypto.Hash]*graviton. 
} if curbtree == nil { - panic("tree cannot be nil at htis point in time") + panic("tree cannot be nil at this point in time") } addr_bytes := []byte(transfer.Address) diff --git a/blockchain/transaction_verify.go b/blockchain/transaction_verify.go index e348355..70912a8 100644 --- a/blockchain/transaction_verify.go +++ b/blockchain/transaction_verify.go @@ -111,7 +111,7 @@ func (chain *Blockchain) Verify_Transaction_Coinbase(cbl *block.Complete_Block, } // this checks the nonces of a tx agains the current chain state, this basically does a comparision of state trees in limited form -func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version int64, tx *transaction.Transaction, tips []crypto.Hash, mempool bool) (err error) { +func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version int64, tx *transaction.Transaction, tips []crypto.Hash) (err error) { var tx_hash crypto.Hash defer func() { // safety so if anything wrong happens, verification fails if r := recover(); r != nil { @@ -129,44 +129,19 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_versi return fmt.Errorf("no tips provided, cannot verify") } - match_topo := int64(1) - // transaction needs to be expanded. this expansion needs balance state - _, topos := chain.Store.Topo_store.binarySearchHeight(int64(tx.Height)) - // load all db versions one by one and check whether the root hash matches the one mentioned in the tx - if len(topos) < 1 { - return fmt.Errorf("TX could NOT be expanded") + version, err := chain.ReadBlockSnapshotVersion(tx.BLID) + if err != nil { + return err } - for i := range topos { - hash, err := chain.Load_Merkle_Hash(topos[i]) - if err != nil { - continue - } - - if hash == tx.Payloads[0].Statement.Roothash { - match_topo = topos[i] - break // we have found the balance tree with which it was built now lets verify - } - - } - - if match_topo < 0 { - return fmt.Errorf("mentioned balance tree not found, cannot verify TX") + ss_tx, err := chain.Store.Balance_store.LoadSnapshot(version) + if err != nil { + return err } var tx_balance_tree *graviton.Tree - toporecord, err := chain.Store.Topo_store.Read(match_topo) - if err != nil { - return err - } - - ss_tx, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version) - if err != nil { - return err - } - if tx_balance_tree, err = ss_tx.GetTree(config.BALANCE_TREE); err != nil { return err } @@ -178,20 +153,12 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_versi // now we must solve the tips, against which the nonces will be verified for _, tip := range tips { var tip_balance_tree *graviton.Tree - topo := chain.Load_Block_Topological_order(tip) - if topo < 0 && mempool { - continue - } - if topo < 0 && !mempool { // mempool is slightly relaxed - return fmt.Errorf("tip not found in DB") // if this function is running in core, it should satisfy all tips - } - toporecord, err := chain.Store.Topo_store.Read(topo) + version, err := chain.ReadBlockSnapshotVersion(tip) if err != nil { return err } - - ss_tip, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version) + ss_tip, err := chain.Store.Balance_store.LoadSnapshot(version) if err != nil { return err } @@ -253,18 +220,18 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_versi } } } - if mempool { // if it's mempool, if it satisfies any of the tip, we assume it's okay - return nil - } - } - if mempool { - return fmt.Errorf("doesnot satisfy any of the tips") - } 
return nil } +func (chain *Blockchain) Verify_Transaction_NonCoinbase(tx *transaction.Transaction) (err error) { + return chain.verify_Transaction_NonCoinbase_internal(false, tx) +} +func (chain *Blockchain) Expand_Transaction_NonCoinbase(tx *transaction.Transaction) (err error) { + return chain.verify_Transaction_NonCoinbase_internal(true, tx) +} + // all non miner tx must be non-coinbase tx // each check is placed in a separate block of code, to avoid ambigous code or faulty checks // all check are placed and not within individual functions ( so as we cannot skip a check ) @@ -272,7 +239,7 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_versi // if the transaction has passed the check it can be added to mempool, relayed or added to blockchain // the transaction has already been deserialized thats it // It also expands the transactions, using the repective state trie -func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *transaction.Transaction) (err error) { +func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool, tx *transaction.Transaction) (err error) { var tx_hash crypto.Hash defer func() { // safety so if anything wrong happens, verification fails @@ -334,6 +301,11 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr return fmt.Errorf("tx does not contains base") } } + for t := range tx.Payloads { + if tx.Payloads[t].Statement.Roothash != tx.Payloads[0].Statement.Roothash { + return fmt.Errorf("Roothash corrupted") + } + } for t := range tx.Payloads { // check sanity @@ -367,44 +339,27 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr tx.Payloads[t].Statement.CRn = tx.Payloads[t].Statement.CRn[:0] } - match_topo := int64(1) - // transaction needs to be expanded. 
this expansion needs balance state - _, topos := chain.Store.Topo_store.binarySearchHeight(int64(tx.Height)) - - // load all db versions one by one and check whether the root hash matches the one mentioned in the tx - if len(topos) < 1 { - return fmt.Errorf("TX could NOT be expanded") + version, err := chain.ReadBlockSnapshotVersion(tx.BLID) + if err != nil { + return err + } + hash, err := chain.Load_Merkle_Hash(version) + if err != nil { + return err } - for i := range topos { - hash, err := chain.Load_Merkle_Hash(topos[i]) - if err != nil { - continue - } - - if hash == tx.Payloads[0].Statement.Roothash { - match_topo = topos[i] - break // we have found the balance tree with which it was built now lets verify - } - + if hash != tx.Payloads[0].Statement.Roothash { + return fmt.Errorf("Tx statement roothash mismatch") } + // we have found the balance tree with which it was built now lets verify - if match_topo < 0 { - return fmt.Errorf("mentioned balance tree not found, cannot verify TX") + ss, err := chain.Store.Balance_store.LoadSnapshot(version) + if err != nil { + return err } var balance_tree *graviton.Tree - toporecord, err := chain.Store.Topo_store.Read(match_topo) - if err != nil { - return err - } - - ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version) - if err != nil { - return err - } - if balance_tree, err = ss.GetTree(config.BALANCE_TREE); err != nil { return err } @@ -413,13 +368,6 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr return fmt.Errorf("mentioned balance tree not found, cannot verify TX") } - /*if _, ok := transaction_valid_cache.Load(tx_hash); ok { - logger.V(1).Info("Found in cache, skipping verification", "txid", tx_hash) - return nil - } else { - //logger.Infof("TX not found in cache %s len %d ",tx_hash, len(tmp_buffer)) - }*/ - //logger.Infof("dTX state tree has been found") trees := map[crypto.Hash]*graviton.Tree{} @@ -498,15 +446,25 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr } } + if _, ok := transaction_valid_cache.Load(tx_hash); ok { + logger.V(2).Info("Found in cache, skipping verification", "txid", tx_hash) + return nil + } else { + //logger.Infof("TX not found in cache %s len %d ",tx_hash, len(tmp_buffer)) + } + + if skip_proof { + return nil + } + // at this point TX has been completely expanded, verify the tx statement scid_map := map[crypto.Hash]int{} for t := range tx.Payloads { index := scid_map[tx.Payloads[t].SCID] if !tx.Payloads[t].Proof.Verify(tx.Payloads[t].SCID, index, &tx.Payloads[t].Statement, tx.GetHash(), tx.Payloads[t].BurnValue) { - - fmt.Printf("Statement %+v\n", tx.Payloads[t].Statement) - fmt.Printf("Proof %+v\n", tx.Payloads[t].Proof) + // fmt.Printf("Statement %+v\n", tx.Payloads[t].Statement) + // fmt.Printf("Proof %+v\n", tx.Payloads[t].Proof) return fmt.Errorf("transaction statement %d verification failed", t) } @@ -532,125 +490,3 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr return nil } - -//this function is only called after tx has been thoroughly verified -// It also expands the transactions, using the repective state trie -// it does not expand for verification but only to extract public key -func (chain *Blockchain) Transaction_NonCoinbase_Expand(tx *transaction.Transaction) (err error) { - - for t := range tx.Payloads { - tx.Payloads[t].Statement.CLn = tx.Payloads[t].Statement.CLn[:0] - tx.Payloads[t].Statement.CRn = tx.Payloads[t].Statement.CRn[:0] - } - - match_topo := int64(1) - - 
// transaction needs to be expanded. this expansion needs balance state - _, topos := chain.Store.Topo_store.binarySearchHeight(int64(tx.Height)) - for i := range topos { - hash, err := chain.Load_Merkle_Hash(topos[i]) - if err != nil { - continue - } - - if hash == tx.Payloads[0].Statement.Roothash { - match_topo = topos[i] - break // we have found the balance tree with which it was built now lets verify - } - } - - var balance_tree *graviton.Tree - toporecord, err := chain.Store.Topo_store.Read(match_topo) - if err != nil { - return err - } - - ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version) - if err != nil { - return err - } - - if balance_tree, err = ss.GetTree(config.BALANCE_TREE); err != nil { - return err - } - - if balance_tree == nil { - return fmt.Errorf("mentioned balance tree not found, cannot expand TX") - } - - trees := map[crypto.Hash]*graviton.Tree{} - - var zerohash crypto.Hash - trees[zerohash] = balance_tree // initialize main tree by default - - for t := range tx.Payloads { - tx.Payloads[t].Statement.Publickeylist_compressed = tx.Payloads[t].Statement.Publickeylist_compressed[:0] - tx.Payloads[t].Statement.Publickeylist = tx.Payloads[t].Statement.Publickeylist[:0] - - var tree *graviton.Tree - - if _, ok := trees[tx.Payloads[t].SCID]; ok { - tree = trees[tx.Payloads[t].SCID] - } else { - - // fmt.Printf("SCID loading %s tree\n", tx.Payloads[t].SCID) - tree, _ = ss.GetTree(string(tx.Payloads[t].SCID[:])) - trees[tx.Payloads[t].SCID] = tree - } - - // now lets calculate CLn and CRn - for i := 0; i < int(tx.Payloads[t].Statement.RingSize); i++ { - key_pointer := tx.Payloads[t].Statement.Publickeylist_pointers[i*int(tx.Payloads[t].Statement.Bytes_per_publickey) : (i+1)*int(tx.Payloads[t].Statement.Bytes_per_publickey)] - _, key_compressed, balance_serialized, err := tree.GetKeyValueFromHash(key_pointer) - - // if destination address could be found be found in main balance tree, assume its zero balance - needs_init := false - if err != nil && !tx.Payloads[t].SCID.IsZero() { - if xerrors.Is(err, graviton.ErrNotFound) { // if the address is not found, lookup in main tree - _, key_compressed, _, err = balance_tree.GetKeyValueFromHash(key_pointer) - if err != nil { - return fmt.Errorf("balance not obtained err %s\n", err) - } - needs_init = true - } - } - if err != nil { - return fmt.Errorf("balance not obtained err %s\n", err) - } - - // decode public key and expand - { - var p bn256.G1 - var pcopy [33]byte - copy(pcopy[:], key_compressed) - if err = p.DecodeCompressed(key_compressed[:]); err != nil { - return fmt.Errorf("key %d could not be decompressed", i) - } - tx.Payloads[t].Statement.Publickeylist_compressed = append(tx.Payloads[t].Statement.Publickeylist_compressed, pcopy) - tx.Payloads[t].Statement.Publickeylist = append(tx.Payloads[t].Statement.Publickeylist, &p) - - if needs_init { - var nb crypto.NonceBalance - nb.Balance = crypto.ConstructElGamal(&p, crypto.ElGamal_BASE_G) // init zero balance - balance_serialized = nb.Serialize() - } - } - - var ll, rr bn256.G1 - nb := new(crypto.NonceBalance).Deserialize(balance_serialized) - ebalance := nb.Balance - - ll.Add(ebalance.Left, tx.Payloads[t].Statement.C[i]) - tx.Payloads[t].Statement.CLn = append(tx.Payloads[t].Statement.CLn, &ll) - rr.Add(ebalance.Right, tx.Payloads[t].Statement.D) - tx.Payloads[t].Statement.CRn = append(tx.Payloads[t].Statement.CRn, &rr) - - echanges := crypto.ConstructElGamal(tx.Payloads[t].Statement.C[i], tx.Payloads[t].Statement.D) - nb = 
new(crypto.NonceBalance).Deserialize(balance_serialized) - nb.Balance = nb.Balance.Add(echanges) // homomorphic addition of changes - tree.Put(key_compressed, nb.Serialize()) // reserialize and store temporarily, tree will be discarded after verification - - } - } - return nil -} diff --git a/cmd/derod/main.go b/cmd/derod/main.go index d6715d9..71f9dde 100644 --- a/cmd/derod/main.go +++ b/cmd/derod/main.go @@ -485,23 +485,24 @@ func main() { continue } var timestamp uint64 - diff, cdiff := new(big.Int), new(big.Int) + diff := new(big.Int) if chain.Block_Exists(current_block_id) { - timestamp = chain.Load_Block_Timestamp(current_block_id) - - cdiff = chain.Load_Block_Cumulative_Difficulty(current_block_id) - diff = chain.Load_Block_Difficulty(current_block_id) } - balance_hash, err := chain.Load_Merkle_Hash(i) + version, err := chain.ReadBlockSnapshotVersion(current_block_id) + if err != nil { + panic(err) + } + + balance_hash, err := chain.Load_Merkle_Hash(version) if err != nil { panic(err) } - logger.Info("", "topo height", i, "height", chain.Load_Height_for_BL_ID(current_block_id), "timestamp", timestamp, "difficulty", diff.String(), "cdiff", cdiff.String()) + logger.Info("", "topo height", i, "height", chain.Load_Height_for_BL_ID(current_block_id), "timestamp", timestamp, "difficulty", diff.String()) logger.Info("", "Block Id", current_block_id.String(), "balance_tree hash", balance_hash.String()) logger.Info("\n") @@ -559,70 +560,55 @@ func main() { case command == "print_block": fmt.Printf("printing block\n") + var hash crypto.Hash + if len(line_parts) == 2 && len(line_parts[1]) == 64 { bl_raw, err := hex.DecodeString(strings.ToLower(line_parts[1])) - if err != nil { - fmt.Printf("err while decoding txid err %s\n", err) + fmt.Printf("err while decoding blid err %s\n", err) continue } - var hash crypto.Hash copy(hash[:32], []byte(bl_raw)) - - bl, err := chain.Load_BL_FROM_ID(hash) - if err == nil { - fmt.Printf("Block ID : %s\n", hash) - fmt.Printf("Block : %x\n", bl.Serialize()) - fmt.Printf("difficulty: %s\n", chain.Load_Block_Difficulty(hash).String()) - fmt.Printf("cdifficulty: %s\n", chain.Load_Block_Cumulative_Difficulty(hash).String()) - //fmt.Printf("Orphan: %v\n",chain.Is_Block_Orphan(hash)) - - json_bytes, err := json.Marshal(bl) - - fmt.Printf("%s err : %s\n", string(prettyprint_json(json_bytes)), err) - } else { - fmt.Printf("Err %s\n", err) - } } else if len(line_parts) == 2 { if s, err := strconv.ParseInt(line_parts[1], 10, 64); err == nil { _ = s // first load block id from topo height - hash, err := chain.Load_Block_Topological_order_at_index(s) + hash, err = chain.Load_Block_Topological_order_at_index(s) if err != nil { fmt.Printf("Skipping block at topo height %d due to error %s\n", s, err) continue } - bl, err := chain.Load_BL_FROM_ID(hash) - if err == nil { - fmt.Printf("Block ID : %s\n", hash) - fmt.Printf("Block : %x\n", bl.Serialize()) - fmt.Printf("difficulty: %s\n", chain.Load_Block_Difficulty(hash).String()) - fmt.Printf("cdifficulty: %s\n", chain.Load_Block_Cumulative_Difficulty(hash).String()) - fmt.Printf("Height: %d\n", chain.Load_Height_for_BL_ID(hash)) - fmt.Printf("TopoHeight: %d\n", s) - - bhash, err := chain.Load_Merkle_Hash(s) - - if err != nil { - panic(err) - } - - fmt.Printf("BALANCE_TREE : %s\n", bhash) - - //fmt.Printf("Orphan: %v\n",chain.Is_Block_Orphan(hash)) - - json_bytes, err := json.Marshal(bl) - - fmt.Printf("%s err : %s\n", string(prettyprint_json(json_bytes)), err) - } else { - fmt.Printf("Err %s\n", err) - } - - } else { - 
fmt.Printf("print_block needs a single block id as argument\n") } + } else { + fmt.Printf("print_block needs a single block id as argument\n") + break } + bl, err := chain.Load_BL_FROM_ID(hash) + if err != nil { + fmt.Printf("Err %s\n", err) + } + fmt.Printf("%s\n", bl.String()) + fmt.Printf("difficulty: %s\n", chain.Load_Block_Difficulty(hash).String()) + fmt.Printf("TopoHeight: %d\n", chain.Load_Block_Topological_order(hash)) + + version, err := chain.ReadBlockSnapshotVersion(hash) + if err != nil { + panic(err) + } + + bhash, err := chain.Load_Merkle_Hash(version) + if err != nil { + panic(err) + } + + fmt.Printf("BALANCE_TREE : %s\n", bhash) + + //fmt.Printf("Orphan: %v\n",chain.Is_Block_Orphan(hash)) + + //json_bytes, err := json.Marshal(bl) + + //fmt.Printf("%s err : %s\n", string(prettyprint_json(json_bytes)), err) // can be used to debug/deserialize blocks // it can be used for blocks not in chain @@ -647,7 +633,7 @@ func main() { } // decode and print block as much as possible - fmt.Printf("Block ID : %s\n", bl.GetHash()) + fmt.Printf("%s\n", bl.String()) fmt.Printf("Height: %d\n", bl.Height) tips_found := true for i := range bl.Tips { @@ -658,8 +644,7 @@ func main() { break } } - fmt.Printf("Tips: %d %+v\n", len(bl.Tips), bl.Tips) // block height - fmt.Printf("Txs: %d %+v\n", len(bl.Tx_hashes), bl.Tx_hashes) // block height + expected_difficulty := new(big.Int).SetUint64(0) if tips_found { // we can solve diffculty expected_difficulty = chain.Get_Difficulty_At_Tips(bl.Tips) @@ -947,7 +932,7 @@ func writenode(chain *blockchain.Blockchain, w *bufio.Writer, blid crypto.Hash, addr := rpc.NewAddressFromKeys(&acckey) addr.Mainnet = globals.IsMainnet() - w.WriteString(fmt.Sprintf("L%s [ fillcolor=%s label = \"%s %d height %d score %d stored %d order %d\nminer %s\" ];\n", blid.String(), color, blid.String(), 0, chain.Load_Height_for_BL_ID(blid), 0, chain.Load_Block_Cumulative_Difficulty(blid), chain.Load_Block_Topological_order(blid), addr.String())) + w.WriteString(fmt.Sprintf("L%s [ fillcolor=%s label = \"%s %d height %d score %d order %d\nminer %s\" ];\n", blid.String(), color, blid.String(), 0, chain.Load_Height_for_BL_ID(blid), 0, chain.Load_Block_Topological_order(blid), addr.String())) w.WriteString(fmt.Sprintf("}\n")) if err != nil { diff --git a/cmd/derod/rpc/blockheader.go b/cmd/derod/rpc/blockheader.go new file mode 100644 index 0000000..6a89e7e --- /dev/null +++ b/cmd/derod/rpc/blockheader.go @@ -0,0 +1,58 @@ +// Copyright 2017-2021 DERO Project. All rights reserved. +// Use of this source code in any form is governed by RESEARCH license. +// license can be found in the LICENSE file. +// GPG: 0F39 E425 8C65 3947 702A 8234 08B2 0360 A03A 9DE8 +// +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package rpc + +//import "fmt" +import "github.com/deroproject/derohe/cryptography/crypto" +import "github.com/deroproject/derohe/rpc" +import "github.com/deroproject/derohe/blockchain" + +// this function is only used by the RPC and is not used by the core and should be moved to RPC interface + +/* fill up the above structure from the blockchain */ +func GetBlockHeader(chain *blockchain.Blockchain, hash crypto.Hash) (result rpc.BlockHeader_Print, err error) { + bl, err := chain.Load_BL_FROM_ID(hash) + if err != nil { + return + } + + result.TopoHeight = -1 + if chain.Is_Block_Topological_order(hash) { + result.TopoHeight = chain.Load_Block_Topological_order(hash) + } + result.Height = chain.Load_Height_for_BL_ID(hash) + result.Depth = chain.Get_Height() - result.Height + result.Difficulty = chain.Load_Block_Difficulty(hash).String() + result.Hash = hash.String() + result.Major_Version = uint64(bl.Major_Version) + result.Minor_Version = uint64(bl.Minor_Version) + result.Orphan_Status = chain.Is_Block_Orphan(hash) + if result.TopoHeight >= chain.LocatePruneTopo()+10 { // this result may/may not be valid at just above prune heights + result.SyncBlock = chain.IsBlockSyncBlockHeight(hash) + } + result.SideBlock = chain.Isblock_SideBlock(hash) + //result.Reward = chain.Load_Block_Total_Reward(dbtx, hash) + result.TXCount = int64(len(bl.Tx_hashes)) + + for i := range bl.Tips { + result.Tips = append(result.Tips, bl.Tips[i].String()) + } + //result.Prev_Hash = bl.Prev_Hash.String() + result.Timestamp = bl.Timestamp + + return +} diff --git a/cmd/derod/rpc/rpc_dero_getblock.go b/cmd/derod/rpc/rpc_dero_getblock.go index 3386813..1b6fc04 100644 --- a/cmd/derod/rpc/rpc_dero_getblock.go +++ b/cmd/derod/rpc/rpc_dero_getblock.go @@ -51,7 +51,7 @@ func GetBlock(ctx context.Context, p rpc.GetBlock_Params) (result rpc.GetBlock_R hash = crypto.HashHexToHash(p.Hash) } - block_header, err := chain.GetBlockHeader(hash) + block_header, err := GetBlockHeader(chain, hash) if err != nil { // if err return err return } diff --git a/cmd/derod/rpc/rpc_dero_getblockheaderbyhash.go b/cmd/derod/rpc/rpc_dero_getblockheaderbyhash.go index d988def..b83c31e 100644 --- a/cmd/derod/rpc/rpc_dero_getblockheaderbyhash.go +++ b/cmd/derod/rpc/rpc_dero_getblockheaderbyhash.go @@ -29,7 +29,7 @@ func GetBlockHeaderByHash(ctx context.Context, p rpc.GetBlockHeaderByHash_Params } }() hash := crypto.HashHexToHash(p.Hash) - if block_header, err := chain.GetBlockHeader(hash); err == nil { // if err return err + if block_header, err := GetBlockHeader(chain, hash); err == nil { // if err return err return rpc.GetBlockHeaderByHash_Result{ // return success Block_Header: block_header, Status: "OK", diff --git a/cmd/derod/rpc/rpc_dero_getblockheaderbytopoheight.go b/cmd/derod/rpc/rpc_dero_getblockheaderbytopoheight.go index 550ebf2..db7f9b0 100644 --- a/cmd/derod/rpc/rpc_dero_getblockheaderbytopoheight.go +++ b/cmd/derod/rpc/rpc_dero_getblockheaderbytopoheight.go @@ -41,7 +41,7 @@ func GetBlockHeaderByTopoHeight(ctx context.Context, p rpc.GetBlockHeaderByTopoH return } - block_header, err := chain.GetBlockHeader(hash) + block_header, err := GetBlockHeader(chain, hash) if err != nil { // if err return err err = fmt.Errorf("User requested %d height block, chain topo height %d but err occured %s", p.TopoHeight, chain.Load_TOPO_HEIGHT(), err) return diff --git a/cmd/derod/rpc/rpc_dero_getencryptedbalance.go b/cmd/derod/rpc/rpc_dero_getencryptedbalance.go index d4bd51d..b80a216 100644 --- a/cmd/derod/rpc/rpc_dero_getencryptedbalance.go +++ 
b/cmd/derod/rpc/rpc_dero_getencryptedbalance.go @@ -89,7 +89,13 @@ func GetEncryptedBalance(ctx context.Context, p rpc.GetEncryptedBalance_Params) panic(err) } } - merkle_hash, err := chain.Load_Merkle_Hash(topoheight) + + version, err := chain.ReadBlockSnapshotVersion(toporecord.BLOCK_ID) + if err != nil { + panic(err) + } + + merkle_hash, err := chain.Load_Merkle_Hash(version) if err != nil { panic(err) } @@ -97,7 +103,12 @@ func GetEncryptedBalance(ctx context.Context, p rpc.GetEncryptedBalance_Params) // calculate top height merkle tree hash //var dmerkle_hash crypto.Hash - dmerkle_hash, err := chain.Load_Merkle_Hash(chain.Load_TOPO_HEIGHT()) + version, err = chain.ReadBlockSnapshotVersion(chain.Get_Top_ID()) + if err != nil { + panic(err) + } + + dmerkle_hash, err := chain.Load_Merkle_Hash(version) if err != nil { panic(err) } diff --git a/cmd/derod/rpc/rpc_dero_getinfo.go b/cmd/derod/rpc/rpc_dero_getinfo.go index c7fd774..d174051 100644 --- a/cmd/derod/rpc/rpc_dero_getinfo.go +++ b/cmd/derod/rpc/rpc_dero_getinfo.go @@ -39,7 +39,11 @@ func GetInfo(ctx context.Context) (result rpc.GetInfo_Result, err error) { result.TopoHeight = chain.Load_TOPO_HEIGHT() { - balance_merkle_hash, err := chain.Load_Merkle_Hash(result.TopoHeight) + version, err := chain.ReadBlockSnapshotVersion(chain.Get_Top_ID()) + if err != nil { + panic(err) + } + balance_merkle_hash, err := chain.Load_Merkle_Hash(version) if err != nil { panic(err) } diff --git a/cmd/derod/rpc/rpc_dero_getsc.go b/cmd/derod/rpc/rpc_dero_getsc.go index dd4645f..965a529 100644 --- a/cmd/derod/rpc/rpc_dero_getsc.go +++ b/cmd/derod/rpc/rpc_dero_getsc.go @@ -105,31 +105,31 @@ func GetSC(ctx context.Context, p rpc.GetSC_Params) (result rpc.GetSC_Result, er _ = varv _ = k _ = v - /* - fmt.Printf("key '%x' value '%x'\n", k,v) - if len(k) == 32 && len(v) == 8 { // it's SC balance - result.Balances[fmt.Sprintf("%x", k)] = binary.BigEndian.Uint64(v) - } else if nil == vark.UnmarshalBinary(k) && nil == varv.UnmarshalBinary(v) { - switch vark.Type { - case dvm.Uint64: - if varv.Type == dvm.Uint64 { - result.VariableUint64Keys[vark.ValueUint64] = varv.ValueUint64 - } else { - result.VariableUint64Keys[vark.ValueUint64] = fmt.Sprintf("%x", []byte(varv.ValueString)) - } - case dvm.String: - if varv.Type == dvm.Uint64 { - result.VariableStringKeys[vark.ValueString] = varv.ValueUint64 - } else { - result.VariableStringKeys[vark.ValueString] = fmt.Sprintf("%x", []byte(varv.ValueString)) - } - default: - err = fmt.Errorf("UNKNOWN Data type") - return + fmt.Printf("key '%x' value '%x'\n", k, v) + if len(k) == 32 && len(v) == 8 { // it's SC balance + result.Balances[fmt.Sprintf("%x", k)] = binary.BigEndian.Uint64(v) + } else if k[len(k)-1] >= 0x3 && k[len(k)-1] < 0x80 && nil == vark.UnmarshalBinary(k) && nil == varv.UnmarshalBinary(v) { + switch vark.Type { + case dvm.Uint64: + if varv.Type == dvm.Uint64 { + result.VariableUint64Keys[vark.ValueUint64] = varv.ValueUint64 + } else { + result.VariableUint64Keys[vark.ValueUint64] = fmt.Sprintf("%x", []byte(varv.ValueString)) } - } */ + case dvm.String: + if varv.Type == dvm.Uint64 { + result.VariableStringKeys[vark.ValueString] = varv.ValueUint64 + } else { + result.VariableStringKeys[vark.ValueString] = fmt.Sprintf("%x", []byte(varv.ValueString)) + } + default: + err = fmt.Errorf("UNKNOWN Data type") + return + } + + } } } diff --git a/cmd/derod/rpc/rpc_dero_gettransactions.go b/cmd/derod/rpc/rpc_dero_gettransactions.go index cf36659..035f713 100644 --- a/cmd/derod/rpc/rpc_dero_gettransactions.go +++ 
b/cmd/derod/rpc/rpc_dero_gettransactions.go @@ -135,8 +135,8 @@ func GetTransaction(ctx context.Context, p rpc.GetTransaction_Params) (result rp } } - // expand the tx - err = chain.Transaction_NonCoinbase_Expand(&tx) + // expand the tx, no need to do proof checking + err = chain.Expand_Transaction_NonCoinbase(&tx) if err != nil { return result, err } diff --git a/cmd/derod/rpc/rpc_get_getlastblockheader.go b/cmd/derod/rpc/rpc_get_getlastblockheader.go index 12421ed..99fd88d 100644 --- a/cmd/derod/rpc/rpc_get_getlastblockheader.go +++ b/cmd/derod/rpc/rpc_get_getlastblockheader.go @@ -30,7 +30,7 @@ func GetLastBlockHeader(ctx context.Context) (result rpc.GetLastBlockHeader_Resu } }() top_hash := chain.Get_Top_ID() - block_header, err := chain.GetBlockHeader(top_hash) + block_header, err := GetBlockHeader(chain, top_hash) if err != nil { return } diff --git a/cmd/simulator/blockchain_sim_test.go b/cmd/simulator/blockchain_sim_test.go index 059ad5c..bbddf1d 100644 --- a/cmd/simulator/blockchain_sim_test.go +++ b/cmd/simulator/blockchain_sim_test.go @@ -262,10 +262,10 @@ func Test_Creation_TX(t *testing.T) { wdst.Sync_Wallet_Memory_With_Daemon() var zerohash crypto.Hash - if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wsrc.GetAddress().String()); nonce != 2 { + if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wsrc.GetAddress().String()); nonce != 2 { t.Fatalf("nonce not valid. please dig. expected 2 actual %d", nonce) } - if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wdst.GetAddress().String()); nonce != 0 { + if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wdst.GetAddress().String()); nonce != 0 { t.Fatalf("nonce not valid. please dig. expected 0 actual %d", nonce) } @@ -353,10 +353,10 @@ func Test_Creation_TX(t *testing.T) { wsrc.Sync_Wallet_Memory_With_Daemon() wdst.Sync_Wallet_Memory_With_Daemon() - if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wsrc.GetAddress().String()); nonce != 9 { + if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wsrc.GetAddress().String()); nonce != 9 { t.Fatalf("nonce not valid. please dig. expected 9 actual %d", nonce) } - if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wdst.GetAddress().String()); nonce != 11 { + if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wdst.GetAddress().String()); nonce != 11 { t.Fatalf("nonce not valid. please dig. 
diff --git a/cmd/simulator/simulator.go b/cmd/simulator/simulator.go
index 38c2d77..9b92cd0 100644
--- a/cmd/simulator/simulator.go
+++ b/cmd/simulator/simulator.go
@@ -399,7 +399,6 @@ func main() {
             fmt.Printf("Block ID : %s\n", hash)
             fmt.Printf("Block : %x\n", bl.Serialize())
             fmt.Printf("difficulty: %s\n", chain.Load_Block_Difficulty(hash).String())
-            fmt.Printf("cdifficulty: %s\n", chain.Load_Block_Cumulative_Difficulty(hash).String())
             //fmt.Printf("Orphan: %v\n",chain.Is_Block_Orphan(hash))

             json_bytes, err := json.Marshal(bl)
@@ -423,11 +422,15 @@ func main() {
             fmt.Printf("Block ID : %s\n", hash)
             fmt.Printf("Block : %x\n", bl.Serialize())
             fmt.Printf("difficulty: %s\n", chain.Load_Block_Difficulty(hash).String())
-            fmt.Printf("cdifficulty: %s\n", chain.Load_Block_Cumulative_Difficulty(hash).String())
             fmt.Printf("Height: %d\n", chain.Load_Height_for_BL_ID(hash))
             fmt.Printf("TopoHeight: %d\n", s)
-            bhash, err := chain.Load_Merkle_Hash(s)
+            version, err := chain.ReadBlockSnapshotVersion(hash)
+            if err != nil {
+                panic(err)
+            }
+
+            bhash, err := chain.Load_Merkle_Hash(version)
             if err != nil {
                 panic(err)
diff --git a/config/config.go b/config/config.go
index bc44d79..106b9df 100644
--- a/config/config.go
+++ b/config/config.go
@@ -97,7 +97,7 @@ var Mainnet = CHAIN_CONFIG{Name: "mainnet",
 }
 var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0
-    Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x45, 0x00, 0x00, 0x00}),
+    Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x62, 0x00, 0x00, 0x00}),
     P2P_Default_Port:        40401,
     RPC_Default_Port:        40402,
     Wallet_RPC_Default_Port: 40403,
diff --git a/config/version.go b/config/version.go
index 852a234..f5b1e19 100644
--- a/config/version.go
+++ b/config/version.go
@@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"
 // right now it has to be manually changed
 // do we need to include git commitsha??
-var Version = semver.MustParse("3.4.54-1.DEROHE.STARGATE+09112021")
+var Version = semver.MustParse("3.4.62-1.DEROHE.STARGATE+11112021")
diff --git a/dvm/dvm.go b/dvm/dvm.go
index e67d6d7..5a9a84f 100644
--- a/dvm/dvm.go
+++ b/dvm/dvm.go
@@ -35,10 +35,11 @@ import "github.com/deroproject/derohe/cryptography/crypto"
 type Vtype int
+// the numbers start from 3 to avoid collisions and can go up to 0x7f at most before collisions occur
 const (
-    Invalid Vtype = iota // default is invalid
-    Uint64               // uint64 data type
-    String               // string
+    Invalid Vtype = 0x3 // default is invalid
+    Uint64  Vtype = 0x4 // uint64 data type
+    String  Vtype = 0x5 // string
 )
 var replacer = strings.NewReplacer("< =", "<=", "> =", ">=", "= =", "==", "! =", "!=", "& &", "&&", "| |", "||", "< <", "<<", "> >", ">>", "< >", "!=")
=", "!=", "& &", "&&", "| |", "||", "< <", "<<", "> >", ">>", "< >", "!=") diff --git a/dvm/dvm_store.go b/dvm/dvm_store.go index 5b71e07..05b0331 100644 --- a/dvm/dvm_store.go +++ b/dvm/dvm_store.go @@ -178,9 +178,6 @@ func (dkey DataKey) MarshalBinaryPanic() (ser []byte) { // these are used by lowest layers func (v Variable) MarshalBinary() (data []byte, err error) { - - data = append(data, byte(v.Type)) // add object type - switch v.Type { case Invalid: return @@ -193,6 +190,7 @@ func (v Variable) MarshalBinary() (data []byte, err error) { default: panic("unknown variable type not implemented") } + data = append(data, byte(v.Type)) // add object type return } func (v Variable) MarshalBinaryPanic() (ser []byte) { @@ -204,24 +202,24 @@ func (v Variable) MarshalBinaryPanic() (ser []byte) { } func (v *Variable) UnmarshalBinary(buf []byte) (err error) { - if len(buf) < 1 || Vtype(buf[0]) == Invalid { + if len(buf) < 1 { return fmt.Errorf("invalid, probably corruption") } - switch Vtype(buf[0]) { + switch Vtype(buf[len(buf)-1]) { case Invalid: return fmt.Errorf("Invalid cannot be deserialized") case Uint64: v.Type = Uint64 var n int - v.ValueUint64, n = binary.Uvarint(buf[1:]) // uint64 data type + v.ValueUint64, n = binary.Uvarint(buf[:len(buf)-1]) // uint64 data type if n <= 0 { panic("corruption in DB") return fmt.Errorf("corruption in DB") } case String: v.Type = String - v.ValueString = string(buf[1:]) + v.ValueString = string(buf[:len(buf)-1]) return nil default: diff --git a/p2p/chain_bootstrap.go b/p2p/chain_bootstrap.go index 2f77579..d38ac9d 100644 --- a/p2p/chain_bootstrap.go +++ b/p2p/chain_bootstrap.go @@ -282,17 +282,6 @@ func (connection *Connection) bootstrap_chain() { return } - cdiff := new(big.Int) - if _, ok := cdiff.SetString(response.CBlocks[i].Cumulative_Difficulty, 10); !ok { // if Cumulative_Difficulty could not be parsed, kill connection - connection.logger.Error(fmt.Errorf("Could not Parse Difficulty in common"), "", "cdiff", response.CBlocks[i].Cumulative_Difficulty) - connection.exit() - return - } - - if err = chain.Store.Block_tx_store.WriteBlock(bl.GetHash(), bl.Serialize(), diff, cdiff); err != nil { - panic(fmt.Sprintf("error while writing block")) - } - // now we must write all the state changes to gravition var ss *graviton.Snapshot @@ -330,6 +319,10 @@ func (connection *Connection) bootstrap_chain() { } } + if err = chain.Store.Block_tx_store.WriteBlock(bl.GetHash(), bl.Serialize(), diff, commit_version); err != nil { + panic(fmt.Sprintf("error while writing block")) + } + connection.logger.V(2).Info("Writing version", "topoheight", request.TopoHeights[i], "keycount", write_count, "commit version ", commit_version) chain.Store.Topo_store.Write(request.TopoHeights[i], bl.GetHash(), commit_version, int64(bl.Height)) // commit everything diff --git a/p2p/chain_sync.go b/p2p/chain_sync.go index 5a05c9b..3b5f9a4 100644 --- a/p2p/chain_sync.go +++ b/p2p/chain_sync.go @@ -82,7 +82,6 @@ try_again: // we have a response, see if its valid and try to add to get the blocks connection.logger.V(2).Info("Peer wants to give chain", "from topoheight", response.Start_height) - _ = config.STABLE_LIMIT // we do not need reorganisation if deviation is less than or equak to 7 blocks // only pop blocks if the system has somehow deviated more than 7 blocks diff --git a/p2p/common.go b/p2p/common.go index 6240939..6ed48cf 100644 --- a/p2p/common.go +++ b/p2p/common.go @@ -1,8 +1,6 @@ package p2p -import "fmt" import "time" -import "math/big" import "sync/atomic" import 
"github.com/deroproject/derohe/globals" @@ -10,19 +8,17 @@ import "github.com/deroproject/derohe/cryptography/crypto" // fill the common part from our chain func fill_common(common *Common_Struct) { + var err error common.Height = chain.Get_Height() //common.StableHeight = chain.Get_Stable_Height() common.TopoHeight = chain.Load_TOPO_HEIGHT() - //common.Top_ID, _ = chain.Load_BL_ID_at_Height(common.Height - 1) - high_block, err := chain.Load_Block_Topological_order_at_index(common.TopoHeight) + version, err := chain.ReadBlockSnapshotVersion(chain.Get_Top_ID()) if err != nil { - common.Cumulative_Difficulty = "0" - } else { - common.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(high_block).String() + panic(err) } - if common.StateHash, err = chain.Load_Merkle_Hash(common.TopoHeight); err != nil { + if common.StateHash, err = chain.Load_Merkle_Hash(version); err != nil { panic(err) } @@ -61,18 +57,6 @@ func (connection *Connection) update(common *Common_Struct) { atomic.StoreInt64(&connection.TopoHeight, common.TopoHeight) // satify race detector GOD //connection.Top_ID = common.Top_ID - if common.Cumulative_Difficulty != "" { - connection.Cumulative_Difficulty = common.Cumulative_Difficulty - - var x *big.Int - x = new(big.Int) - if _, ok := x.SetString(connection.Cumulative_Difficulty, 10); !ok { // if Cumulative_Difficulty could not be parsed, kill connection - connection.logger.Error(fmt.Errorf("Could not Parse Difficulty in common"), "", "cdiff", connection.Cumulative_Difficulty) - connection.exit() - } - - connection.CDIFF.Store(x) // do it atomically - } if connection.Top_Version != common.Top_Version { atomic.StoreUint64(&connection.Top_Version, common.Top_Version) // satify race detector GOD diff --git a/p2p/connection_pool.go b/p2p/connection_pool.go index 7ed2ce0..76374d3 100644 --- a/p2p/connection_pool.go +++ b/p2p/connection_pool.go @@ -89,11 +89,9 @@ type Connection struct { Tag string // tag for the other end DaemonVersion string //Exit chan bool // Exit marker that connection needs to be killed - ExitCounter int32 - State uint32 // state of the connection - Top_ID crypto.Hash // top block id of the connection - Cumulative_Difficulty string // cumulative difficulty of top block of peer, this is NOT required - CDIFF atomic.Value //*big.Int // NOTE: this field is used internally and is the parsed from Cumulative_Difficulty + ExitCounter int32 + State uint32 // state of the connection + Top_ID crypto.Hash // top block id of the connection logger logr.Logger // connection specific logger logid string // formatted version of connection diff --git a/p2p/controller.go b/p2p/controller.go index 40207c5..b2166c2 100644 --- a/p2p/controller.go +++ b/p2p/controller.go @@ -365,6 +365,7 @@ func P2P_Server_v2() { l, err := net.Listen("tcp", default_address) // listen as simple TCP server if err != nil { logger.Error(err, "Could not listen", "address", default_address) + return } defer l.Close() P2P_Port = int(l.Addr().(*net.TCPAddr).Port) diff --git a/p2p/rpc_changeset.go b/p2p/rpc_changeset.go index 03e0de4..c3588e1 100644 --- a/p2p/rpc_changeset.go +++ b/p2p/rpc_changeset.go @@ -51,7 +51,6 @@ func (c *Connection) ChangeSet(request ChangeList, response *Changes) (err error } cbl.Difficulty = chain.Load_Block_Difficulty(blid).String() - cbl.Cumulative_Difficulty = chain.Load_Block_Cumulative_Difficulty(blid).String() // now we must load all the changes the block has done to the state tree previous_sr, err := chain.Store.Topo_store.Read(topo - 1) diff --git 
diff --git a/p2p/rpc_handshake.go b/p2p/rpc_handshake.go
index d878403..f7e2400 100644
--- a/p2p/rpc_handshake.go
+++ b/p2p/rpc_handshake.go
@@ -18,7 +18,6 @@ package p2p
 import "fmt"
 import "bytes"
-import "math/big"
 import "sync/atomic"
 import "time"
@@ -68,8 +67,6 @@ func (connection *Connection) dispatch_test_handshake() {
         return
     }
-    connection.CDIFF.Store(new(big.Int).SetUint64(1))
-
     connection.request_time.Store(time.Now())
     connection.SpeedIn = ratecounter.NewRateCounter(60 * time.Second)
     connection.SpeedOut = ratecounter.NewRateCounter(60 * time.Second)
diff --git a/p2p/wire_structs.go b/p2p/wire_structs.go
index 0e38e93..14ac43b 100644
--- a/p2p/wire_structs.go
+++ b/p2p/wire_structs.go
@@ -28,16 +28,15 @@ import "github.com/deroproject/derohe/cryptography/crypto"
 // used to parse an incoming packet for a command, so that the respective command can be triggered
 type Common_Struct struct {
-    Height                int64       `cbor:"HEIGHT"`
-    TopoHeight            int64       `cbor:"THEIGHT"`
-    StableHeight          int64       `cbor:"SHEIGHT"`
-    Cumulative_Difficulty string      `cbor:"CDIFF"`
-    StateHash             [32]byte    `cbor:"STATE"`
-    PeerList              []Peer_Info `cbor:"PLIST,omitempty"` // it will contain peerlist every 30 minutes
-    T0                    int64       `cbor:"T0,omitempty"`    // see https://en.wikipedia.org/wiki/Network_Time_Protocol
-    T1                    int64       `cbor:"T1,omitempty"`    // time when this was sent, in unixmicro
-    T2                    int64       `cbor:"T2,omitempty"`    // time when this was sent, in unixmicro
-    Top_Version           uint64      `cbor:"HF"`              // this basically represents the hard fork version
+    Height       int64       `cbor:"HEIGHT"`
+    TopoHeight   int64       `cbor:"THEIGHT"`
+    StableHeight int64       `cbor:"SHEIGHT"`
+    StateHash    [32]byte    `cbor:"STATE"`
+    PeerList     []Peer_Info `cbor:"PLIST,omitempty"` // it will contain peerlist every 30 minutes
+    T0           int64       `cbor:"T0,omitempty"`    // see https://en.wikipedia.org/wiki/Network_Time_Protocol
+    T1           int64       `cbor:"T1,omitempty"`    // time when this was sent, in unixmicro
+    T2           int64       `cbor:"T2,omitempty"`    // time when this was sent, in unixmicro
+    Top_Version  uint64      `cbor:"HF"`              // this basically represents the hard fork version
 }
 type Dummy struct { // empty struct returned
@@ -139,11 +138,10 @@ type Tree_Changes struct {
 }
 type Complete_Block struct {
-    Block                 []byte         `cbor:"BLOCK,omitempty"`
-    Txs                   [][]byte       `cbor:"TXS,omitempty"`
-    Difficulty            string         `cbor:"DIFF,omitempty"`    // Diff
-    Cumulative_Difficulty string         `cbor:"CDIFF,omitempty"`   // CDiff
-    Changes               []Tree_Changes `cbor:"CHANGES,omitempty"` // changes to state tree
+    Block      []byte         `cbor:"BLOCK,omitempty"`
+    Txs        [][]byte       `cbor:"TXS,omitempty"`
+    Difficulty string         `cbor:"DIFF,omitempty"`    // Diff
+    Changes    []Tree_Changes `cbor:"CHANGES,omitempty"` // changes to state tree
 }
 type Block_Chunk struct {
diff --git a/rpc/rpc.go b/rpc/rpc.go
index 6d994e2..461fdfa 100644
--- a/rpc/rpc.go
+++ b/rpc/rpc.go
@@ -11,8 +11,12 @@ import "github.com/deroproject/derohe/cryptography/crypto"
 // this package defines interfaces and necessary glue code with the DERO Digital Network; it exposes and provides encrypted RPC calls over the DERO chain
 var enc_options = cbor.EncOptions{
-    Sort:    cbor.SortCTAP2,
-    TimeTag: cbor.EncTagRequired,
+    Sort:          cbor.SortCoreDeterministic,
+    ShortestFloat: cbor.ShortestFloat16,
+    NaNConvert:    cbor.NaNConvert7e00,
+    InfConvert:    cbor.InfConvertFloat16,
+    IndefLength:   cbor.IndefLengthForbidden,
+    TimeTag:       cbor.EncTagRequired,
 }
 var dec_options = cbor.DecOptions{
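The wire encoding moves from CTAP2 sorting to the full core-deterministic CBOR profile, which should make equal values encode to identical bytes regardless of map order or float width. A short sketch of how such options are typically turned into an encoder with fxamacker/cbor; the payload map is made up for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	opts := cbor.EncOptions{
		Sort:          cbor.SortCoreDeterministic,
		ShortestFloat: cbor.ShortestFloat16,
		NaNConvert:    cbor.NaNConvert7e00,
		InfConvert:    cbor.InfConvertFloat16,
		IndefLength:   cbor.IndefLengthForbidden,
		TimeTag:       cbor.EncTagRequired,
	}
	em, err := opts.EncMode() // build a reusable encoder from the options
	if err != nil {
		panic(err)
	}

	payload := map[string]interface{}{"N": "example", "V": uint64(7)} // illustrative only
	a, _ := em.Marshal(payload)
	b, _ := em.Marshal(payload)
	fmt.Println(bytes.Equal(a, b)) // deterministic: identical bytes on every encode
}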
diff --git a/transaction/transaction.go b/transaction/transaction.go
index f436a07..3976e52 100644
--- a/transaction/transaction.go
+++ b/transaction/transaction.go
@@ -83,6 +83,7 @@ type Transaction_Prefix struct {
     C      [32]byte `json:"c"`      // used for registration
     S      [32]byte `json:"s"`      // used for registration
     Height uint64   `json:"height"` // height at the state, used to cross-check state
+    BLID   [32]byte `json:"blid"`   // block id against which this tx was built
     SCDATA rpc.Arguments `json:"scdata"` // all SC related data is provided here, an SC tx uses all the fields
 }
@@ -316,6 +317,11 @@ func (tx *Transaction) Deserialize(buf []byte) (err error) {
         return fmt.Errorf("Invalid Height value in Transaction\n")
     }
     buf = buf[done:]
+    if len(buf) < 32 {
+        return fmt.Errorf("Invalid BLID value in Transaction\n")
+    }
+    copy(tx.BLID[:], buf[:32])
+    buf = buf[32:]
     var asset_count uint64
     asset_count, done = binary.Uvarint(buf)
@@ -419,6 +425,7 @@ func (tx *Transaction) SerializeHeader() []byte {
     if tx.TransactionType == BURN_TX || tx.TransactionType == NORMAL || tx.TransactionType == SC_TX {
         n = binary.PutUvarint(buf, uint64(tx.Height))
         serialised_header.Write(buf[:n])
+        serialised_header.Write(tx.BLID[:])
         n = binary.PutUvarint(buf, uint64(len(tx.Payloads)))
         serialised_header.Write(buf[:n])
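Per the hunks above, the new BLID travels in the serialized header as 32 raw bytes immediately after the Height varint, and Deserialize expects exactly that back. A self-contained sketch of just this fragment of the layout; the surrounding header fields are omitted:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var blid [32]byte
	blid[0] = 0xaa

	// encode: height as uvarint, then the 32 BLID bytes, mirroring SerializeHeader above
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 4551)
	ser := append(buf[:n], blid[:]...)

	// decode: mirror of the Deserialize hunk
	height, done := binary.Uvarint(ser)
	rest := ser[done:]
	if len(rest) < 32 {
		panic("Invalid BLID value in Transaction")
	}
	var out [32]byte
	copy(out[:], rest[:32])

	fmt.Println(height, out == blid) // 4551 true
}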
diff --git a/walletapi/daemon_communication.go b/walletapi/daemon_communication.go
index df90e94..a67b9db 100644
--- a/walletapi/daemon_communication.go
+++ b/walletapi/daemon_communication.go
@@ -59,6 +59,8 @@ import "github.com/creachadair/jrpc2"
 // this global variable should be within wallet structure
 var Connected bool = false
+var simulator bool // turns on simulator, which has 0 fees
+
 // there should be no global variables, so multiple wallets can run at the same time with different assets
 var endpoint string
@@ -151,6 +153,10 @@ func test_connectivity() (err error) {
         logger.Error(err, "Mainnet/Testnet mismatch")
         return
     }
+
+    if strings.ToLower(info.Network) == "simulator" {
+        simulator = true
+    }
     logger.Info("successfully connected to daemon")
     return nil
 }
@@ -213,7 +219,7 @@ func (w *Wallet_Memory) Sync_Wallet_Memory_With_Daemon_internal(scid crypto.Hash
     //rlog.Debugf("wallet topo height %d daemon online topo height %d\n", w.account.TopoHeight, w.Daemon_TopoHeight)
     previous := w.getEncryptedBalanceresult(scid).Data
-    if _, _, e, err := w.GetEncryptedBalanceAtTopoHeight(scid, -1, w.GetAddress().String()); err == nil {
+    if _, _, _, e, err := w.GetEncryptedBalanceAtTopoHeight(scid, -1, w.GetAddress().String()); err == nil {
         //fmt.Printf("data '%s' previous '%s' scid %s\n",w.account.Balance_Result[scid].Data , previous,scid)
         if w.getEncryptedBalanceresult(scid).Data != previous {
@@ -282,8 +288,8 @@ func (w *Wallet_Memory) DecodeEncryptedBalanceNow(el *crypto.ElGamal) uint64 {
 func (w *Wallet_Memory) GetSelfEncryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64) (r rpc.GetEncryptedBalance_Result, err error) {
     defer func() {
         if r := recover(); r != nil {
-            logger.V(1).Error(nil, "Recovered while connecting", "r", r, "stack", debug.Stack())
-            err = fmt.Errorf("Recovered while connecting", "stack", debug.Stack())
+            logger.V(1).Error(nil, "Recovered while GetSelfEncryptedBalanceAtTopoHeight", "r", r, "stack", debug.Stack())
+            err = fmt.Errorf("Recovered while GetSelfEncryptedBalanceAtTopoHeight r %s stack %s", r, string(debug.Stack()))
         }
     }()
@@ -297,12 +303,12 @@ func (w *Wallet_Memory) GetSelfEncryptedBalanceAtTopoHeight(scid crypto.Hash, to
 // TODO in order to stop privacy leaks we must guess this information somehow on client side itself
 // maybe the server can broadcast a bloomfilter or something else from the mempool keyimages
 //
-func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (bits int, lastused uint64, e *crypto.ElGamal, err error) {
+func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (bits int, lastused uint64, blid crypto.Hash, e *crypto.ElGamal, err error) {
     defer func() {
         if r := recover(); r != nil {
-            logger.V(1).Error(nil, "Recovered while connecting", "r", r, "stack", debug.Stack())
-            err = fmt.Errorf("Recovered while connecting", "stack", debug.Stack())
+            logger.V(1).Error(nil, "Recovered while GetEncryptedBalanceAtTopoHeight", "r", r, "stack", debug.Stack())
+            err = fmt.Errorf("Recovered while GetEncryptedBalanceAtTopoHeight r %s stack %s", r, debug.Stack())
         }
     }()
@@ -395,7 +401,12 @@ func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topohe
     var nb crypto.NonceBalance
     nb.Unmarshal(hexdecoded)
-    return result.Bits, nb.NonceHeight, nb.Balance, nil
+    var block_hash crypto.Hash
+    if err = block_hash.UnmarshalText([]byte(result.BlockHash)); err != nil {
+        return
+    }
+
+    return result.Bits, nb.NonceHeight, block_hash, nb.Balance, nil
 }
 func (w *Wallet_Memory) DecodeEncryptedBalance_Memory(el *crypto.ElGamal, hint uint64) (balance uint64) {
@@ -405,7 +416,7 @@ func (w *Wallet_Memory) DecodeEncryptedBalance_Memory(el *crypto.ElGamal, hint u
 }
 func (w *Wallet_Memory) GetDecryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (balance uint64, noncetopo uint64, err error) {
-    _, noncetopo, encrypted_balance, err := w.GetEncryptedBalanceAtTopoHeight(scid, topoheight, accountaddr)
+    _, noncetopo, _, encrypted_balance, err := w.GetEncryptedBalanceAtTopoHeight(scid, topoheight, accountaddr)
     if err != nil {
         return 0, 0, err
     }
@@ -520,13 +531,13 @@ func (w *Wallet_Memory) synchistory_internal(scid crypto.Hash, start_topo, end_t
     if start_topo == w.getEncryptedBalanceresult(scid).Registration {
         start_balance_e = crypto.ConstructElGamal(w.account.Keys.Public.G1(), crypto.ElGamal_BASE_G)
     } else {
-        _, _, start_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, start_topo, w.GetAddress().String())
+        _, _, _, start_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, start_topo, w.GetAddress().String())
         if err != nil {
             return err
         }
     }
-    _, _, end_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, end_topo, w.GetAddress().String())
+    _, _, _, end_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, end_topo, w.GetAddress().String())
     if err != nil {
         return err
     }
@@ -566,7 +577,7 @@ func (w *Wallet_Memory) synchistory_internal_binary_search(scid crypto.Hash, sta
         return w.synchistory_block(scid, end_topo)
     }
-    _, _, median_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, median, w.GetAddress().String())
+    _, _, _, median_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, median, w.GetAddress().String())
     if err != nil {
         return err
     }
@@ -653,13 +664,13 @@ func (w *Wallet_Memory) synchistory_block(scid crypto.Hash, topo int64) (err err
     if topo <= 0 || w.getEncryptedBalanceresult(scid).Registration == topo {
         previous_balance_e = crypto.ConstructElGamal(w.account.Keys.Public.G1(), crypto.ElGamal_BASE_G)
     } else {
-        _, _, previous_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo-1, w.GetAddress().String())
+        _, _, _, previous_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo-1, w.GetAddress().String())
         if err != nil {
             return err
         }
     }
-    _, _, current_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo, w.GetAddress().String())
+    _, _, _, current_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo, w.GetAddress().String())
     if err != nil {
         return err
     }
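GetEncryptedBalanceAtTopoHeight now also reports the hash of the block whose state snapshot served the balance, alongside bits, nonce height and the ElGamal ciphertext. A hypothetical call-site sketch of the widened five-value shape; only the block hash is kept here, and the -1 topoheight means "current", as in the calls above:

package sketch

import (
	"github.com/deroproject/derohe/cryptography/crypto"
	"github.com/deroproject/derohe/walletapi"
)

// balanceSnapshot is not part of the codebase; it only illustrates the new call shape.
func balanceSnapshot(w *walletapi.Wallet_Memory) (crypto.Hash, error) {
	var zeroscid crypto.Hash // zero SCID selects the native DERO asset
	_, _, blid, _, err := w.GetEncryptedBalanceAtTopoHeight(zeroscid, -1, w.GetAddress().String())
	if err != nil {
		return crypto.Hash{}, err
	}
	// blid names the block the balance was read at; the wallet later passes it
	// into BuildTransaction so the tx records which state it was built against.
	return blid, nil
}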
diff --git a/walletapi/transaction_build.go b/walletapi/transaction_build.go
index 004d3b5..57cac46 100644
--- a/walletapi/transaction_build.go
+++ b/walletapi/transaction_build.go
@@ -19,15 +19,20 @@ type GenerateProofFunc func(scid crypto.Hash, scid_index int, s *crypto.Statemen
 var GenerateProoffuncptr GenerateProofFunc = crypto.GenerateProof
 // generate proof etc
-func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap [][][]byte, rings [][]*bn256.G1, height uint64, scdata rpc.Arguments, roothash []byte, max_bits int) *transaction.Transaction {
-
-    var tx transaction.Transaction
+func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap [][][]byte, rings [][]*bn256.G1, block_hash crypto.Hash, height uint64, scdata rpc.Arguments, roothash []byte, max_bits int) *transaction.Transaction {
     sender := w.account.Keys.Public.G1()
     sender_secret := w.account.Keys.Secret.BigInt()
+    var retry_count int
+
+rebuild_tx:
+
+    var tx transaction.Transaction
+
     tx.Version = 1
     tx.Height = height
+    tx.BLID = block_hash
     tx.TransactionType = transaction.NORMAL
     /* if burn_value >= 1 {
@@ -48,6 +53,14 @@ func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap [][][]by
     fees_done := false
+    if retry_count%len(rings[0]) == 0 {
+        max_bits += 3
+    }
+
+    if max_bits >= 240 {
+        panic("currently we cannot use more than 240 bits")
+    }
+
     for t, _ := range transfers {
         var publickeylist, C, CLn, CRn []*bn256.G1
@@ -263,6 +276,20 @@ func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap [][][]by
             scid_map_t[tx.Payloads[t].SCID] = scid_map_t[tx.Payloads[t].SCID] + 1
     }
+    if tx.TransactionType == transaction.SC_TX {
+        if tx.SCDATA.Has(rpc.SCACTION, rpc.DataUint64) {
+            if rpc.SC_INSTALL == rpc.SC_ACTION(tx.SCDATA.Value(rpc.SCACTION, rpc.DataUint64).(uint64)) {
+                txid := tx.GetHash()
+                if txid[31] < 0x80 { // last byte should be >= 0x80
+                    if retry_count <= 20 {
+                        //fmt.Printf("rebuilding tx %s retry_count %d\n", txid, retry_count)
+                        goto rebuild_tx
+                    }
+                }
+            }
+        }
+    }
+
     // these 2 steps are only necessary, since blockchain doesn't accept unserialized txs
     //var dtx transaction.Transaction
     //_ = dtx.DeserializeHeader(tx.Serialize())
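BuildTransaction now loops back to rebuild_tx until an SC-install transaction hashes to an id whose last byte is at least 0x80, giving up after 20 retries. A stripped-down sketch of just that retry shape; the build step is stubbed out with random ids and is not the wallet's real logic:

package main

import (
	"crypto/rand"
	"fmt"
)

// buildOnce stands in for one BuildTransaction pass; here it only produces a random 32-byte id.
func buildOnce() (txid [32]byte) {
	rand.Read(txid[:])
	return
}

func main() {
	for retry := 0; retry <= 20; retry++ { // same bound as the hunk above
		txid := buildOnce()
		if txid[31] >= 0x80 { // SC-install txids must end with a byte >= 0x80
			fmt.Printf("acceptable SCID %x after %d retries\n", txid, retry)
			return
		}
	}
	fmt.Println("could not find an acceptable txid within the retry budget")
}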
diff --git a/walletapi/tx_creation_test.go b/walletapi/tx_creation_test.go
index 829143e..06c7916 100644
--- a/walletapi/tx_creation_test.go
+++ b/walletapi/tx_creation_test.go
@@ -270,10 +270,10 @@ func Test_Creation_TX(t *testing.T) {
     wdst.Sync_Wallet_Memory_With_Daemon()
     var zerohash crypto.Hash
-    if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wsrc.GetAddress().String()); nonce != 2 {
+    if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wsrc.GetAddress().String()); nonce != 2 {
         t.Fatalf("nonce not valid. please dig. expected 2 actual %d", nonce)
     }
-    if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wdst.GetAddress().String()); nonce != 0 {
+    if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 2, wdst.GetAddress().String()); nonce != 0 {
         t.Fatalf("nonce not valid. please dig. expected 0 actual %d", nonce)
     }
@@ -361,10 +361,10 @@ func Test_Creation_TX(t *testing.T) {
     wsrc.Sync_Wallet_Memory_With_Daemon()
     wdst.Sync_Wallet_Memory_With_Daemon()
-    if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wsrc.GetAddress().String()); nonce != 9 {
+    if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wsrc.GetAddress().String()); nonce != 9 {
         t.Fatalf("nonce not valid. please dig. expected 9 actual %d", nonce)
     }
-    if _, nonce, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wdst.GetAddress().String()); nonce != 11 {
+    if _, nonce, _, _, _ := wsrc.GetEncryptedBalanceAtTopoHeight(zerohash, 11, wdst.GetAddress().String()); nonce != 11 {
         t.Fatalf("nonce not valid. please dig. expected 11 actual %d", nonce)
     }
diff --git a/walletapi/wallet_transfer.go b/walletapi/wallet_transfer.go
index b7637d0..18c1dfb 100644
--- a/walletapi/wallet_transfer.go
+++ b/walletapi/wallet_transfer.go
@@ -211,6 +211,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
     var max_bits_array []int
     topoheight := int64(-1)
+    var block_hash crypto.Hash
     { // if wallet has been recently used, increase probability of user's tx being successfully mined
         var zeroscid crypto.Hash
@@ -219,7 +220,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
         }
     }
-    _, _, self_e, _ := w.GetEncryptedBalanceAtTopoHeight(transfers[0].SCID, topoheight, w.GetAddress().String())
+    _, _, block_hash, self_e, _ := w.GetEncryptedBalanceAtTopoHeight(transfers[0].SCID, topoheight, w.GetAddress().String())
     if err != nil {
         fmt.Printf("self unregistered err %s\n", err)
         return
@@ -255,7 +256,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
     bits_needed := make([]int, ringsize, ringsize)
-    bits_needed[0], _, self_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, topoheight, w.GetAddress().String())
+    bits_needed[0], _, _, self_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, topoheight, w.GetAddress().String())
     if err != nil {
         fmt.Printf("self unregistered err %s\n", err)
         return
@@ -269,7 +270,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
         return
     }
     var dest_e *crypto.ElGamal
-    bits_needed[1], _, dest_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, topoheight, addr.BaseAddress().String())
+    bits_needed[1], _, _, dest_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, topoheight, addr.BaseAddress().String())
     if err != nil {
         fmt.Printf(" t %d unregistered1 '%s' %s\n", t, addr, err)
         return
@@ -311,7 +312,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
     //fmt.Printf("t:%d len %d %s receiver %s sender %s\n",t,len(ring_balances), k, receiver_without_payment_id.String(), w.GetAddress().String())
     var ebal *crypto.ElGamal
-    bits_needed[len(ring_balances)], _, ebal, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, -1, k)
+    bits_needed[len(ring_balances)], _, _, ebal, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, -1, k)
     if err != nil {
         fmt.Printf(" unregistered %s\n", k)
         return
@@ -353,7 +354,11 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, ringsize uint
     max_bits += 6 // extra 6 bits
     if !dry_run {
-        tx = w.BuildTransaction(transfers, rings_balances, rings, height, scdata, treehash_raw, max_bits)
+        tx = w.BuildTransaction(transfers, rings_balances, rings, block_hash, height, scdata, treehash_raw, max_bits)
+    }
+
+    if tx == nil {
fmt.Errorf("somehow the tx could not be built, please retry") } return