DERO-HE STARGATE Testnet Release22

Captain 2021-11-13 16:29:31 +00:00
parent c6208dcbeb
commit 809cea2b90
No known key found for this signature in database
GPG Key ID: 18CDB3ED5E85D2D4
27 changed files with 161 additions and 291 deletions

View File

@@ -117,7 +117,6 @@ func (mbl *MiniBlock) HasPid(pid uint32) bool {
 	default:
 		panic("not supported")
-		return false
 	}
 }

View File

@@ -393,14 +393,10 @@ func MiniBlocks_FilterOnlyGenesis(mbls []MiniBlock, tips []crypto.Hash) (result
 	case 2:
 		pid1 := binary.BigEndian.Uint32(tips[0][:])
 		pid2 := binary.BigEndian.Uint32(tips[1][:])
 		return MiniBlocks_Filter(baselist, []uint32{pid1, pid2})
 	default:
 		panic("only max 2 tips are supported")
 	}
-	return
 }
 /*
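Both deletions above drop dead code: a statement placed right after panic can never execute, and a function that ends in a switch whose cases all return or panic (with a default present) needs no trailing return in Go. A minimal standalone illustration, not taken from the repository:

package example

// label compiles without a trailing return because the switch is a
// terminating statement: every case returns or panics and a default exists.
func label(v int) string {
	switch {
	case v == 0:
		return "zero"
	case v > 0:
		return "positive"
	default:
		panic("not supported")
	}
}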

View File

@@ -62,11 +62,8 @@ type Blockchain struct {
 	Tips map[crypto.Hash]crypto.Hash // current tips
-	dag_unsettled              map[crypto.Hash]bool // current unsettled dag
-	dag_past_unsettled_cache   *lru.Cache
-	dag_future_unsettled_cache *lru.Cache
 	mining_blocks_cache          *lru.Cache // used to cache blocks which have been supplied to mining
+	cache_IsMiniblockPowValid    *lru.Cache // used to cache mini blocks pow test result
 	cache_IsAddressHashValid     *lru.Cache // used to cache some outputs
 	cache_Get_Difficulty_At_Tips *lru.Cache // used to cache some outputs
@@ -81,8 +78,7 @@ type Blockchain struct {
 	Top_Block_Median_Size uint64 // median block size of current top block
 	Top_Block_Base_Reward uint64 // top block base reward
-	checkpints_disabled bool // are checkpoints disabled
 	simulator           bool // is simulator mode
 	P2P_Block_Relayer     func(*block.Complete_Block, uint64) // tell p2p to broadcast any block this daemon hash found
 	P2P_MiniBlock_Relayer func(mbl block.MiniBlock, peerid uint64)
@@ -134,7 +130,9 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
 	logger.Info("will use", "integrator_address", chain.integrator_address.String())
-	//chain.Tips = map[crypto.Hash]crypto.Hash{} // initialize Tips map
+	if chain.cache_IsMiniblockPowValid, err = lru.New(8192); err != nil { // temporary cache for miniblock difficulty
+		return nil, err
+	}
 	if chain.cache_Get_Difficulty_At_Tips, err = lru.New(8192); err != nil { // temporary cache for difficulty
 		return nil, err
 	}
@@ -145,10 +143,6 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
 		return nil, err
 	}
-	if globals.Arguments["--disable-checkpoints"] != nil {
-		chain.checkpints_disabled = globals.Arguments["--disable-checkpoints"].(bool)
-	}
 	if params["--simulator"] == true {
 		chain.simulator = true // enable simulator mode, this will set hard coded difficulty to 1
 	}
@@ -745,6 +739,8 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 				panic(err)
 			}
+			logger.V(3).Info("reading block snapshot", "blid", full_order[i-1], "i", i, "record_version", record_version)
 			ss, err = chain.Store.Balance_store.LoadSnapshot(record_version)
 			if err != nil {
 				panic(err)
@@ -840,20 +836,23 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 			}
 			// we are here, means everything is okay, lets commit the update balance tree
 			data_trees = append(data_trees, balance_tree, sc_meta)
 			//fmt.Printf("committing data trees %+v\n", data_trees)
 			commit_version, err := graviton.Commit(data_trees...)
 			if err != nil {
 				panic(err)
 			}
-			//fmt.Printf("committed trees version %d at topo %d\n", commit_version, current_topo_block)
 			chain.StoreBlock(bl, commit_version)
 			if height_changed {
 				chain.Store.Topo_store.Write(current_topo_block, full_order[i], commit_version, chain.Load_Block_Height(full_order[i]))
+				if logger.V(3).Enabled() {
+					merkle_root, err := chain.Load_Merkle_Hash(commit_version)
+					if err != nil {
+						panic(err)
+					}
+					logger.V(3).Info("height changed storing topo", "i", i, "blid", full_order[i].String(), "topoheight", current_topo_block, "commit_version", commit_version, "committed_merkle", merkle_root)
+				}
 			}
 		}
@@ -993,9 +992,6 @@ func (chain *Blockchain) Initialise_Chain_From_DB() {
 	chain.Tips[top] = top // we only can load a single tip from db
-	// get dag unsettled, it's only possible when we have the tips
-	// chain.dag_unsettled = chain.Get_DAG_Unsettled() // directly off the disk
 	logger.V(1).Info("Reloaded Chain from disk", "Tips", chain.Tips, "Height", chain.Height)
 }

View File

@@ -96,28 +96,6 @@ func CheckPowHashBig(pow_hash crypto.Hash, big_difficulty_integer *big.Int) bool
 	return false
 }
-
-// confirms whether the actual tip difficulty is withing 9% deviation with reference
-// actual tip cannot be less than 91% of main tip
-// if yes tip is okay, else tip should be declared stale
-// both the tips should be in the store
-func (chain *Blockchain) validate_tips(reference, actual crypto.Hash) (result bool) {
-
-	reference_diff := chain.Load_Block_Difficulty(reference)
-	actual_diff := chain.Load_Block_Difficulty(actual)
-
-	// multiply by 91
-	reference91 := new(big.Int).Mul(reference_diff, new(big.Int).SetUint64(91))
-	// divide by 100
-	reference91.Div(reference91, new(big.Int).SetUint64(100))
-
-	if reference91.Cmp(actual_diff) < 0 {
-		return true
-	} else {
-		return false
-	}
-}
-
 // when creating a new block, current_time in utc + chain_block_time must be added
 // while verifying the block, expected time stamp should be replaced from what is in blocks header
 // in DERO atlantis difficulty is based on previous tips
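For reference, the removed validate_tips encoded a single comparison: a tip was acceptable only while its difficulty stayed above 91% of the reference tip's difficulty (reference * 91 / 100 < actual). A minimal big.Int sketch of that check, independent of the chain types:

package example

import "math/big"

// tipWithinDeviation reports whether the actual difficulty is above 91% of the
// reference difficulty, i.e. the rule enforced by the removed validate_tips.
func tipWithinDeviation(reference, actual *big.Int) bool {
	threshold := new(big.Int).Mul(reference, big.NewInt(91)) // multiply by 91
	threshold.Div(threshold, big.NewInt(100))                // divide by 100
	return threshold.Cmp(actual) < 0                         // reference*91/100 < actual
}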
@@ -239,6 +217,15 @@ func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int {
 }

 func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock) bool {
+	var cachekey []byte
+	for i := range bl.Tips {
+		cachekey = append(cachekey, bl.Tips[i][:]...)
+	}
+	cachekey = append(cachekey, mbl.Serialize()...)
+
+	if _, ok := chain.cache_IsMiniblockPowValid.Get(fmt.Sprintf("%s", cachekey)); ok {
+		return true
+	}
 	PoW := mbl.GetPoWHash()
 	block_difficulty := chain.Get_Difficulty_At_Tips(bl.Tips)
@@ -248,6 +235,7 @@ func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock
 	}*/

 	if CheckPowHashBig(PoW, block_difficulty) == true {
+		chain.cache_IsMiniblockPowValid.Add(fmt.Sprintf("%s", cachekey), true) // set in cache
 		return true
 	}
 	return false
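A minimal sketch of the verification cache introduced above, assuming github.com/hashicorp/golang-lru (consistent with the lru.New(8192) call added in Blockchain_Start); the key is every tip hash concatenated with the serialized miniblock, and only successful checks are stored. The verifyPoW callback stands in for the real hash-plus-difficulty comparison and is not part of the repository:

package example

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

type powCache struct{ c *lru.Cache }

func newPowCache() (*powCache, error) {
	c, err := lru.New(8192) // same size used for the other temporary caches
	if err != nil {
		return nil, err
	}
	return &powCache{c: c}, nil
}

// verify consults the cache before running the expensive PoW check and
// records only positive results, mirroring the diff above.
func (p *powCache) verify(tips [][32]byte, serializedMiniblock []byte, verifyPoW func() bool) bool {
	var cachekey []byte
	for i := range tips {
		cachekey = append(cachekey, tips[i][:]...) // key depends on the tips ...
	}
	cachekey = append(cachekey, serializedMiniblock...) // ... and on the miniblock itself
	skey := fmt.Sprintf("%s", cachekey)

	if _, ok := p.c.Get(skey); ok {
		return true // already verified earlier
	}
	if verifyPoW() {
		p.c.Add(skey, true) // set in cache
		return true
	}
	return false
}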

View File

@@ -622,7 +622,7 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
 	cbl.Bl = &bl // the block is now complete, lets try to add it to chain

-	if !accept_limiter.Allow() { // if rate limiter allows, then add block to chain
+	if !chain.simulator && !accept_limiter.Allow() { // if rate limiter allows, then add block to chain
 		logger.Info("Block rejected by chain.", "blid", bl.GetHash())
 		return
 	}

View File

@@ -66,12 +66,20 @@ func (s *storefs) DeleteBlock(h [32]byte) error {
 	}

 	filename_start := fmt.Sprintf("%x.block", h[:])
+	var found bool
 	for _, file := range files {
 		if strings.HasPrefix(file.Name(), filename_start) {
 			file := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]), file.Name())
-			return os.Remove(file)
+			err = os.Remove(file)
+			if err != nil {
+				return err
+			}
+			found = true
 		}
 	}

+	if found {
+		return nil
+	}
 	return os.ErrNotExist
 }
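The rewritten DeleteBlock now removes every file whose name begins with the block-hash prefix and reports os.ErrNotExist only when nothing matched, instead of returning after the first hit. A rough standalone sketch of that semantic with a hypothetical removeByPrefix helper (not the repository's API):

package example

import (
	"os"
	"path/filepath"
	"strings"
)

// removeByPrefix deletes every file in dir whose name begins with prefix.
// It returns os.ErrNotExist only when no file matched, mirroring the new
// behaviour of storefs.DeleteBlock above.
func removeByPrefix(dir, prefix string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	var found bool
	for _, e := range entries {
		if strings.HasPrefix(e.Name(), prefix) {
			if err := os.Remove(filepath.Join(dir, e.Name())); err != nil {
				return err // stop on the first real filesystem error
			}
			found = true // keep scanning: a block may have several related files
		}
	}
	if found {
		return nil
	}
	return os.ErrNotExist
}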

View File

@@ -350,7 +350,7 @@ func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool
 	}

 	if hash != tx.Payloads[0].Statement.Roothash {
-		return fmt.Errorf("Tx statement roothash mismatch")
+		return fmt.Errorf("Tx statement roothash mismatch expected %x actual %x", tx.Payloads[0].Statement.Roothash, hash[:])
 	}
 	// we have found the balance tree with which it was built now lets verify
@@ -473,20 +473,11 @@ func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool
 	}

 	// these transactions are done
-	if tx.TransactionType == transaction.NORMAL || tx.TransactionType == transaction.BURN_TX {
+	if tx.TransactionType == transaction.NORMAL || tx.TransactionType == transaction.BURN_TX || tx.TransactionType == transaction.SC_TX {
 		transaction_valid_cache.Store(tx_hash, time.Now()) // signature got verified, cache it
 		return nil
 	}

-	// we reach here if tx proofs are valid
-	if tx.TransactionType != transaction.SC_TX {
-		return fmt.Errorf("non sc transaction should never reach here")
-	}
-
-	//if !tx.IsRegistrationValid() {
-	//	return fmt.Errorf("SC has invalid signature")
-	//}
-
 	return nil
 }

View File

@@ -227,7 +227,7 @@ func main() {
 	if threads < 1 || iterations < 1 || threads > 2048 {
 		panic("Invalid parameters\n")
-		return
+		//return
 	}

 	// This tiny goroutine continuously updates status as required
@@ -331,7 +331,7 @@ func main() {
 	l.Refresh() // refresh the prompt

 	go func() {
-		var gracefulStop = make(chan os.Signal)
+		var gracefulStop = make(chan os.Signal, 1)
 		signal.Notify(gracefulStop, os.Interrupt) // listen to all signals
 		for {
 			sig := <-gracefulStop
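The only change in this hunk (repeated in the daemon and wallet below) is the signal channel gaining a buffer of one. signal.Notify never blocks when delivering, so a signal arriving before the goroutine reaches the receive can be lost on an unbuffered channel; newer go vet versions also flag the unbuffered form. A minimal illustration, separate from the miner code:

package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	// Buffer of 1 guarantees at least one signal is retained even if we are
	// not yet blocked on the receive when it arrives.
	gracefulStop := make(chan os.Signal, 1)
	signal.Notify(gracefulStop, os.Interrupt)

	sig := <-gracefulStop
	fmt.Println("received signal:", sig)
}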

View File

@@ -226,7 +226,7 @@ func handle_easymenu_post_open_command(l *readline.Instance, line string) (proce
 			break
 		}

-		arguments = append(arguments, rpc.Argument{rpc.RPC_DESTINATION_PORT, rpc.DataUint64, a.Arguments.Value(rpc.RPC_DESTINATION_PORT, rpc.DataUint64).(uint64)})
+		arguments = append(arguments, rpc.Argument{Name: rpc.RPC_DESTINATION_PORT, DataType: rpc.DataUint64, Value: a.Arguments.Value(rpc.RPC_DESTINATION_PORT, rpc.DataUint64).(uint64)})
 		// arguments = append(arguments, rpc.Argument{"Comment", rpc.DataString, "holygrail of all data is now working if you can see this"})
 		if a.Arguments.Has(rpc.RPC_EXPIRY, rpc.DataTime) { // but only it is present
@@ -252,28 +252,28 @@ func handle_easymenu_post_open_command(l *readline.Instance, line string) (proce
 			switch arg.DataType {
 			case rpc.DataString:
 				if v, err := ReadString(l, arg.Name, arg.Value.(string)); err == nil {
-					arguments = append(arguments, rpc.Argument{arg.Name, arg.DataType, v})
+					arguments = append(arguments, rpc.Argument{Name: arg.Name, DataType: arg.DataType, Value: v})
 				} else {
 					logger.Error(fmt.Errorf("%s could not be parsed (type %s),", arg.Name, arg.DataType), "")
 					return
 				}
 			case rpc.DataInt64:
 				if v, err := ReadInt64(l, arg.Name, arg.Value.(int64)); err == nil {
-					arguments = append(arguments, rpc.Argument{arg.Name, arg.DataType, v})
+					arguments = append(arguments, rpc.Argument{Name: arg.Name, DataType: arg.DataType, Value: v})
 				} else {
 					logger.Error(fmt.Errorf("%s could not be parsed (type %s),", arg.Name, arg.DataType), "")
 					return
 				}
 			case rpc.DataUint64:
 				if v, err := ReadUint64(l, arg.Name, arg.Value.(uint64)); err == nil {
-					arguments = append(arguments, rpc.Argument{arg.Name, arg.DataType, v})
+					arguments = append(arguments, rpc.Argument{Name: arg.Name, DataType: arg.DataType, Value: v})
 				} else {
 					logger.Error(fmt.Errorf("%s could not be parsed (type %s),", arg.Name, arg.DataType), "")
 					return
 				}
 			case rpc.DataFloat64:
 				if v, err := ReadFloat64(l, arg.Name, arg.Value.(float64)); err == nil {
-					arguments = append(arguments, rpc.Argument{arg.Name, arg.DataType, v})
+					arguments = append(arguments, rpc.Argument{Name: arg.Name, DataType: arg.DataType, Value: v})
 				} else {
 					logger.Error(fmt.Errorf("%s could not be parsed (type %s),", arg.Name, arg.DataType), "")
 					return
@@ -310,13 +310,13 @@ func handle_easymenu_post_open_command(l *readline.Instance, line string) (proce
 			logger.Info("This RPC has requested your address.")
 			logger.Info("If you are expecting something back, it needs to be sent")
 			logger.Info("Your address will remain completely invisible to external entities(only sender/receiver can see your address)")
-			arguments = append(arguments, rpc.Argument{rpc.RPC_REPLYBACK_ADDRESS, rpc.DataAddress, wallet.GetAddress()})
+			arguments = append(arguments, rpc.Argument{Name: rpc.RPC_REPLYBACK_ADDRESS, DataType: rpc.DataAddress, Value: wallet.GetAddress()})
 		}

 		// if no arguments, use space by embedding a small comment
 		if len(arguments) == 0 { // allow user to enter Comment
 			if v, err := ReadString(l, "Comment", ""); err == nil {
-				arguments = append(arguments, rpc.Argument{"Comment", rpc.DataString, v})
+				arguments = append(arguments, rpc.Argument{Name: "Comment", DataType: rpc.DataString, Value: v})
 			} else {
 				logger.Error(fmt.Errorf("%s could not be parsed (type %s),", "Comment", rpc.DataString), "")
 				return

View File

@@ -52,15 +52,8 @@ import "github.com/deroproject/derohe/rpc"
 import "github.com/deroproject/derohe/blockchain"
 import derodrpc "github.com/deroproject/derohe/cmd/derod/rpc"

-//import "github.com/deroproject/derosuite/checkpoints"
 import "github.com/deroproject/derohe/cryptography/crypto"

-//import "github.com/deroproject/derosuite/cryptonight"
-//import "github.com/deroproject/derosuite/crypto/ringct"
-//import "github.com/deroproject/derohe/blockchain/rpcserver"
-//import "github.com/deroproject/derohe/walletapi"
-
 var command_line string = `derod
 DERO : A secure, private blockchain with smart-contracts
@@ -184,9 +177,7 @@ func main() {
 		params["--integrator-address"] = globals.Arguments["--integrator-address"]
 	}

-	//params["--disable-checkpoints"] = globals.Arguments["--disable-checkpoints"].(bool)
 	chain, err := blockchain.Blockchain_Start(params)
 	if err != nil {
 		logger.Error(err, "Error starting blockchain")
 		return
@@ -201,11 +192,9 @@ func main() {
 	}

 	p2p.P2P_Init(params)
 	rpcserver, _ := derodrpc.RPCServer_Start(params)

 	// setup function pointers
-	// these pointers need to fixed
 	chain.P2P_Block_Relayer = func(cbl *block.Complete_Block, peerid uint64) {
 		p2p.Broadcast_Block(cbl, peerid)
 	}
@@ -237,6 +226,14 @@ func main() {
 			mempool_tx_count := len(chain.Mempool.Mempool_List_TX())
 			regpool_tx_count := len(chain.Regpool.Regpool_List_TX())

+			if our_height < 0 { // somehow the data folder got deleted/renamed/corrupted
+				logger.Error(nil, "Somehow the data directory is not accessible. shutting down")
+				l.Terminal.ExitRawMode()
+				l.Terminal.Print("\n\n")
+				os.Exit(-1)
+				return
+			}
+
 			// only update prompt if needed
 			if last_second != time.Now().Unix() || last_our_height != our_height || last_best_height != best_height || last_peer_count != peer_count || last_topo_height != topo_height || last_mempool_tx_count != mempool_tx_count || last_regpool_tx_count != regpool_tx_count {
 				// choose color based on urgency
@@ -299,31 +296,58 @@ func main() {
 	l.Refresh() // refresh the prompt

 	go func() {
-		var gracefulStop = make(chan os.Signal)
+		var gracefulStop = make(chan os.Signal, 1)
 		signal.Notify(gracefulStop, os.Interrupt) // listen to all signals
 		for {
 			sig := <-gracefulStop
 			logger.Info("received signal", "signal", sig)
 			if sig.String() == "interrupt" {
 				close(Exit_In_Progress)
+				return
 			}
 		}
 	}()

+	for {
+		if err = readline_loop(l, chain, logger); err == nil {
+			break
+		}
+	}
+
+	logger.Info("Exit in Progress, Please wait")
+	time.Sleep(100 * time.Millisecond) // give prompt update time to finish
+
+	rpcserver.RPCServer_Stop()
+	p2p.P2P_Shutdown() // shutdown p2p subsystem
+	chain.Shutdown()   // shutdown chain subsysem
+
+	for globals.Subsystem_Active > 0 {
+		logger.Info("Exit in Progress, Please wait.", "active subsystems", globals.Subsystem_Active)
+		time.Sleep(1000 * time.Millisecond)
+	}
+}
+
+func readline_loop(l *readline.Instance, chain *blockchain.Blockchain, logger logr.Logger) (err error) {
+
+	defer func() {
+		err = globals.Recover(0)
+	}()
+
+restart_loop:
 	for {
 		line, err := l.Readline()
+		if err == io.EOF {
+			<-Exit_In_Progress
+			return nil
+		}
+
 		if err == readline.ErrInterrupt {
 			if len(line) == 0 {
 				logger.Info("Ctrl-C received, Exit in progress")
-				close(Exit_In_Progress)
-				break
+				return nil
 			} else {
 				continue
 			}
-		} else if err == io.EOF {
-			<-Exit_In_Progress
-			break
 		}

 		line = strings.TrimSpace(line)
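The restructuring above moves the whole prompt into readline_loop, converts a recovered panic into a non-nil error via globals.Recover(0), and has main simply re-enter the loop until it returns nil. A stripped-down sketch of that restart-on-panic pattern; names are generic, and recover is called directly in the deferred closure here, whereas the real code delegates to globals.Recover(0):

package main

import "fmt"

// promptLoop plays the role of readline_loop: a nil return means a clean
// exit request, while a recovered panic is surfaced as an error so the
// caller can restart the prompt instead of crashing the daemon.
func promptLoop(commands <-chan string) (err error) {
	defer func() {
		if r := recover(); r != nil { // recover must run directly in the deferred func
			err = fmt.Errorf("panic in command handler: %v", r)
		}
	}()
	for line := range commands {
		switch line {
		case "quit":
			return nil
		case "boom": // stands in for any command handler that panics
			panic("handler failed")
		default:
			fmt.Println("you said:", line)
		}
	}
	return nil
}

func main() {
	commands := make(chan string, 3)
	commands <- "hello"
	commands <- "boom"
	commands <- "quit"
	close(commands)

	for {
		if err := promptLoop(commands); err == nil {
			break // clean exit: proceed to shutdown, as in the diff
		}
		// otherwise a panic was recovered: re-enter the prompt loop
	}
}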
@@ -338,84 +362,6 @@ func main() {
 		case line == "help":
 			usage(l.Stderr())

-		//
-		case command == "import_chain": // this migrates existing chain from DERO atlantis to DERO HE
-			/*
-				f, err := os.Open("/tmp/raw_export.txt")
-				if err != nil {
-					globals.Logger.Warnf("error opening file /tmp/raw_export.txt %s", err)
-					continue
-				}
-				reader := bufio.NewReader(f)
-				account, _ := walletapi.Generate_Keys_From_Random() // create a random address
-				for {
-					line, err = reader.ReadString('\n')
-					if err != nil || len(line) < 10 {
-						break
-					}
-					var txs []string
-					err = json.Unmarshal([]byte(line), &txs)
-					if err != nil {
-						fmt.Printf("err while unmarshalling json err %s", err)
-						continue
-					}
-					if len(txs) < 1 {
-						panic("TX cannot be zero")
-					}
-					cbl, bl := chain.Create_new_miner_block(account.GetAddress())
-					for i := range txs {
-						var tx transaction.Transaction
-						tx_bytes, err := hex.DecodeString(txs[i])
-						if err != nil {
-							globals.Logger.Warnf("TX could not be decoded")
-						}
-						err = tx.DeserializeHeader(tx_bytes)
-						if err != nil {
-							globals.Logger.Warnf("TX could not be Deserialized")
-						}
-						globals.Logger.Infof(" txhash %s", tx.GetHash())
-						if i == 0 {
-							bl.Miner_TX = tx
-							cbl.Bl.Miner_TX = tx
-							if bl.Miner_TX.GetHash() != tx.GetHash() || cbl.Bl.Miner_TX.GetHash() != tx.GetHash() {
-								panic("miner TX hash mismatch")
-							}
-						} else {
-							bl.Tx_hashes = append(bl.Tx_hashes, tx.GetHash())
-							cbl.Bl.Tx_hashes = append(cbl.Bl.Tx_hashes, tx.GetHash())
-							cbl.Txs = append(cbl.Txs, &tx)
-						}
-					}
-					if err, ok := chain.Add_Complete_Block(cbl); ok {
-						globals.Logger.Warnf("Block Successfully accepted by chain at height %d", cbl.Bl.Miner_TX.Vin[0].(transaction.Txin_gen).Height)
-					} else {
-						globals.Logger.Warnf("Block rejected by chain at height %d, please investigate, err %s", cbl.Bl.Miner_TX.Vin[0].(transaction.Txin_gen).Height, err)
-						globals.Logger.Warnf("Stopping import")
-					}
-				}
-				globals.Logger.Infof("File imported Successfully")
-				f.Close()
-			*/
-
 		case command == "profile": // writes cpu and memory profile
 			// TODO enable profile over http rpc to enable better testing/tracking
 			cpufile, err := os.Create(filepath.Join(globals.GetDataDirectory(), "cpuprofile.prof"))
@@ -425,6 +371,7 @@ func main() {
 			}
 			if err := pprof.StartCPUProfile(cpufile); err != nil {
 				logger.Error(err, "could not start CPU profile")
+				continue
 			}
 			logger.Info("CPU profiling will be available after program exits.", "path", filepath.Join(globals.GetDataDirectory(), "cpuprofile.prof"))
 			defer pprof.StopCPUProfile()
@@ -582,7 +529,7 @@ func main() {
 				}
 			} else {
 				fmt.Printf("print_block needs a single block id as argument\n")
-				break
+				continue
 			}
 			bl, err := chain.Load_BL_FROM_ID(hash)
 			if err != nil {
@@ -641,7 +588,7 @@ func main() {
 				if err != nil {
 					fmt.Printf("Tips %s not in our DB", bl.Tips[i])
 					tips_found = false
-					break
+					continue
 				}
 			}
@@ -687,16 +634,11 @@ func main() {
 		*/
 		case strings.ToLower(line) == "status":
-			// fmt.Printf("chain diff %d\n",chain.Get_Difficulty_At_Block(chain.Top_ID))
-			//fmt.Printf("chain nw rate %d\n", chain.Get_Network_HashRate())
 			inc, out := p2p.Peer_Direction_Count()

 			mempool_tx_count := len(chain.Mempool.Mempool_List_TX())
 			regpool_tx_count := len(chain.Regpool.Regpool_List_TX())

-			//supply := chain.Load_Already_Generated_Coins_for_Topo_Index(nil, chain.Load_TOPO_HEIGHT(nil))
 			supply := uint64(0)

 			if supply > (1000000 * 1000000000000) {
@@ -733,7 +675,6 @@ func main() {
 		case strings.ToLower(line) == "peer_list": // print peer list
 			p2p.PeerList_Print()
 		case strings.ToLower(line) == "sync_info": // print active connections
 			p2p.Connection_Print()
 		case strings.ToLower(line) == "bye":
@@ -742,11 +683,11 @@ func main() {
 			fallthrough
 		case strings.ToLower(line) == "quit":
 			close(Exit_In_Progress)
-			goto exit
+			return nil

 		case command == "graphminifull": // renders the graph of miniblocks in memory
-			ioutil.WriteFile("/tmp/minidag.dot", []byte(chain.MiniBlocks.Graph()), 0644)
-			logger.Info("Writing mini block graph (from memory) dot format /tmp/minidag.dot\n")
+			ioutil.WriteFile("/tmp/minidag_recent.dot", []byte(chain.MiniBlocks.Graph()), 0644)
+			logger.Info("Writing mini block graph (from memory) dot format /tmp/minidag_recent.dot\n")

 		case command == "graphmini": // renders graphs of miniblocks within a block
 			topo := int64(0)
@@ -762,28 +703,23 @@ func main() {
 				continue
 			}

-			hash, err := chain.Load_Block_Topological_order_at_index(topo)
-			if err != nil {
-				fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
-				continue
-			}
-			bl, err := chain.Load_BL_FROM_ID(hash)
-			if err != nil {
-				fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
-				continue
-			}
-			tmp_collection := block.CreateMiniBlockCollection()
-			for _, tmbl := range bl.MiniBlocks {
-				if err, ok := tmp_collection.InsertMiniBlock(tmbl); !ok {
-					fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
-					continue
-				}
-			}
-			ioutil.WriteFile("/tmp/minidag.dot", []byte(tmp_collection.Graph()), 0644)
-			logger.Info("Writing mini block graph dot format /tmp/minidag.dot", "topo", topo)
+			if hash, err := chain.Load_Block_Topological_order_at_index(topo); err == nil {
+				if bl, err := chain.Load_BL_FROM_ID(hash); err == nil {
+					tmp_collection := block.CreateMiniBlockCollection()
+					for _, tmbl := range bl.MiniBlocks {
+						if err, ok := tmp_collection.InsertMiniBlock(tmbl); !ok {
+							fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
+							break restart_loop
+						}
+					}
+					ioutil.WriteFile(fmt.Sprintf("/tmp/minidag_%d.dot", topo), []byte(tmp_collection.Graph()), 0644)
+					logger.Info("Writing mini block graph dot format /tmp/minidag.dot", "topo", topo)
+				}
+			}
+			if err != nil {
+				fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
+			}

 		case command == "graph":
 			start := int64(0)
@@ -820,14 +756,12 @@ func main() {
 			WriteBlockChainTree(chain, "/tmp/graph.dot", start, stop)

 		case command == "pop":
 			switch len(line_parts) {
 			case 1:
 				chain.Rewind_Chain(1)
 			case 2:
 				pop_count := 0
 				if s, err := strconv.Atoi(line_parts[1]); err == nil {
-					//fmt.Printf("%T, %v", s, s)
 					pop_count = s
 					if chain.Rewind_Chain(int(pop_count)) {
@@ -893,19 +827,9 @@ func main() {
 			logger.Info(fmt.Sprintf("you said: %s", strconv.Quote(line)))
 		}
 	}

-exit:
-	logger.Info("Exit in Progress, Please wait")
-	time.Sleep(100 * time.Millisecond) // give prompt update time to finish
-
-	rpcserver.RPCServer_Stop()
-	p2p.P2P_Shutdown() // shutdown p2p subsystem
-	chain.Shutdown()   // shutdown chain subsysem
-
-	for globals.Subsystem_Active > 0 {
-		logger.Info("Exit in Progress, Please wait.", "active subsystems", globals.Subsystem_Active)
-		time.Sleep(1000 * time.Millisecond)
-	}
+	return fmt.Errorf("can never reach here")
 }

 func writenode(chain *blockchain.Blockchain, w *bufio.Writer, blid crypto.Hash, start_height int64) { // process a node, recursively
@@ -993,12 +917,10 @@ func prettyprint_json(b []byte) []byte {
 func usage(w io.Writer) {
 	io.WriteString(w, "commands:\n")
-	//io.WriteString(w, completer.Tree(" "))
 	io.WriteString(w, "\t\033[1mhelp\033[0m\t\tthis help\n")
 	io.WriteString(w, "\t\033[1mdiff\033[0m\t\tShow difficulty\n")
 	io.WriteString(w, "\t\033[1mprint_bc\033[0m\tPrint blockchain info in a given blocks range, print_bc <begin_height> <end_height>\n")
 	io.WriteString(w, "\t\033[1mprint_block\033[0m\tPrint block, print_block <block_hash> or <block_height>\n")
-	io.WriteString(w, "\t\033[1mprint_height\033[0m\tPrint local blockchain height\n")
 	io.WriteString(w, "\t\033[1mprint_tx\033[0m\tPrint transaction, print_tx <transaction_hash>\n")
 	io.WriteString(w, "\t\033[1mstatus\033[0m\t\tShow general information\n")
 	io.WriteString(w, "\t\033[1mpeer_list\033[0m\tPrint peer list\n")
@@ -1020,34 +942,8 @@ func usage(w io.Writer) {
 }

 var completer = readline.NewPrefixCompleter(
-	/*	readline.PcItem("mode",
-		readline.PcItem("vi"),
-		readline.PcItem("emacs"),
-	),
-	readline.PcItem("login"),
-	readline.PcItem("say",
-		readline.PcItem("hello"),
-		readline.PcItem("bye"),
-	),
-	readline.PcItem("setprompt"),
-	readline.PcItem("setpassword"),
-	readline.PcItem("bye"),
-	*/
 	readline.PcItem("help"),
-	/*	readline.PcItem("go",
-		readline.PcItem("build", readline.PcItem("-o"), readline.PcItem("-v")),
-		readline.PcItem("install",
-			readline.PcItem("-v"),
-			readline.PcItem("-vv"),
-			readline.PcItem("-vvv"),
-		),
-		readline.PcItem("test"),
-	),
-	readline.PcItem("sleep"),
-	*/
 	readline.PcItem("diff"),
-	//readline.PcItem("dev_verify_pool"),
-	//readline.PcItem("dev_verify_chain_doublespend"),
 	readline.PcItem("mempool_flush"),
 	readline.PcItem("mempool_delete_tx"),
 	readline.PcItem("mempool_print"),
@@ -1057,11 +953,8 @@ var completer = readline.NewPrefixCompleter(
 	readline.PcItem("peer_list"),
 	readline.PcItem("print_bc"),
 	readline.PcItem("print_block"),
-	readline.PcItem("print_height"),
-	readline.PcItem("print_tx"),
+	// readline.PcItem("print_tx"),
 	readline.PcItem("status"),
-	readline.PcItem("start_mining"),
-	readline.PcItem("stop_mining"),
 	readline.PcItem("sync_info"),
 	readline.PcItem("version"),
 	readline.PcItem("bye"),

View File

@@ -166,13 +166,6 @@ func GetTransaction(ctx context.Context, p rpc.GetTransaction_Params) (result rp
 				continue
 			}
 		}

-		{ // we could not fetch the tx, return an empty string
-			result.Txs_as_hex = append(result.Txs_as_hex, "")
-			err = fmt.Errorf("TX NOT FOUND %s", hash)
-			return
-		}
 	}

 	result.Status = "OK"

View File

@@ -181,7 +181,7 @@ func check_update() {
 	m1.Id = dns.Id()
 	m1.RecursionDesired = true
 	m1.Question = make([]dns.Question, 1)
-	m1.Question[0] = dns.Question{config.DNS_UPDATE_CHECK, dns.TypeTXT, dns.ClassINET}
+	m1.Question[0] = dns.Question{Name: config.DNS_UPDATE_CHECK, Qtype: dns.TypeTXT, Qclass: dns.ClassINET}

 	packed, err := m1.Pack()
 	if err != nil {

View File

@@ -32,21 +32,21 @@ const PLUGIN_NAME = "pong_server"
 const DEST_PORT = uint64(0x1234567812345678)

 var expected_arguments = rpc.Arguments{
-	{rpc.RPC_DESTINATION_PORT, rpc.DataUint64, DEST_PORT},
-	// { rpc.RPC_EXPIRY , rpc.DataTime, time.Now().Add(time.Hour).UTC()},
-	{rpc.RPC_COMMENT, rpc.DataString, "Purchase PONG"},
+	{Name: rpc.RPC_DESTINATION_PORT, DataType: rpc.DataUint64, Value: DEST_PORT},
+	// { Name:rpc.RPC_EXPIRY , DataType:rpc.DataTime, Value:time.Now().Add(time.Hour).UTC()},
+	{Name: rpc.RPC_COMMENT, DataType: rpc.DataString, Value: "Purchase PONG"},
 	//{"float64", rpc.DataFloat64, float64(0.12345)}, // in atomic units
-	// {rpc.RPC_NEEDS_REPLYBACK_ADDRESS,rpc.DataUint64,uint64(0)}, // this service will reply to incoming request,so needs the senders address
-	{rpc.RPC_VALUE_TRANSFER, rpc.DataUint64, uint64(12345)}, // in atomic units
+	// {Name:rpc.RPC_NEEDS_REPLYBACK_ADDRESS,DataType:rpc.DataUint64,Value:uint64(0)}, // this service will reply to incoming request,so needs the senders address
+	{Name: rpc.RPC_VALUE_TRANSFER, DataType: rpc.DataUint64, Value: uint64(12345)}, // in atomic units
 }

 // currently the interpreter seems to have a glitch if this gets initialized within the code
 // see limitations github.com/traefik/yaegi
 var response = rpc.Arguments{
-	{rpc.RPC_DESTINATION_PORT, rpc.DataUint64, uint64(0)},
-	{rpc.RPC_SOURCE_PORT, rpc.DataUint64, DEST_PORT},
-	{rpc.RPC_COMMENT, rpc.DataString, "Successfully purchased pong (this could be serial/license key or download link or further)"},
+	{Name: rpc.RPC_DESTINATION_PORT, DataType: rpc.DataUint64, Value: uint64(0)},
+	{Name: rpc.RPC_SOURCE_PORT, DataType: rpc.DataUint64, Value: DEST_PORT},
+	{Name: rpc.RPC_COMMENT, DataType: rpc.DataString, Value: "Successfully purchased pong (this could be serial/license key or download link or further)"},
 }

 var rpcClient = jsonrpc.NewClient("http://127.0.0.1:40403/json_rpc")
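These hunks, like the wallet and daemon ones elsewhere in the commit, only rewrite the rpc.Argument literals from positional to keyed form; the values are unchanged. Keyed fields keep the literals valid if the struct's field order ever changes, and go vet's composites check flags unkeyed literals of structs from another package. A generic before/after with a stand-in type (not the real rpc package):

package example

// Argument mirrors the shape of rpc.Argument for illustration only.
type Argument struct {
	Name     string
	DataType string
	Value    interface{}
}

var (
	// positional form: depends on the exact field order of Argument
	positional = Argument{"DestinationPort", "uint64", uint64(0x1234567812345678)}

	// keyed form: the style this commit converts the daemon, wallet and plugin code to
	keyed = Argument{Name: "DestinationPort", DataType: "uint64", Value: uint64(0x1234567812345678)}
)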

View File

@@ -90,7 +90,7 @@ func simulator_chain_start() (*blockchain.Blockchain, *derodrpc.RPCServer, map[s
 	if err != nil {
 		panic(err)
-		return nil, nil, nil
+		// return nil, nil, nil
 	}
 	params["chain"] = chain

View File

@@ -268,7 +268,7 @@ func main() {
 	l.Refresh() // refresh the prompt

 	go func() {
-		var gracefulStop = make(chan os.Signal)
+		var gracefulStop = make(chan os.Signal, 1)
 		signal.Notify(gracefulStop, os.Interrupt) // listen to all signals
 		for {
 			sig := <-gracefulStop

View File

@@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"
 // right now it has to be manually changed
 // do we need to include git commitsha??

-var Version = semver.MustParse("3.4.62-1.DEROHE.STARGATE+11112021")
+var Version = semver.MustParse("3.4.63-1.DEROHE.STARGATE+12112021")

View File

@@ -21,12 +21,9 @@ import "math/big"
 //import "crypto/rand"
 import "encoding/hex"

+import "golang.org/x/crypto/sha3"
 import "github.com/deroproject/derohe/cryptography/bn256"

-//import "golang.org/x/crypto/sha3"
-import "github.com/deroproject/derohe/cryptography/sha3"
-
 // the original try and increment method A Note on Hashing to BN Curves https://www.normalesup.org/~tibouchi/papers/bnhash-scis.pdf
 // see this for a simplified version https://github.com/clearmatics/mobius/blob/7ad988b816b18e22424728329fc2b166d973a120/contracts/bn256g1.sol
@@ -105,7 +102,7 @@ var CURVE_B = new(big.Int).SetUint64(3)
 // a = (p+1) / 4
 var CURVE_A = new(big.Int).Div(new(big.Int).Add(FIELD_ORDER, new(big.Int).SetUint64(1)), new(big.Int).SetUint64(4))

-func HashToPointNew(seed *big.Int) *bn256.G1 {
+func HashToPoint(seed *big.Int) *bn256.G1 {
 	y_squared := new(big.Int)

 	one := new(big.Int).SetUint64(1)
@@ -183,19 +180,9 @@ func isOnCurve(x, y *big.Int) bool {
 	return p_cubed.Cmp(new(big.Int).Exp(y, new(big.Int).SetUint64(2), FIELD_ORDER)) == 0
 }

+/*
 // this should be merged , simplified just as simple as 25519
-func HashToPoint(seed *big.Int) *bn256.G1 {
-	/*
-		var x, _ = new(big.Int).SetString("0d36fdf1852f1563df9c904374055bb2a4d351571b853971764b9561ae203a9e",16)
-		var y, _ = new(big.Int).SetString("06efda2e606d7bafec34b82914953fa253d21ca3ced18db99c410e9057dccd50",16)
-		fmt.Printf("hardcode point on curve %+v\n", isOnCurve(x,y))
-		panic("done")
-	*/
-	return HashToPointNew(seed)
-
+func HashToPointOld(seed *big.Int) *bn256.G1 {
 	seed_reduced := new(big.Int)
 	seed_reduced.Mod(seed, FIELD_MODULUS)
@@ -247,4 +234,4 @@ func HashToPoint(seed *big.Int) *bn256.G1 {
 	}
 	return nil
-}
+}*/

View File

@@ -16,9 +16,8 @@
 package crypto

-//import "golang.org/x/crypto/sha3"
+import "golang.org/x/crypto/sha3"
 import "encoding/binary"

-import "github.com/deroproject/derohe/cryptography/sha3"
-
 // quick keccak wrapper
 func Keccak256(data ...[]byte) (result Hash) {
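This wrapper now imports golang.org/x/crypto/sha3 again instead of the vendored cryptography/sha3 package. A self-contained sketch of what a Keccak-256 wrapper over that package typically looks like; the Hash type and exact body shown here are illustrative, the real ones live in the crypto package:

package example

import "golang.org/x/crypto/sha3"

type Hash [32]byte

// Keccak256 hashes the concatenation of its inputs with legacy Keccak-256,
// the pre-FIPS-202 padding variant exposed by x/crypto/sha3.
func Keccak256(data ...[]byte) (result Hash) {
	h := sha3.NewLegacyKeccak256()
	for _, b := range data {
		h.Write(b)
	}
	copy(result[:], h.Sum(nil))
	return
}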

View File

@@ -47,8 +47,16 @@ func (connection *Connection) bootstrap_chain() {
 	// var err error
 	var zerohash crypto.Hash

+	// peer's chain is only 110 height, so do not bootstrap
+	if connection.TopoHeight-50-max_request_topoheights < 10 {
+		connection.logger.Info("fastsync cannot be done as peer's chain has low height")
+		connection.logger.Info("will do normal sync")
+		connection.sync_chain()
+		return
+	}
+
 	// we will request top 60 blocks
-	ctopo := connection.TopoHeight
+	ctopo := connection.TopoHeight - 50 // last 50 blocks have to be synced, this syncing will help us detect error
 	var topos []int64
 	for i := ctopo - (max_request_topoheights - 1); i < ctopo; i++ {
 		topos = append(topos, i)

View File

@@ -210,7 +210,7 @@ func ping_loop() {
 			var request, response Dummy
 			fill_common(&request.Common) // fill common info

-			if c.peer_sent_time.Add(10 * time.Minute).Before(time.Now()) {
+			if c.peer_sent_time.Add(5 * time.Second).Before(time.Now()) {
 				c.peer_sent_time = time.Now()
 				request.Common.PeerList = get_peer_list()
 			}
@@ -833,7 +833,7 @@ func trigger_sync() {
 			//connection.Unlock()

 			if islagging {
-				if connection.Pruned > chain.Load_Block_Topological_order(chain.Get_Top_ID()) {
+				if connection.Pruned > chain.Load_Block_Topological_order(chain.Get_Top_ID()) && chain.Get_Height() != 0 {
 					connection.logger.V(1).Info("We cannot resync with the peer, since peer chain is pruned", "height", connection.Height, "pruned", connection.Pruned)
 					continue
 				}

View File

@@ -143,6 +143,7 @@ func (connection *Connection) dispatch_test_handshake() {

 // mark connection active
 func (c *Connection) Active(req Dummy, dummy *Dummy) error {
+	defer handle_connection_panic(c)
 	c.update(&req.Common) // update common information
 	atomic.StoreUint32(&c.State, ACTIVE)
 	fill_common(&dummy.Common) // fill common info
@@ -151,6 +152,7 @@ func (c *Connection) Active(req Dummy, dummy *Dummy) error {

 // used to ping pong
 func (c *Connection) Ping(request Dummy, response *Dummy) error {
+	defer handle_connection_panic(c)
 	fill_common_T1(&request.Common)
 	c.update(&request.Common)     // update common information
 	fill_common(&response.Common) // fill common info

View File

@@ -76,7 +76,7 @@ type Transaction_Prefix struct {
 	// thereby representing immense scalability and privacy both at the same time
 	// default dero network has id 0

-	TransactionType TransactionType `json:"version"`
+	TransactionType TransactionType `json:"txtype"`

 	Value        uint64   `json:"value"`         // represents value for premine, SC, BURN transactions
 	MinerAddress [33]byte `json:"miner_address"` // miner address // 33 bytes also used for registration
@@ -206,6 +206,16 @@ func (tx *Transaction) IsPremine() (result bool) {
 	return tx.TransactionType == PREMINE
 }

+func (tx *Transaction) IsSC() (result bool) {
+	return tx.TransactionType == SC_TX
+}
+
+// if external proof is required
+func (tx *Transaction) IsProofRequired() (result bool) {
+	return (tx.IsCoinbase() || tx.IsRegistration() || tx.IsPremine()) == false
+}
+
 func (tx *Transaction) Fees() (fees uint64) {
 	var zero_scid [32]byte
 	for i := range tx.Payloads {

View File

@@ -225,8 +225,8 @@ func (t *LookupTable) Lookup(p *bn256.G1, previous_balance uint64) (balance uint
 	}

-	panic(fmt.Sprintf("balance not yet found, work done %x", balance))
-	return balance
+	//panic(fmt.Sprintf("balance not yet found, work done %x", balance))
+	//return balance
 }

 // this should be tuned by anyone using this package

View File

@@ -857,7 +857,7 @@ func (w *Wallet_Memory) synchistory_block(scid crypto.Hash, topo int64) (err err
 				// enable sender side proofs
 				proof := rpc.NewAddressFromKeys((*crypto.Point)(blinder))
 				proof.Proof = true
-				proof.Arguments = rpc.Arguments{{"H", rpc.DataHash, crypto.Hash(shared_key)}, {rpc.RPC_VALUE_TRANSFER, rpc.DataUint64, uint64(entry.Amount - entry.Burn)}}
+				proof.Arguments = rpc.Arguments{{Name: "H", DataType: rpc.DataHash, Value: crypto.Hash(shared_key)}, {Name: rpc.RPC_VALUE_TRANSFER, DataType: rpc.DataUint64, Value: uint64(entry.Amount - entry.Burn)}}
 				entry.Proof = proof.String()
 				entry.PayloadType = tx.Payloads[t].RPCType
 				switch tx.Payloads[t].RPCType {
@@ -915,7 +915,7 @@ func (w *Wallet_Memory) synchistory_block(scid crypto.Hash, topo int64) (err err
 				// enable receiver side proofs
 				proof := rpc.NewAddressFromKeys((*crypto.Point)(blinder))
 				proof.Proof = true
-				proof.Arguments = rpc.Arguments{{"H", rpc.DataHash, crypto.Hash(shared_key)}, {rpc.RPC_VALUE_TRANSFER, rpc.DataUint64, uint64(entry.Amount)}}
+				proof.Arguments = rpc.Arguments{{Name: "H", DataType: rpc.DataHash, Value: crypto.Hash(shared_key)}, {Name: rpc.RPC_VALUE_TRANSFER, DataType: rpc.DataUint64, Value: uint64(entry.Amount)}}
 				entry.Proof = proof.String()
 				entry.PayloadType = tx.Payloads[t].RPCType

View File

@@ -55,13 +55,13 @@ func Transfer(ctx context.Context, p rpc.Transfer_Params) (result rpc.Transfer_R
 	//fmt.Printf("incoming transfer params %+v\n", p)

 	if p.SC_Code != "" {
-		p.SC_RPC = append(p.SC_RPC, rpc.Argument{rpc.SCACTION, rpc.DataUint64, uint64(rpc.SC_INSTALL)})
-		p.SC_RPC = append(p.SC_RPC, rpc.Argument{rpc.SCCODE, rpc.DataString, p.SC_Code})
+		p.SC_RPC = append(p.SC_RPC, rpc.Argument{Name: rpc.SCACTION, DataType: rpc.DataUint64, Value: uint64(rpc.SC_INSTALL)})
+		p.SC_RPC = append(p.SC_RPC, rpc.Argument{Name: rpc.SCCODE, DataType: rpc.DataString, Value: p.SC_Code})
 	}

 	if p.SC_ID != "" {
-		p.SC_RPC = append(p.SC_RPC, rpc.Argument{rpc.SCACTION, rpc.DataUint64, uint64(rpc.SC_CALL)})
-		p.SC_RPC = append(p.SC_RPC, rpc.Argument{rpc.SCID, rpc.DataHash, crypto.HashHexToHash(p.SC_ID)})
+		p.SC_RPC = append(p.SC_RPC, rpc.Argument{Name: rpc.SCACTION, DataType: rpc.DataUint64, Value: uint64(rpc.SC_CALL)})
+		p.SC_RPC = append(p.SC_RPC, rpc.Argument{Name: rpc.SCID, DataType: rpc.DataHash, Value: crypto.HashHexToHash(p.SC_ID)})
 	}

 	/*

View File

@@ -93,7 +93,7 @@ func simulator_chain_start() (*blockchain.Blockchain, *derodrpc.RPCServer, map[s
 	if err != nil {
 		panic(err)
-		return nil, nil, nil
+		//return nil, nil, nil
 	}
 	params["chain"] = chain

View File

@@ -193,7 +193,7 @@ func (w *Wallet_Memory) GetRandomIAddress8() (addr rpc.Address) {
 	var dstport [8]byte
 	rand.Read(dstport[:])

-	addr.Arguments = rpc.Arguments{{rpc.RPC_DESTINATION_PORT, rpc.DataUint64, binary.BigEndian.Uint64(dstport[:])}}
+	addr.Arguments = rpc.Arguments{{Name: rpc.RPC_DESTINATION_PORT, DataType: rpc.DataUint64, Value: binary.BigEndian.Uint64(dstport[:])}}
 	return
 }