DERO HE Stargate Release 17

This commit is contained in:
Captain 2021-11-09 02:31:05 +00:00
parent cf3173c22c
commit 9ef0d71e8c
No known key found for this signature in database
GPG Key ID: 18CDB3ED5E85D2D4
12 changed files with 110 additions and 60 deletions

View File

@ -425,7 +425,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
for _, mbl := range bl.MiniBlocks { for _, mbl := range bl.MiniBlocks {
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(miner_hash) { if !chain.IsAddressHashValid(true, miner_hash) {
err = fmt.Errorf("miner address not registered") err = fmt.Errorf("miner address not registered")
return err, false return err, false
} }

View File

@ -467,7 +467,7 @@ func (chain *Blockchain) Create_new_block_template_mining(miniblock_miner_addres
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(miner_hash) { if !chain.IsAddressHashValid(false, miner_hash) {
logger.V(3).Error(err, "unregistered miner %s", miner_hash) logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins") err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return return
@ -527,14 +527,6 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
return return
} }
var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(miner_hash) {
logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return
}
//fmt.Printf("received miniblock %x block %x\n", miniblock_blob, bl.Serialize()) //fmt.Printf("received miniblock %x block %x\n", miniblock_blob, bl.Serialize())
// lets try to check pow to detect whether the miner is cheating // lets try to check pow to detect whether the miner is cheating
@ -544,6 +536,14 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
return return
} }
var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(true, miner_hash) {
logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return
}
// if we reach here, everything looks ok // if we reach here, everything looks ok
bl.MiniBlocks = append(bl.MiniBlocks, mbl) bl.MiniBlocks = append(bl.MiniBlocks, mbl)
@ -675,14 +675,16 @@ func (chain *Blockchain) ExpandMiniBlockTip(hash crypto.Hash) (result crypto.Has
} }
// it is USED by consensus and p2p whether the miner's hash is valid // it is USED by consensus and p2p whether the miner's hash is valid
func (chain *Blockchain) IsAddressHashValid(hashes ...crypto.Hash) (found bool) { func (chain *Blockchain) IsAddressHashValid(skip_cache bool, hashes ...crypto.Hash) (found bool) {
for _, hash := range hashes { // check whether everything could be satisfied via cache if skip_cache {
if _, found := chain.cache_IsAddressHashValid.Get(fmt.Sprintf("%s", hash)); found { for _, hash := range hashes { // check whether everything could be satisfied via cache
goto hard_way // do things the hard way if _, found := chain.cache_IsAddressHashValid.Get(fmt.Sprintf("%s", hash)); !found {
goto hard_way // do things the hard way
}
} }
return true
} }
return true
hard_way: hard_way:
// the block may just have been mined, so we evaluate roughly 25 past blocks to cross check // the block may just have been mined, so we evaluate roughly 25 past blocks to cross check

View File

@ -197,7 +197,7 @@ func (chain *Blockchain) InsertMiniBlock(mbl block.MiniBlock) (err error, result
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(miner_hash) { if !chain.IsAddressHashValid(true, miner_hash) {
logger.V(1).Error(err, "Invalid miner address") logger.V(1).Error(err, "Invalid miner address")
err = fmt.Errorf("Invalid miner address") err = fmt.Errorf("Invalid miner address")
return err, false return err, false

View File

@ -52,12 +52,12 @@ const MAX_RINGSIZE = 128 // <= 128, ringsize will be accepted
// Minimum FEE calculation constants are here // Minimum FEE calculation constants are here
const FEE_PER_KB = uint64(100) // .00100 dero per kb const FEE_PER_KB = uint64(100) // .00100 dero per kb
const MAINNET_BOOTSTRAP_DIFFICULTY = uint64(800) // atlantis mainnet bootstrapped at 200 MH/s const MAINNET_BOOTSTRAP_DIFFICULTY = uint64(80000000) // atlantis mainnet bootstrapped at 80 MH/s
const MAINNET_MINIMUM_DIFFICULTY = uint64(800) // 800 H/s const MAINNET_MINIMUM_DIFFICULTY = uint64(800000000) // 80 MH/s
// testnet bootstraps at 1 MH // testnet bootstraps at 1 MH
const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(10000) // testnet bootstrap at 100 H/s const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(50000) // testnet bootstrap at 50KH/s
const TESTNET_MINIMUM_DIFFICULTY = uint64(10000) // 100 H/s const TESTNET_MINIMUM_DIFFICULTY = uint64(10000) // 10KH/s
// this single parameter controls lots of various parameters // this single parameter controls lots of various parameters
// within the consensus, it should never go below 7 // within the consensus, it should never go below 7
@ -97,7 +97,7 @@ var Mainnet = CHAIN_CONFIG{Name: "mainnet",
} }
var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0 var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0
Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x40, 0x00, 0x00, 0x00}), Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x44, 0x00, 0x00, 0x00}),
P2P_Default_Port: 40401, P2P_Default_Port: 40401,
RPC_Default_Port: 40402, RPC_Default_Port: 40402,
Wallet_RPC_Default_Port: 40403, Wallet_RPC_Default_Port: 40403,

View File

@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"
// right now it has to be manually changed // right now it has to be manually changed
// do we need to include git commitsha?? // do we need to include git commitsha??
var Version = semver.MustParse("3.4.38-1.DEROHE.STARGATE+08112021") var Version = semver.MustParse("3.4.45-1.DEROHE.STARGATE+08112021")

View File

@ -179,6 +179,46 @@ var execution_tests_functions = []struct {
nil, nil,
Variable{Type: String, ValueString: string(decodeHex("41FB"))}, Variable{Type: String, ValueString: string(decodeHex("41FB"))},
}, },
{
"substr()",
`Function TestRun(input String) String
30 return substr(input,0,5)
End Function`,
"TestRun",
map[string]interface{}{"input": string("0123456789")},
nil,
Variable{Type: String, ValueString: string("01234")},
},
{
"substr()",
`Function TestRun(input String) String
30 return substr(input,1,5)
End Function`,
"TestRun",
map[string]interface{}{"input": string("0123456789")},
nil,
Variable{Type: String, ValueString: string("12345")},
},
{
"substr()",
`Function TestRun(input String) String
30 return substr(input,1,129)
End Function`,
"TestRun",
map[string]interface{}{"input": string("0123456789")},
nil,
Variable{Type: String, ValueString: string("123456789")},
},
{
"substr()",
`Function TestRun(input String) String
30 return substr(input,13,129)
End Function`,
"TestRun",
map[string]interface{}{"input": string("0123456789")},
nil,
Variable{Type: String, ValueString: string("")},
},
} }
func decodeHex(s string) []byte { func decodeHex(s string) []byte {

View File

@ -173,6 +173,8 @@ func (connection *Connection) process_object_response(response Objects, sent int
processing_complete <- true processing_complete <- true
}() }()
defer globals.Recover(2)
for i := 0; i < len(response.CBlocks); i++ { // process incoming full blocks for i := 0; i < len(response.CBlocks); i++ { // process incoming full blocks
var cbl block.Complete_Block // parse incoming block and deserialize it var cbl block.Complete_Block // parse incoming block and deserialize it
var bl block.Block var bl block.Block

View File

@ -104,37 +104,36 @@ func (connection *Connection) feed_chunk(chunk *Block_Chunk, sent int64) error {
var bl block.Block var bl block.Block
if err := bl.Deserialize(chunk.BLOCK); err != nil {
logger.V(1).Error(err, "error deserializing block")
return nil
}
if bl.GetHash() != chunk.BLID {
return fmt.Errorf("Corrupted Chunk. bad block data")
}
// we must check the Pow now
if int64(bl.Height) >= chain.Get_Height()-3 && int64(bl.Height) <= chain.Get_Height()+3 {
} else {
return nil // we need not broadcast
}
if len(bl.Tips) == 0 || len(bl.MiniBlocks) < 5 {
return nil
}
for _, mbl := range bl.MiniBlocks {
if !chain.VerifyMiniblockPoW(&bl, mbl) {
return errormsg.ErrInvalidPoW
}
}
broadcast_Chunk(chunk, 0, sent) // broadcast chunk INV
var chunks *Chunks_Per_Block_Data var chunks *Chunks_Per_Block_Data
if chunksi, ok := chunk_map.Load(chunk.HHash); ok { if chunksi, ok := chunk_map.Load(chunk.HHash); ok {
chunks = chunksi.(*Chunks_Per_Block_Data) chunks = chunksi.(*Chunks_Per_Block_Data)
} else { } else {
if err := bl.Deserialize(chunk.BLOCK); err != nil {
logger.V(1).Error(err, "error deserializing block")
return nil
}
if bl.GetHash() != chunk.BLID {
return fmt.Errorf("Corrupted Chunk. bad block data")
}
// we must check the Pow now
if int64(bl.Height) >= chain.Get_Height()-3 && int64(bl.Height) <= chain.Get_Height()+3 {
} else {
return nil // we need not broadcast
}
if len(bl.Tips) == 0 || len(bl.MiniBlocks) < 5 {
return nil
}
for _, mbl := range bl.MiniBlocks {
if !chain.VerifyMiniblockPoW(&bl, mbl) {
return errormsg.ErrInvalidPoW
}
}
chunks = new(Chunks_Per_Block_Data) chunks = new(Chunks_Per_Block_Data)
chunks.Created = time.Now() chunks.Created = time.Now()
chunk_map.Store(chunk.HHash, chunks) chunk_map.Store(chunk.HHash, chunks)
@ -144,6 +143,10 @@ func (connection *Connection) feed_chunk(chunk *Block_Chunk, sent int64) error {
return nil return nil
} }
if chunks.Chunks[chunk.CHUNK_ID] == nil {
broadcast_Chunk(chunk, 0, sent) // broadcast chunk INV
}
chunks.Lock() chunks.Lock()
defer chunks.Unlock() defer chunks.Unlock()

View File

@ -803,8 +803,6 @@ func (connection *Connection) isConnectionSyncing() (count int) {
func trigger_sync() { func trigger_sync() {
defer globals.Recover(3) defer globals.Recover(3)
topoheight := chain.Load_Block_Topological_order(chain.Get_Top_ID())
unique_map := UniqueConnections() unique_map := UniqueConnections()
var clist []*Connection var clist []*Connection
@ -822,12 +820,14 @@ func trigger_sync() {
for _, connection := range clist { for _, connection := range clist {
height := chain.Get_Height()
//connection.Lock() recursive mutexes are not supported //connection.Lock() recursive mutexes are not supported
// only choose highest available peers for syncing // only choose highest available peers for syncing
if atomic.LoadUint32(&connection.State) != HANDSHAKE_PENDING && topoheight <= atomic.LoadInt64(&connection.TopoHeight) { // skip pre-handshake connections if atomic.LoadUint32(&connection.State) != HANDSHAKE_PENDING && height < atomic.LoadInt64(&connection.Height) { // skip pre-handshake connections
// check whether we are lagging with this connection // check whether we are lagging with this connection
//connection.Lock() //connection.Lock()
islagging := topoheight < atomic.LoadInt64(&connection.TopoHeight) islagging := height < atomic.LoadInt64(&connection.Height)
//fmt.Printf("checking cdiff is lagging %+v topoheight %d peer topoheight %d \n", islagging, topoheight, connection.TopoHeight) //fmt.Printf("checking cdiff is lagging %+v topoheight %d peer topoheight %d \n", islagging, topoheight, connection.TopoHeight)
@ -840,15 +840,18 @@ func trigger_sync() {
continue continue
} }
if connection.Height >= (chain.Get_Height() + 1) { // give ourselves one sec, maybe the block is just being written if connection.Height > chain.Get_Height() { // give ourselves one sec, maybe the block is just being written
time.Sleep(time.Second) time.Sleep(time.Second)
islagging = topoheight < atomic.LoadInt64(&connection.TopoHeight) // we only use topoheight, since pruned chain might not have full cdiff height := chain.Get_Height()
islagging = height < atomic.LoadInt64(&connection.Height) // we only use topoheight, since pruned chain might not have full cdiff
} else {
continue
} }
if islagging { if islagging {
//connection.Lock() //connection.Lock()
connection.logger.V(1).Info("We need to resync with the peer", "height", connection.Height, "pruned", connection.Pruned) connection.logger.V(1).Info("We need to resync with the peer", "our_height", height, "height", connection.Height, "pruned", connection.Pruned)
//connection.Unlock() //connection.Unlock()
// set mode to syncronising // set mode to syncronising

View File

@ -202,7 +202,7 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(miner_hash) { if !chain.IsAddressHashValid(false, miner_hash) { // this will use cache
c.logger.V(3).Error(err, "unregistered miner") c.logger.V(3).Error(err, "unregistered miner")
return fmt.Errorf("unregistered miner") return fmt.Errorf("unregistered miner")
} }

View File

@ -105,7 +105,7 @@ func (l *leaf) Get(store *Store, keyhash [HASHSIZE]byte) ([]byte, error) {
return l.value, nil return l.value, nil
} }
return nil, xerrors.Errorf("%w: collision, keyhash %x not found", ErrNotFound, keyhash) return nil, xerrors.Errorf("%w: collision, keyhash %x not found, keyhash in ram %x", ErrNotFound, keyhash,l.keyhash)
} }
func (l *leaf) Delete(store *Store, keyhash [HASHSIZE]byte) (bool, bool, error) { func (l *leaf) Delete(store *Store, keyhash [HASHSIZE]byte) (bool, bool, error) {

View File

@ -77,7 +77,7 @@ func (l *leaf) GetKeyValue(store *Store, keyhash [HASHSIZE]byte, valid_bit_count
return used_bit_count, l.key, l.value, nil return used_bit_count, l.key, l.value, nil
} }
return used_bit_count, nil, nil, xerrors.Errorf("%w: collision, keyhash %x not found", ErrNotFound, keyhash) return used_bit_count, nil, nil, xerrors.Errorf("%w: collision, keyhash %x not found, inram hash %x, used_bit_count %d", ErrNotFound, keyhash,l.keyhash,used_bit_count)
} }
// sets a root for the cursor, so the cursor visits only a specific prefix keys // sets a root for the cursor, so the cursor visits only a specific prefix keys