DERO-HE STARGATE Testnet Release30

This commit is contained in:
Captain 2021-11-27 04:25:17 +00:00
parent 4501db42a4
commit b98d31ed18
No known key found for this signature in database
GPG Key ID: 18CDB3ED5E85D2D4
26 changed files with 692 additions and 1357 deletions

View File

@ -55,11 +55,20 @@ type Complete_Block struct {
// this has been simplified and varint length has been removed // this has been simplified and varint length has been removed
// keccak hash of entire block including miniblocks, gives the block id // keccak hash of entire block including miniblocks, gives the block id
func (bl *Block) GetHash() (hash crypto.Hash) { func (bl *Block) GetHash() (hash crypto.Hash) {
return sha3.Sum256(bl.serialize(true)) return sha3.Sum256(bl.serialize(false))
} }
func (bl *Block) GetHashWithoutMiniBlocks() (hash crypto.Hash) { func (bl *Block) GetHashSkipLastMiniBlock() (hash crypto.Hash) {
return sha3.Sum256(bl.SerializeWithoutMiniBlocks()) return sha3.Sum256(bl.SerializeWithoutLastMiniBlock())
}
// serialize entire block ( block_header + miner_tx + tx_list )
func (bl *Block) Serialize() []byte {
return bl.serialize(false) // include mini blocks
}
func (bl *Block) SerializeWithoutLastMiniBlock() []byte {
return bl.serialize(true) //skip last mini block
} }
// get timestamp, it has millisecond granularity // get timestamp, it has millisecond granularity
@ -87,7 +96,7 @@ func (bl Block) String() string {
} }
// this function serializes a block and skips miniblocks is requested // this function serializes a block and skips miniblocks is requested
func (bl *Block) serialize(includeminiblocks bool) []byte { func (bl *Block) serialize(skiplastminiblock bool) []byte {
var serialized bytes.Buffer var serialized bytes.Buffer
@ -117,13 +126,27 @@ func (bl *Block) serialize(includeminiblocks bool) []byte {
serialized.Write(hash[:]) serialized.Write(hash[:])
} }
if includeminiblocks { if len(bl.MiniBlocks) == 0 {
n = binary.PutUvarint(buf, uint64(len(bl.MiniBlocks))) serialized.WriteByte(0)
serialized.Write(buf[:n]) } else {
if skiplastminiblock == false {
n = binary.PutUvarint(buf, uint64(len(bl.MiniBlocks)))
serialized.Write(buf[:n])
for _, mblock := range bl.MiniBlocks {
s := mblock.Serialize()
serialized.Write(s[:])
}
} else {
length := len(bl.MiniBlocks) - 1
n = binary.PutUvarint(buf, uint64(length))
serialized.Write(buf[:n])
for i := 0; i < length; i++ {
s := bl.MiniBlocks[i].Serialize()
serialized.Write(s[:])
}
for _, mblock := range bl.MiniBlocks {
s := mblock.Serialize()
serialized.Write(s[:])
} }
} }
@ -138,15 +161,6 @@ func (bl *Block) serialize(includeminiblocks bool) []byte {
} }
// serialize entire block ( block_header + miner_tx + tx_list )
func (bl *Block) Serialize() []byte {
return bl.serialize(true) // include mini blocks
}
func (bl *Block) SerializeWithoutMiniBlocks() []byte {
return bl.serialize(false) // do not include mini blocks
}
// get block transactions tree hash // get block transactions tree hash
func (bl *Block) GetTipsHash() (result crypto.Hash) { func (bl *Block) GetTipsHash() (result crypto.Hash) {
h := sha3.New256() // add all the remaining hashes h := sha3.New256() // add all the remaining hashes

View File

@ -17,9 +17,9 @@
package block package block
import "fmt" import "fmt"
import "time"
import "hash" import "hash"
import "sync" import "sync"
import "bytes"
import "strings" import "strings"
import "encoding/binary" import "encoding/binary"
@ -28,49 +28,52 @@ import "golang.org/x/crypto/sha3"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
import "github.com/deroproject/derohe/pow" import "github.com/deroproject/derohe/pow"
const MINIBLOCK_SIZE = 68 const MINIBLOCK_SIZE = 48
var hasherPool = sync.Pool{ var hasherPool = sync.Pool{
New: func() interface{} { return sha3.New256() }, New: func() interface{} { return sha3.New256() },
} }
// it should be exactly 68 bytes after serialization // it should be exactly 48 bytes after serialization
// structure size 1 + 6 + 4 + 4 + 16 +32 + 5 bytes // structure size 1 + 2 + 5 + 8 + 16 + 16 bytes
type MiniBlock struct { type MiniBlock struct {
// all the below 3 fields are serialized into single byte // below 3 fields are serialized into single byte
Version uint8 // 1 byte // lower 5 bits (0,1,2,3,4) Version uint8 // 1 byte // lower 5 bits (0,1,2,3,4)
Odd bool // 1 bit flag, bit 4 Final bool // bit 5
Genesis bool // 1 bit flag, bit 5
PastCount uint8 // previous count // bits 6,7 PastCount uint8 // previous count // bits 6,7
Timestamp uint64 // 6 bytes millisecond precision, serialized in 6 bytes, Timestamp uint16 // can represent time from first block
// can represent time till 2121-04-11 11:53:25 Height uint64 // 5 bytes serialized in 5 bytes,
Past [2]uint32 // 4 bytes used to build DAG of miniblocks and prevent number of attacks
Past [2]uint32 // 8 bytes used to build DAG of miniblocks and prevent number of attacks
KeyHash crypto.Hash // 16 bytes, remaining bytes are trimmed miniblock miner keyhash KeyHash crypto.Hash // 16 bytes, remaining bytes are trimmed miniblock miner keyhash
Check crypto.Hash // in non genesis,32 bytes this is XOR of hash of TXhashes and block header hash
// in genesis, this represents 8 bytes height, 12 bytes of first tip, 12 bytes of second tip
Nonce [5]byte // 5 nonce byte represents 2^40 variations, 2^40 work every ms
// below fields are never serialized and are placed here for easier processing/documentation Flags uint32 // can be used as flags by special miners to represent something, also used as nonce
Distance uint32 // distance to tip block Nonce [3]uint32 // 12 nonce byte represents 2^96 variations, 2^96 work every ms
PastMiniBlocks []MiniBlock // pointers to past }
Height int64 // which height
type MiniBlockKey struct {
Height uint64
Past0 uint32
Past1 uint32
}
func (mbl *MiniBlock) GetKey() (key MiniBlockKey) {
key.Height = mbl.Height
key.Past0 = mbl.Past[0]
key.Past1 = mbl.Past[1]
return
} }
func (mbl MiniBlock) String() string { func (mbl MiniBlock) String() string {
r := new(strings.Builder) r := new(strings.Builder)
fmt.Fprintf(r, "%08x %d ", mbl.GetMiniID(), mbl.Version) fmt.Fprintf(r, "%d ", mbl.Version)
if mbl.Genesis { fmt.Fprintf(r, "height %d", mbl.Height)
fmt.Fprintf(r, "GENESIS height %d", int64(binary.BigEndian.Uint64(mbl.Check[:])))
} else {
fmt.Fprintf(r, "NORMAL ")
}
if mbl.Odd { if mbl.Final {
fmt.Fprintf(r, " Odd ") fmt.Fprintf(r, " Final ")
} }
if mbl.PastCount == 1 { if mbl.PastCount == 1 {
@ -79,23 +82,12 @@ func (mbl MiniBlock) String() string {
fmt.Fprintf(r, " Past [%08x %08x]", mbl.Past[0], mbl.Past[1]) fmt.Fprintf(r, " Past [%08x %08x]", mbl.Past[0], mbl.Past[1])
} }
fmt.Fprintf(r, " time %d", mbl.Timestamp) fmt.Fprintf(r, " time %d", mbl.Timestamp)
fmt.Fprintf(r, " flags %d", mbl.Flags)
fmt.Fprintf(r, " Nonce [%08x %08x %08x]", mbl.Nonce[0], mbl.Nonce[1], mbl.Nonce[2])
return r.String() return r.String()
} }
func (mbl *MiniBlock) GetTimestamp() time.Time {
return time.Unix(0, int64(mbl.Timestamp*uint64(time.Millisecond)))
}
//func (mbl *MiniBlock) SetTimestamp(t time.Time) {
// mbl.Timestamp = uint64(t.UTC().UnixMilli())
//}
func (mbl *MiniBlock) GetMiniID() uint32 {
h := mbl.GetHash()
return binary.BigEndian.Uint32(h[:])
}
// this function gets the block identifier hash, this is only used to deduplicate mini blocks // this function gets the block identifier hash, this is only used to deduplicate mini blocks
func (mbl *MiniBlock) GetHash() (result crypto.Hash) { func (mbl *MiniBlock) GetHash() (result crypto.Hash) {
ser := mbl.Serialize() ser := mbl.Serialize()
@ -115,33 +107,8 @@ func (mbl *MiniBlock) GetPoWHash() (hash crypto.Hash) {
return pow.Pow(mbl.Serialize()) return pow.Pow(mbl.Serialize())
} }
func (mbl *MiniBlock) HasPid(pid uint32) bool {
switch mbl.PastCount {
case 0:
return false
case 1:
if mbl.Past[0] == pid {
return true
} else {
return false
}
case 2:
if mbl.Past[0] == pid || mbl.Past[1] == pid {
return true
} else {
return false
}
default:
panic("not supported")
}
}
func (mbl *MiniBlock) SanityCheck() error { func (mbl *MiniBlock) SanityCheck() error {
if mbl.Version >= 32 { if mbl.Version >= 31 {
return fmt.Errorf("version not supported") return fmt.Errorf("version not supported")
} }
if mbl.PastCount > 2 { if mbl.PastCount > 2 {
@ -150,94 +117,88 @@ func (mbl *MiniBlock) SanityCheck() error {
if mbl.PastCount == 0 { if mbl.PastCount == 0 {
return fmt.Errorf("miniblock must have tips") return fmt.Errorf("miniblock must have tips")
} }
if mbl.Height >= 0xffffffffff {
return fmt.Errorf("miniblock height not possible")
}
if mbl.PastCount == 2 && mbl.Past[0] == mbl.Past[1] {
return fmt.Errorf("tips cannot collide")
}
return nil return nil
} }
// serialize entire block ( 64 bytes ) // serialize entire block ( 64 bytes )
func (mbl *MiniBlock) Serialize() (result []byte) { func (mbl *MiniBlock) Serialize() (result []byte) {
result = make([]byte, MINIBLOCK_SIZE, MINIBLOCK_SIZE)
var scratch [8]byte
if err := mbl.SanityCheck(); err != nil { if err := mbl.SanityCheck(); err != nil {
panic(err) panic(err)
} }
result[0] = mbl.Version | mbl.PastCount<<6 var b bytes.Buffer
if mbl.Final {
if mbl.Odd { b.WriteByte(mbl.Version | mbl.PastCount<<6 | 0x20)
result[0] |= 1 << 4 } else {
} b.WriteByte(mbl.Version | mbl.PastCount<<6)
if mbl.Genesis {
result[0] |= 1 << 5
} }
binary.BigEndian.PutUint64(scratch[:], mbl.Timestamp) binary.Write(&b, binary.BigEndian, mbl.Timestamp)
copy(result[1:], scratch[2:]) // 1 + 6
for i, v := range mbl.Past { var scratch [8]byte
binary.BigEndian.PutUint32(result[7+i*4:], v) binary.BigEndian.PutUint64(scratch[:], mbl.Height)
b.Write(scratch[3:8]) // 1 + 5
for _, v := range mbl.Past {
binary.Write(&b, binary.BigEndian, v)
} }
copy(result[1+6+4+4:], mbl.KeyHash[:16]) // 1 + 6 + 4 + 4 + 16 b.Write(mbl.KeyHash[:16])
copy(result[1+6+4+4+16:], mbl.Check[:]) // 1 + 6 + 4 + 4 + 16 + 32 binary.Write(&b, binary.BigEndian, mbl.Flags)
copy(result[1+6+4+4+16+32:], mbl.Nonce[:]) // 1 + 6 + 4 + 4 + 16 + 32 + 5 = 68 bytes for _, v := range mbl.Nonce {
binary.Write(&b, binary.BigEndian, v)
}
return result return b.Bytes()
} }
//parse entire block completely //parse entire block completely
func (mbl *MiniBlock) Deserialize(buf []byte) (err error) { func (mbl *MiniBlock) Deserialize(buf []byte) (err error) {
var scratch [8]byte
if len(buf) < MINIBLOCK_SIZE { if len(buf) < MINIBLOCK_SIZE {
return fmt.Errorf("Expected %d bytes. Actual %d", MINIBLOCK_SIZE, len(buf)) return fmt.Errorf("Expected %d bytes. Actual %d", MINIBLOCK_SIZE, len(buf))
} }
if mbl.Version = buf[0] & 0xf; mbl.Version != 1 { if mbl.Version = buf[0] & 0x1f; mbl.Version != 1 {
return fmt.Errorf("unknown version '%d'", mbl.Version) return fmt.Errorf("unknown version '%d'", mbl.Version)
} }
mbl.PastCount = buf[0] >> 6 mbl.PastCount = buf[0] >> 6
if buf[0]&0x10 > 0 {
mbl.Odd = true
}
if buf[0]&0x20 > 0 { if buf[0]&0x20 > 0 {
mbl.Genesis = true mbl.Final = true
}
mbl.Timestamp = binary.BigEndian.Uint16(buf[1:])
mbl.Height = binary.BigEndian.Uint64(buf[0:]) & 0x000000ffffffffff
var b bytes.Buffer
b.Write(buf[8:])
for i := range mbl.Past {
if err = binary.Read(&b, binary.BigEndian, &mbl.Past[i]); err != nil {
return
}
} }
if err = mbl.SanityCheck(); err != nil { if err = mbl.SanityCheck(); err != nil {
return err return err
} }
if len(buf) != MINIBLOCK_SIZE { b.Read(mbl.KeyHash[:16])
return fmt.Errorf("Expecting %d bytes", MINIBLOCK_SIZE) if err = binary.Read(&b, binary.BigEndian, &mbl.Flags); err != nil {
return
} }
copy(scratch[2:], buf[1:]) for i := range mbl.Nonce {
mbl.Timestamp = binary.BigEndian.Uint64(scratch[:]) if err = binary.Read(&b, binary.BigEndian, &mbl.Nonce[i]); err != nil {
return
for i := range mbl.Past { }
mbl.Past[i] = binary.BigEndian.Uint32(buf[7+i*4:])
} }
copy(mbl.KeyHash[:], buf[15:15+16])
copy(mbl.Check[:], buf[15+16:])
copy(mbl.Nonce[:], buf[15+16+32:])
mbl.Height = int64(binary.BigEndian.Uint64(mbl.Check[:]))
return return
} }
// checks for basic sanity
func (mbl *MiniBlock) IsSafe() bool {
id := mbl.GetMiniID()
if id == mbl.Past[0] {
//return fmt.Errorf("Self Collision")
return false
}
if mbl.PastCount == 2 && id == mbl.Past[1] {
//return fmt.Errorf("Self Collision")
return false
}
return true
}

View File

@ -16,7 +16,6 @@
package block package block
import "time"
import "bytes" import "bytes"
import "testing" import "testing"
import "crypto/rand" import "crypto/rand"
@ -25,11 +24,7 @@ func Test_blockmini_serde(t *testing.T) {
var random_data [MINIBLOCK_SIZE]byte var random_data [MINIBLOCK_SIZE]byte
random_data[0] = 0xa1 random_data[0] = 0x41
for i := byte(1); i < 32; i++ {
random_data[i] = 0
}
var bl, bl2 MiniBlock var bl, bl2 MiniBlock
if err := bl2.Deserialize(random_data[:]); err != nil { if err := bl2.Deserialize(random_data[:]); err != nil {
@ -43,8 +38,6 @@ func Test_blockmini_serde(t *testing.T) {
t.Fatalf("error during serdes %x", random_data) t.Fatalf("error during serdes %x", random_data)
} }
//t.Logf("bl1 %+v\n",bl)
} }
func Test_blockmini_serdes(t *testing.T) { func Test_blockmini_serdes(t *testing.T) {
@ -55,7 +48,7 @@ func Test_blockmini_serdes(t *testing.T) {
if _, err := rand.Read(random_data[:]); err != nil { if _, err := rand.Read(random_data[:]); err != nil {
t.Fatalf("error reading random number %s", err) t.Fatalf("error reading random number %s", err)
} }
random_data[0] = 0xa1 random_data[0] = 0x41
var bl, bl2 MiniBlock var bl, bl2 MiniBlock
@ -78,29 +71,3 @@ func Test_blockmini_serdes(t *testing.T) {
} }
} }
// test all invalid edge cases, which will return error
func Test_Representable_Time(t *testing.T) {
var bl, bl2 MiniBlock
bl.Version = 1
bl.PastCount = 2
bl.Timestamp = 0xffffffffffff
serialized := bl.Serialize()
if err := bl2.Deserialize(serialized); err != nil {
t.Fatalf("error during serdes")
}
if bl.Timestamp != bl2.Timestamp {
t.Fatalf("timestamp corruption")
}
timestamp := time.Unix(0, int64(bl.Timestamp*uint64(time.Millisecond)))
if timestamp.Year() != 2121 {
t.Fatalf("corruption in timestamp representing year 2121")
}
t.Logf("time representable is %s\n", timestamp.UTC())
}

View File

@ -19,34 +19,27 @@ package block
import "fmt" import "fmt"
import "sort" import "sort"
import "sync" import "sync"
import "strings"
//import "runtime/debug"
import "encoding/binary"
//import "golang.org/x/crypto/sha3"
import "github.com/deroproject/derohe/cryptography/crypto"
//import "github.com/deroproject/derohe/astrobwt"
type MiniBlocksCollection struct { type MiniBlocksCollection struct {
Collection map[uint32]MiniBlock Collection map[MiniBlockKey][]MiniBlock
sync.RWMutex sync.RWMutex
} }
// create a collection // create a collection
func CreateMiniBlockCollection() *MiniBlocksCollection { func CreateMiniBlockCollection() *MiniBlocksCollection {
return &MiniBlocksCollection{Collection: map[uint32]MiniBlock{}} return &MiniBlocksCollection{Collection: map[MiniBlockKey][]MiniBlock{}}
} }
// purge all heights less than this height // purge all heights less than this height
func (c *MiniBlocksCollection) PurgeHeight(height int64) (purge_count int) { func (c *MiniBlocksCollection) PurgeHeight(height int64) (purge_count int) {
if height < 0 {
return
}
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
for k, mbl := range c.Collection { for k, _ := range c.Collection {
if mbl.Height <= height { if k.Height <= uint64(height) {
purge_count++ purge_count++
delete(c.Collection, k) delete(c.Collection, k)
} }
@ -55,9 +48,14 @@ func (c *MiniBlocksCollection) PurgeHeight(height int64) (purge_count int) {
} }
func (c *MiniBlocksCollection) Count() int { func (c *MiniBlocksCollection) Count() int {
c.Lock() c.RLock()
defer c.Unlock() defer c.RUnlock()
return len(c.Collection) count := 0
for _, v := range c.Collection {
count += len(v)
}
return count
} }
// check if already inserted // check if already inserted
@ -67,432 +65,65 @@ func (c *MiniBlocksCollection) IsAlreadyInserted(mbl MiniBlock) bool {
// check if collision will occur // check if collision will occur
func (c *MiniBlocksCollection) IsCollision(mbl MiniBlock) bool { func (c *MiniBlocksCollection) IsCollision(mbl MiniBlock) bool {
uid := mbl.GetMiniID()
c.RLock() c.RLock()
defer c.RUnlock() defer c.RUnlock()
if _, ok := c.Collection[uid]; ok { return c.isCollisionnolock(mbl)
//fmt.Printf("collision uid %08X %s %08X %s stack %s\n",uid,mbl.GetHash(), col.GetMiniID(), col.GetHash(),debug.Stack()) }
return true
// this assumes that we are already locked
func (c *MiniBlocksCollection) isCollisionnolock(mbl MiniBlock) bool {
mbls := c.Collection[mbl.GetKey()]
for i := range mbls {
if mbl == mbls[i] {
return true
}
} }
return false return false
} }
// check whether the miniblock is connected
func (c *MiniBlocksCollection) IsConnected(mbl MiniBlock) bool {
if mbl.Genesis {
return true
}
c.RLock()
defer c.RUnlock()
for i := uint8(0); i < mbl.PastCount; i++ {
if _, ok := c.Collection[mbl.Past[i]]; !ok {
return false
}
}
return true
}
// get distance from base
func (c *MiniBlocksCollection) CalculateDistance(mbl MiniBlock) uint32 {
if mbl.Genesis {
return 0
}
c.RLock()
defer c.RUnlock()
max_distance := uint32(0)
for i := uint8(0); i < mbl.PastCount; i++ {
if prev, ok := c.Collection[mbl.Past[i]]; !ok {
panic("past should be present")
} else if prev.Distance > max_distance {
max_distance = prev.Distance
}
}
return max_distance
}
func (c *MiniBlocksCollection) Get(id uint32) (mbl MiniBlock) {
c.RLock()
defer c.RUnlock()
var ok bool
if mbl, ok = c.Collection[id]; !ok {
panic("id requested should be present")
}
return mbl
}
// insert a miniblock // insert a miniblock
func (c *MiniBlocksCollection) InsertMiniBlock(mbl MiniBlock) (err error, result bool) { func (c *MiniBlocksCollection) InsertMiniBlock(mbl MiniBlock) (err error, result bool) {
if c.IsCollision(mbl) { if mbl.Final {
return fmt.Errorf("collision %x", mbl.Serialize()), false return fmt.Errorf("Final cannot be inserted"), false
} }
if !c.IsConnected(mbl) {
return fmt.Errorf("not connected"), false
}
if mbl.Genesis && mbl.Odd {
return fmt.Errorf("genesis cannot be odd height"), false
}
prev_distance := c.CalculateDistance(mbl)
hash := mbl.GetHash()
uid := binary.BigEndian.Uint32(hash[:])
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
if _, ok := c.Collection[uid]; ok { if c.isCollisionnolock(mbl) {
return fmt.Errorf("collision1"), false return fmt.Errorf("collision %x", mbl.Serialize()), false
} }
if mbl.Genesis { c.Collection[mbl.GetKey()] = append(c.Collection[mbl.GetKey()], mbl)
mbl.Height = int64(binary.BigEndian.Uint64(mbl.Check[:]))
} else {
for i := uint8(0); i < mbl.PastCount; i++ {
if prev, ok := c.Collection[mbl.Past[i]]; !ok {
return fmt.Errorf("no past found"), false
} else {
if mbl.Timestamp < prev.Timestamp {
return fmt.Errorf("timestamp less than parent"), false // childs timestamp cannot be less than parent, atleast one is fudging
}
mbl.PastMiniBlocks = append(mbl.PastMiniBlocks, prev)
mbl.Height = prev.Height
}
}
if mbl.Odd != (prev_distance%2 == 1) {
return fmt.Errorf("invalid odd status prev %d odd %+v", prev_distance, mbl.Odd), false
}
mbl.Distance = prev_distance + 1
}
c.Collection[uid] = mbl
return nil, true return nil, true
} }
// get all the genesis blocks // get all the genesis blocks
func (c *MiniBlocksCollection) GetAllGenesisMiniBlocks() (mbls []MiniBlock) { func (c *MiniBlocksCollection) GetAllMiniBlocks(key MiniBlockKey) (mbls []MiniBlock) {
c.Lock() c.RLock()
defer c.Unlock() defer c.RUnlock()
for _, mbl := range c.Collection { for _, mbl := range c.Collection[key] {
if mbl.Genesis { mbls = append(mbls, mbl)
mbls = append(mbls, mbl)
}
} }
return return
} }
// get all the tips from the map, this is n²
func (c *MiniBlocksCollection) GetAllTips() (mbls []MiniBlock) {
c.Lock()
defer c.Unlock()
clone := map[uint32]MiniBlock{}
clone_list := make([]MiniBlock, 0, 64)
for k, v := range c.Collection {
clone[k] = v
clone_list = append(clone_list, v)
}
for _, mbl := range clone_list {
if mbl.Genesis {
continue // genesis tips do no have valid past
}
for i := uint8(0); i < mbl.PastCount; i++ {
delete(clone, mbl.Past[i])
}
}
for _, v := range clone {
mbls = append(mbls, v)
}
mbls = MiniBlocks_SortByDistanceDesc(mbls)
return
}
// get all the tips from the map, this is atleast O(n) // get all the tips from the map, this is atleast O(n)
func (c *MiniBlocksCollection) GetAllTipsAtHeight(height int64) (mbls []MiniBlock) { func (c *MiniBlocksCollection) GetAllKeys(height int64) (keys []MiniBlockKey) {
c.Lock() c.RLock()
defer c.Unlock() defer c.RUnlock()
clone := map[uint32]MiniBlock{} for k := range c.Collection {
var clone_list []MiniBlock if k.Height == uint64(height) {
for k, v := range c.Collection { keys = append(keys, k)
if v.Height == height {
clone[k] = v
clone_list = append(clone_list, v)
} }
} }
for _, mbl := range clone_list { sort.SliceStable(keys, func(i, j int) bool { // sort descending on the basis of work done
if mbl.Genesis { return len(c.Collection[keys[i]]) > len(c.Collection[keys[j]])
continue // genesis tips do no have valid past
}
for i := uint8(0); i < mbl.PastCount; i++ {
delete(clone, mbl.Past[i])
}
}
for _, v := range clone {
mbls = append(mbls, v)
}
mbls = MiniBlocks_SortByDistanceDesc(mbls)
return
}
// this works in all case
func (c *MiniBlocksCollection) GetGenesisFromMiniBlock(mbl MiniBlock) (genesis []MiniBlock) {
if mbl.Genesis {
genesis = append(genesis, mbl)
return
}
if len(mbl.PastMiniBlocks) >= 1 { // we do not need locks as all history is connected
return GetGenesisFromMiniBlock(mbl)
}
c.Lock()
defer c.Unlock()
var tmp_genesis []MiniBlock
for i := uint8(0); i < mbl.PastCount; i++ {
if pmbl, ok := c.Collection[mbl.Past[i]]; ok {
tmp_genesis = append(tmp_genesis, GetGenesisFromMiniBlock(pmbl)...)
} else {
return
}
}
return MiniBlocks_Unique(tmp_genesis)
}
// this works in all cases, but it may return truncated history,all returns must be checked for connectivity
func (c *MiniBlocksCollection) GetEntireMiniBlockHistory(mbl MiniBlock) (history []MiniBlock) {
history = make([]MiniBlock, 0, 128)
if mbl.Genesis {
history = append(history, mbl)
return
}
if len(mbl.PastMiniBlocks) >= 1 { // we do not need locks as all history is connected
return GetEntireMiniBlockHistory(mbl)
}
c.Lock()
defer c.Unlock()
for i := uint8(0); i < mbl.PastCount; i++ {
if pmbl, ok := c.Collection[mbl.Past[i]]; ok {
history = append(history, GetEntireMiniBlockHistory(pmbl)...)
} else {
return
}
}
history = append(history, mbl) // add self
unique := MiniBlocks_Unique(history)
return MiniBlocks_SortByTimeAsc(unique)
}
// gets the genesis from the tips
// this function only works, if the miniblock has been expanded
func GetGenesisFromMiniBlock(mbl MiniBlock) (genesis []MiniBlock) {
if mbl.Genesis {
genesis = append(genesis, mbl)
return
}
var queue []MiniBlock
queue = append(queue, mbl.PastMiniBlocks...)
for len(queue) > 0 {
item := queue[0]
queue = queue[1:] // Dequeue
if item.Genesis {
genesis = append(genesis, item)
} else {
queue = append(queue, item.PastMiniBlocks...)
}
}
return
}
// get entire history,its in sorted form
func GetEntireMiniBlockHistory(mbls ...MiniBlock) (history []MiniBlock) {
queue := make([]MiniBlock, 0, 128)
queue = append(queue, mbls...)
history = make([]MiniBlock, 0, 128)
unique := make([]MiniBlock, 0, 128)
unique_map := map[crypto.Hash]MiniBlock{}
for len(queue) > 0 {
item := queue[0]
queue = queue[1:] // Dequeue
if _, ok := unique_map[item.GetHash()]; !ok {
unique_map[item.GetHash()] = item
history = append(history, item) //mini blocks might be duplicated
if !item.Genesis {
queue = append(queue, item.PastMiniBlocks...)
}
}
}
for _, v := range unique_map {
unique = append(unique, v)
}
history = MiniBlocks_Unique(history)
if len(unique) != len(history) {
panic("result mismatch")
}
history = MiniBlocks_SortByTimeAsc(history) // sort on the basis of timestamps
return
}
// this sorts by distance, in descending order
// if distance is equal, then it sorts by its id which is collision free
func MiniBlocks_SortByDistanceDesc(mbls []MiniBlock) (sorted []MiniBlock) {
sorted = make([]MiniBlock, 0, len(mbls))
sorted = append(sorted, mbls...)
sort.SliceStable(sorted, func(i, j int) bool { // sort descending on the basis of Distance
if sorted[i].Distance == sorted[j].Distance {
return sorted[i].GetMiniID() > sorted[j].GetMiniID() // higher mini id first
}
return sorted[i].Distance > sorted[j].Distance
}) })
return sorted
}
// this sorts by timestamp,ascending order
// if timestamp is equal, then it sorts by its id which is collision free
func MiniBlocks_SortByTimeAsc(mbls []MiniBlock) (sorted []MiniBlock) {
sorted = make([]MiniBlock, 0, len(mbls))
sorted = append(sorted, mbls...)
sort.SliceStable(sorted, func(i, j int) bool { // sort on the basis of timestamps
if sorted[i].Timestamp == sorted[j].Timestamp {
return sorted[i].GetMiniID() < sorted[j].GetMiniID()
}
return sorted[i].Timestamp < sorted[j].Timestamp
})
return sorted
}
func MiniBlocks_Unique(mbls []MiniBlock) (unique []MiniBlock) {
unique = make([]MiniBlock, 0, len(mbls))
unique_map := map[crypto.Hash]MiniBlock{}
for _, mbl := range mbls {
unique_map[mbl.GetHash()] = mbl
}
for _, v := range unique_map {
unique = append(unique, v)
}
return
}
// will filter the mbls having the specific tips
// this will also remove any blocks which do not refer to base
func MiniBlocks_FilterOnlyGenesis(mbls []MiniBlock, tips []crypto.Hash) (result []MiniBlock) {
var baselist []MiniBlock
for _, mbl := range mbls {
if mbl.Genesis {
baselist = append(baselist, mbl)
}
}
switch len(tips) {
case 0:
panic("atleast 1 tip must be present")
case 1:
pid1 := binary.BigEndian.Uint32(tips[0][:])
return MiniBlocks_Filter(baselist, []uint32{pid1})
case 2:
pid1 := binary.BigEndian.Uint32(tips[0][:])
pid2 := binary.BigEndian.Uint32(tips[1][:])
return MiniBlocks_Filter(baselist, []uint32{pid1, pid2})
default:
panic("only max 2 tips are supported")
}
}
/*
// this will filter if the blocks have any pids
// this will remove any nonbase blocks
func MiniBlocks_FilterPidsSkipGenesis(mbls []MiniBlock, pids []uint32) (result []MiniBlock) {
var nongenesislist []MiniBlock
for _, mbl := range mbls {
if !mbl.Genesis {
nongenesislist = append(nongenesislist, mbl)
}
}
return MiniBlocks_Filter(nongenesislist, pids)
}
*/
// this will filter if the blocks have any pids
func MiniBlocks_Filter(mbls []MiniBlock, pids []uint32) (result []MiniBlock) {
switch len(pids) {
case 0:
panic("atleast 1 pid must be present")
case 1:
pid1 := pids[0]
for _, mbl := range mbls {
if mbl.PastCount == uint8(len(pids)) && mbl.HasPid(pid1) {
result = append(result, mbl)
}
}
case 2:
pid1 := pids[0]
pid2 := pids[1]
for _, mbl := range mbls {
if mbl.PastCount == uint8(len(pids)) && mbl.HasPid(pid1) && mbl.HasPid(pid2) {
result = append(result, mbl)
}
}
default:
panic("only max 2 tips are supported")
}
return return
} }
// draw out a dot graph
func (c *MiniBlocksCollection) Graph() string {
w := new(strings.Builder)
w.WriteString("digraph miniblock_graphs { \n")
for _, mbl := range c.Collection { // draw all nodes
color := "green"
if mbl.Genesis {
color = "white"
}
w.WriteString(fmt.Sprintf("node [ fontsize=12 style=filled ]\n{\n"))
w.WriteString(fmt.Sprintf("L%08x [ fillcolor=%s label = \"%08x %d height %d Odd %+v\" ];\n", mbl.GetMiniID(), color, mbl.GetMiniID(), 0, mbl.Distance, mbl.Odd))
w.WriteString(fmt.Sprintf("}\n"))
if !mbl.Genesis { // render connections
w.WriteString(fmt.Sprintf("L%08x -> L%08x ;\n", mbl.Past[0], mbl.GetMiniID()))
if mbl.PastCount == 2 {
w.WriteString(fmt.Sprintf("L%08x -> L%08x ;\n", mbl.Past[1], mbl.GetMiniID()))
}
}
}
w.WriteString("}\n")
return w.String()
}

View File

@ -18,18 +18,13 @@ package block
//import "bytes" //import "bytes"
import "testing" import "testing"
import "crypto/rand"
import "encoding/binary"
import "github.com/deroproject/derohe/cryptography/crypto"
// tests whether the purge is working as it should // tests whether the purge is working as it should
func Test_blockmini_purge(t *testing.T) { func Test_blockmini_purge(t *testing.T) {
c := CreateMiniBlockCollection() c := CreateMiniBlockCollection()
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
mbl := MiniBlock{Version: 1, Genesis: true, PastCount: 1} mbl := MiniBlock{Version: 1, Height: uint64(i), PastCount: 1}
rand.Read(mbl.Nonce[:]) // fill with randomness
binary.BigEndian.PutUint64(mbl.Check[:], uint64(i))
if err, ok := c.InsertMiniBlock(mbl); !ok { if err, ok := c.InsertMiniBlock(mbl); !ok {
t.Fatalf("error inserting miniblock err: %s", err) t.Fatalf("error inserting miniblock err: %s", err)
} }
@ -37,12 +32,14 @@ func Test_blockmini_purge(t *testing.T) {
c.PurgeHeight(5) // purge all miniblock <= height 5 c.PurgeHeight(5) // purge all miniblock <= height 5
if len(c.Collection) != 4 { if c.Count() != 4 {
t.Fatalf("miniblocks not purged") t.Fatalf("miniblocks not purged")
} }
for _, v := range c.Collection { for _, mbls := range c.Collection {
if v.Height <= 5 { for _, mbl := range mbls {
t.Fatalf("purge not working correctly") if mbl.Height <= 5 {
t.Fatalf("purge not working correctly")
}
} }
} }
} }
@ -52,13 +49,7 @@ func Test_blockmini_purge(t *testing.T) {
func Test_blockmini_collision(t *testing.T) { func Test_blockmini_collision(t *testing.T) {
c := CreateMiniBlockCollection() c := CreateMiniBlockCollection()
mbl := MiniBlock{Version: 1, Genesis: true, PastCount: 1} mbl := MiniBlock{Version: 1, PastCount: 1}
rand.Read(mbl.Nonce[:]) // fill with randomness
binary.BigEndian.PutUint64(mbl.Check[:], uint64(8))
if !c.IsConnected(mbl) { // even before inserting it should return connectd
t.Fatalf("genesis blocks are already connected")
}
if err, ok := c.InsertMiniBlock(mbl); !ok { if err, ok := c.InsertMiniBlock(mbl); !ok {
t.Fatalf("error inserting miniblock err: %s", err) t.Fatalf("error inserting miniblock err: %s", err)
@ -71,175 +62,4 @@ func Test_blockmini_collision(t *testing.T) {
if c.IsAlreadyInserted(mbl) != c.IsCollision(mbl) { if c.IsAlreadyInserted(mbl) != c.IsCollision(mbl) {
t.Fatalf("already inserted block not detected") t.Fatalf("already inserted block not detected")
} }
if !c.IsConnected(mbl) {
t.Fatalf("genesis blocks are already connected")
}
if c.CalculateDistance(mbl) != 0 {
t.Fatalf("genesis blocks should always be 0 distance")
}
}
// tests whether the timestamp sorting is working as it should
//
func Test_blockmini_timestampsorting(t *testing.T) {
for tries := 0; tries < 10000; tries++ {
c := CreateMiniBlockCollection()
total_count := 10
for i := 0; i < total_count; i++ {
mbl := MiniBlock{Version: 1, Genesis: true, PastCount: 1, Timestamp: uint64(256 - i)}
//rand.Read(mbl.Nonce[:]) // fill with randomness
binary.BigEndian.PutUint64(mbl.Check[:], uint64(i))
if err, ok := c.InsertMiniBlock(mbl); !ok {
t.Fatalf("error inserting miniblock err: %s", err)
}
//t.Logf("presorted %+v", mbl)
}
all_genesis := c.GetAllGenesisMiniBlocks()
if len(all_genesis) != total_count {
panic("corruption")
}
sorted_all_genesis := MiniBlocks_SortByTimeAsc(all_genesis)
for i := 0; i < len(sorted_all_genesis)-1; i++ {
//t.Logf("sorted %+v", sorted_all_genesis[i])
if sorted_all_genesis[i].Timestamp > sorted_all_genesis[i+1].Timestamp {
t.Fatalf("sorting of Timestamp failed")
}
}
// insert a miniblock which has timestamp collision
{
mbl := MiniBlock{Version: 1, Genesis: true, PastCount: 1, Timestamp: uint64(254)}
binary.BigEndian.PutUint64(mbl.Check[:], uint64(9900))
if err, ok := c.InsertMiniBlock(mbl); !ok {
t.Fatalf("error inserting miniblock err: %s", err)
}
}
all_genesis = c.GetAllGenesisMiniBlocks()
if len(all_genesis) != (total_count + 1) {
panic("corruption")
}
sorted_all_genesis = MiniBlocks_SortByTimeAsc(all_genesis)
for i := 0; i < len(sorted_all_genesis)-1; i++ {
//t.Logf("sorted %d %+v", sorted_all_genesis[i].GetMiniID(),sorted_all_genesis[i+1])
if sorted_all_genesis[i].Timestamp > sorted_all_genesis[i+1].Timestamp {
t.Fatalf("sorting of Timestamp failed")
}
}
if sorted_all_genesis[total_count-2].Height != 9900 { /// this element will be moved to this, if everything is current
t.Fatalf("test failed %+v", sorted_all_genesis[total_count-2])
}
}
}
// tests whether the distance sorting is working as it should
func Test_blockmini_distancesorting(t *testing.T) {
	var input []MiniBlock
	const count = 10

	// build miniblocks with strictly decreasing timestamp and distance
	for j := 0; j < count; j++ {
		blk := MiniBlock{Version: 1, Genesis: true, PastCount: 1, Timestamp: uint64(256 - j), Distance: uint32(256 - j)}
		binary.BigEndian.PutUint64(blk.Check[:], uint64(j))
		input = append(input, blk)
	}

	// insert a miniblock which has timestamp and distance collision
	collider := MiniBlock{Version: 1, Genesis: true, PastCount: 1, Timestamp: uint64(254), Distance: uint32(254)}
	collider.Height = int64(9900)
	input = append(input, collider)

	// after sorting, distances must be non-increasing
	sorted := MiniBlocks_SortByDistanceDesc(input)
	for j := 1; j < len(sorted); j++ {
		if sorted[j-1].Distance < sorted[j].Distance {
			t.Fatalf("sorting of Distance failed")
		}
	}

	if sorted[3].Height != 9900 { /// this element will be moved to this, if everything is current
		t.Fatalf("test failed %+v", sorted[3])
	}
}
// tests whether filtering is working as it should
func Test_MiniBlocks_Filter(t *testing.T) {
var mbls []MiniBlock
total_count := uint32(10)
for i := uint32(0); i < total_count; i++ {
mbl := MiniBlock{Version: 1, Genesis: true}
if i%2 == 0 {
mbl.PastCount = 1
mbl.Past[0] = i
} else {
mbl.PastCount = 2
mbl.Past[0] = i
mbl.Past[1] = i + 1
}
binary.BigEndian.PutUint64(mbl.Check[:], uint64(i))
mbls = append(mbls, mbl)
}
for i := uint32(0); i < total_count; i++ {
if i%2 == 0 {
result := MiniBlocks_Filter(mbls, []uint32{i})
if len(result) != 1 {
t.Fatalf("failed filter")
}
if result[0].PastCount != 1 || result[0].Past[0] != i {
t.Fatalf("failed filter")
}
} else {
result := MiniBlocks_Filter(mbls, []uint32{i, i + 1})
if len(result) != 1 {
t.Fatalf("failed filter")
}
if result[0].PastCount != 2 || result[0].Past[0] != i || result[0].Past[1] != i+1 {
t.Fatalf("failed filter")
}
}
}
for i := uint32(0); i < total_count; i++ {
if i%2 == 0 {
var tips [1]crypto.Hash
binary.BigEndian.PutUint32(tips[0][:], i)
result := MiniBlocks_FilterOnlyGenesis(mbls, tips[:])
if len(result) != 1 {
t.Fatalf("failed filter")
}
if result[0].PastCount != 1 || result[0].Past[0] != i {
t.Fatalf("failed filter")
}
} else {
var tips [2]crypto.Hash
binary.BigEndian.PutUint32(tips[0][:], i)
binary.BigEndian.PutUint32(tips[1][:], i+1)
result := MiniBlocks_FilterOnlyGenesis(mbls, tips[:])
if len(result) != 1 {
t.Fatalf("failed filter")
}
if result[0].PastCount != 2 || result[0].Past[0] != i || result[0].Past[1] != i+1 {
t.Fatalf("failed filter")
}
}
}
} }

View File

@ -89,7 +89,7 @@ type Blockchain struct {
simulator bool // is simulator mode simulator bool // is simulator mode
P2P_Block_Relayer func(*block.Complete_Block, uint64) // tell p2p to broadcast any block this daemon hash found P2P_Block_Relayer func(*block.Complete_Block, uint64) // tell p2p to broadcast any block this daemon hash found
P2P_MiniBlock_Relayer func(mbl []block.MiniBlock, peerid uint64) P2P_MiniBlock_Relayer func(mbl block.MiniBlock, peerid uint64)
RPC_NotifyNewBlock *sync.Cond // used to notify rpc that a new block has been found RPC_NotifyNewBlock *sync.Cond // used to notify rpc that a new block has been found
RPC_NotifyHeightChanged *sync.Cond // used to notify rpc that chain height has changed due to addition of block RPC_NotifyHeightChanged *sync.Cond // used to notify rpc that chain height has changed due to addition of block
@ -514,28 +514,22 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
} }
// verify everything related to miniblocks in one go // verify everything related to miniblocks in one go
{ if !chain.simulator {
if err = chain.Verify_MiniBlocks(*cbl.Bl); err != nil { if err = Verify_MiniBlocks(*cbl.Bl); err != nil { // verifies the miniblocks all refer to current block
return err, false return err, false
} }
if bl.Height != 0 { // a genesis block doesn't have miniblock if bl.Height != 0 { // a genesis block doesn't have miniblock
// verify hash of miniblock for corruption // verify hash of miniblock for corruption
if err = chain.Verify_MiniBlocks_HashCheck(cbl); err != nil { if err = chain.Verify_MiniBlocks_HashCheck(cbl); err != nil {
return err, false return err, false
} }
// check dynamic consensus rules
if err = chain.Check_Dynamism(cbl.Bl.MiniBlocks); err != nil {
return err, false
}
} }
for _, mbl := range bl.MiniBlocks { for _, mbl := range bl.MiniBlocks {
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(true, miner_hash) { if mbl.Final == false && !chain.IsAddressHashValid(true, miner_hash) {
err = fmt.Errorf("miner address not registered") err = fmt.Errorf("miner address not registered")
return err, false return err, false
} }
@ -720,6 +714,9 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
go func(j int) { go func(j int) {
defer sem.Release(1) defer sem.Release(1)
defer wg.Done() defer wg.Done()
if atomic.LoadInt32(&fail_count) >= 1 { // fail fast
return
}
if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, cbl.Txs[j], bl.Tips); err != nil { // transaction verification failed if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, cbl.Txs[j], bl.Tips); err != nil { // transaction verification failed
atomic.AddInt32(&fail_count, 1) // increase fail count by 1 atomic.AddInt32(&fail_count, 1) // increase fail count by 1
block_logger.Error(err, "tx nonce verification failed", "txid", cbl.Txs[j].GetHash()) block_logger.Error(err, "tx nonce verification failed", "txid", cbl.Txs[j].GetHash())
@ -749,6 +746,9 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
go func(j int) { go func(j int) {
defer sem.Release(1) defer sem.Release(1)
defer wg.Done() defer wg.Done()
if atomic.LoadInt32(&fail_count) >= 1 { // fail fast
return
}
if err := chain.Verify_Transaction_NonCoinbase(cbl.Txs[j]); err != nil { // transaction verification failed if err := chain.Verify_Transaction_NonCoinbase(cbl.Txs[j]); err != nil { // transaction verification failed
atomic.AddInt32(&fail_count, 1) // increase fail count by 1 atomic.AddInt32(&fail_count, 1) // increase fail count by 1
block_logger.Error(err, "tx verification failed", "txid", cbl.Txs[j].GetHash()) block_logger.Error(err, "tx verification failed", "txid", cbl.Txs[j].GetHash())
@ -1205,12 +1205,12 @@ func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) error {
} }
if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, chain.Get_TIPS()); err != nil { // transaction verification failed if err := chain.Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_version, tx, chain.Get_TIPS()); err != nil { // transaction verification failed
logger.V(1).Error(err, "Incoming TX nonce verification failed", "txid", txhash, "stacktrace", globals.StackTrace(false)) logger.V(2).Error(err, "Incoming TX nonce verification failed", "txid", txhash, "stacktrace", globals.StackTrace(false))
return fmt.Errorf("Incoming TX %s nonce verification failed, err %s", txhash, err) return fmt.Errorf("Incoming TX %s nonce verification failed, err %s", txhash, err)
} }
if err := chain.Verify_Transaction_NonCoinbase(tx); err != nil { if err := chain.Verify_Transaction_NonCoinbase(tx); err != nil {
logger.V(1).Error(err, "Incoming TX could not be verified", "txid", txhash) logger.V(2).Error(err, "Incoming TX could not be verified", "txid", txhash)
return fmt.Errorf("Incoming TX %s could not be verified, err %s", txhash, err) return fmt.Errorf("Incoming TX %s could not be verified, err %s", txhash, err)
} }
@ -1331,13 +1331,6 @@ func (chain *Blockchain) Rewind_Chain(rewind_count int) (result bool) {
chain.Lock() chain.Lock()
defer chain.Unlock() defer chain.Unlock()
// we must till we reach a safe point
// safe point is point where a single block exists at specific height
// this may lead us to rewinding a it more
//safe := false
// TODO we must fix safeness using the stable calculation
if rewind_count == 0 { if rewind_count == 0 {
return return
} }
@ -1345,28 +1338,23 @@ func (chain *Blockchain) Rewind_Chain(rewind_count int) (result bool) {
top_block_topo_index := chain.Load_TOPO_HEIGHT() top_block_topo_index := chain.Load_TOPO_HEIGHT()
rewinded := int64(0) rewinded := int64(0)
for { // rewind as many as possible for {
if top_block_topo_index-rewinded < 1 || rewinded >= int64(rewind_count) {
break
}
rewinded++
}
for { // rewinf till we reach a safe point
r, err := chain.Store.Topo_store.Read(top_block_topo_index - rewinded) r, err := chain.Store.Topo_store.Read(top_block_topo_index - rewinded)
if err != nil { if err != nil {
panic(err) panic(err)
} }
if chain.IsBlockSyncBlockHeight(r.BLOCK_ID) || r.Height == 1 { if top_block_topo_index-rewinded < 1 || rewinded >= int64(rewind_count) {
break break
} }
if r.Height == 1 {
break
}
rewinded++ rewinded++
} }
for i := int64(0); i != rewinded; i++ { for i := int64(0); i < rewinded; i++ {
chain.Store.Topo_store.Clean(top_block_topo_index - i) chain.Store.Topo_store.Clean(top_block_topo_index - i)
} }
@ -1381,11 +1369,14 @@ func (chain *Blockchain) CheckDagStructure(tips []crypto.Hash) bool {
for i := range tips { // first make sure all the tips are at same height for i := range tips { // first make sure all the tips are at same height
if chain.Load_Height_for_BL_ID(tips[0]) != chain.Load_Height_for_BL_ID(tips[i]) { if chain.Load_Height_for_BL_ID(tips[0]) != chain.Load_Height_for_BL_ID(tips[i]) {
return false return false
} }
} }
if len(tips) == 2 && tips[0] == tips[1] {
return false
}
switch len(tips) { switch len(tips) {
case 1: case 1:
past := chain.Get_Block_Past(tips[0]) past := chain.Get_Block_Past(tips[0])

View File

@ -17,6 +17,7 @@
package blockchain package blockchain
import "fmt" import "fmt"
import "math"
import "math/big" import "math/big"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
@ -96,17 +97,41 @@ func CheckPowHashBig(pow_hash crypto.Hash, big_difficulty_integer *big.Int) bool
return false return false
} }
const E = float64(2.71828182845905)
// hard code datatypes
func Diff(solvetime, blocktime, M int64, prev_diff int64) (diff int64) {
if blocktime <= 0 || solvetime <= 0 || M <= 0 {
panic("invalid parameters")
}
easypart := int64(math.Pow(E, ((1-float64(solvetime)/float64(blocktime))/float64(M))) * 10000)
diff = (prev_diff * easypart) / 10000
return diff
}
// big int implementation
func DiffBig(solvetime, blocktime, M int64, prev_diff *big.Int) (diff *big.Int) {
if blocktime <= 0 || solvetime <= 0 || M <= 0 {
panic("invalid parameters")
}
easypart := int64(math.Pow(E, ((1-float64(solvetime)/float64(blocktime))/float64(M))) * 10000)
diff = new(big.Int).Mul(prev_diff, new(big.Int).SetInt64(easypart))
diff.Div(diff, new(big.Int).SetUint64(10000))
return diff
}
// when creating a new block, current_time in utc + chain_block_time must be added // when creating a new block, current_time in utc + chain_block_time must be added
// while verifying the block, expected time stamp should be replaced from what is in blocks header // while verifying the block, expected time stamp should be replaced from what is in blocks header
// in DERO atlantis difficulty is based on previous tips // in DERO atlantis difficulty is based on previous tips
// get difficulty at specific tips, // get difficulty at specific tips,
// algorithm is as follows choose biggest difficulty tip (// division is integer and not floating point) // algorithm is agiven above
// diff = (parent_diff + (parent_diff / 100 * max(1 - (parent_timestamp - parent_parent_timestamp) // (chain_block_time*2//3), -1))
// this should be more thoroughly evaluated // this should be more thoroughly evaluated
// NOTE: we need to evaluate if the mining adversary gains something, if the they set the time diff to 1 // NOTE: we need to evaluate if the mining adversary gains something, if the they set the time diff to 1
// we need to do more simulations and evaluations // we need to do more simulations and evaluations
// difficulty is now processed at sec level, mean how many hashes are require per sec to reach block time // difficulty is now processed at sec level, mean how many hashes are require per sec to reach block time
// basica
func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int { func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int {
tips_string := "" tips_string := ""
@ -118,104 +143,12 @@ func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int {
return new(big.Int).SetBytes([]byte(diff_bytes.(string))) return new(big.Int).SetBytes([]byte(diff_bytes.(string)))
} }
var MinimumDifficulty *big.Int difficulty := Get_Difficulty_At_Tips(chain, tips)
change := new(big.Int)
step := new(big.Int)
if globals.IsMainnet() {
MinimumDifficulty = new(big.Int).SetUint64(config.Settings.MAINNET_MINIMUM_DIFFICULTY) // this must be controllable parameter
} else {
MinimumDifficulty = new(big.Int).SetUint64(config.Settings.TESTNET_MINIMUM_DIFFICULTY) // this must be controllable parameter
}
GenesisDifficulty := new(big.Int).SetUint64(1)
if len(tips) == 0 || chain.simulator == true {
return GenesisDifficulty
}
height := chain.Calculate_Height_At_Tips(tips)
// hard fork version 1 has difficulty set to 1
/*if 1 == chain.Get_Current_Version_at_Height(height) {
return new(big.Int).SetUint64(1)
}*/
/*
// if we are hardforking from 1 to 2
// we can start from high difficulty to find the right point
if height >= 1 && chain.Get_Current_Version_at_Height(height-1) == 1 && chain.Get_Current_Version_at_Height(height) == 2 {
if globals.IsMainnet() {
bootstrap_difficulty := new(big.Int).SetUint64(config.MAINNET_BOOTSTRAP_DIFFICULTY) // return bootstrap mainnet difficulty
rlog.Infof("Returning bootstrap difficulty %s at height %d", bootstrap_difficulty.String(), height)
return bootstrap_difficulty
} else {
bootstrap_difficulty := new(big.Int).SetUint64(config.TESTNET_BOOTSTRAP_DIFFICULTY)
rlog.Infof("Returning bootstrap difficulty %s at height %d", bootstrap_difficulty.String(), height)
return bootstrap_difficulty // return bootstrap difficulty for testnet
}
}
// if we are hardforking from 3 to 4
// we can start from high difficulty to find the right point
if height >= 1 && chain.Get_Current_Version_at_Height(height-1) <= 3 && chain.Get_Current_Version_at_Height(height) == 4 {
if globals.IsMainnet() {
bootstrap_difficulty := new(big.Int).SetUint64(config.MAINNET_BOOTSTRAP_DIFFICULTY_hf4) // return bootstrap mainnet difficulty
rlog.Infof("Returning bootstrap difficulty %s at height %d", bootstrap_difficulty.String(), height)
return bootstrap_difficulty
} else {
bootstrap_difficulty := new(big.Int).SetUint64(config.TESTNET_BOOTSTRAP_DIFFICULTY)
rlog.Infof("Returning bootstrap difficulty %s at height %d", bootstrap_difficulty.String(), height)
return bootstrap_difficulty // return bootstrap difficulty for testnet
}
}
*/
// until we have atleast 2 blocks, we cannot run the algo
if height < 3 && chain.Get_Current_Version_at_Height(height) <= 1 {
return MinimumDifficulty
}
// take the time from the most heavy block
biggest_difficulty := chain.Load_Block_Difficulty(tips[0])
parent_highest_time := chain.Load_Block_Timestamp(tips[0])
// find parents parents tip from the most heavy block's parent
parent_past := chain.Get_Block_Past(tips[0])
past_biggest_tip := parent_past[0]
parent_parent_highest_time := chain.Load_Block_Timestamp(past_biggest_tip)
if biggest_difficulty.Cmp(MinimumDifficulty) < 0 {
biggest_difficulty.Set(MinimumDifficulty)
}
block_time := config.BLOCK_TIME_MILLISECS
step.Div(biggest_difficulty, new(big.Int).SetUint64(100))
// create 3 ranges, used for physical verification
switch {
case (parent_highest_time - parent_parent_highest_time) <= block_time-1000: // increase diff
change.Add(change, step) // block was found earlier, increase diff
case (parent_highest_time - parent_parent_highest_time) >= block_time+1000: // decrease diff
change.Sub(change, step) // block was found late, decrease diff
change.Sub(change, step)
default: // if less than 1 sec deviation,use previous diff, ie change is zero
}
biggest_difficulty.Add(biggest_difficulty, change)
if biggest_difficulty.Cmp(MinimumDifficulty) < 0 { // we can never be below minimum difficulty
biggest_difficulty.Set(MinimumDifficulty)
}
if chain.cache_enabled { if chain.cache_enabled {
chain.cache_Get_Difficulty_At_Tips.Add(tips_string, string(biggest_difficulty.Bytes())) // set in cache chain.cache_Get_Difficulty_At_Tips.Add(tips_string, string(difficulty.Bytes())) // set in cache
} }
return biggest_difficulty return difficulty
} }
func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock) bool { func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock) bool {
@ -236,10 +169,6 @@ func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock
logger.Panicf("Difficuly mismatch between big and uint64 diff ") logger.Panicf("Difficuly mismatch between big and uint64 diff ")
}*/ }*/
if mbl.Odd { // odd miniblocks have twice the difficulty
block_difficulty.Mul(new(big.Int).Set(block_difficulty), new(big.Int).SetUint64(2))
}
if CheckPowHashBig(PoW, block_difficulty) == true { if CheckPowHashBig(PoW, block_difficulty) == true {
if chain.cache_enabled { if chain.cache_enabled {
chain.cache_IsMiniblockPowValid.Add(fmt.Sprintf("%s", cachekey), true) // set in cache chain.cache_IsMiniblockPowValid.Add(fmt.Sprintf("%s", cachekey), true) // set in cache
@ -249,9 +178,71 @@ func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock
return false return false
} }
// this function calculates difficulty on the basis of previous difficulty and number of blocks type DiffProvider interface {
// THIS is the ideal algorithm for us as it will be optimal based on the number of orphan blocks Load_Block_Height(crypto.Hash) int64
// we may deploy it when the block reward becomes insignificant in comparision to fees Load_Block_Difficulty(crypto.Hash) *big.Int
// basically tail emission kicks in or we need to optimally increase number of blocks Load_Block_Timestamp(crypto.Hash) uint64
// the algorithm does NOT work if the network has a single miner !!! Get_Block_Past(crypto.Hash) []crypto.Hash
// this algorithm will work without the concept of time }
func Get_Difficulty_At_Tips(source DiffProvider, tips []crypto.Hash) *big.Int {
var MinimumDifficulty *big.Int
if globals.IsMainnet() {
MinimumDifficulty = new(big.Int).SetUint64(config.Settings.MAINNET_MINIMUM_DIFFICULTY) // this must be controllable parameter
} else {
MinimumDifficulty = new(big.Int).SetUint64(config.Settings.TESTNET_MINIMUM_DIFFICULTY) // this must be controllable parameter
}
GenesisDifficulty := new(big.Int).SetUint64(1)
if chain, ok := source.(*Blockchain); ok {
if chain.simulator == true {
return GenesisDifficulty
}
}
if len(tips) == 0 {
return GenesisDifficulty
}
height := int64(0)
for i := range tips {
past_height := source.Load_Block_Height(tips[i])
if past_height < 0 {
panic(fmt.Errorf("could not find height for blid %s", tips[i]))
}
if height <= past_height {
height = past_height
}
}
height++
//above height code is equivalent to below code
//height := chain.Calculate_Height_At_Tips(tips)
// until we have atleast 2 blocks, we cannot run the algo
if height < 3 {
return MinimumDifficulty
}
tip_difficulty := source.Load_Block_Difficulty(tips[0])
tip_time := source.Load_Block_Timestamp(tips[0])
parents := source.Get_Block_Past(tips[0])
parent_time := source.Load_Block_Timestamp(parents[0])
block_time := int64(config.BLOCK_TIME_MILLISECS)
solve_time := int64(tip_time - parent_time)
if solve_time > (block_time * 2) { // there should not be sudden decreases
solve_time = block_time * 2
}
M := int64(8)
difficulty := DiffBig(solve_time, block_time, M, tip_difficulty)
if difficulty.Cmp(MinimumDifficulty) < 0 { // we can never be below minimum difficulty
difficulty.Set(MinimumDifficulty)
}
return difficulty
}

View File

@ -25,6 +25,7 @@ import "encoding/binary"
import "golang.org/x/xerrors" import "golang.org/x/xerrors"
import "golang.org/x/time/rate" import "golang.org/x/time/rate"
import "golang.org/x/crypto/sha3"
// this file creates the blobs which can be used to mine new blocks // this file creates the blobs which can be used to mine new blocks
@ -93,6 +94,13 @@ func (chain *Blockchain) SortTips(tips []crypto.Hash) (sorted []crypto.Hash) {
return return
} }
// used by tip
func convert_uint32_to_crypto_hash(i uint32) crypto.Hash {
var h crypto.Hash
binary.BigEndian.PutUint32(h[:], i)
return h
}
//NOTE: this function is quite big since we do a lot of things in preparation of next blocks //NOTE: this function is quite big since we do a lot of things in preparation of next blocks
func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl *block.Complete_Block, bl block.Block, err error) { func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl *block.Complete_Block, bl block.Block, err error) {
//chain.Lock() //chain.Lock()
@ -118,34 +126,36 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl
var tips []crypto.Hash var tips []crypto.Hash
// lets fill in the tips from miniblocks, list is already sorted // lets fill in the tips from miniblocks, list is already sorted
if mbls := chain.MiniBlocks.GetAllTipsAtHeight(chain.Get_Height() + 1); len(mbls) > 0 { if keys := chain.MiniBlocks.GetAllKeys(chain.Get_Height() + 1); len(keys) > 0 {
mbls = block.MiniBlocks_SortByDistanceDesc(mbls) for _, key := range keys {
for _, mbl := range mbls { mbls := chain.MiniBlocks.GetAllMiniBlocks(key)
tips = tips[:0] if len(mbls) < 1 {
gens := block.GetGenesisFromMiniBlock(mbl)
if len(gens) <= 0 { // if tip cannot be resolved to genesis skip it
continue continue
} }
mbl := mbls[0]
var tip crypto.Hash tips = tips[:0]
copy(tip[:], gens[0].Check[8:8+12]) tip := convert_uint32_to_crypto_hash(mbl.Past[0])
if ehash, ok := chain.ExpandMiniBlockTip(tip); ok { if ehash, ok := chain.ExpandMiniBlockTip(tip); ok {
tips = append(tips, ehash) tips = append(tips, ehash)
} else { } else {
continue continue
} }
if gens[0].PastCount == 2 { if mbl.PastCount == 2 {
copy(tip[:], gens[0].Check[8+12:]) tip = convert_uint32_to_crypto_hash(mbl.Past[1])
if ehash, ok := chain.ExpandMiniBlockTip(tip); ok { if ehash, ok := chain.ExpandMiniBlockTip(tip); ok {
tips = append(tips, ehash) tips = append(tips, ehash)
} else { } else {
continue continue
} }
} }
if mbl.PastCount == 2 && mbl.Past[0] == mbl.Past[1] {
continue
}
break break
} }
} }
if len(tips) == 0 { if len(tips) == 0 {
@ -209,11 +219,28 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl
logger.V(8).Info("mempool returned tx list", "tx_list", tx_hash_list_sorted) logger.V(8).Info("mempool returned tx list", "tx_list", tx_hash_list_sorted)
var pre_check cbl_verify // used to verify sanity of new block var pre_check cbl_verify // used to verify sanity of new block
history_tx := map[crypto.Hash]bool{} // used to build history of recent blocks
for _, h := range history_array {
var history_bl *block.Block
if history_bl, err = chain.Load_BL_FROM_ID(h); err != nil {
return
}
for i := range history_bl.Tx_hashes {
history_tx[history_bl.Tx_hashes[i]] = true
}
}
for i := range tx_hash_list_sorted { for i := range tx_hash_list_sorted {
if (sizeoftxs + tx_hash_list_sorted[i].Size) > (99*config.STARGATE_HE_MAX_BLOCK_SIZE)/100 { // limit block to max possible if (sizeoftxs + tx_hash_list_sorted[i].Size) > (config.STARGATE_HE_MAX_BLOCK_SIZE - 102400) { // limit block to max possible
break break
} }
if _, ok := history_tx[tx_hash_list_sorted[i].Hash]; ok {
logger.V(8).Info("not selecting tx since it is already mined", "txid", tx_hash_list_sorted[i].Hash)
continue
}
if tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash); tx != nil { if tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash); tx != nil {
if int64(tx.Height) < height { if int64(tx.Height) < height {
if history[tx.BLID] != true { if history[tx.BLID] != true {
@ -295,54 +322,19 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address) (cbl
} }
// lets fill in the miniblocks, list is already sorted // lets fill in the miniblocks, list is already sorted
if mbls := chain.MiniBlocks.GetAllTipsAtHeight(height); len(mbls) > 0 {
mbls = block.MiniBlocks_SortByDistanceDesc(mbls) var key block.MiniBlockKey
max_distance := uint32(0) key.Height = bl.Height
tipcount := 0 key.Past0 = binary.BigEndian.Uint32(bl.Tips[0][:])
for _, mbl := range mbls { if len(bl.Tips) == 2 {
if tipcount == 2 { //we can only support max 2 tips key.Past1 = binary.BigEndian.Uint32(bl.Tips[1][:])
break }
}
gens := block.GetGenesisFromMiniBlock(mbl) if mbls := chain.MiniBlocks.GetAllMiniBlocks(key); len(mbls) > 0 {
if len(gens) <= 0 { // if tip cannot be resolved to genesis skip it if uint64(len(mbls)) > config.BLOCK_TIME-1 {
continue mbls = mbls[:config.BLOCK_TIME-1]
}
gens_filtered := block.MiniBlocks_FilterOnlyGenesis(gens, bl.Tips)
if len(gens_filtered) <= 0 { // no valid genesis having same tips
continue
}
if len(gens) != len(gens_filtered) { // more than 1 genesis, with some not pointing to same tips
continue
}
if max_distance < mbl.Distance {
max_distance = mbl.Distance
}
if mbl.Genesis && max_distance-mbl.Distance > miniblock_genesis_distance { // only 0 distance is supported for genesis
continue
}
if !mbl.Genesis && max_distance-mbl.Distance > miniblock_normal_distance { // only 3 distance is supported
continue
}
history := block.GetEntireMiniBlockHistory(mbl)
if !mbl.Genesis && len(history) < 2 {
logger.V(1).Error(nil, "history missing. this should never occur", "mbl", fmt.Sprintf("%+v", mbl))
continue
}
bl.MiniBlocks = append(bl.MiniBlocks, history...)
tipcount++
} }
bl.MiniBlocks = mbls
if len(bl.MiniBlocks) > 1 { // we need to unique and sort them by time
bl.MiniBlocks = block.MiniBlocks_SortByTimeAsc(block.MiniBlocks_Unique(bl.MiniBlocks))
}
} }
cbl.Bl = &bl cbl.Bl = &bl
@ -357,63 +349,36 @@ func ConvertBlockToMiniblock(bl block.Block, miniblock_miner_address rpc.Address
if len(bl.Tips) == 0 { if len(bl.Tips) == 0 {
panic("Tips cannot be zero") panic("Tips cannot be zero")
} }
mbl.Height = bl.Height
mbl.Timestamp = uint64(globals.Time().UTC().UnixMilli()) timestamp := uint64(globals.Time().UTC().UnixMilli())
diff := timestamp - bl.Timestamp
if len(bl.MiniBlocks) == 0 { mbl.Timestamp = 0xffff
mbl.Genesis = true if diff > 0xffff {
mbl.PastCount = byte(len(bl.Tips)) mbl.Timestamp = 0xffff
for i := range bl.Tips {
mbl.Past[i] = binary.BigEndian.Uint32(bl.Tips[i][:])
}
} else {
tmp_collection := block.CreateMiniBlockCollection()
for _, tmbl := range bl.MiniBlocks {
if err, ok := tmp_collection.InsertMiniBlock(tmbl); !ok {
logger.Error(err, "error converting block to miniblock")
panic("not possible, logical flaw")
}
}
tips := tmp_collection.GetAllTips()
if len(tips) > 2 || len(tips) == 0 {
logger.Error(nil, "block contains miniblocks for more tips than possible", "count", len(tips))
panic("not possible, logical flaw")
}
for i, tip := range tips {
mbl.PastCount++
tiphash := tip.GetHash()
mbl.Past[i] = binary.BigEndian.Uint32(tiphash[:])
if tip.Timestamp >= uint64(globals.Time().UTC().UnixMilli()) {
mbl.Timestamp = tip.Timestamp + 1
}
}
prev_distance := tips[0].Distance
if len(tips) == 2 && prev_distance < tips[1].Distance {
prev_distance = tips[1].Distance
}
mbl.Odd = (prev_distance%2 == 1) // set odd even height
} }
if mbl.Genesis { mbl.PastCount = byte(len(bl.Tips))
binary.BigEndian.PutUint64(mbl.Check[:], bl.Height) for i := range bl.Tips {
copy(mbl.Check[8:], bl.Tips[0][0:12]) mbl.Past[i] = binary.BigEndian.Uint32(bl.Tips[i][:])
if len(bl.Tips) == 2 {
copy(mbl.Check[8+12:], bl.Tips[1][0:12])
}
} else {
txshash := bl.GetTXSHash()
block_header_hash := bl.GetHashWithoutMiniBlocks()
for i := range mbl.Check {
mbl.Check[i] = txshash[i] ^ block_header_hash[i]
}
} }
miner_address_hashed_key := graviton.Sum(miniblock_miner_address.Compressed()) if uint64(len(bl.MiniBlocks)) != config.BLOCK_TIME-1 {
copy(mbl.KeyHash[:], miner_address_hashed_key[:]) miner_address_hashed_key := graviton.Sum(miniblock_miner_address.Compressed())
copy(mbl.KeyHash[:], miner_address_hashed_key[:])
} else {
mbl.Final = true
block_header_hash := sha3.Sum256(bl.Serialize()) // note here this block is not present
for i := range mbl.KeyHash {
mbl.KeyHash[i] = block_header_hash[i]
}
}
// leave the flags for users as per their request
for i := range mbl.Nonce {
mbl.Nonce[i] = globals.Global_Random.Uint32() // fill with randomness
}
globals.Global_Random.Read(mbl.Nonce[:]) // fill with randomness
return return
} }
@ -451,10 +416,13 @@ func (chain *Blockchain) Create_new_block_template_mining(miniblock_miner_addres
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(false, miner_hash) { if !mbl.Final {
logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins") if !chain.IsAddressHashValid(false, miner_hash) {
return logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return
}
} }
miniblock_blob = fmt.Sprintf("%x", mbl.Serialize()) miniblock_blob = fmt.Sprintf("%x", mbl.Serialize())
@ -473,7 +441,7 @@ var duplicate_height_check = map[uint64]bool{}
// otherwise the miner is trying to attack the network // otherwise the miner is trying to attack the network
func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte) (mblid crypto.Hash, blid crypto.Hash, result bool, err error) { func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte) (mblid crypto.Hash, blid crypto.Hash, result bool, err error) {
if globals.Arguments["--sync-node"].(bool) { if globals.Arguments["--sync-node"] != nil && globals.Arguments["--sync-node"].(bool) {
logger.Error(fmt.Errorf("Mining is deactivated since daemon is running with --sync-mode, please check program options."), "") logger.Error(fmt.Errorf("Mining is deactivated since daemon is running with --sync-mode, please check program options."), "")
return mblid, blid, false, fmt.Errorf("Please deactivate --sync-node option before mining") return mblid, blid, false, fmt.Errorf("Please deactivate --sync-node option before mining")
} }
@ -511,8 +479,6 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
return return
} }
//fmt.Printf("received miniblock %x block %x\n", miniblock_blob, bl.Serialize())
// lets try to check pow to detect whether the miner is cheating // lets try to check pow to detect whether the miner is cheating
if !chain.VerifyMiniblockPoW(&bl, mbl) { if !chain.VerifyMiniblockPoW(&bl, mbl) {
logger.V(1).Error(err, "Error ErrInvalidPoW ") logger.V(1).Error(err, "Error ErrInvalidPoW ")
@ -520,67 +486,45 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
return return
} }
var miner_hash crypto.Hash if !mbl.Final {
copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(true, miner_hash) {
logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return
}
// if we reach here, everything looks ok var miner_hash crypto.Hash
bl.MiniBlocks = append(bl.MiniBlocks, mbl) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(true, miner_hash) {
logger.V(3).Error(err, "unregistered miner %s", miner_hash)
err = fmt.Errorf("unregistered miner or you need to wait 15 mins")
return
}
if err = chain.Verify_MiniBlocks(bl); err != nil { if err1, ok := chain.InsertMiniBlock(mbl); ok {
//fmt.Printf("miniblock %s inserted successfully, total %d\n",mblid,len(chain.MiniBlocks.Collection) )
result = true
fmt.Printf("verifying miniblocks %s\n", err) // notify peers, we have a miniblock and return to miner
return if !chain.simulator { // if not in simulator mode, relay miniblock to the chain
} go chain.P2P_MiniBlock_Relayer(mbl, 0)
mblid = mbl.GetHash()
if err1, ok := chain.InsertMiniBlock(mbl); ok {
//fmt.Printf("miniblock %s inserted successfully, total %d\n",mblid,len(chain.MiniBlocks.Collection) )
result = true
} else {
logger.V(1).Error(err1, "miniblock insertion failed", "mbl", fmt.Sprintf("%+v", mbl))
err = err1
return
}
cache_block_mutex.Lock()
cache_block.Timestamp = 0 // expire cache block
cache_block_mutex.Unlock()
// notify peers, we have a miniblock and return to miner
if !chain.simulator { // if not in simulator mode, relay miniblock to the chain
var mbls []block.MiniBlock
if !mbl.Genesis {
for i := uint8(0); i < mbl.PastCount; i++ {
mbls = append(mbls, chain.MiniBlocks.Get(mbl.Past[i]))
} }
} } else {
mbls = append(mbls, mbl) logger.V(1).Error(err1, "miniblock insertion failed", "mbl", fmt.Sprintf("%+v", mbl))
go chain.P2P_MiniBlock_Relayer(mbls, 0) err = err1
}
return
} }
result = true // block's pow is valid
// if we reach here, everything looks ok, we can complete the block we have, lets add the final piece
bl.MiniBlocks = append(bl.MiniBlocks, mbl)
// if a duplicate block is being sent, reject the block // if a duplicate block is being sent, reject the block
if _, ok := duplicate_height_check[bl.Height]; ok { if _, ok := duplicate_height_check[bl.Height]; ok {
logger.V(1).Error(nil, "Block %s rejected by chain due to duplicate hwork.", "blid", bl.GetHash()) logger.V(3).Error(nil, "Block %s rejected by chain due to duplicate hwork.", "blid", bl.GetHash())
err = fmt.Errorf("Error duplicate work") err = fmt.Errorf("Error duplicate work")
return return
} }
// fast check dynamic consensus rules
// if it passes then this miniblock completes the puzzle (if other consensus rules allow)
if scraperr := chain.Check_Dynamism(bl.MiniBlocks); scraperr != nil {
logger.V(3).Error(scraperr, "dynamism check failed ")
return
}
// since we have passed dynamic rules, build a full block and try adding to chain // since we have passed dynamic rules, build a full block and try adding to chain
// lets build up the complete block // lets build up the complete block
@ -611,7 +555,7 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
cbl.Bl = &bl // the block is now complete, lets try to add it to chain cbl.Bl = &bl // the block is now complete, lets try to add it to chain
if !chain.simulator && !accept_limiter.Allow() { // if rate limiter allows, then add block to chain if !chain.simulator && !accept_limiter.Allow() { // if rate limiter allows, then add block to chain
logger.Info("Block rejected by chain.", "blid", bl.GetHash()) logger.V(1).Info("Block rejected by chain", "blid", bl.GetHash())
return return
} }
@ -621,16 +565,19 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
err, result_block = chain.Add_Complete_Block(cbl) err, result_block = chain.Add_Complete_Block(cbl)
if result_block { if result_block {
duplicate_height_check[bl.Height] = true duplicate_height_check[bl.Height] = true
cache_block_mutex.Lock()
cache_block.Timestamp = 0 // expire cache block
cache_block_mutex.Unlock()
logger.V(1).Info("Block successfully accepted, Notifying Network", "blid", bl.GetHash(), "height", bl.Height) logger.V(1).Info("Block successfully accepted, Notifying Network", "blid", bl.GetHash(), "height", bl.Height)
if !chain.simulator { // if not in simulator mode, relay block to the chain if !chain.simulator { // if not in simulator mode, relay block to the chain
chain.P2P_Block_Relayer(cbl, 0) // lets relay the block to network chain.P2P_Block_Relayer(cbl, 0) // lets relay the block to network
} }
} else { } else {
logger.V(1).Error(err, "Block Rejected", "blid", bl.GetHash()) logger.V(3).Error(err, "Block Rejected", "blid", bl.GetHash())
return return
} }
return return
@ -642,7 +589,7 @@ func (chain *Blockchain) ExpandMiniBlockTip(hash crypto.Hash) (result crypto.Has
tips := chain.Get_TIPS() tips := chain.Get_TIPS()
for i := range tips { for i := range tips {
if bytes.Equal(hash[:12], tips[i][:12]) { if bytes.Equal(hash[:4], tips[i][:4]) {
copy(result[:], tips[i][:]) copy(result[:], tips[i][:])
return result, true return result, true
} }
@ -655,7 +602,7 @@ func (chain *Blockchain) ExpandMiniBlockTip(hash crypto.Hash) (result crypto.Has
blhash, err := chain.Load_Block_Topological_order_at_index(i) blhash, err := chain.Load_Block_Topological_order_at_index(i)
if err == nil { if err == nil {
if bytes.Equal(hash[:12], blhash[:12]) { if bytes.Equal(hash[:4], blhash[:4]) {
copy(result[:], blhash[:]) copy(result[:], blhash[:])
return result, true return result, true
} }

View File

@ -19,19 +19,14 @@ package blockchain
import "fmt" import "fmt"
//import "time" //import "time"
import "bytes"
import "encoding/binary" import "encoding/binary"
//import "github.com/go-logr/logr"
//import "golang.org/x/xerrors"
import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/errormsg"
import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
import "golang.org/x/crypto/sha3"
const miniblock_genesis_distance = 0 const miniblock_genesis_distance = 0
const miniblock_normal_distance = 2 const miniblock_normal_distance = 2
@ -39,23 +34,21 @@ const miniblock_normal_distance = 2
func (chain *Blockchain) Verify_MiniBlocks_HashCheck(cbl *block.Complete_Block) (err error) { func (chain *Blockchain) Verify_MiniBlocks_HashCheck(cbl *block.Complete_Block) (err error) {
last_mini_block := cbl.Bl.MiniBlocks[len(cbl.Bl.MiniBlocks)-1] last_mini_block := cbl.Bl.MiniBlocks[len(cbl.Bl.MiniBlocks)-1]
if last_mini_block.Genesis && len(cbl.Bl.MiniBlocks) == 1 { if !last_mini_block.Final {
return nil return fmt.Errorf("corrupted block")
} }
txshash := cbl.Bl.GetTXSHash() block_header_hash := sha3.Sum256(cbl.Bl.SerializeWithoutLastMiniBlock())
block_header_hash := cbl.Bl.GetHashWithoutMiniBlocks() for i := 0; i < 16; i++ {
if last_mini_block.KeyHash[i] != block_header_hash[i] {
for i := range last_mini_block.Check { return fmt.Errorf("MiniBlock has corrupted header expected %x actual %x", block_header_hash[:], last_mini_block.KeyHash[:])
if last_mini_block.Check[i] != txshash[i]^block_header_hash[i] {
return fmt.Errorf("MiniBlock has corrupted header.")
} }
} }
return nil return nil
} }
// verifies the consensus rules completely for miniblocks // verifies the consensus rules completely for miniblocks
func (chain *Blockchain) Verify_MiniBlocks(bl block.Block) (err error) { func Verify_MiniBlocks(bl block.Block) (err error) {
if bl.Height == 0 && len(bl.MiniBlocks) != 0 { if bl.Height == 0 && len(bl.MiniBlocks) != 0 {
err = fmt.Errorf("Genesis block cannot have miniblocks") err = fmt.Errorf("Genesis block cannot have miniblocks")
@ -71,139 +64,52 @@ func (chain *Blockchain) Verify_MiniBlocks(bl block.Block) (err error) {
return return
} }
final_count := 0
for _, mbl := range bl.MiniBlocks { for _, mbl := range bl.MiniBlocks {
if mbl.Timestamp > uint64(globals.Time().UTC().UnixMilli())+50 { // 50 ms passing allowed if mbl.Final { // 50 ms passing allowed
//block_logger.Error(fmt.Errorf("MiniBlock has invalid timestamp from future"), "rejecting","current time",globals.Time().UTC(),"miniblock_time", mbl.GetTimestamp(),"i",i) final_count++
return errormsg.ErrInvalidTimestamp
} }
} }
if final_count != 1 {
err = fmt.Errorf("No final miniblock")
return
}
// check whether the genesis blocks are all equal // check whether the genesis blocks are all equal
for _, mbl := range bl.MiniBlocks { for _, mbl := range bl.MiniBlocks {
if !mbl.IsSafe() {
return fmt.Errorf("MiniBlock is unsafe")
}
if mbl.Genesis { // make sure all genesis blocks point to all the actual tips
if bl.Height != binary.BigEndian.Uint64(mbl.Check[:]) { if bl.Height != mbl.Height {
return fmt.Errorf("MiniBlock has invalid height") return fmt.Errorf("MiniBlock has invalid height block height %d mbl height %d", bl.Height, mbl.Height)
}
if len(bl.Tips) != int(mbl.PastCount) {
return fmt.Errorf("MiniBlock has wrong number of tips")
}
if len(bl.Tips) == 0 {
panic("all miniblocks genesis must point to tip")
} else if len(bl.Tips) == 1 {
if binary.BigEndian.Uint32(bl.Tips[0][:]) != mbl.Past[0] {
return fmt.Errorf("MiniBlock has invalid tip")
} }
if len(bl.Tips) != int(mbl.PastCount) { } else if len(bl.Tips) == 2 {
return fmt.Errorf("MiniBlock has wrong number of tips") if binary.BigEndian.Uint32(bl.Tips[0][:]) != mbl.Past[0] {
return fmt.Errorf("MiniBlock has invalid tip")
} }
if len(bl.Tips) == 0 { if binary.BigEndian.Uint32(bl.Tips[1][:]) != mbl.Past[1] {
panic("all miniblocks genesis must point to tip") return fmt.Errorf("MiniBlock has invalid tip")
} else if len(bl.Tips) == 1 {
if !bytes.Equal(mbl.Check[8:8+12], bl.Tips[0][0:12]) {
return fmt.Errorf("MiniBlock has invalid tip")
}
} else if len(bl.Tips) == 2 {
if !(bytes.Equal(mbl.Check[8:8+12], bl.Tips[0][0:12]) || bytes.Equal(mbl.Check[8:8+12], bl.Tips[1][0:12])) {
return fmt.Errorf("MiniBlock has invalid tip")
}
if !(bytes.Equal(mbl.Check[8+12:], bl.Tips[1][0:12]) || bytes.Equal(mbl.Check[8+12:], bl.Tips[1][0:12])) {
return fmt.Errorf("MiniBlock has invalid second tip")
}
if bytes.Equal(mbl.Check[8:8+12], mbl.Check[8+12:]) {
return fmt.Errorf("MiniBlock refers to same tip twice")
}
} else {
panic("we only support 2 tips")
} }
} if mbl.Past[0] == mbl.Past[1] {
} return fmt.Errorf("MiniBlock refers to same tip twice")
// we should draw the dag and make sure each and every one is connected
{
tmp_collection := block.CreateMiniBlockCollection()
for _, tmbl := range bl.MiniBlocks {
if err, ok := tmp_collection.InsertMiniBlock(tmbl); !ok {
return err
}
}
tips := tmp_collection.GetAllTips() // we should only receive a single tip
if len(tips) != 1 {
return fmt.Errorf("MiniBlock consensus should have only 1 tip")
}
if tips[0].GetHash() != bl.MiniBlocks[len(bl.MiniBlocks)-1].GetHash() {
return fmt.Errorf("MiniBlock consensus last tip is placed wrong")
}
history := block.GetEntireMiniBlockHistory(tips[0])
if len(history) != len(bl.MiniBlocks) {
return fmt.Errorf("MiniBlock dag is not completely connected")
}
// check condition where tips cannot be referred to long into past
for _, mbl := range history {
if !mbl.Genesis {
if mbl.PastCount == 2 {
p1 := tmp_collection.Get(mbl.Past[0])
p2 := tmp_collection.Get(mbl.Past[1])
if p1.Distance == p2.Distance ||
(p1.Distance > p2.Distance && (p1.Distance-p2.Distance) <= miniblock_genesis_distance && p2.Genesis) || // this will limit forking
(p2.Distance > p1.Distance && (p2.Distance-p1.Distance) <= miniblock_genesis_distance && p1.Genesis) || // this will limit forking
(p1.Distance > p2.Distance && (p1.Distance-p2.Distance) <= miniblock_normal_distance) || // give some freeway to miners
(p2.Distance > p1.Distance && (p2.Distance-p1.Distance) <= miniblock_normal_distance) { // give some freeway to miners
} else {
return fmt.Errorf("MiniBlock dag is well formed, but tip referred is too long in distance")
}
}
} }
} else {
panic("we only support 2 tips")
} }
} }
return nil return nil
} }
// for the first time, we are allowing programmable consensus rules based on miniblocks
// this should be power of 2
// the below rule works as follows
// say we need blocktime of 15 sec
// so if we configure dynamism parameter to 7
// so some blocks will contain say 13 miniblocks, some will contain 20 miniblock
func (chain *Blockchain) Check_Dynamism(mbls []block.MiniBlock) (err error) {
if chain.simulator { // simulator does not need dynamism check for simplicity
return nil
}
dynamism := uint(7)
if dynamism&(dynamism+1) != 0 {
return fmt.Errorf("dynamic parameter must be a power of 2")
}
minimum_no_of_miniblocks := uint(config.BLOCK_TIME) - dynamism - 1
if uint(len(mbls)) < minimum_no_of_miniblocks {
return fmt.Errorf("more miniblocks required to complete a block required %d have %d", minimum_no_of_miniblocks, len(mbls))
}
last_mini_block := mbls[len(mbls)-1]
last_mini_block_hash := last_mini_block.GetHash()
if !last_mini_block.Odd {
return fmt.Errorf("only odd positioned miniblocks can complete a full block")
}
if uint(last_mini_block_hash[31])&dynamism != dynamism {
return fmt.Errorf("more miniblocks are required to complete a block.")
}
return nil
}
// insert a miniblock to chain and if successfull inserted, notify everyone in need // insert a miniblock to chain and if successfull inserted, notify everyone in need
func (chain *Blockchain) InsertMiniBlock(mbl block.MiniBlock) (err error, result bool) { func (chain *Blockchain) InsertMiniBlock(mbl block.MiniBlock) (err error, result bool) {
if !mbl.IsSafe() {
return fmt.Errorf("miniblock is unsafe"), false
}
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(true, miner_hash) { if !chain.IsAddressHashValid(true, miner_hash) {

View File

@ -23,6 +23,7 @@ import "path/filepath"
import "github.com/deroproject/derohe/globals" import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
import "github.com/deroproject/graviton" import "github.com/deroproject/graviton"
@ -136,11 +137,10 @@ func (chain *Blockchain) Calculate_Height_At_Tips(tips []crypto.Hash) int64 {
} else { // find the best height of past } else { // find the best height of past
for i := range tips { for i := range tips {
bl, err := chain.Load_BL_FROM_ID(tips[i]) past_height := chain.Load_Block_Height(tips[i])
if err != nil { if past_height < 0 {
panic(err) panic(fmt.Errorf("could not find height for blid %s", tips[i]))
} }
past_height := int64(bl.Height)
if height <= past_height { if height <= past_height {
height = past_height height = past_height
} }
@ -330,3 +330,27 @@ func (chain *Blockchain) Load_Merkle_Hash(version uint64) (hash crypto.Hash, err
} }
return hash, nil return hash, nil
} }
// loads a complete block from disk
func (chain *Blockchain) Load_Complete_Block(blid crypto.Hash) (cbl *block.Complete_Block, err error) {
cbl = &block.Complete_Block{}
cbl.Bl, err = chain.Load_BL_FROM_ID(blid)
if err != nil {
return
}
for _, txid := range cbl.Bl.Tx_hashes {
var tx_bytes []byte
if tx_bytes, err = chain.Store.Block_tx_store.ReadTX(txid); err != nil {
return
} else {
var tx transaction.Transaction
if err = tx.Deserialize(tx_bytes); err != nil {
return
}
cbl.Txs = append(cbl.Txs, &tx)
}
}
return
}

View File

@ -116,6 +116,7 @@ func (s *storefs) ReadBlockDifficulty(h [32]byte) (*big.Int, error) {
return nil, os.ErrNotExist return nil, os.ErrNotExist
} }
// this cannot be cached
func (chain *Blockchain) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) { func (chain *Blockchain) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) {
return chain.Store.Block_tx_store.ReadBlockSnapshotVersion(h) return chain.Store.Block_tx_store.ReadBlockSnapshotVersion(h)
} }

View File

@ -124,8 +124,8 @@ func (chain *Blockchain) process_miner_transaction(bl *block.Block, genesis bool
// since perfect division is not possible, ( see money handling) // since perfect division is not possible, ( see money handling)
// any left over change is delivered to main miner who integrated the full block // any left over change is delivered to main miner who integrated the full block
share := full_reward / uint64(len(bl.MiniBlocks)+1) // one block integrator, this is integer division share := full_reward / uint64(len(bl.MiniBlocks)) // one block integrator, this is integer division
leftover := full_reward - (share * uint64(len(bl.MiniBlocks)+1)) // only integrator will get this leftover := full_reward - (share * uint64(len(bl.MiniBlocks))) // only integrator will get this
{ // giver integrator his reward { // giver integrator his reward
balance_serialized, err := balance_tree.Get(tx.MinerAddress[:]) balance_serialized, err := balance_tree.Get(tx.MinerAddress[:])
@ -139,6 +139,9 @@ func (chain *Blockchain) process_miner_transaction(bl *block.Block, genesis bool
// all the other miniblocks will get their share // all the other miniblocks will get their share
for _, mbl := range bl.MiniBlocks { for _, mbl := range bl.MiniBlocks {
if mbl.Final {
continue
}
_, key_compressed, balance_serialized, err := balance_tree.GetKeyValueFromHash(mbl.KeyHash[:16]) _, key_compressed, balance_serialized, err := balance_tree.GetKeyValueFromHash(mbl.KeyHash[:16])
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -317,7 +317,7 @@ func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool
} }
if hash != tx.Payloads[0].Statement.Roothash { if hash != tx.Payloads[0].Statement.Roothash {
return fmt.Errorf("Tx statement roothash mismatch expected %x actual %x", tx.Payloads[0].Statement.Roothash, hash[:]) return fmt.Errorf("Tx statement roothash mismatch ref blid %x expected %x actual %x", tx.BLID, tx.Payloads[0].Statement.Roothash, hash[:])
} }
// we have found the balance tree with which it was built now lets verify // we have found the balance tree with which it was built now lets verify

View File

@ -518,10 +518,11 @@ func (rpc_client *Client) mineblock(tid int) {
runtime.LockOSThread() runtime.LockOSThread()
threadaffinity() threadaffinity()
iterations_per_loop := uint32(0xffffffff) i := uint32(0)
for { for {
mutex.RLock() mutex.RLock()
myjob := job myjob := job
mutex.RUnlock() mutex.RUnlock()
@ -547,13 +548,13 @@ func (rpc_client *Client) mineblock(tid int) {
continue continue
} }
if work[0]&0x10 > 0 { // odd miniblocks have twice the difficulty for {
diff.Mul(new(big.Int).Set(&diff), new(big.Int).SetUint64(2)) i++
}
for i := uint32(0); i < iterations_per_loop; i++ {
binary.BigEndian.PutUint32(nonce_buf, i) binary.BigEndian.PutUint32(nonce_buf, i)
//pow := astrobwt.POW_0alloc(work[:])
if i&0x3ff == 0x3ff { // get updated job every 250 millisecs
break
}
powhash := pow.Pow(work[:]) powhash := pow.Pow(work[:])
atomic.AddUint64(&counter, 1) atomic.AddUint64(&counter, 1)
@ -570,9 +571,7 @@ func (rpc_client *Client) mineblock(tid int) {
block_counter++ block_counter++
} }
logger.V(2).Info("submitting block", "result", result) logger.V(2).Info("submitting block", "result", result)
rpc_client.update_job() go rpc_client.update_job()
break
} else { } else {
logger.Error(err, "error submitting block") logger.Error(err, "error submitting block")
rpc_client.update_job() rpc_client.update_job()

View File

@ -29,7 +29,6 @@ import "runtime"
import "runtime/debug" import "runtime/debug"
import "math/big" import "math/big"
import "os/signal" import "os/signal"
import "io/ioutil"
//import "crypto/sha1" //import "crypto/sha1"
import "encoding/hex" import "encoding/hex"
@ -107,6 +106,7 @@ func dump(filename string) {
} }
func main() { func main() {
runtime.MemProfileRate = 0
var err error var err error
globals.Arguments, err = docopt.Parse(command_line, nil, true, config.Version.String(), false) globals.Arguments, err = docopt.Parse(command_line, nil, true, config.Version.String(), false)
@ -216,7 +216,7 @@ func main() {
p2p.Broadcast_Block(cbl, peerid) p2p.Broadcast_Block(cbl, peerid)
} }
chain.P2P_MiniBlock_Relayer = func(mbl []block.MiniBlock, peerid uint64) { chain.P2P_MiniBlock_Relayer = func(mbl block.MiniBlock, peerid uint64) {
p2p.Broadcast_MiniBlock(mbl, peerid) p2p.Broadcast_MiniBlock(mbl, peerid)
} }
@ -348,7 +348,7 @@ func readline_loop(l *readline.Instance, chain *blockchain.Blockchain, logger lo
}() }()
restart_loop: //restart_loop:
for { for {
line, err := l.Readline() line, err := l.Readline()
if err == io.EOF { if err == io.EOF {
@ -701,42 +701,6 @@ restart_loop:
case strings.ToLower(line) == "quit": case strings.ToLower(line) == "quit":
close(Exit_In_Progress) close(Exit_In_Progress)
return nil return nil
case command == "graphminifull": // renders the graph of miniblocks in memory
ioutil.WriteFile("/tmp/minidag_recent.dot", []byte(chain.MiniBlocks.Graph()), 0644)
logger.Info("Writing mini block graph (from memory) dot format /tmp/minidag_recent.dot\n")
case command == "graphmini": // renders graphs of miniblocks within a block
topo := int64(0)
if len(line_parts) != 2 {
logger.Error(fmt.Errorf("This function requires single parameter a topoheight"), "")
continue
}
if s, err := strconv.ParseInt(line_parts[1], 10, 64); err == nil {
topo = s
} else {
logger.Error(err, "Invalid topo height value", "value", line_parts[1])
continue
}
if hash, err := chain.Load_Block_Topological_order_at_index(topo); err == nil {
if bl, err := chain.Load_BL_FROM_ID(hash); err == nil {
tmp_collection := block.CreateMiniBlockCollection()
for _, tmbl := range bl.MiniBlocks {
if err, ok := tmp_collection.InsertMiniBlock(tmbl); !ok {
fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
break restart_loop
}
}
ioutil.WriteFile(fmt.Sprintf("/tmp/minidag_%d.dot", topo), []byte(tmp_collection.Graph()), 0644)
logger.Info("Writing mini block graph dot format /tmp/minidag.dot", "topo", topo)
}
}
if err != nil {
fmt.Printf("cannot render graph at topo %d due to error %s\n", topo, err)
}
case command == "graph": case command == "graph":
start := int64(0) start := int64(0)

View File

@ -181,7 +181,7 @@ func Test_Blockchain_Deviation(t *testing.T) {
t.Fatalf("miniblock count not increased.") t.Fatalf("miniblock count not increased.")
} }
if tips := chain.MiniBlocks.GetAllTipsAtHeight(int64(cbl_next.Bl.Height)); len(tips) != 1 { if tips := chain.MiniBlocks.GetAllKeys(int64(cbl_next.Bl.Height)); len(tips) != 1 {
t.Fatalf("Tip count Expected %d Actuak %d", 1, len(tips)) t.Fatalf("Tip count Expected %d Actuak %d", 1, len(tips))
} }

View File

@ -91,6 +91,31 @@ var rpcport = "127.0.0.1:20000"
const wallet_ports_start = 30000 // all wallets will rpc activated on ports const wallet_ports_start = 30000 // all wallets will rpc activated on ports
// this is a crude function used during tests
func Mine_block_single(chain *blockchain.Blockchain, miner_address rpc.Address) error {
var blid crypto.Hash
//if !chain.simulator{
// return fmt.Errorf("this function can only run in simulator mode")
//}
for i := uint64(0); i < config.BLOCK_TIME; i++ {
bl, mbl, _, _, err := chain.Create_new_block_template_mining(miner_address)
if err != nil {
logger.Error(err, "err while request block template")
return err
}
if _, blid, _, err = chain.Accept_new_block(bl.Timestamp, mbl.Serialize()); blid.IsZero() || err != nil {
if err != nil {
logger.Error(err, "err while accepting block template")
}
}
}
return nil
}
func main() { func main() {
var err error var err error
@ -172,7 +197,7 @@ func main() {
rpcserver, _ := derodrpc.RPCServer_Start(params) rpcserver, _ := derodrpc.RPCServer_Start(params)
register_wallets(chain) // setup 22 wallets register_wallets(chain) // setup 22 wallets
mine_block_single(chain, genesis_wallet.GetAddress()) //mine single block to confirm all 22 registrations Mine_block_single(chain, genesis_wallet.GetAddress()) //mine single block to confirm all 22 registrations
go walletapi.Keep_Connectivity() // all wallets maintain connectivity go walletapi.Keep_Connectivity() // all wallets maintain connectivity
@ -526,7 +551,6 @@ exit:
func mine_block_auto(chain *blockchain.Blockchain, miner_address rpc.Address) { func mine_block_auto(chain *blockchain.Blockchain, miner_address rpc.Address) {
last_block_time := time.Now() last_block_time := time.Now()
for { for {
bl, _, _, _, err := chain.Create_new_block_template_mining(miner_address) bl, _, _, _, err := chain.Create_new_block_template_mining(miner_address)
if err != nil { if err != nil {
logger.Error(err, "error while building mining block") logger.Error(err, "error while building mining block")
@ -535,7 +559,7 @@ func mine_block_auto(chain *blockchain.Blockchain, miner_address rpc.Address) {
if time.Now().Sub(last_block_time) > time.Duration(config.BLOCK_TIME)*time.Second || // every X secs generate a block if time.Now().Sub(last_block_time) > time.Duration(config.BLOCK_TIME)*time.Second || // every X secs generate a block
len(bl.Tx_hashes) >= 1 { //pools have a tx, try to mine them ASAP len(bl.Tx_hashes) >= 1 { //pools have a tx, try to mine them ASAP
if err := mine_block_single(chain, miner_address); err != nil { if err := Mine_block_single(chain, miner_address); err != nil {
time.Sleep(time.Second) time.Sleep(time.Second)
continue continue
} }
@ -543,28 +567,10 @@ func mine_block_auto(chain *blockchain.Blockchain, miner_address rpc.Address) {
last_block_time = time.Now() last_block_time = time.Now()
} }
time.Sleep(50 * time.Millisecond) time.Sleep(900 * time.Millisecond)
} }
} }
func mine_block_single(chain *blockchain.Blockchain, miner_address rpc.Address) (err error) {
var blid crypto.Hash
bl, mbl, _, _, err := chain.Create_new_block_template_mining(miner_address)
if err != nil {
logger.Error(err, "err while request block template")
return
}
if _, blid, _, err = chain.Accept_new_block(bl.Timestamp, mbl.Serialize()); blid.IsZero() || err != nil {
if err != nil {
logger.Error(err, "err while accepting block template")
}
//fmt.Printf("error adding miniblock, err %s\n",err)
return
}
return err
}
func prettyprint_json(b []byte) []byte { func prettyprint_json(b []byte) []byte {
var out bytes.Buffer var out bytes.Buffer
err := json.Indent(&out, b, "", " ") err := json.Indent(&out, b, "", " ")

View File

@ -103,7 +103,7 @@ var Mainnet = CHAIN_CONFIG{Name: "mainnet",
} }
var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0 var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0
Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x71, 0x00, 0x00, 0x00}), Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x72, 0x00, 0x00, 0x00}),
P2P_Default_Port: 40401, P2P_Default_Port: 40401,
RPC_Default_Port: 40402, RPC_Default_Port: 40402,
Wallet_RPC_Default_Port: 40403, Wallet_RPC_Default_Port: 40403,

View File

@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"
// right now it has to be manually changed // right now it has to be manually changed
// do we need to include git commitsha?? // do we need to include git commitsha??
var Version = semver.MustParse("3.4.89-1.DEROHE.STARGATE+22112021") var Version = semver.MustParse("3.4.91-1.DEROHE.STARGATE+25112021")

View File

@ -20,7 +20,6 @@ import "fmt"
//import "net" //import "net"
import "time" import "time"
import "context"
import "math/big" import "math/big"
import "math/bits" import "math/bits"
import "sync/atomic" import "sync/atomic"
@ -58,8 +57,6 @@ func (connection *Connection) bootstrap_chain() {
return return
} }
var TimeLimit = 10 * time.Second
// we will request top 60 blocks // we will request top 60 blocks
ctopo := connection.TopoHeight - 50 // last 50 blocks have to be synced, this syncing will help us detect error ctopo := connection.TopoHeight - 50 // last 50 blocks have to be synced, this syncing will help us detect error
var topos []int64 var topos []int64
@ -74,9 +71,7 @@ func (connection *Connection) bootstrap_chain() {
} }
fill_common(&request.Common) // fill common info fill_common(&request.Common) // fill common info
if err := connection.Client.Call("Peer.ChangeSet", request, &response); err != nil {
ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
if err := connection.Client.CallWithContext(ctx, "Peer.ChangeSet", request, &response); err != nil {
connection.logger.V(1).Error(err, "Call failed ChangeSet") connection.logger.V(1).Error(err, "Call failed ChangeSet")
return return
} }
@ -110,8 +105,7 @@ func (connection *Connection) bootstrap_chain() {
ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.BALANCE_TREE), Section: section[:], SectionLength: uint64(path_length)} ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.BALANCE_TREE), Section: section[:], SectionLength: uint64(path_length)}
var ts_response Response_Tree_Section_Struct var ts_response Response_Tree_Section_Struct
fill_common(&ts_response.Common) fill_common(&ts_response.Common)
ctx, _ := context.WithTimeout(context.Background(), TimeLimit) if err := connection.Client.Call("Peer.TreeSection", ts_request, &ts_response); err != nil {
if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", ts_request, &ts_response); err != nil {
connection.logger.V(1).Error(err, "Call failed TreeSection") connection.logger.V(1).Error(err, "Call failed TreeSection")
return return
} else { } else {
@ -175,8 +169,7 @@ func (connection *Connection) bootstrap_chain() {
ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.SC_META), Section: section[:], SectionLength: uint64(path_length)} ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.SC_META), Section: section[:], SectionLength: uint64(path_length)}
var ts_response Response_Tree_Section_Struct var ts_response Response_Tree_Section_Struct
fill_common(&ts_response.Common) fill_common(&ts_response.Common)
ctx, _ = context.WithTimeout(context.Background(), TimeLimit) if err := connection.Client.Call("Peer.TreeSection", ts_request, &ts_response); err != nil {
if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", ts_request, &ts_response); err != nil {
connection.logger.V(1).Error(err, "Call failed TreeSection") connection.logger.V(1).Error(err, "Call failed TreeSection")
return return
} else { } else {
@ -206,8 +199,7 @@ func (connection *Connection) bootstrap_chain() {
sc_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: ts_response.Keys[j], Section: section[:], SectionLength: uint64(0)} sc_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: ts_response.Keys[j], Section: section[:], SectionLength: uint64(0)}
var sc_response Response_Tree_Section_Struct var sc_response Response_Tree_Section_Struct
fill_common(&sc_response.Common) fill_common(&sc_response.Common)
ctx, _ = context.WithTimeout(context.Background(), TimeLimit) if err := connection.Client.Call("Peer.TreeSection", sc_request, &sc_response); err != nil {
if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", sc_request, &sc_response); err != nil {
connection.logger.V(1).Error(err, "Call failed TreeSection") connection.logger.V(1).Error(err, "Call failed TreeSection")
return return
} else { } else {

View File

@ -18,14 +18,94 @@ package p2p
import "fmt" import "fmt"
import "time" import "time"
import "context" import "math/big"
import "sync/atomic" import "sync/atomic"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/globals" import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/errormsg" import "github.com/deroproject/derohe/errormsg"
import "github.com/deroproject/derohe/blockchain"
import "github.com/deroproject/derohe/transaction" import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/cryptography/crypto"
// used to satisfy difficulty interface
type MemorySource struct {
Blocks map[crypto.Hash]*block.Complete_Block
Difficulty map[crypto.Hash]*big.Int
}
// local blocks are inserted using this
// insert_block_noprecheck stores a locally-loaded block together with its
// already-known difficulty. No PoW verification is done: local blocks were
// checked when they were first accepted into the chain.
func (x *MemorySource) insert_block_noprecheck(cbl *block.Complete_Block, diff *big.Int) {
	id := cbl.Bl.GetHash()
	x.Difficulty[id] = diff
	x.Blocks[id] = cbl
}
// remote peers blocks are added using this
// insert_block adds a block received from a remote peer after verifying that
// the PoW of every miniblock meets the difficulty implied by the block's tips.
// It panics on any failure (genesis block, failed PoW); p2p callers run under
// globals.Recover, so a panic aborts only the offending sync attempt.
func (x *MemorySource) insert_block(cbl *block.Complete_Block) {
	if len(cbl.Bl.Tips) == 0 {
		panic("genesis block not possible")
	}

	// difficulty is derived from the tips already present in this store
	diff := blockchain.Get_Difficulty_At_Tips(x, cbl.Bl.Tips)

	for _, mbl := range cbl.Bl.MiniBlocks {
		// idiomatic negation instead of "== false" (staticcheck S1002)
		if !blockchain.CheckPowHashBig(mbl.GetPoWHash(), diff) {
			panic("pow failed")
		}
	}

	// hash only after all miniblocks verified; no point hashing a bad block
	blid := cbl.Bl.GetHash()
	x.Blocks[blid] = cbl
	x.Difficulty[blid] = diff
}
// check returns the complete block stored for blid, panicking if the block
// was never inserted into this in-RAM store.
func (x *MemorySource) check(blid crypto.Hash) *block.Complete_Block {
	if cbl, ok := x.Blocks[blid]; ok {
		return cbl
	}
	panic(fmt.Errorf("no such blid in ram %s", blid))
}
// Load_Block_Height returns the height recorded in the stored block's header.
func (x *MemorySource) Load_Block_Height(blid crypto.Hash) int64 {
	cbl := x.check(blid)
	return int64(cbl.Bl.Height)
}
// Load_Block_Timestamp returns the timestamp recorded in the stored block's header.
func (x *MemorySource) Load_Block_Timestamp(blid crypto.Hash) uint64 {
	cbl := x.check(blid)
	return uint64(cbl.Bl.Timestamp)
}
// Get_Block_Past returns the tips (parent block ids) of the stored block.
func (x *MemorySource) Get_Block_Past(blid crypto.Hash) []crypto.Hash {
	cbl := x.check(blid)
	return cbl.Bl.Tips
}
// Load_Block_Difficulty returns the difficulty stored for blid, panicking if
// the block was never inserted into this in-RAM store.
func (x *MemorySource) Load_Block_Difficulty(blid crypto.Hash) *big.Int {
	if diff, ok := x.Difficulty[blid]; ok {
		return diff
	}
	panic(fmt.Errorf("no such blid in ram %s", blid))
}
// convert block which was serialized in p2p format to in ram block format
func ConvertCBlock_To_CompleteBlock(cblock Complete_Block) (cbl block.Complete_Block, diff *big.Int) {
var bl block.Block
cbl.Bl = &bl
if err := bl.Deserialize(cblock.Block); err != nil {
panic(err)
}
// complete the txs
for j := range cblock.Txs {
var tx transaction.Transaction
if err := tx.Deserialize(cblock.Txs[j]); err != nil { // we have a tx which could not be deserialized ban peer
panic(err)
}
cbl.Txs = append(cbl.Txs, &tx)
}
if len(bl.Tx_hashes) != len(cbl.Txs) {
panic(fmt.Errorf("txcount mismatch, expected %d txs actual %d", len(bl.Tx_hashes), len(cbl.Txs)))
}
return
}
// we are expecting other side to have a heavier PoW chain, try to sync now // we are expecting other side to have a heavier PoW chain, try to sync now
func (connection *Connection) sync_chain() { func (connection *Connection) sync_chain() {
@ -69,9 +149,7 @@ try_again:
request.TopoHeights = append(request.TopoHeights, 0) request.TopoHeights = append(request.TopoHeights, 0)
fill_common(&request.Common) // fill common info fill_common(&request.Common) // fill common info
var TimeLimit = 10 * time.Second if err := connection.Client.Call("Peer.Chain", request, &response); err != nil {
ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
if err := connection.Client.CallWithContext(ctx, "Peer.Chain", request, &response); err != nil {
connection.logger.V(2).Error(err, "Call failed Chain") connection.logger.V(2).Error(err, "Call failed Chain")
return return
} }
@ -79,6 +157,8 @@ try_again:
connection.logger.V(2).Info("Peer wants to give chain", "from topoheight", response.Start_height) connection.logger.V(2).Info("Peer wants to give chain", "from topoheight", response.Start_height)
var pop_count int64
// we do not need reorganisation if deviation is less than or equak to 7 blocks // we do not need reorganisation if deviation is less than or equak to 7 blocks
// only pop blocks if the system has somehow deviated more than 7 blocks // only pop blocks if the system has somehow deviated more than 7 blocks
// if the deviation is less than 7 blocks, we internally reorganise everything // if the deviation is less than 7 blocks, we internally reorganise everything
@ -92,13 +172,78 @@ try_again:
goto try_again goto try_again
} else if chain.Get_Height()-response.Common.Height != 0 && chain.Get_Height()-response.Start_height <= config.STABLE_LIMIT { } else if chain.Get_Height()-response.Common.Height != 0 && chain.Get_Height()-response.Start_height <= config.STABLE_LIMIT {
//pop_count := chain.Load_TOPO_HEIGHT() - response.Start_topoheight pop_count = chain.Load_TOPO_HEIGHT() - response.Start_topoheight
//chain.Rewind_Chain(int(pop_count)) // pop as many blocks as necessary, assumming peer has given us good chain
} else if chain.Get_Height() < connection.Height && chain.Get_Height()-response.Start_height > config.STABLE_LIMIT { // we must somehow notify that deviation is way too much and manual interaction is necessary, so as any bug for chain deviationmay be detected } else if chain.Get_Height() < connection.Height && chain.Get_Height()-response.Start_height > config.STABLE_LIMIT { // we must somehow notify that deviation is way too much and manual interaction is necessary, so as any bug for chain deviationmay be detected
connection.logger.V(1).Error(nil, "we have or others have deviated too much.you may have to use --sync-node option", "our topoheight", chain.Load_TOPO_HEIGHT(), "peer topoheight start", response.Start_topoheight) connection.logger.V(1).Error(nil, "we have or others have deviated too much.you may have to use --sync-node option", "our topoheight", chain.Load_TOPO_HEIGHT(), "peer topoheight start", response.Start_topoheight)
return return
} }
if pop_count >= 1 { // peer is claiming his chain is good and we should rewind
connection.logger.V(1).Info("syncing", "pop_count", pop_count)
ramstore := MemorySource{Blocks: map[crypto.Hash]*block.Complete_Block{}, Difficulty: map[crypto.Hash]*big.Int{}}
start_point := response.Start_topoheight
for i := 0; i < 10 && start_point >= 1; i++ {
start_point--
}
for i := start_point; i < start_point+128 && i <= chain.Load_TOPO_HEIGHT(); i++ {
blid, err := chain.Load_Block_Topological_order_at_index(i)
if err != nil {
connection.logger.V(1).Info("error loading local topo", "i", i)
return
}
cbl, err := chain.Load_Complete_Block(blid)
if err != nil {
connection.logger.V(1).Info("error loading completeblock", "blid", blid)
return
}
diff := chain.Load_Block_Difficulty(blid)
ramstore.insert_block_noprecheck(cbl, diff) // all local blocks are already checked
}
// now we must request peer blocks and verify their pow
for i := range response.Block_list {
our_topo_order := chain.Load_Block_Topological_order(response.Block_list[i])
if our_topo_order != (int64(i)+response.Start_topoheight) || our_topo_order == -1 { // if block is not in our chain, add it to request list
var orequest ObjectList
var oresponse Objects
//fmt.Printf("inserting blocks %d\n", (int64(i) + response.Start_topoheight))
orequest.Block_list = append(orequest.Block_list, response.Block_list[i])
fill_common(&orequest.Common)
if err := connection.Client.Call("Peer.GetObject", orequest, &oresponse); err != nil {
connection.logger.V(2).Error(err, "Call failed GetObject")
return
} else { // process the response
cbl, _ := ConvertCBlock_To_CompleteBlock(oresponse.CBlocks[0])
ramstore.insert_block(&cbl) // insert block with checking verification
}
}
}
// if we reached here we were able to verify basic chain structure
chain.Rewind_Chain(int(pop_count)) // pop as many blocks as necessary, assumming peer has given us good chain
failcount := 0
for i := range response.Block_list {
our_topo_order := chain.Load_Block_Topological_order(response.Block_list[i])
if our_topo_order != (int64(i)+response.Start_topoheight) || our_topo_order == -1 { // if block is not in our chain, add it to request list
if failcount < 4 {
if _, ok := chain.Add_Complete_Block(ramstore.check(response.Block_list[i])); !ok {
failcount++
}
}
}
}
connection.logger.V(1).Info("rewinded blocks", "pop_count", pop_count)
return
}
// response only 128 blocks at a time // response only 128 blocks at a time
max_blocks_to_queue := 128 max_blocks_to_queue := 128
// check whether the objects are in our db or not // check whether the objects are in our db or not
@ -117,8 +262,7 @@ try_again:
orequest.Block_list = append(orequest.Block_list, response.Block_list[i]) orequest.Block_list = append(orequest.Block_list, response.Block_list[i])
fill_common(&orequest.Common) fill_common(&orequest.Common)
ctx, _ := context.WithTimeout(context.Background(), TimeLimit) if err := connection.Client.Call("Peer.GetObject", orequest, &oresponse); err != nil {
if err := connection.Client.CallWithContext(ctx, "Peer.GetObject", orequest, &oresponse); err != nil {
connection.logger.V(2).Error(err, "Call failed GetObject") connection.logger.V(2).Error(err, "Call failed GetObject")
return return
} else { // process the response } else { // process the response

View File

@ -511,17 +511,15 @@ func broadcast_Chunk(chunk *Block_Chunk, PeerID uint64, first_seen int64) { // i
// we can only broadcast a block which is in our db // we can only broadcast a block which is in our db
// this function is trigger from 2 points, one when we receive a unknown block which can be successfully added to chain // this function is trigger from 2 points, one when we receive a unknown block which can be successfully added to chain
// second from the blockchain which has to relay locally mined blocks as soon as possible // second from the blockchain which has to relay locally mined blocks as soon as possible
func Broadcast_MiniBlock(mbls []block.MiniBlock, PeerID uint64) { // if peerid is provided it is skipped func Broadcast_MiniBlock(mbl block.MiniBlock, PeerID uint64) { // if peerid is provided it is skipped
broadcast_MiniBlock(mbls, PeerID, globals.Time().UTC().UnixMicro()) broadcast_MiniBlock(mbl, PeerID, globals.Time().UTC().UnixMicro())
} }
func broadcast_MiniBlock(mbls []block.MiniBlock, PeerID uint64, first_seen int64) { // if peerid is provided it is skipped func broadcast_MiniBlock(mbl block.MiniBlock, PeerID uint64, first_seen int64) { // if peerid is provided it is skipped
defer globals.Recover(3) defer globals.Recover(3)
var peer_specific_block Objects var peer_specific_block Objects
for _, mbl := range mbls { peer_specific_block.MiniBlocks = append(peer_specific_block.MiniBlocks, mbl.Serialize())
peer_specific_block.MiniBlocks = append(peer_specific_block.MiniBlocks, mbl.Serialize())
}
fill_common(&peer_specific_block.Common) // fill common info fill_common(&peer_specific_block.Common) // fill common info
peer_specific_block.Sent = first_seen peer_specific_block.Sent = first_seen

View File

@ -138,7 +138,7 @@ func P2P_Init(params map[string]interface{}) error {
go P2P_Server_v2() // start accepting connections go P2P_Server_v2() // start accepting connections
go P2P_engine() // start outgoing engine go P2P_engine() // start outgoing engine
globals.Cron.AddFunc("@every 2s", syncroniser) // start sync engine globals.Cron.AddFunc("@every 4s", syncroniser) // start sync engine
globals.Cron.AddFunc("@every 5s", Connection_Pending_Clear) // clean dead connections globals.Cron.AddFunc("@every 5s", Connection_Pending_Clear) // clean dead connections
globals.Cron.AddFunc("@every 10s", ping_loop) // ping every one globals.Cron.AddFunc("@every 10s", ping_loop) // ping every one
globals.Cron.AddFunc("@every 10s", chunks_clean_up) // clean chunks globals.Cron.AddFunc("@every 10s", chunks_clean_up) // clean chunks
@ -554,7 +554,7 @@ func P2P_Server_v2() {
func handle_connection_panic(c *Connection) { func handle_connection_panic(c *Connection) {
defer globals.Recover(2) defer globals.Recover(2)
if r := recover(); r != nil { if r := recover(); r != nil {
logger.V(2).Error(nil, "Recovered while handling connection", "r", r, "stack", debug.Stack()) logger.V(2).Error(nil, "Recovered while handling connection", "r", r, "stack", string(debug.Stack()))
c.exit() c.exit()
} }
} }

View File

@ -58,13 +58,13 @@ func (connection *Connection) dispatch_test_handshake() {
ctx, _ := context.WithTimeout(context.Background(), 4*time.Second) ctx, _ := context.WithTimeout(context.Background(), 4*time.Second)
if err := connection.Client.CallWithContext(ctx, "Peer.Handshake", request, &response); err != nil { if err := connection.Client.CallWithContext(ctx, "Peer.Handshake", request, &response); err != nil {
connection.logger.V(2).Error(err, "cannot handshake") connection.logger.V(4).Error(err, "cannot handshake")
connection.exit() connection.exit()
return return
} }
if !Verify_Handshake(&response) { // if not same network boot off if !Verify_Handshake(&response) { // if not same network boot off
connection.logger.V(2).Info("terminating connection network id mismatch ", "networkid", response.Network_ID) connection.logger.V(3).Info("terminating connection network id mismatch ", "networkid", response.Network_ID)
connection.exit() connection.exit()
return return
} }

View File

@ -112,7 +112,6 @@ func (c *Connection) NotifyINV(request ObjectList, response *Dummy) (err error)
// only miniblocks carry extra info, which leads to better time tracking // only miniblocks carry extra info, which leads to better time tracking
func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err error) { func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err error) {
defer handle_connection_panic(c) defer handle_connection_panic(c)
if len(request.MiniBlocks) >= 5 { if len(request.MiniBlocks) >= 5 {
err = fmt.Errorf("Notify Block can notify max 5 miniblocks") err = fmt.Errorf("Notify Block can notify max 5 miniblocks")
c.logger.V(3).Error(err, "Should be banned") c.logger.V(3).Error(err, "Should be banned")
@ -136,8 +135,9 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
for _, mbl := range mbls { for _, mbl := range mbls {
var ok bool var ok bool
if mbl.Timestamp > uint64(globals.Time().UTC().UnixMilli())+50 { // 50 ms passing allowed
return errormsg.ErrInvalidTimestamp if mbl.Final {
return fmt.Errorf("final blocks are not propagted")
} }
// track miniblock propagation // track miniblock propagation
@ -151,39 +151,22 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
continue // miniblock already in chain, so skip it continue // miniblock already in chain, so skip it
} }
// first check whether the incoming minblock can be added to sub chains
if !chain.MiniBlocks.IsConnected(mbl) {
c.logger.V(3).Error(err, "Disconnected miniblock", "mbl", mbl.String())
//return fmt.Errorf("Disconnected miniblock")
continue
}
var miner_hash crypto.Hash var miner_hash crypto.Hash
copy(miner_hash[:], mbl.KeyHash[:]) copy(miner_hash[:], mbl.KeyHash[:])
if !chain.IsAddressHashValid(false, miner_hash) { // this will use cache if !chain.IsAddressHashValid(false, miner_hash) { // this will use cache
c.logger.V(3).Error(err, "unregistered miner")
return fmt.Errorf("unregistered miner") return fmt.Errorf("unregistered miner")
} }
// check whether the genesis blocks are all equal
genesis_list := chain.MiniBlocks.GetGenesisFromMiniBlock(mbl)
var bl block.Block var bl block.Block
if len(genesis_list) >= 1 {
bl.Height = binary.BigEndian.Uint64(genesis_list[0].Check[:])
var tip1, tip2 crypto.Hash bl.Height = mbl.Height
copy(tip1[:], genesis_list[0].Check[8:8+12]) var tip1, tip2 crypto.Hash
bl.Tips = append(bl.Tips, tip1) binary.BigEndian.PutUint32(tip1[:], mbl.Past[0])
bl.Tips = append(bl.Tips, tip1)
if genesis_list[0].PastCount == 2 { if mbl.PastCount == 2 {
copy(tip2[:], genesis_list[0].Check[8+12:]) binary.BigEndian.PutUint32(tip2[:], mbl.Past[1])
bl.Tips = append(bl.Tips, tip2) bl.Tips = append(bl.Tips, tip2)
}
} else {
c.logger.V(3).Error(nil, "no genesis, we cannot do anything")
continue
} }
for i, tip := range bl.Tips { // tips are currently only partial, lets expand tips for i, tip := range bl.Tips { // tips are currently only partial, lets expand tips
@ -194,15 +177,7 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
} }
} }
if bl.Height >= 2 && !chain.CheckDagStructure(bl.Tips) { bl.MiniBlocks = append(bl.MiniBlocks, mbl)
return fmt.Errorf("Invalid DAG structure")
}
bl.MiniBlocks = append(bl.MiniBlocks, chain.MiniBlocks.GetEntireMiniBlockHistory(mbl)...)
if err = chain.Verify_MiniBlocks(bl); err != nil { // whether the structure is okay
return err
}
// lets get the difficulty at tips // lets get the difficulty at tips
if !chain.VerifyMiniblockPoW(&bl, mbl) { if !chain.VerifyMiniblockPoW(&bl, mbl) {
@ -213,12 +188,13 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
return err return err
} else { // rebroadcast miniblock } else { // rebroadcast miniblock
valid_found = true valid_found = true
if valid_found {
Peer_SetSuccess(c.Addr.String())
broadcast_MiniBlock(mbl, c.Peer_ID, request.Sent) // do not send back to the original peer
}
} }
} }
if valid_found {
Peer_SetSuccess(c.Addr.String())
broadcast_MiniBlock(mbls, c.Peer_ID, request.Sent) // do not send back to the original peer
}
fill_common(&response.Common) // fill common info fill_common(&response.Common) // fill common info
fill_common_T0T1T2(&request.Common, &response.Common) // fill time related information fill_common_T0T1T2(&request.Common, &response.Common) // fill time related information
return nil return nil

View File

@ -212,7 +212,7 @@ func Test_Creation_TX(t *testing.T) {
time.Sleep(time.Second) time.Sleep(time.Second)
if err = wsrc.Sync_Wallet_Memory_With_Daemon(); err != nil { if err = wsrc.Sync_Wallet_Memory_With_Daemon(); err != nil {
t.Fatalf("wallet sync error err %s", err) t.Fatalf("wallet sync error err %s chain height %d", err, chain.Get_Height())
} }
// here we are collecting proofs for later on bennhcmarking // here we are collecting proofs for later on bennhcmarking