Release73: Mining Improvements 2
parent b2fe65c7a1
commit 607af6dfdc
@@ -572,7 +572,7 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)
 		result = true // block's pow is valid

 		if !chain.simulator { // if not in simulator mode, relay block to the chain
-			chain.P2P_Block_Relayer(cbl, 0) // lets relay the block to network
+			go chain.P2P_Block_Relayer(cbl, 0) // lets relay the block to network
 		}
 	} else {
 		logger.V(3).Error(err, "Block Rejected", "blid", bl.GetHash())

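The only functional change above is the added go keyword: relaying a freshly accepted block to the network now runs in its own goroutine, so Accept_new_block can return as soon as validation succeeds instead of blocking on peer I/O. A minimal sketch of the same fire-and-forget pattern (the names here are illustrative, not the actual DERO types):

    package main

    import (
        "fmt"
        "time"
    )

    // relayBlock stands in for chain.P2P_Block_Relayer; the real call performs network I/O.
    func relayBlock(blob []byte, peerID uint64) {
        time.Sleep(50 * time.Millisecond) // simulate slow peers
        fmt.Printf("relayed %d bytes (peer id %d)\n", len(blob), peerID)
    }

    func acceptBlock(blob []byte) {
        // ... PoW and consensus checks would run here ...
        go relayBlock(blob, 0) // fire-and-forget: acceptance no longer waits for the network
        fmt.Println("block accepted")
    }

    func main() {
        acceptBlock(make([]byte, 48))
        time.Sleep(100 * time.Millisecond) // let the relay goroutine finish before exiting
    }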
@@ -399,7 +399,7 @@ func (chain *Blockchain) process_transaction_sc(cache map[crypto.Hash]*graviton.

 	if signer, err1 := Extract_signer(&tx); err1 == nil { // if we can identify sender, return funds to him
 		dvm.ErrorRevert(ss, cache, balance_tree, signer, scid, incoming_value)
-	} else { // we could not extract signer, give burned funds to SC
+	} else { // we could not extract signer, we burn all the funds
 		dvm.ErrorRevert(ss, cache, balance_tree, signer, scid, incoming_value)
 	}

@@ -67,6 +67,7 @@ var our_height int64

 var block_counter uint64
 var mini_block_counter uint64
+var rejected uint64
 var logger logr.Logger

 var command_line string = `dero-miner

@@ -282,7 +283,7 @@ func main() {
 			testnet_string = "\033[31m TESTNET"
 		}

-		l.SetPrompt(fmt.Sprintf("\033[1m\033[32mDERO Miner: \033[0m"+color+"Height %d "+pcolor+" BLOCKS %d MiniBlocks %d \033[32mNW %s %s>%s>>\033[0m ", our_height, block_counter, mini_block_counter, hash_rate_string, mining_string, testnet_string))
+		l.SetPrompt(fmt.Sprintf("\033[1m\033[32mDERO Miner: \033[0m"+color+"Height %d "+pcolor+" BLOCKS %d MiniBlocks %d Rejected %d \033[32mNW %s %s>%s>>\033[0m ", our_height, block_counter, mini_block_counter, rejected, hash_rate_string, mining_string, testnet_string))
 		l.Refresh()
 		last_our_height = our_height
 		last_best_height = best_height

@@ -432,7 +433,10 @@ func getwork(wallet_address string) {
 		}

 		block_counter = job.Blocks
-		mini_block_counter = job.MiniBlocks
+		mini_block_counter = job.MiniBlocks // note: if the miner submits a job late, his counter
+		// will still increase, but a block may already have been found, so
+		// orphan miniblocks are possible (they will not be rewarded)
+		rejected = job.Rejected
 		hash_rate = job.Difficultyuint64
 		our_height = int64(job.Height)
 		Difficulty = job.Difficultyuint64

@@ -447,6 +451,10 @@ func mineblock(tid int) {
 	var diff big.Int
 	var work [block.MINIBLOCK_SIZE]byte

+	var random_buf [12]byte
+
+	rand.Read(random_buf[:])
+
 	scratch := astrobwt_fast.Pool.Get().(*astrobwt_fast.ScratchData)

 	time.Sleep(5 * time.Second)

@@ -467,11 +475,12 @@ func mineblock(tid int) {

 		n, err := hex.Decode(work[:], []byte(myjob.Blockhashing_blob))
 		if err != nil || n != block.MINIBLOCK_SIZE {
-			logger.Error(err, "Blockwork could not decoded successfully", "blockwork", myjob.Blockhashing_blob, "n", n, "job", myjob)
+			logger.Error(err, "Blockwork could not be decoded successfully", "blockwork", myjob.Blockhashing_blob, "n", n, "job", myjob)
 			time.Sleep(time.Second)
 			continue
 		}

+		copy(work[block.MINIBLOCK_SIZE-12:], random_buf[:]) // add more randomization in the mix
 		work[block.MINIBLOCK_SIZE-1] = byte(tid)

 		diff.SetString(myjob.Difficulty, 10)

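Taken together with the previous hunk, each mining thread now draws 12 random bytes once at start-up, copies them into the tail of the hashing blob for every job, and then stamps the final byte with its thread id. The intent appears to be that threads (and separate miners handed the same template) search different regions of the nonce space instead of duplicating work. A standalone sketch of that layout; miniblockSize = 48 is an assumption standing in for block.MINIBLOCK_SIZE:

    package main

    import (
        "crypto/rand"
        "fmt"
    )

    const miniblockSize = 48 // assumption: stands in for block.MINIBLOCK_SIZE

    func main() {
        var work [miniblockSize]byte
        var randomBuf [12]byte
        if _, err := rand.Read(randomBuf[:]); err != nil { // done once per thread in the miner
            panic(err)
        }

        // ... hex.Decode would fill work[:] with the job's hashing blob here ...

        copy(work[miniblockSize-12:], randomBuf[:]) // add more randomization in the mix
        tid := 3
        work[miniblockSize-1] = byte(tid) // last byte identifies the mining thread

        fmt.Printf("tail of the work blob (11 salt bytes + tid): %x\n", work[miniblockSize-12:])
    }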
@@ -489,8 +498,8 @@ func mineblock(tid int) {
 			powhash := astrobwt_fast.POW_optimized(work[:], scratch)
 			atomic.AddUint64(&counter, 1)

-			if CheckPowHashBig(powhash, &diff) == true {
-				logger.V(1).Info("Successfully found DERO miniblock", "difficulty", myjob.Difficulty, "height", myjob.Height)
+			if CheckPowHashBig(powhash, &diff) == true { // note we are doing a local check, the NW might have moved meanwhile
+				logger.V(1).Info("Successfully found DERO miniblock (going to submit)", "difficulty", myjob.Difficulty, "height", myjob.Height)
 				func() {
 					defer globals.Recover(1)
 					connection_mutex.Lock()

@@ -8,7 +8,7 @@ import "golang.org/x/sys/unix"

 // we skip type as go will automatically identify type
 const (
-	UnixMax = 999999
+	UnixMax = 100 * 1024 // some platforms/providers limit these, so we are using a small number
 	OSXMax  = 24576 // see this https://github.com/golang/go/issues/30401
 )

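UnixMax caps how many file descriptors the daemon asks for; 999999 exceeds what some VPS providers and container runtimes permit, so the request is lowered to 100*1024. The constant is presumably fed into a Setrlimit call elsewhere in the same file (not shown in this diff); a minimal sketch of that pattern on Linux, using the golang.org/x/sys/unix import visible in the hunk header:

    //go:build linux

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    const UnixMax = 100 * 1024 // mirrors the constant changed above

    func main() {
        var lim unix.Rlimit
        if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
            fmt.Println("getrlimit:", err)
            return
        }

        want := uint64(UnixMax)
        if want > lim.Max {
            want = lim.Max // never request more than the hard limit allows
        }
        lim.Cur = want
        if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
            fmt.Println("setrlimit:", err)
            return
        }
        fmt.Printf("fd soft limit set to %d (hard limit %d)\n", lim.Cur, lim.Max)
    }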
@@ -303,20 +303,7 @@ func main() {
 				pcolor = "\033[33m" // make prompt yellow
 			}

-			hash_rate_string := ""
-			hash_rate := chain.Get_Network_HashRate()
-			switch {
-			case hash_rate > 1000000000000:
-				hash_rate_string = fmt.Sprintf("%.3f TH/s", float64(hash_rate)/1000000000000.0)
-			case hash_rate > 1000000000:
-				hash_rate_string = fmt.Sprintf("%.3f GH/s", float64(hash_rate)/1000000000.0)
-			case hash_rate > 1000000:
-				hash_rate_string = fmt.Sprintf("%.3f MH/s", float64(hash_rate)/1000000.0)
-			case hash_rate > 1000:
-				hash_rate_string = fmt.Sprintf("%.3f KH/s", float64(hash_rate)/1000.0)
-			case hash_rate > 0:
-				hash_rate_string = fmt.Sprintf("%d H/s", hash_rate)
-			}
+			hash_rate_string := hashratetostring(chain.Get_Network_HashRate())

 			testnet_string := ""
 			if globals.IsMainnet() {

@@ -327,7 +314,8 @@ func main() {

 			testnet_string += " " + strconv.Itoa(chain.MiniBlocks.Count()) + " " + globals.GetOffset().Round(time.Millisecond).String() + "|" + globals.GetOffsetNTP().Round(time.Millisecond).String() + "|" + globals.GetOffsetP2P().Round(time.Millisecond).String()

-			l.SetPrompt(fmt.Sprintf("\033[1m\033[32mDERO HE: \033[0m"+color+"%d/%d [%d/%d] "+pcolor+"P %d TXp %d:%d \033[32mNW %s >Miners %d %s>>\033[0m ", our_height, topo_height, best_height, best_topo_height, peer_count, mempool_tx_count, regpool_tx_count, hash_rate_string, derodrpc.CountMiners(), testnet_string))
+			miner_count := derodrpc.CountMiners()
+			l.SetPrompt(fmt.Sprintf("\033[1m\033[32mDERO HE: \033[0m"+color+"%d/%d [%d/%d] "+pcolor+"P %d TXp %d:%d \033[32mNW %s >MN %d %s>>\033[0m ", our_height, topo_height, best_height, best_topo_height, peer_count, mempool_tx_count, regpool_tx_count, hash_rate_string, miner_count, testnet_string))
 			l.Refresh()
 			last_second = time.Now().Unix()
 			last_our_height = our_height

@@ -859,6 +847,16 @@ restart_loop:
 			fmt.Printf("UTC time %s (offset %s) (as per daemon) should be close to 0\n", globals.Time().UTC(), time.Now().Sub(globals.Time()))
 			fmt.Printf("Local time %s (as per system clock) \n", time.Now())
 			fmt.Printf("Local time %s (offset %s) (as per daemon) should be close to 0\n", globals.Time(), time.Now().Sub(globals.Time()))

+			//if derodrpc.CountMiners() > 0 { // only give info if we have a miner connected
+			fmt.Printf("MB:%d MBR:%d IB:%d\n", derodrpc.CountMinisAccepted, derodrpc.CountMinisRejected, derodrpc.CountBlocks)
+			fmt.Printf("MB %.02f%%(1hr) %.05f%%(1d) %.06f%%(7d) (Moving average %%, will be 0 if no miniblock found)\n", derodrpc.HashrateEstimatePercent_1hr(), derodrpc.HashrateEstimatePercent_1day(), derodrpc.HashrateEstimatePercent_7day())
+			mh_1hr := uint64((float64(chain.Get_Network_HashRate()) * derodrpc.HashrateEstimatePercent_1hr()) / 100)
+			mh_1d := uint64((float64(chain.Get_Network_HashRate()) * derodrpc.HashrateEstimatePercent_1day()) / 100)
+			mh_7d := uint64((float64(chain.Get_Network_HashRate()) * derodrpc.HashrateEstimatePercent_7day()) / 100)
+			fmt.Printf("Avg Mining HR %s(1hr) %s(1d) %s(7d)\n", hashratetostring(mh_1hr), hashratetostring(mh_1d), hashratetostring(mh_7d))
+			//}

 			tips := chain.Get_TIPS()
 			fmt.Printf("Tips ")
 			for _, tip := range tips {

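The new status lines turn the moving-average miniblock share (a percentage, see the HashrateEstimatePercent functions further down) into an absolute hashrate by multiplying it against the current network hashrate. A worked example of the mh_1hr arithmetic with assumed numbers:

    package main

    import "fmt"

    func main() {
        networkHashRate := uint64(1_000_000_000) // assume the network is at 1 GH/s
        percent1hr := 0.5                        // assume this node's miners found 0.5% of the expected miniblocks in the last hour

        mh1hr := uint64((float64(networkHashRate) * percent1hr) / 100) // same formula as mh_1hr above
        fmt.Println(mh1hr)                                             // 5000000, rendered as "5.000 MH/s" by hashratetostring
    }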
@@ -1057,6 +1055,24 @@ func writenode(chain *blockchain.Blockchain, w *bufio.Writer, blid crypto.Hash,

 }

+func hashratetostring(hash_rate uint64) string {
+	hash_rate_string := ""
+
+	switch {
+	case hash_rate > 1000000000000:
+		hash_rate_string = fmt.Sprintf("%.3f TH/s", float64(hash_rate)/1000000000000.0)
+	case hash_rate > 1000000000:
+		hash_rate_string = fmt.Sprintf("%.3f GH/s", float64(hash_rate)/1000000000.0)
+	case hash_rate > 1000000:
+		hash_rate_string = fmt.Sprintf("%.3f MH/s", float64(hash_rate)/1000000.0)
+	case hash_rate > 1000:
+		hash_rate_string = fmt.Sprintf("%.3f KH/s", float64(hash_rate)/1000.0)
+	case hash_rate > 0:
+		hash_rate_string = fmt.Sprintf("%d H/s", hash_rate)
+	}
+	return hash_rate_string
+}
+
 func WriteBlockChainTree(chain *blockchain.Blockchain, filename string, start_height, stop_height int64) (err error) {

 	var node_map = map[crypto.Hash]bool{}

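A quick usage check of the helper added above (the function body is copied verbatim so the snippet runs on its own). Note the comparisons are strict, so a value of exactly 1000 still falls through to the plain H/s case:

    package main

    import "fmt"

    func hashratetostring(hash_rate uint64) string {
        hash_rate_string := ""
        switch {
        case hash_rate > 1000000000000:
            hash_rate_string = fmt.Sprintf("%.3f TH/s", float64(hash_rate)/1000000000000.0)
        case hash_rate > 1000000000:
            hash_rate_string = fmt.Sprintf("%.3f GH/s", float64(hash_rate)/1000000000.0)
        case hash_rate > 1000000:
            hash_rate_string = fmt.Sprintf("%.3f MH/s", float64(hash_rate)/1000000.0)
        case hash_rate > 1000:
            hash_rate_string = fmt.Sprintf("%.3f KH/s", float64(hash_rate)/1000.0)
        case hash_rate > 0:
            hash_rate_string = fmt.Sprintf("%d H/s", hash_rate)
        }
        return hash_rate_string
    }

    func main() {
        fmt.Println(hashratetostring(950))        // "950 H/s"
        fmt.Println(hashratetostring(1234567))    // "1.235 MH/s"
        fmt.Println(hashratetostring(5000000000)) // "5.000 GH/s"
    }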
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"net/http"
 	"os"
+	"sort"
 	"time"

 	"github.com/lesismal/llib/std/crypto/tls"

@@ -26,7 +26,7 @@ import "math/big"
 import "crypto/ecdsa"
 import "crypto/elliptic"

-//import "crypto/tls"
+import "sync/atomic"
 import "crypto/rand"
 import "crypto/x509"
 import "encoding/pem"

@@ -57,6 +57,7 @@ var (
 type user_session struct {
 	blocks        uint64
 	miniblocks    uint64
+	rejected      uint64
 	lasterr       string
 	address       rpc.Address
 	valid_address bool

|
|||||||
|
|
||||||
var miners_count int
|
var miners_count int
|
||||||
|
|
||||||
|
// this will track miniblock rate,
|
||||||
|
var mini_found_time []int64 // this array contains a epoch timestamp in int64
|
||||||
|
var rate_lock sync.Mutex
|
||||||
|
|
||||||
|
//this function will return wrong result if too wide time glitches happen to system clock
|
||||||
|
func Counter(seconds int64) (r int) { // we need atleast 1 mini to find a rate
|
||||||
|
rate_lock.Lock()
|
||||||
|
defer rate_lock.Unlock()
|
||||||
|
length := len(mini_found_time)
|
||||||
|
if length > 0 {
|
||||||
|
start_point := time.Now().Unix() - seconds
|
||||||
|
i := sort.Search(length, func(i int) bool { return mini_found_time[i] >= start_point })
|
||||||
|
if i < len(mini_found_time) {
|
||||||
|
r = length - i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return // return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanup() {
|
||||||
|
rate_lock.Lock()
|
||||||
|
defer rate_lock.Unlock()
|
||||||
|
length := len(mini_found_time)
|
||||||
|
if length > 0 {
|
||||||
|
start_point := time.Now().Unix() - 30*24*3600 // only keep data of last 30 days
|
||||||
|
i := sort.Search(length, func(i int) bool { return mini_found_time[i] >= start_point })
|
||||||
|
if i > 1000 && i < length {
|
||||||
|
mini_found_time = append(mini_found_time[:0], mini_found_time[i:]...) // renew the array
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// this will calcuate amount of hashrate based on the number of minis
|
||||||
|
// note this calculation is very crude
|
||||||
|
// note it will always be lagging, since NW conditions are quite dynamic
|
||||||
|
// this is used to roughly estimate your hash rate on this integrator of all miners
|
||||||
|
// note this is a moving avg
|
||||||
|
func HashrateEstimatePercent(timeframe int64) float64 {
|
||||||
|
return float64(Counter(timeframe)*100) / (float64(timeframe*10) / float64(config.BLOCK_TIME))
|
||||||
|
}
|
||||||
|
|
||||||
|
// note this will be be 0, if you have less than 1/48000 hash power
|
||||||
|
func HashrateEstimatePercent_1hr() float64 {
|
||||||
|
return HashrateEstimatePercent(3600)
|
||||||
|
}
|
||||||
|
|
||||||
|
// note result will be 0, if you have less than 1/2000 hash power
|
||||||
|
func HashrateEstimatePercent_1day() float64 {
|
||||||
|
return HashrateEstimatePercent(24 * 3600)
|
||||||
|
}
|
||||||
|
|
||||||
|
// note this will be 0, if you have less than 1/(48000*7)
|
||||||
|
func HashrateEstimatePercent_7day() float64 {
|
||||||
|
return HashrateEstimatePercent(7 * 24 * 3600)
|
||||||
|
}
|
||||||
|
|
||||||
func CountMiners() int {
|
func CountMiners() int {
|
||||||
|
defer cleanup()
|
||||||
client_list_mutex.Lock()
|
client_list_mutex.Lock()
|
||||||
defer client_list_mutex.Unlock()
|
defer client_list_mutex.Unlock()
|
||||||
miners_count = len(client_list)
|
miners_count = len(client_list)
|
||||||
return miners_count
|
return miners_count
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var CountMinisAccepted int64 // total accepted which passed Powtest, chain may still ignore them
|
||||||
|
var CountMinisRejected int64 // total rejected // note we are only counting rejected as those which didnot pass Pow test
|
||||||
|
var CountBlocks int64 // total blocks found as integrator, note that block can still be a orphan
|
||||||
|
// total = CountAccepted + CountRejected + CountBlocks(they may be orphan or may not get rewarded)
|
||||||
|
|
||||||
func SendJob() {
|
func SendJob() {
|
||||||
|
|
||||||
defer globals.Recover(1)
|
defer globals.Recover(1)
|
||||||
|
|
||||||
var params rpc.GetBlockTemplate_Result
|
|
||||||
|
|
||||||
// get a block template, and then we will fill the address here as optimization
|
// get a block template, and then we will fill the address here as optimization
|
||||||
bl, mbl_main, _, _, err := chain.Create_new_block_template_mining(chain.IntegratorAddress())
|
bl, mbl_main, _, _, err := chain.Create_new_block_template_mining(chain.IntegratorAddress())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
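How the estimator fits together: every accepted miniblock appends a Unix timestamp to mini_found_time (see the submit handler further down), Counter(seconds) uses sort.Search on that naturally ascending slice to count entries inside the window, and HashrateEstimatePercent divides the count by the number of miniblocks the whole network is expected to produce over the same window (timeframe*10/config.BLOCK_TIME, i.e. roughly ten miniblocks per block interval). A self-contained sketch with an assumed BLOCK_TIME of 18 seconds:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    const blockTime = 18 // assumption: stands in for config.BLOCK_TIME (seconds)

    var miniFoundTime []int64 // ascending Unix timestamps, one per accepted miniblock

    // counter mirrors Counter() from the hunk: how many entries are newer than `seconds` ago.
    func counter(seconds int64) (r int) {
        length := len(miniFoundTime)
        if length > 0 {
            startPoint := time.Now().Unix() - seconds
            // sort.Search returns the first index whose timestamp is >= startPoint,
            // so everything from that index onward lies inside the window.
            i := sort.Search(length, func(i int) bool { return miniFoundTime[i] >= startPoint })
            if i < length {
                r = length - i
            }
        }
        return
    }

    func hashrateEstimatePercent(timeframe int64) float64 {
        // found minis vs. the miniblocks the whole network should produce in `timeframe`
        return float64(counter(timeframe)*100) / (float64(timeframe*10) / float64(blockTime))
    }

    func main() {
        now := time.Now().Unix()
        // pretend we found 4 miniblocks in the last hour and 1 about a day earlier
        miniFoundTime = []int64{now - 20*3600, now - 50*60, now - 40*60, now - 10*60, now - 60}

        fmt.Printf("minis in last hour: %d\n", counter(3600))                 // 4
        fmt.Printf("1 hr share: %.3f%%\n", hashrateEstimatePercent(3600))     // 4*100/2000 = 0.200%
        fmt.Printf("1 day share: %.4f%%\n", hashrateEstimatePercent(24*3600)) // 5*100/48000 = 0.0104%
    }

With these assumed numbers the 1 hr window expects 2000 network miniblocks and the 1 day window 48000, which is where the 1/2000 and 1/48000 remarks next to the wrapper functions come from.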
@@ -92,16 +153,11 @@ func SendJob() {
 		prev_hash = prev_hash + bl.Tips[i].String()
 	}

-	params.JobID = fmt.Sprintf("%d.%d.%s", bl.Timestamp, 0, "notified")
 	diff := chain.Get_Difficulty_At_Tips(bl.Tips)

-	params.Height = bl.Height
-	params.Prev_Hash = prev_hash
 	if mbl_main.HighDiff {
 		diff.Mul(diff, new(big.Int).SetUint64(config.MINIBLOCK_HIGHDIFF))
 	}
-	params.Difficultyuint64 = diff.Uint64()
-	params.Difficulty = diff.String()
 	client_list_mutex.Lock()
 	defer client_list_mutex.Unlock()

@@ -112,6 +168,13 @@ func SendJob() {
 		var buf bytes.Buffer
 		encoder := json.NewEncoder(&buf)

+		var params rpc.GetBlockTemplate_Result
+		params.JobID = fmt.Sprintf("%d.%d.%s", bl.Timestamp, 0, "notified")
+		params.Height = bl.Height
+		params.Prev_Hash = prev_hash
+		params.Difficultyuint64 = diff.Uint64()
+		params.Difficulty = diff.String()
+
 		mbl := mbl_main

 		if !mbl.Final { //write miners address only if possible

@@ -122,20 +185,15 @@ func SendJob() {
 			mbl.Nonce[i] = globals.Global_Random.Uint32() // fill with randomness
 		}

-		if v.lasterr != "" {
-			params.LastError = v.lasterr
-			v.lasterr = ""
-		}

 		if !v.valid_address && !chain.IsAddressHashValid(false, v.address_sum) {
 			params.LastError = "unregistered miner or you need to wait 15 mins"
 		} else {
-			params.LastError = ""
 			v.valid_address = true
 		}
 		params.Blockhashing_blob = fmt.Sprintf("%x", mbl.Serialize())
 		params.Blocks = v.blocks
 		params.MiniBlocks = v.miniblocks
+		params.Rejected = v.rejected

 		encoder.Encode(params)
 		k.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))

@@ -186,11 +244,22 @@ func newUpgrader() *websocket.Upgrader {
 			//logger.Infof("Submitted block %s accepted", blid)
 			if blid.IsZero() {
 				sess.miniblocks++
+				atomic.AddInt64(&CountMinisAccepted, 1)
+
+				rate_lock.Lock()
+				defer rate_lock.Unlock()
+				mini_found_time = append(mini_found_time, time.Now().Unix())
 			} else {
 				sess.blocks++
+				atomic.AddInt64(&CountBlocks, 1)
 			}
 		}

+		if !sresult || err != nil {
+			sess.rejected++
+			atomic.AddInt64(&CountMinisRejected, 1)
+		}
+
 	})
 	u.OnClose(func(c *websocket.Conn, err error) {
 		client_list_mutex.Lock()

@@ -21,7 +21,7 @@ package config
 // some seed nodes for mainnet (these seed node are not compliant with earlier protocols)
 // only version 2
 var Mainnet_seed_nodes = []string{
-	"185.132.176.174:11011",
+	"89.38.99.117:8443",
 	"45.82.66.54:8080",
 	"185.107.69.12:11011",
 	"89.38.97.110:11011",

@@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"

 // right now it has to be manually changed
 // do we need to include git commitsha??
-var Version = semver.MustParse("3.4.141-66.DEROHE.STARGATE+26022022")
+var Version = semver.MustParse("3.4.141-73.DEROHE.STARGATE+26022022")

@@ -413,6 +413,11 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
 	}

 	for { // we must send all blocks atleast once, once we are done, break ut

+		if len(connections) < 1 {
+			globals.Logger.Error(nil, "we want to broadcast a block, but do not have peers, most probably the block will go stale")
+			return
+		}
 		for _, v := range connections {
 			select {
 			case <-Exit_Event:

@@ -116,6 +116,7 @@ type (
 		EpochMilli uint64 `json:"epochmilli"`
 		Blocks     uint64 `json:"blocks"`     // number of blocks found
 		MiniBlocks uint64 `json:"miniblocks"` // number of miniblocks found
+		Rejected   uint64 `json:"rejected"`   // reject count
 		LastError  string `json:"lasterror"`  // last error
 		Status     string `json:"status"`
 	}