Release66: P2P and Mining Improvements

Captain 2022-04-04 09:45:56 +00:00
parent a66d3d5051
commit b2fe65c7a1
No known key found for this signature in database
GPG Key ID: 18CDB3ED5E85D2D4
7 changed files with 83 additions and 18 deletions

View File

@@ -59,7 +59,7 @@ var command_line string = `derod
 DERO : A secure, private blockchain with smart-contracts
 Usage:
-  derod [--help] [--version] [--testnet] [--debug] [--sync-node] [--timeisinsync] [--fastsync] [--socks-proxy=<socks_ip:port>] [--data-dir=<directory>] [--p2p-bind=<0.0.0.0:18089>] [--add-exclusive-node=<ip:port>]... [--add-priority-node=<ip:port>]... [--min-peers=<11>] [--rpc-bind=<127.0.0.1:9999>] [--getwork-bind=<0.0.0.0:18089>] [--node-tag=<unique name>] [--prune-history=<50>] [--integrator-address=<address>] [--clog-level=1] [--flog-level=1]
+  derod [--help] [--version] [--testnet] [--debug] [--sync-node] [--timeisinsync] [--fastsync] [--socks-proxy=<socks_ip:port>] [--data-dir=<directory>] [--p2p-bind=<0.0.0.0:18089>] [--add-exclusive-node=<ip:port>]... [--add-priority-node=<ip:port>]... [--min-peers=<11>] [--max-peers=<100>] [--rpc-bind=<127.0.0.1:9999>] [--getwork-bind=<0.0.0.0:18089>] [--node-tag=<unique name>] [--prune-history=<50>] [--integrator-address=<address>] [--clog-level=1] [--flog-level=1]
   derod -h | --help
   derod --version
@@ -82,6 +82,8 @@ Options:
   --sync-node                   Sync node automatically with the seed nodes. This option is for rare use.
   --node-tag=<unique name>      Unique name of node, visible to everyone
   --integrator-address          If this node mines a block, integrator rewards will be given to this address. Default is the dev's address.
+  --min-peers=<31>              Node will try to maintain at least this many connections to peers
+  --max-peers=<101>             Node will maintain at most this many connections to peers and will stop accepting connections
   --prune-history=<50>          Prunes blockchain history until the specified topo_height
 `
@@ -886,7 +888,7 @@ restart_loop:
 		case strings.ToLower(line) == "peer_list": // print peer list
 			p2p.PeerList_Print()
-		case strings.ToLower(line) == "sync_info": // print active connections
+		case strings.ToLower(line) == "syncinfo", strings.ToLower(line) == "sync_info": // print active connections
 			p2p.Connection_Print()
 		case strings.ToLower(line) == "bye":
 			fallthrough
@@ -1108,7 +1110,7 @@ func usage(w io.Writer) {
 	io.WriteString(w, "\t\033[1mprint_tx\033[0m\tPrint transaction, print_tx <transaction_hash>\n")
 	io.WriteString(w, "\t\033[1mstatus\033[0m\t\tShow general information\n")
 	io.WriteString(w, "\t\033[1mpeer_list\033[0m\tPrint peer list\n")
-	io.WriteString(w, "\t\033[1msync_info\033[0m\tPrint information about connected peers and their state\n")
+	io.WriteString(w, "\t\033[1msyncinfo\033[0m\tPrint information about connected peers and their state\n")
 	io.WriteString(w, "\t\033[1mbye\033[0m\t\tQuit the daemon\n")
 	io.WriteString(w, "\t\033[1mban\033[0m\t\tBan specific ip from making any connections\n")
 	io.WriteString(w, "\t\033[1munban\033[0m\t\tRevoke restrictions on previously banned ips\n")
@@ -1146,7 +1148,7 @@ var completer = readline.NewPrefixCompleter(
 	// readline.PcItem("print_tx"),
 	readline.PcItem("setintegratoraddress"),
 	readline.PcItem("status"),
-	readline.PcItem("sync_info"),
+	readline.PcItem("syncinfo"),
 	readline.PcItem("version"),
 	readline.PcItem("bye"),
 	readline.PcItem("exit"),

View File

@@ -4,6 +4,7 @@ import (
 	"flag"
 	"fmt"
 	"net/http"
+	"os"
 	"time"
@@ -65,14 +66,19 @@ type user_session struct {
 var client_list_mutex sync.Mutex
 var client_list = map[*websocket.Conn]*user_session{}
 
+var miners_count int
+
 func CountMiners() int {
 	client_list_mutex.Lock()
 	defer client_list_mutex.Unlock()
-	return len(client_list)
+	miners_count = len(client_list)
+	return miners_count
 }
 
 func SendJob() {
+	defer globals.Recover(1)
+
 	var params rpc.GetBlockTemplate_Result
 
 	// get a block template, and then we will fill the address here as an optimization
@@ -282,7 +288,32 @@ func Getwork_server() {
 		memPool.Put(b)
 	})
 
-	globals.Cron.AddFunc("@every 2s", SendJob) // if the daemon restarts, automatically send job
+	//globals.Cron.AddFunc("@every 2s", SendJob) // if the daemon restarts, automatically send job
+
+	go func() { // try to be as optimized as possible to lower hash wastage
+		sleeptime, _ := time.ParseDuration(os.Getenv("JOB_SEND_TIME_DELAY")) // this will hopefully never need to change
+		if sleeptime.Milliseconds() < 40 {
+			sleeptime = 500 * time.Millisecond
+		}
+		logger_getwork.Info("Job will be dispatched every", "time", sleeptime)
+
+		old_mini_count := 0
+		old_time := time.Now()
+		old_height := int64(0)
+
+		for {
+			if miners_count > 0 {
+				current_mini_count := chain.MiniBlocks.Count()
+				current_height := chain.Get_Height()
+				if old_mini_count != current_mini_count || old_height != current_height || time.Now().Sub(old_time) > sleeptime {
+					old_mini_count = current_mini_count
+					old_height = current_height
+					SendJob()
+					old_time = time.Now()
+				}
+			}
+			time.Sleep(10 * time.Millisecond)
+		}
+	}()
+
 	if err = svr.Start(); err != nil {
 		logger_getwork.Error(err, "nbio.Start failed.")
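
Note: the goroutine above replaces the fixed 2-second cron with an adaptive loop: it polls chain state every 10 ms and pushes a job the moment the miniblock count or height changes, falling back to a timed re-send (default 500 ms, tunable via JOB_SEND_TIME_DELAY) so connected miners still receive periodic jobs when nothing changes. A standalone sketch of that policy, with miners, miniCount, height and sendJob as hypothetical stand-ins for CountMiners, chain.MiniBlocks.Count, chain.Get_Height and SendJob:

package jobs

import (
	"os"
	"time"
)

// dispatchLoop pushes a new mining job on any chain-state change, or once
// sleeptime has elapsed, but only while at least one miner is connected.
func dispatchLoop(miners func() int, miniCount func() int, height func() int64, sendJob func()) {
	sleeptime, _ := time.ParseDuration(os.Getenv("JOB_SEND_TIME_DELAY"))
	if sleeptime.Milliseconds() < 40 { // unset or absurdly small: use a sane default
		sleeptime = 500 * time.Millisecond
	}
	oldMini, oldHeight, oldTime := 0, int64(0), time.Now()
	for {
		if miners() > 0 {
			m, h := miniCount(), height()
			if m != oldMini || h != oldHeight || time.Since(oldTime) > sleeptime {
				oldMini, oldHeight = m, h
				sendJob() // state changed or timeout hit: dispatch immediately
				oldTime = time.Now()
			}
		}
		time.Sleep(10 * time.Millisecond) // tight poll keeps job latency low
	}
}

Reading the miner count without the mutex, as the real loop does with miners_count, trades strict consistency for speed; the value is only used as a non-zero gate, so a slightly stale read is harmless.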

View File

@@ -25,6 +25,7 @@ var Mainnet_seed_nodes = []string{
 	"45.82.66.54:8080",
 	"185.107.69.12:11011",
 	"89.38.97.110:11011",
+	"45.82.66.55:11011",
 }
 
 // some seed nodes for testnet

View File

@@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"
 // right now it has to be manually changed
 // do we need to include git commitsha??
-var Version = semver.MustParse("3.4.141-62.DEROHE.STARGATE+26022022")
+var Version = semver.MustParse("3.4.141-66.DEROHE.STARGATE+26022022")

View File

@@ -20,6 +20,7 @@ package p2p
  * this will also ensure that a single IP is connected only once
  *
  */
+import "os"
 import "fmt"
 import "net"
 import "math"
@@ -27,6 +28,7 @@ import "sync"
 import "sort"
 import "time"
 import "strings"
+import "strconv"
 import "context"
 import "sync/atomic"
 import "runtime/debug"
@@ -405,8 +407,12 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
 		return connections[i].Latency < connections[j].Latency
 	})
 
+	bw_factor, _ := strconv.Atoi(os.Getenv("BW_FACTOR"))
+	if bw_factor < 1 {
+		bw_factor = 1
+	}
+
 	for { // we must send all blocks at least once; once we are done, break out
-		old_count := count
 		for _, v := range connections {
 			select {
 			case <-Exit_Event:
@@ -415,16 +421,16 @@
 			}
 			if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && PeerID != v.Peer_ID && v.Peer_ID != GetPeerID() { // skip pre-handshake connections
 
-				// if the other end is > 50 blocks behind, do not broadcast block to him
+				// if the other end is > 2 blocks behind, do not broadcast block to him
 				// this is an optimisation, since if the other end is syncing
 				// every peer will keep on broadcasting and thus making it lag more
 				// due to overheads
 				peer_height := atomic.LoadInt64(&v.Height)
-				if (our_height - peer_height) > 25 {
+				if (our_height - peer_height) > 2 {
 					continue
 				}
 
-				if count > chunk_count {
+				if count > len(unique_map) && count > bw_factor*chunk_count { // every connected peer should get at least one chunk
 					goto done
 				}
@@ -450,11 +456,6 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
 				count++
 			}
 		}
-
-		if old_count == count { // exit the loop
-			break
-		}
-		old_count = count
 	}
 
 done:
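
Note: with the old_count bookkeeping removed, the loop's only exits (besides Exit_Event) are the strengthened condition above: keep cycling erasure-coded chunks across latency-sorted peers until at least one chunk per tracked peer has gone out (count > len(unique_map)) and the bandwidth budget bw_factor*chunk_count is spent. A simplified sketch of that termination rule, where len(peers) stands in for len(unique_map) and the no-eligible-peers guard at the end is an addition of this sketch, not part of the diff:

package p2p_sketch

type conn struct{ Height int64 }

// fanOut cycles coded chunks across peers until every peer has at least one
// chunk and bwFactor*chunkCount chunks have been sent in total.
func fanOut(peers []conn, ourHeight int64, chunkCount, bwFactor int, send func(peer, chunk int)) {
	if chunkCount < 1 {
		return
	}
	if bwFactor < 1 {
		bwFactor = 1 // same floor the diff applies to BW_FACTOR
	}
	count := 0
	for {
		sent := false
		for i, p := range peers { // assumed pre-sorted by latency
			if ourHeight-p.Height > 2 { // skip peers that are still syncing
				continue
			}
			if count > len(peers) && count > bwFactor*chunkCount {
				return // every peer served and bandwidth budget spent
			}
			send(i, count%chunkCount) // rotate through the coded chunks
			count++
			sent = true
		}
		if !sent {
			return // no eligible peers; guard added here to avoid a busy loop
		}
	}
}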

View File

@@ -72,6 +72,7 @@ var backoff = map[string]int64{} // if server receives a connection, then it wil
 var backoff_mutex = sync.Mutex{}
 
 var Min_Peers = int64(31) // we need to expose this to be modifiable at runtime without taking the daemon offline
+var Max_Peers = int64(101)
 
 // return true if we should back off, else we can connect
 func shouldwebackoff(ip string) bool {
@@ -111,6 +112,14 @@ func P2P_Init(params map[string]interface{}) error {
 		logger.Info("P2P is in turbo mode")
 	}
 
+	if os.Getenv("BW_FACTOR") != "" {
+		bw_factor, _ := strconv.Atoi(os.Getenv("BW_FACTOR"))
+		if bw_factor <= 0 {
+			bw_factor = 1
+		}
+		logger.Info("", "BW_FACTOR", bw_factor)
+	}
+
 	// permanently unban any seed nodes
 	if globals.IsMainnet() {
 		for i := range config.Mainnet_seed_nodes {
@@ -419,7 +428,21 @@ func maintain_connection_to_peers() {
 				Min_Peers = i
 			}
 		}
-		logger.Info("Min outgoing peers", "min-peers", Min_Peers)
+		logger.Info("Min peers", "min-peers", Min_Peers)
+	}
+
+	if _, ok := globals.Arguments["--max-peers"]; ok && globals.Arguments["--max-peers"] != nil { // user specified a limit, use it if possible
+		i, err := strconv.ParseInt(globals.Arguments["--max-peers"].(string), 10, 64)
+		if err != nil {
+			logger.Error(err, "Error Parsing --max-peers")
+		} else {
+			if i < Min_Peers {
+				logger.Error(fmt.Errorf("--max-peers should be positive and more than --min-peers"), "")
+			} else {
+				Max_Peers = i
+			}
+		}
+		logger.Info("Max peers", "max-peers", Max_Peers)
 	}
 
 	delay := time.NewTicker(200 * time.Millisecond)
@@ -481,7 +504,7 @@ func P2P_Server_v2() {
 	in, out := Peer_Direction_Count()
-	if int64(in+out) > Min_Peers { // do not allow incoming ddos
+	if int64(in+out) > Max_Peers { // do not allow incoming ddos
 		connection.exit()
 		return
 	}
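
Note: Min_Peers drives the dialer (how many outgoing connections the node tries to maintain) while the new Max_Peers caps total connections at the accept path, so the two flags must satisfy min <= max, which is exactly what the --max-peers validation above enforces. A sketch of that relationship; the floor of 11 on --min-peers is assumed from the usage string rather than shown in this diff:

package limits

import "strconv"

// parsePeerLimits mirrors the flag handling above: values arrive as strings
// from the docopt-style argument map, and a --max-peers below --min-peers is
// rejected in favour of the default.
func parsePeerLimits(minArg, maxArg string) (minPeers, maxPeers int64) {
	minPeers, maxPeers = 31, 101 // package defaults from the diff
	if i, err := strconv.ParseInt(minArg, 10, 64); err == nil && i >= 11 { // assumed floor
		minPeers = i
	}
	if i, err := strconv.ParseInt(maxArg, 10, 64); err == nil && i >= minPeers {
		maxPeers = i
	}
	return
}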

View File

@@ -206,6 +206,7 @@ func Peer_SetSuccess(address string) {
 	p.ConnectAfter = 0
 	p.Whitelist = true
 	p.LastConnected = uint64(time.Now().UTC().Unix()) // set time when last connected
+
 	// logger.Infof("Setting peer as white listed")
 }
@@ -316,6 +317,12 @@ func get_peer_list() (peers []Peer_Info) {
 	peer_mutex.Lock()
 	defer peer_mutex.Unlock()
 
+	for _, v := range peer_map { // trim the white list
+		if v.Whitelist && !IsAddressConnected(ParseIPNoError(v.Address)) {
+			delete(peer_map, ParseIPNoError(v.Address))
+		}
+	}
+
 	for _, v := range peer_map {
 		if v.Whitelist {
 			peers = append(peers, Peer_Info{Addr: v.Address})
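
Note: the trim loop above deletes entries from peer_map while ranging over it, which is well-defined in Go: entries removed during iteration are simply not visited again, so no separate "to delete" list is needed. A minimal demonstration of that property (addresses are made up):

package main

import "fmt"

func main() {
	peers := map[string]bool{"1.2.3.4": true, "5.6.7.8": false, "9.9.9.9": true}
	for addr, whitelisted := range peers {
		if !whitelisted {
			delete(peers, addr) // safe during range iteration in Go
		}
	}
	fmt.Println(len(peers)) // prints 2: the non-whitelisted entry is gone
}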