DERO-HE STARGATE Testnet Release26

commit ac9d12577c
parent 81bc28a537
@@ -199,12 +199,20 @@ func (mbl *MiniBlock) Deserialize(buf []byte) (err error) {
 	copy(mbl.Nonce[:], buf[15+16+32:])
 	mbl.Height = int64(binary.BigEndian.Uint64(mbl.Check[:]))

-	if mbl.GetMiniID() == mbl.Past[0] {
-		return fmt.Errorf("Self Collision")
-	}
-	if mbl.PastCount == 2 && mbl.GetMiniID() == mbl.Past[1] {
-		return fmt.Errorf("Self Collision")
-	}
-
 	return
 }
+
+// checks for basic sanity
+func (mbl *MiniBlock) IsSafe() bool {
+	id := mbl.GetMiniID()
+	if id == mbl.Past[0] {
+		//return fmt.Errorf("Self Collision")
+		return false
+	}
+	if mbl.PastCount == 2 && id == mbl.Past[1] {
+		//return fmt.Errorf("Self Collision")
+		return false
+	}
+
+	return true
+}
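The self-collision check moves out of Deserialize and into the new IsSafe helper, so callers are now expected to reject unsafe miniblocks explicitly. A minimal caller-side sketch of that contract (illustrative only, not part of the commit; the stand-in MiniBlock type below only mirrors the fields used in the hunk above, and GetMiniID is stubbed):

    package main

    import "fmt"

    // Simplified stand-in for block.MiniBlock; field names follow the hunk above.
    type MiniBlock struct {
        PastCount uint8
        Past      [2]uint32
    }

    // GetMiniID is assumed to derive the miniblock's own 32-bit id; stubbed here.
    func (mbl *MiniBlock) GetMiniID() uint32 { return 0xDEADBEEF }

    // IsSafe mirrors the new check: a miniblock must never list itself in its own past.
    func (mbl *MiniBlock) IsSafe() bool {
        id := mbl.GetMiniID()
        if id == mbl.Past[0] {
            return false
        }
        if mbl.PastCount == 2 && id == mbl.Past[1] {
            return false
        }
        return true
    }

    func main() {
        good := MiniBlock{PastCount: 1, Past: [2]uint32{0x11111111}}
        bad := MiniBlock{PastCount: 2, Past: [2]uint32{0x11111111, 0xDEADBEEF}}

        for _, mbl := range []MiniBlock{good, bad} {
            if !mbl.IsSafe() { // callers such as Verify_MiniBlocks / InsertMiniBlock reject these
                fmt.Println("rejecting self-colliding miniblock")
                continue
            }
            fmt.Println("miniblock accepted")
        }
    }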
@@ -119,7 +119,7 @@ func (c *MiniBlocksCollection) Get(id uint32) (mbl MiniBlock) {
 	var ok bool

 	if mbl, ok = c.Collection[id]; !ok {
-		panic("past should be present")
+		panic("id requested should be present")
 	}
 	return mbl
 }

@@ -21,6 +21,7 @@ package blockchain
 // We must not call any packages that can call panic
 // NO Panics or FATALs please

+import "os"
 import "fmt"
 import "sync"
 import "time"

@@ -68,9 +69,13 @@ type Blockchain struct {
 	cache_IsNonceValidTips       *lru.Cache // used to cache nonce tests on specific tips
 	cache_IsAddressHashValid     *lru.Cache // used to cache some outputs
 	cache_Get_Difficulty_At_Tips *lru.Cache // used to cache some outputs
+	cache_BlockPast              *lru.Cache // used to cache a blocks past
+	cache_BlockHeight            *lru.Cache // used to cache a blocks past

 	integrator_address rpc.Address // integrator rewards will be given to this address

+	cache_disabled bool // disables all cache, based on ENV DISABLE_CACHE
+
 	Difficulty        uint64           // current cumulative difficulty
 	Median_Block_Size uint64           // current median block size
 	Mempool           *mempool.Mempool // normal tx pool

@@ -83,7 +88,7 @@ type Blockchain struct {
 	simulator bool // is simulator mode

 	P2P_Block_Relayer     func(*block.Complete_Block, uint64) // tell p2p to broadcast any block this daemon hash found
-	P2P_MiniBlock_Relayer func(mbl block.MiniBlock, peerid uint64)
+	P2P_MiniBlock_Relayer func(mbl []block.MiniBlock, peerid uint64)

 	RPC_NotifyNewBlock      *sync.Cond // used to notify rpc that a new block has been found
 	RPC_NotifyHeightChanged *sync.Cond // used to notify rpc that chain height has changed due to addition of block

@@ -122,7 +127,6 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
 		if addr, err = rpc.NewAddress(strings.TrimSpace(globals.Config.Dev_Address)); err != nil {
 			return nil, err
 		}
-
 	} else {
 		if addr, err = rpc.NewAddress(strings.TrimSpace(params["--integrator-address"].(string))); err != nil {
 			return nil, err

@@ -149,6 +153,19 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
 		return nil, err
 	}

+	if chain.cache_BlockPast, err = lru.New(100 * 1024); err != nil { // temporary cache for a blocks past
+		return nil, err
+	}
+
+	if chain.cache_BlockHeight, err = lru.New(100 * 1024); err != nil { // temporary cache for a blocks height
+		return nil, err
+	}
+
+	chain.cache_disabled = os.Getenv("DISABLE_CACHE") != "" // disable cache if the environ var is set
+	if chain.cache_disabled {
+		logger.Info("All caching except mining jobs will be disabled")
+	}
+
 	if params["--simulator"] == true {
 		chain.simulator = true // enable simulator mode, this will set hard coded difficulty to 1
 	}

@@ -197,10 +214,56 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
 		}
 	}

-	go clean_up_valid_cache() // clean up valid cache

 	atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem

+	globals.Cron.AddFunc("@every 360s", clean_up_valid_cache) // cleanup valid tx cache
+	globals.Cron.AddFunc("@every 60s", func() { // mempool house keeping
+
+		stable_height := int64(0)
+		if r := recover(); r != nil {
+			logger.Error(nil, "Mempool House Keeping triggered panic", "r", r, "height", stable_height)
+		}
+
+		stable_height = chain.Get_Stable_Height()
+
+		// give mempool an oppurtunity to clean up tx, but only if they are not mined
+		chain.Mempool.HouseKeeping(uint64(stable_height))
+
+		top_block_topo_index := chain.Load_TOPO_HEIGHT()
+
+		if top_block_topo_index < 10 {
+			return
+		}
+
+		top_block_topo_index -= 10
+
+		blid, err := chain.Load_Block_Topological_order_at_index(top_block_topo_index)
+		if err != nil {
+			panic(err)
+		}
+
+		record_version, err := chain.ReadBlockSnapshotVersion(blid)
+		if err != nil {
+			panic(err)
+		}
+
+		// give regpool a chance to register
+		if ss, err := chain.Store.Balance_store.LoadSnapshot(record_version); err == nil {
+			if balance_tree, err := ss.GetTree(config.BALANCE_TREE); err == nil {
+				chain.Regpool.HouseKeeping(uint64(stable_height), func(tx *transaction.Transaction) bool {
+					if tx.TransactionType != transaction.REGISTRATION { // tx not registration so delete
+						return true
+					}
+					if _, err := balance_tree.Get(tx.MinerAddress[:]); err != nil { // address already registered
+						return true
+					}
+					return false // account not already registered, so give another chance
+				})
+			}
+		}
+	})
+
 	return &chain, nil
 }

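Housekeeping that used to run inline per block is now registered as recurring jobs on globals.Cron. A minimal sketch of that scheduling pattern, assuming the scheduler behaves like github.com/robfig/cron/v3 (an assumption; only AddFunc with an @every spec plus Start/Stop are relied on, and the shortened intervals are just for the demo):

    package main

    import (
        "fmt"
        "time"

        "github.com/robfig/cron/v3"
    )

    func main() {
        c := cron.New() // the default parser accepts "@every <duration>" specs

        // analogous to the "@every 360s" valid-tx-cache cleanup registered above
        c.AddFunc("@every 3s", func() { fmt.Println("cache cleanup tick") })

        // analogous to the "@every 60s" mempool/regpool housekeeping above; a deferred
        // recover keeps a panicking job from killing the scheduler goroutine
        c.AddFunc("@every 2s", func() {
            defer func() {
                if r := recover(); r != nil {
                    fmt.Println("housekeeping panicked:", r)
                }
            }()
            fmt.Println("housekeeping tick")
        })

        c.Start()
        time.Sleep(7 * time.Second)
        c.Stop()
    }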
@@ -209,6 +272,45 @@ func (chain *Blockchain) IntegratorAddress() rpc.Address {
 	return chain.integrator_address
 }

+// this function is called to read blockchain state from DB
+// It is callable at any point in time
+
+func (chain *Blockchain) Initialise_Chain_From_DB() {
+	chain.Lock()
+	defer chain.Unlock()
+
+	chain.Pruned = chain.LocatePruneTopo()
+
+	// find the tips from the chain , first by reaching top height
+	// then downgrading to top-10 height
+	// then reworking the chain to get the tip
+	best_height := chain.Load_TOP_HEIGHT()
+	chain.Height = best_height
+
+	chain.Tips = map[crypto.Hash]crypto.Hash{} // reset the map
+	// reload top tip from disk
+	top := chain.Get_Top_ID()
+
+	chain.Tips[top] = top // we only can load a single tip from db
+
+	logger.V(1).Info("Reloaded Chain from disk", "Tips", chain.Tips, "Height", chain.Height)
+}
+
+// before shutdown , make sure p2p is confirmed stopped
+func (chain *Blockchain) Shutdown() {
+
+	chain.Lock()            // take the lock as chain is no longer in unsafe mode
+	close(chain.Exit_Event) // send signal to everyone we are shutting down
+
+	chain.Mempool.Shutdown() // shutdown mempool first
+	chain.Regpool.Shutdown() // shutdown regpool first
+
+	logger.Info("Stopping Blockchain")
+	//chain.Store.Shutdown()
+	atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrement 1 fom subsystem
+	logger.Info("Stopped Blockchain")
+}
+
 // this is the only entrypoint for new / old blocks even for genesis block
 // this will add the entire block atomically to the chain
 // this is the only function which can add blocks to the chain

@@ -254,7 +356,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 	}

 	if result == true { // block was successfully added, commit it atomically
-		logger.V(2).Info("Block successfully accepted by chain", "blid", block_hash.String())
+		logger.V(2).Info("Block successfully accepted by chain", "blid", block_hash.String(), "err", err)

 		// gracefully try to instrument
 		func() {

@@ -542,7 +644,11 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro

 	var history_array []crypto.Hash
 	for i := range bl.Tips {
-		history_array = append(history_array, chain.get_ordered_past(bl.Tips[i], 26)...)
+		h := int64(bl.Height) - 25
+		if h < 0 {
+			h = 0
+		}
+		history_array = append(history_array, chain.get_ordered_past(bl.Tips[i], h)...)
 	}
 	for _, h := range history_array {
 		history[h] = true

@@ -683,7 +789,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro

 	}

-	{
+	if height_changed {

 		var full_order []crypto.Hash
 		var base_topo_index int64 // new topo id will start from here

@@ -699,15 +805,18 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 		// we will directly use graviton to mov in to history
 		logger.V(3).Info("Full order data", "full_order", full_order, "base_topo_index", base_topo_index)

+		if base_topo_index < 0 {
+			logger.Error(nil, "negative base topo, not possible, probably disk corruption or core issue")
+			os.Exit(0)
+		}
+		topos_written := false
 		for i := int64(0); i < int64(len(full_order)); i++ {
 			logger.V(3).Info("will execute order ", "i", i, "blid", full_order[i].String())

 			current_topo_block := i + base_topo_index
-			previous_topo_block := current_topo_block - 1
+			//previous_topo_block := current_topo_block - 1

-			_ = previous_topo_block
-
-			if current_topo_block == chain.Load_Block_Topological_order(full_order[i]) { // skip if same order
+			if !topos_written && current_topo_block == chain.Load_Block_Topological_order(full_order[i]) { // skip if same order
 				continue
 			}

@@ -738,8 +847,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 			}

 			var balance_tree, sc_meta *graviton.Tree
-			_ = sc_meta
-
 			var ss *graviton.Snapshot
 			if bl_current.Height == 0 { // if it's genesis block
 				if ss, err = chain.Store.Balance_store.LoadSnapshot(0); err != nil {

@@ -780,7 +887,10 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 			//chain.Store.Topo_store.Write(i+base_topo_index, full_order[i],0, int64(bl_current.Height)) // write entry so as sideblock could work
 			var data_trees []*graviton.Tree

-			if !chain.isblock_SideBlock_internal(full_order[i], current_topo_block, int64(bl_current.Height)) {
+			if chain.isblock_SideBlock_internal(full_order[i], current_topo_block, int64(bl_current.Height)) {
+				logger.V(3).Info("this block is a side block", "height", chain.Load_Block_Height(full_order[i]), "blid", full_order[i])
+			} else {
+				logger.V(3).Info("this block is a full block", "height", chain.Load_Block_Height(full_order[i]), "blid", full_order[i])

 				sc_change_cache := map[crypto.Hash]*graviton.Tree{} // cache entire changes for entire block

@@ -847,9 +957,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 				}

 				chain.process_miner_transaction(bl_current, bl_current.Height == 0, balance_tree, fees_collected, bl_current.Height)
-			} else {
-				block_logger.V(1).Info("this block is a side block", "height", chain.Load_Block_Height(full_order[i]), "blid", full_order[i])
-
 			}

 			// we are here, means everything is okay, lets commit the update balance tree

@@ -860,16 +967,15 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 				panic(err)
 			}

-			chain.StoreBlock(bl, commit_version)
-			if height_changed {
+			chain.StoreBlock(bl_current, commit_version)
+			topos_written = true
 			chain.Store.Topo_store.Write(current_topo_block, full_order[i], commit_version, chain.Load_Block_Height(full_order[i]))
 			if logger.V(3).Enabled() {
 				merkle_root, err := chain.Load_Merkle_Hash(commit_version)
 				if err != nil {
 					panic(err)
 				}
-				logger.V(3).Info("height changed storing topo", "i", i, "blid", full_order[i].String(), "topoheight", current_topo_block, "commit_version", commit_version, "committed_merkle", merkle_root)
-			}
+				logger.V(3).Info("storing topo", "i", i, "blid", full_order[i].String(), "topoheight", current_topo_block, "commit_version", commit_version, "committed_merkle", merkle_root)
 			}
 		}

 	}
@@ -921,111 +1027,18 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
 			block_logger.Info(fmt.Sprintf("Chain Height %d", chain.Height))
 		}

+		purge_count := chain.MiniBlocks.PurgeHeight(chain.Get_Stable_Height()) // purge all miniblocks upto this height
+		logger.V(2).Info("Purged miniblock", "count", purge_count)
+
 		result = true

 		// TODO fix hard fork
 		// maintain hard fork votes to keep them SANE
 		//chain.Recount_Votes() // does not return anything

-		// enable mempool book keeping
-
-		func() {
-			if r := recover(); r != nil {
-				logger.Error(nil, "Mempool House Keeping triggered panic", "r", r, "height", block_height)
-			}
-
-			purge_count := chain.MiniBlocks.PurgeHeight(chain.Get_Stable_Height()) // purge all miniblocks upto this height
-			logger.V(2).Info("Purged miniblock", "count", purge_count)
-
-			// discard the transactions from mempool if they are present there
-			chain.Mempool.Monitor()
-
-			for i := 0; i < len(cbl.Txs); i++ {
-				txid := cbl.Txs[i].GetHash()
-
-				switch cbl.Txs[i].TransactionType {
-
-				case transaction.REGISTRATION:
-					if chain.Regpool.Regpool_TX_Exist(txid) {
-						logger.V(3).Info("Deleting TX from regpool", "txid", txid)
-						chain.Regpool.Regpool_Delete_TX(txid)
-						continue
-					}
-
-				case transaction.NORMAL, transaction.BURN_TX, transaction.SC_TX:
-					if chain.Mempool.Mempool_TX_Exist(txid) {
-						logger.V(3).Info("Deleting TX from mempool", "txid", txid)
-						chain.Mempool.Mempool_Delete_TX(txid)
-						continue
-					}
-
-				}
-
-			}
-
-			// give mempool an oppurtunity to clean up tx, but only if they are not mined
-			chain.Mempool.HouseKeeping(uint64(block_height))
-
-			// give regpool a chance to register
-			if ss, err := chain.Store.Balance_store.LoadSnapshot(0); err == nil {
-				if balance_tree, err := ss.GetTree(config.BALANCE_TREE); err == nil {
-
-					chain.Regpool.HouseKeeping(uint64(block_height), func(tx *transaction.Transaction) bool {
-						if tx.TransactionType != transaction.REGISTRATION { // tx not registration so delete
-							return true
-						}
-						if _, err := balance_tree.Get(tx.MinerAddress[:]); err != nil { // address already registered
-							return true
-						}
-						return false // account not already registered, so give another chance
-					})
-
-				}
-			}
-
-		}()
-
 	return // run any handlers necesary to atomically
 }

-// this function is called to read blockchain state from DB
-// It is callable at any point in time
-
-func (chain *Blockchain) Initialise_Chain_From_DB() {
-	chain.Lock()
-	defer chain.Unlock()
-
-	chain.Pruned = chain.LocatePruneTopo()
-
-	// find the tips from the chain , first by reaching top height
-	// then downgrading to top-10 height
-	// then reworking the chain to get the tip
-	best_height := chain.Load_TOP_HEIGHT()
-	chain.Height = best_height
-
-	chain.Tips = map[crypto.Hash]crypto.Hash{} // reset the map
-	// reload top tip from disk
-	top := chain.Get_Top_ID()
-
-	chain.Tips[top] = top // we only can load a single tip from db
-
-	logger.V(1).Info("Reloaded Chain from disk", "Tips", chain.Tips, "Height", chain.Height)
-}
-
-// before shutdown , make sure p2p is confirmed stopped
-func (chain *Blockchain) Shutdown() {
-
-	chain.Lock()            // take the lock as chain is no longer in unsafe mode
-	close(chain.Exit_Event) // send signal to everyone we are shutting down
-
-	chain.Mempool.Shutdown() // shutdown mempool first
-	chain.Regpool.Shutdown() // shutdown regpool first
-
-	logger.Info("Stopping Blockchain")
-	//chain.Store.Shutdown()
-	atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrement 1 fom subsystem
-}
-
 // get top unstable height
 // this is obtained by getting the highest topo block and getting its height
 func (chain *Blockchain) Get_Height() int64 {

@@ -1433,73 +1446,57 @@ func (chain *Blockchain) IsBlockSyncBlockHeightSpecific(blid crypto.Hash, chain_
 // converts a DAG's partial order into a full order, this function is recursive
 // dag can be processed only one height at a time
 // blocks are ordered recursively, till we find a find a block which is already in the chain
+// this could be done via binary search also, but this is also easy
 func (chain *Blockchain) Generate_Full_Order_New(current_tip crypto.Hash, new_tip crypto.Hash) (order []crypto.Hash, topo int64) {

-	/*if !(chain.Load_Height_for_BL_ID(new_tip) == chain.Load_Height_for_BL_ID(current_tip)+1 ||
-		chain.Load_Height_for_BL_ID(new_tip) == chain.Load_Height_for_BL_ID(current_tip)) {
-		panic("dag can only grow one height at a time")
-	}*/
-
-	depth := 20
-	for ; ; depth += 20 {
-		current_history := chain.get_ordered_past(current_tip, depth)
-		new_history := chain.get_ordered_past(new_tip, depth)
-
-		if len(current_history) < 5 { // we assume chain will not fork before 4 blocks
-			var current_history_rev []crypto.Hash
-			var new_history_rev []crypto.Hash
-
-			for i := range current_history {
-				current_history_rev = append(current_history_rev, current_history[len(current_history)-i-1])
-			}
-			for i := range new_history {
-				new_history_rev = append(new_history_rev, new_history[len(new_history)-i-1])
-			}
-
-			for j := range new_history_rev {
-				found := false
-				for i := range current_history_rev {
-					if current_history_rev[i] == new_history_rev[j] {
-						found = true
-						break
-					}
-				}
-
-				if !found { // we have a contention point
-					topo = chain.Load_Block_Topological_order(new_history_rev[j-1])
-					order = append(order, new_history_rev[j-1:]...) // order is already stored and store
-					return
-				}
-			}
-			panic("not possible")
-		}
-
-		for i := 0; i < len(current_history)-4; i++ {
-			for j := 0; j < len(new_history)-4; j++ {
-				if current_history[i+0] == new_history[j+0] &&
-					current_history[i+1] == new_history[j+1] &&
-					current_history[i+2] == new_history[j+2] &&
-					current_history[i+3] == new_history[j+3] {
-
-					topo = chain.Load_Block_Topological_order(new_history[j])
-					for k := j; k >= 0; k-- {
-						order = append(order, new_history[k]) // reverse order and store
-					}
-					return
-
-				}
-			}
-		}
-	}
+	start := time.Now()
+	defer logger.V(2).Info("generating full order", "took", time.Now().Sub(start))
+
+	matchtill := chain.Load_Height_for_BL_ID(new_tip)
+	step_size := int64(10)
+
+	for {
+		matchtill -= step_size
+		if matchtill < 0 {
+			matchtill = 0
+		}
+		current_history := chain.get_ordered_past(current_tip, matchtill)
+		new_history := chain.get_ordered_past(new_tip, matchtill)
+
+		if matchtill == 0 {
+			if current_history[0] != new_history[0] {
+				panic("genesis not matching")
+			}
+			topo = 0
+			order = append(order, new_history...)
+			return
+		}
+
+		if current_history[0] != new_history[0] { // base are not matching, step back further
+			continue
+		}
+
+		if current_history[0] != new_history[0] ||
+			current_history[1] != new_history[1] ||
+			current_history[2] != new_history[2] ||
+			current_history[3] != new_history[3] {
+
+			continue // base are not matching, step back further
+		}
+
+		order = append(order, new_history[:]...)
+		topo = chain.Load_Block_Topological_order(order[0])
+		return
+	}

 	return
 }

 // we will collect atleast 50 blocks or till genesis
-func (chain *Blockchain) get_ordered_past(tip crypto.Hash, count int) (order []crypto.Hash) {
+func (chain *Blockchain) get_ordered_past(tip crypto.Hash, tillheight int64) (order []crypto.Hash) {
 	order = append(order, tip)
 	current := tip
-	for len(order) < count {
+	for chain.Load_Height_for_BL_ID(current) > tillheight {
 		past := chain.Get_Block_Past(current)

 		switch len(past) {

@@ -1520,5 +1517,9 @@ func (chain *Blockchain) get_ordered_past(tip crypto.Hash, count int) (order []c
 			panic("data corruption")
 		}
 	}
+
+	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
+		order[i], order[j] = order[j], order[i]
+	}
 	return
 }

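get_ordered_past now walks a tip's ancestry down to a target height (rather than collecting a fixed count of blocks) and reverses the result so the oldest block comes first; Generate_Full_Order_New then compares those height-aligned histories while stepping matchtill back in 10-block increments. A toy illustration of the ordered-past contract on an invented in-memory DAG (the hashes, heights and single-parent walk are simplifications for the example, not the project's code):

    package main

    import "fmt"

    // toy DAG: each block id maps to its immediate past (parents) and its height
    var past = map[string][]string{
        "G": {}, "A": {"G"}, "B": {"A"}, "C": {"A"}, "D": {"B", "C"},
    }
    var height = map[string]int64{"G": 0, "A": 1, "B": 2, "C": 2, "D": 3}

    // getOrderedPast mirrors the new contract: follow the past from tip down to
    // tillheight, then reverse in place so order[0] is the lowest (oldest) block.
    // The real code handles one or two parents; this toy walks only the first.
    func getOrderedPast(tip string, tillheight int64) (order []string) {
        order = append(order, tip)
        current := tip
        for height[current] > tillheight {
            parents := past[current]
            if len(parents) == 0 {
                break // reached genesis
            }
            current = parents[0]
            order = append(order, current)
        }
        for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
            order[i], order[j] = order[j], order[i]
        }
        return
    }

    func main() {
        fmt.Println(getOrderedPast("D", 0)) // [G A B D] - oldest first
        fmt.Println(getOrderedPast("D", 2)) // [B D]
    }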
@@ -212,7 +212,9 @@ func (chain *Blockchain) Get_Difficulty_At_Tips(tips []crypto.Hash) *big.Int {
 		biggest_difficulty.Set(MinimumDifficulty)
 	}

+	if !chain.cache_disabled {
 		chain.cache_Get_Difficulty_At_Tips.Add(tips_string, string(biggest_difficulty.Bytes())) // set in cache
+	}
 	return biggest_difficulty
 }

@@ -235,7 +237,9 @@ func (chain *Blockchain) VerifyMiniblockPoW(bl *block.Block, mbl block.MiniBlock
 	}*/

 	if CheckPowHashBig(PoW, block_difficulty) == true {
+		if !chain.cache_disabled {
 			chain.cache_IsMiniblockPowValid.Add(fmt.Sprintf("%s", cachekey), true) // set in cache
+		}
 		return true
 	}
 	return false

@@ -21,8 +21,6 @@ import "sync"
 import "sort"
 import "time"
 import "sync/atomic"
-import "encoding/hex"
-import "encoding/json"

 import "github.com/go-logr/logr"

@@ -55,9 +53,6 @@ type Mempool struct {
 	modified bool   // used to monitor whethel mem pool contents have changed,
 	height   uint64 // track blockchain height

-	P2P_TX_Relayer p2p_TX_Relayer // actual pointer, setup by the dero daemon during runtime
-
-	relayer chan crypto.Hash // used for immediate relay
 	// global variable , but don't see it utilisation here except fot tx verification
 	//chain *Blockchain
 	Exit_Mutex chan bool

@@ -70,65 +65,12 @@ type mempool_object struct {
 	Tx     *transaction.Transaction
 	Added  uint64 // time in epoch format
 	Height uint64 // at which height the tx unlocks in the mempool
-	Relayed   int   // relayed count
-	RelayedAt int64 // when was tx last relayed
 	Size       uint64 // size in bytes of the TX
 	FEEperBYTE uint64 // fee per byte
 }

 var loggerpool logr.Logger

-// marshal object as json
-func (obj *mempool_object) MarshalJSON() ([]byte, error) {
-	return json.Marshal(&struct {
-		Tx        string `json:"tx"` // hex encoding
-		Added     uint64 `json:"added"`
-		Height    uint64 `json:"height"`
-		Relayed   int    `json:"relayed"`
-		RelayedAt int64  `json:"relayedat"`
-	}{
-		Tx:        hex.EncodeToString(obj.Tx.Serialize()),
-		Added:     obj.Added,
-		Height:    obj.Height,
-		Relayed:   obj.Relayed,
-		RelayedAt: obj.RelayedAt,
-	})
-}
-
-// unmarshal object from json encoding
-func (obj *mempool_object) UnmarshalJSON(data []byte) error {
-	aux := &struct {
-		Tx        string `json:"tx"`
-		Added     uint64 `json:"added"`
-		Height    uint64 `json:"height"`
-		Relayed   int    `json:"relayed"`
-		RelayedAt int64  `json:"relayedat"`
-	}{}
-
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-
-	obj.Added = aux.Added
-	obj.Height = aux.Height
-	obj.Relayed = aux.Relayed
-	obj.RelayedAt = aux.RelayedAt
-
-	tx_bytes, err := hex.DecodeString(aux.Tx)
-	if err != nil {
-		return err
-	}
-	obj.Size = uint64(len(tx_bytes))
-
-	obj.Tx = &transaction.Transaction{}
-	err = obj.Tx.Deserialize(tx_bytes)
-
-	if err == nil {
-		obj.FEEperBYTE = obj.Tx.Fees() / obj.Size
-	}
-	return err
-}
-
 func Init_Mempool(params map[string]interface{}) (*Mempool, error) {
 	var mempool Mempool
 	//mempool.chain = params["chain"].(*Blockchain)

@@ -137,7 +79,6 @@ func Init_Mempool(params map[string]interface{}) (*Mempool, error) {
 	loggerpool.Info("Mempool started")
 	atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem

-	mempool.relayer = make(chan crypto.Hash, 1024*10)
 	mempool.Exit_Mutex = make(chan bool)

 	metrics.Set.GetOrCreateGauge("mempool_count", func() float64 {

@@ -152,20 +93,6 @@ func Init_Mempool(params map[string]interface{}) (*Mempool, error) {
 	return &mempool, nil
 }

-// this is created per incoming block and then discarded
-// This does not require shutting down and will be garbage collected automatically
-/*
-func Init_Block_Mempool(params map[string]interface{}) (*Mempool, error) {
-	var mempool Mempool
-
-	// initialize maps
-	//mempool.txs = map[crypto.Hash]*mempool_object{}
-	//mempool.nonces = map[crypto.Hash]bool{}
-
-	return &mempool, nil
-}
-*/
-
 func (pool *Mempool) HouseKeeping(height uint64) {
 	pool.height = height

@@ -257,8 +184,6 @@ func (pool *Mempool) Mempool_Add_TX(tx *transaction.Transaction, Height uint64)
 	object.FEEperBYTE = tx.Fees() / object.Size

 	pool.txs.Store(tx_hash, &object)
-
-	pool.relayer <- tx_hash
 	pool.modified = true // pool has been modified

 	//pool.sort_list() // sort and update pool list

@@ -402,12 +327,12 @@ func (pool *Mempool) Mempool_Print() {
 	})

 	loggerpool.Info(fmt.Sprintf("Total TX in mempool = %d\n", len(klist)))
-	loggerpool.Info(fmt.Sprintf("%20s %14s %7s %7s %6s %32s\n", "Added", "Last Relayed", "Relayed", "Size", "Height", "TXID"))
+	loggerpool.Info(fmt.Sprintf("%20s %14s %7s %7s %6s %32s\n", "Added", "Size", "Height", "TXID"))

 	for i := range klist {
 		k := klist[i]
 		v := vlist[i]
-		loggerpool.Info(fmt.Sprintf("%20s %14s %7d %7d %6d %32s\n", time.Unix(int64(v.Added), 0).UTC().Format(time.RFC3339), time.Duration(v.RelayedAt)*time.Second, v.Relayed,
+		loggerpool.Info(fmt.Sprintf("%20s %14s %7d %7d %6d %32s\n", time.Unix(int64(v.Added), 0).UTC().Format(time.RFC3339),
 			len(v.Tx.Serialize()), v.Height, k))
 	}
 }

@@ -461,5 +386,3 @@ func (pool *Mempool) sort_list() ([]crypto.Hash, []TX_Sorting_struct) {
 	return sorted_list, data

 }
-
-type p2p_TX_Relayer func(*transaction.Transaction, uint64) int // function type, exported in p2p but cannot use due to cyclic dependency
@@ -523,7 +523,17 @@ func (chain *Blockchain) Accept_new_block(tstamp uint64, miniblock_blob []byte)

 	// notify peers, we have a miniblock and return to miner
 	if !chain.simulator { // if not in simulator mode, relay miniblock to the chain
-		go chain.P2P_MiniBlock_Relayer(mbl, 0)
+		var mbls []block.MiniBlock
+
+		if !mbl.Genesis {
+			for i := uint8(0); i < mbl.PastCount; i++ {
+				mbls = append(mbls, chain.MiniBlocks.Get(mbl.Past[i]))
+			}
+
+		}
+		mbls = append(mbls, mbl)
+		go chain.P2P_MiniBlock_Relayer(mbls, 0)
+
 	}

 	// if a duplicate block is being sent, reject the block

@@ -665,8 +675,10 @@ hard_way:
 	if err != nil || bits >= 120 {
 		return
 	}
+	if !chain.cache_disabled {
 		chain.cache_IsAddressHashValid.Add(fmt.Sprintf("%s", hash), true) // set in cache
+	}

 	return true
 }

@@ -80,6 +80,9 @@ func (chain *Blockchain) Verify_MiniBlocks(bl block.Block) (err error) {

 	// check whether the genesis blocks are all equal
 	for _, mbl := range bl.MiniBlocks {
+		if !mbl.IsSafe() {
+			return fmt.Errorf("MiniBlock is unsafe")
+		}
 		if mbl.Genesis { // make sure all genesis blocks point to all the actual tips

 			if bl.Height != binary.BigEndian.Uint64(mbl.Check[:]) {

@@ -194,7 +197,9 @@ func (chain *Blockchain) Check_Dynamism(mbls []block.MiniBlock) (err error) {

 // insert a miniblock to chain and if successfull inserted, notify everyone in need
 func (chain *Blockchain) InsertMiniBlock(mbl block.MiniBlock) (err error, result bool) {
+	if !mbl.IsSafe() {
+		return fmt.Errorf("miniblock is unsafe"), false
+	}
 	var miner_hash crypto.Hash
 	copy(miner_hash[:], mbl.KeyHash[:])
 	if !chain.IsAddressHashValid(true, miner_hash) {

@@ -55,8 +55,6 @@ type Regpool struct {
 	modified bool   // used to monitor whethel mem pool contents have changed,
 	height   uint64 // track blockchain height

-	relayer chan crypto.Hash // used for immediate relay
-
 	// global variable , but don't see it utilisation here except fot tx verification
 	//chain *Blockchain
 	Exit_Mutex chan bool

@@ -136,7 +134,6 @@ func Init_Regpool(params map[string]interface{}) (*Regpool, error) {
 	loggerpool.Info("Regpool started")
 	atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem

-	regpool.relayer = make(chan crypto.Hash, 1024*10)
 	regpool.Exit_Mutex = make(chan bool)

 	metrics.Set.GetOrCreateGauge("regpool_count", func() float64 {

@@ -259,7 +256,6 @@ func (pool *Regpool) Regpool_Add_TX(tx *transaction.Transaction, Height uint64)
 	object.Size = uint64(len(tx.Serialize()))

 	pool.txs.Store(tx_hash, &object)
-	pool.relayer <- tx_hash
 	pool.modified = true // pool has been modified

 	//pool.sort_list() // sort and update pool list

@@ -17,7 +17,6 @@
 package blockchain

 import "fmt"
-import "sync"
 import "math/big"
 import "crypto/rand"
 import "path/filepath"

@@ -29,8 +28,6 @@ import "github.com/deroproject/derohe/cryptography/crypto"

 import "github.com/deroproject/graviton"

-import "github.com/hashicorp/golang-lru"
-
 // though these can be done within a single DB, these are separated for completely clarity purposes
 type storage struct {
 	Balance_store *graviton.Store // stores most critical data, only history can be purged, its merkle tree is stored in the block

@@ -98,7 +95,7 @@ func (chain *Blockchain) StoreBlock(bl *block.Block, snapshot_version uint64) {

 	chain.Store.Block_tx_store.DeleteBlock(hash) // what should we do on error

-	err := chain.Store.Block_tx_store.WriteBlock(hash, serialized_bytes, difficulty_of_current_block, snapshot_version)
+	err := chain.Store.Block_tx_store.WriteBlock(hash, serialized_bytes, difficulty_of_current_block, snapshot_version, bl.Height)
 	if err != nil {
 		panic(fmt.Sprintf("error while writing block"))
 	}

@@ -169,36 +166,27 @@ func (chain *Blockchain) Load_Block_Timestamp(h crypto.Hash) uint64 {
 }

 func (chain *Blockchain) Load_Block_Height(h crypto.Hash) (height int64) {

 	defer func() {
 		if r := recover(); r != nil {
 			height = -1
 		}
 	}()

-	bl, err := chain.Load_BL_FROM_ID(h)
-	if err != nil {
-		panic(err)
+	if heighti, err := chain.ReadBlockHeight(h); err != nil {
+		return -1
+	} else {
+		return int64(heighti)
 	}
-	height = int64(bl.Height)
-
-	return
 }

 func (chain *Blockchain) Load_Height_for_BL_ID(h crypto.Hash) int64 {
 	return chain.Load_Block_Height(h)
 }

-var past_cache, _ = lru.New(10240)
-var past_cache_lock sync.Mutex
-
 // all the immediate past of a block
 func (chain *Blockchain) Get_Block_Past(hash crypto.Hash) (blocks []crypto.Hash) {

 	//fmt.Printf("loading tips for block %x\n", hash)
-	past_cache_lock.Lock()
-	defer past_cache_lock.Unlock()
-
-	if keysi, ok := past_cache.Get(hash); ok {
+	if keysi, ok := chain.cache_BlockPast.Get(hash); ok {
 		keys := keysi.([]crypto.Hash)
 		blocks = make([]crypto.Hash, len(keys))
 		for i := range keys {

@@ -223,7 +211,7 @@ func (chain *Blockchain) Get_Block_Past(hash crypto.Hash) (blocks []crypto.Hash)
 	}

 	//set in cache
-	past_cache.Add(hash, cache_copy)
+	chain.cache_BlockPast.Add(hash, cache_copy)

 	return
 }

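The package-level past_cache and its mutex are replaced by the per-chain cache_BlockPast / cache_BlockHeight LRUs created in Blockchain_Start. A small sketch of the hashicorp/golang-lru calls relied on above (New, Add, and Get with a type assertion back to the stored slice); the cache itself is safe for concurrent use, which is why the explicit lock could be dropped:

    package main

    import (
        "fmt"

        "github.com/hashicorp/golang-lru"
    )

    type Hash [32]byte

    func main() {
        cache, err := lru.New(100 * 1024) // same sizing as cache_BlockPast above
        if err != nil {
            panic(err)
        }

        var blid Hash
        parents := []Hash{{1}, {2}}

        cache.Add(blid, parents) // values are stored as interface{}

        if v, ok := cache.Get(blid); ok {
            keys := v.([]Hash) // caller asserts back to the stored type
            fmt.Println("cache hit, parents:", len(keys))
        }
    }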
@@ -24,6 +24,7 @@ import "strings"
 import "io/ioutil"
 import "math/big"
 import "path/filepath"
+import "github.com/deroproject/derohe/globals"

 type storefs struct {
 	basedir string

@@ -33,6 +34,7 @@ type storefs struct {
 // hex block id (64 chars).block._ rewards (decimal) _ difficulty _ cumulative difficulty

 func (s *storefs) ReadBlock(h [32]byte) ([]byte, error) {
+	defer globals.Recover(0)
 	var dummy [32]byte
 	if h == dummy {
 		return nil, fmt.Errorf("empty block")

@@ -40,7 +42,7 @@ func (s *storefs) ReadBlock(h [32]byte) ([]byte, error) {

 	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))

-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
 	}

@@ -50,7 +52,7 @@ func (s *storefs) ReadBlock(h [32]byte) ([]byte, error) {
 		if strings.HasPrefix(file.Name(), filename_start) {
 			//fmt.Printf("Reading block with filename %s\n", file.Name())
 			file := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]), file.Name())
-			return ioutil.ReadFile(file)
+			return os.ReadFile(file)
 		}
 	}

@@ -60,7 +62,7 @@ func (s *storefs) ReadBlock(h [32]byte) ([]byte, error) {
 func (s *storefs) DeleteBlock(h [32]byte) error {
 	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))

-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return err
 	}

@@ -87,7 +89,7 @@ func (s *storefs) DeleteBlock(h [32]byte) error {
 func (s *storefs) ReadBlockDifficulty(h [32]byte) (*big.Int, error) {
 	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))

-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return nil, err
 	}

@@ -99,7 +101,7 @@ func (s *storefs) ReadBlockDifficulty(h [32]byte) (*big.Int, error) {
 			diff := new(big.Int)

 			parts := strings.Split(file.Name(), "_")
-			if len(parts) != 3 {
+			if len(parts) != 4 {
 				panic("such filename cannot occur")
 			}

@@ -120,7 +122,7 @@ func (chain *Blockchain) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) {
 func (s *storefs) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) {
 	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))

-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return 0, err
 	}

@@ -128,28 +130,65 @@ func (s *storefs) ReadBlockSnapshotVersion(h [32]byte) (uint64, error) {
 	filename_start := fmt.Sprintf("%x.block", h[:])
 	for _, file := range files {
 		if strings.HasPrefix(file.Name(), filename_start) {
-			var diff uint64
-
+			var ssversion uint64
 			parts := strings.Split(file.Name(), "_")
-			if len(parts) != 3 {
+			if len(parts) != 4 {
 				panic("such filename cannot occur")
 			}
-			_, err := fmt.Sscan(parts[2], &diff)
+			_, err := fmt.Sscan(parts[2], &ssversion)
 			if err != nil {
 				return 0, err
 			}
-			return diff, nil
+			return ssversion, nil
 		}
 	}

 	return 0, os.ErrNotExist
 }

-func (s *storefs) WriteBlock(h [32]byte, data []byte, difficulty *big.Int, ss_version uint64) (err error) {
+func (chain *Blockchain) ReadBlockHeight(h [32]byte) (uint64, error) {
+	if heighti, ok := chain.cache_BlockHeight.Get(h); ok {
+		height := heighti.(uint64)
+		return height, nil
+	}
+
+	height, err := chain.Store.Block_tx_store.ReadBlockHeight(h)
+	if err == nil {
+		chain.cache_BlockHeight.Add(h, height)
+	}
+	return height, err
+}
+
+func (s *storefs) ReadBlockHeight(h [32]byte) (uint64, error) {
 	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))
-	file := filepath.Join(dir, fmt.Sprintf("%x.block_%s_%d", h[:], difficulty.String(), ss_version))
+
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		return 0, err
+	}
+
+	filename_start := fmt.Sprintf("%x.block", h[:])
+	for _, file := range files {
+		if strings.HasPrefix(file.Name(), filename_start) {
+			var height uint64
+			parts := strings.Split(file.Name(), "_")
+			if len(parts) != 4 {
+				panic("such filename cannot occur")
+			}
+			_, err := fmt.Sscan(parts[3], &height)
+			if err != nil {
+				return 0, err
+			}
+			return height, nil
+		}
+	}
+
+	return 0, os.ErrNotExist
+}
+
+func (s *storefs) WriteBlock(h [32]byte, data []byte, difficulty *big.Int, ss_version uint64, height uint64) (err error) {
+	dir := filepath.Join(filepath.Join(s.basedir, "bltx_store"), fmt.Sprintf("%02x", h[0]), fmt.Sprintf("%02x", h[1]), fmt.Sprintf("%02x", h[2]))
+	file := filepath.Join(dir, fmt.Sprintf("%x.block_%s_%d_%d", h[:], difficulty.String(), ss_version, height))
 	if err = os.MkdirAll(dir, 0700); err != nil {
 		return err
 	}

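With the extra WriteBlock parameter a stored block file is now named <blid>.block_<difficulty>_<snapshot_version>_<height> (four _-separated parts instead of three), and ReadBlockSnapshotVersion / ReadBlockHeight simply pick single fields back out of that name. A small sketch of the parsing, using an invented filename:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // invented example: <blid>.block_<difficulty>_<snapshot_version>_<height>
        name := "ab12cd.block_250000_17_904"

        parts := strings.Split(name, "_")
        if len(parts) != 4 { // the new readers panic on any other shape
            panic("such filename cannot occur")
        }

        var ssversion, height uint64
        fmt.Sscan(parts[2], &ssversion) // parts[2] -> snapshot version, as in ReadBlockSnapshotVersion
        fmt.Sscan(parts[3], &height)    // parts[3] -> height, as in ReadBlockHeight

        fmt.Println("snapshot version:", ssversion, "height:", height)
    }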
@@ -37,25 +37,16 @@ import "github.com/deroproject/derohe/cryptography/crypto"
 import "github.com/deroproject/derohe/transaction"
 import "github.com/deroproject/derohe/cryptography/bn256"

-//import "github.com/deroproject/derosuite/emission"
-
 // caches x of transactions validity
 // it is always atomic
-// the cache is not txhash -> validity mapping
-// instead it is txhash+expanded ringmembers
+// the cache is txhash -> validity mapping
 // if the entry exist, the tx is valid
 // it stores special hash and first seen time
-// this can only be used on expanded transactions
 var transaction_valid_cache sync.Map

 // this go routine continuously scans and cleans up the cache for expired entries
 func clean_up_valid_cache() {
-
-	for {
-		time.Sleep(3600 * time.Second)
 	current_time := time.Now()
-
-	// track propagation upto 10 minutes
 	transaction_valid_cache.Range(func(k, value interface{}) bool {
 		first_seen := value.(time.Time)
 		if current_time.Sub(first_seen).Round(time.Second).Seconds() > 3600 {

@@ -63,50 +54,14 @@ func clean_up_valid_cache() {
 		}
 		return true
 	})
-
-	}
 }

-/* Coinbase transactions need to verify registration
- * */
+// Coinbase transactions need to verify registration
 func (chain *Blockchain) Verify_Transaction_Coinbase(cbl *block.Complete_Block, minertx *transaction.Transaction) (err error) {

 	if !minertx.IsCoinbase() { // transaction is not coinbase, return failed
 		return fmt.Errorf("tx is not coinbase")
 	}

-	// make sure miner address is registered
-
-	_, topos := chain.Store.Topo_store.binarySearchHeight(int64(cbl.Bl.Height - 1))
-	// load all db versions one by one and check whether the root hash matches the one mentioned in the tx
-	if len(topos) < 1 {
-		return fmt.Errorf("could not find previous height blocks %d", cbl.Bl.Height-1)
-	}
-
-	var balance_tree *graviton.Tree
-	for i := range topos {
-
-		toporecord, err := chain.Store.Topo_store.Read(topos[i])
-		if err != nil {
-			return fmt.Errorf("could not read block at height %d due to error while obtaining toporecord topos %+v processing %d err:%s\n", cbl.Bl.Height-1, topos, i, err)
-		}
-
-		ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version)
-		if err != nil {
-			return err
-		}
-
-		if balance_tree, err = ss.GetTree(config.BALANCE_TREE); err != nil {
-			return err
-		}
-
-		if _, err := balance_tree.Get(minertx.MinerAddress[:]); err != nil {
-			return fmt.Errorf("balance not obtained err %s\n", err)
-			//return false
-		}
-
-	}
-
 	return nil // success comes last
 }

@ -229,7 +184,9 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase_CheckNonce_Tips(hf_versi
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !chain.cache_disabled {
|
||||||
chain.cache_IsNonceValidTips.Add(tips_string, true) // set in cache
|
chain.cache_IsNonceValidTips.Add(tips_string, true) // set in cache
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -271,7 +228,9 @@ func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
if tx.IsRegistrationValid() {
|
if tx.IsRegistrationValid() {
|
||||||
|
if !chain.cache_disabled {
|
||||||
transaction_valid_cache.Store(tx_hash, time.Now()) // signature got verified, cache it
|
transaction_valid_cache.Store(tx_hash, time.Now()) // signature got verified, cache it
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("Registration has invalid signature")
|
return fmt.Errorf("Registration has invalid signature")
|
||||||
@ -482,7 +441,10 @@ func (chain *Blockchain) verify_Transaction_NonCoinbase_internal(skip_proof bool
|
|||||||
|
|
||||||
// these transactions are done
|
// these transactions are done
|
||||||
if tx.TransactionType == transaction.NORMAL || tx.TransactionType == transaction.BURN_TX || tx.TransactionType == transaction.SC_TX {
|
if tx.TransactionType == transaction.NORMAL || tx.TransactionType == transaction.BURN_TX || tx.TransactionType == transaction.SC_TX {
|
||||||
|
if !chain.cache_disabled {
|
||||||
transaction_valid_cache.Store(tx_hash, time.Now()) // signature got verified, cache it
|
transaction_valid_cache.Store(tx_hash, time.Now()) // signature got verified, cache it
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
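The added "if !chain.cache_disabled" guards above skip every validity-cache write when caching is turned off, presumably via an environment switch read once at startup. A minimal hedged sketch of the same guard pattern; the DISABLE_CACHE variable name and the helper below are illustrative, not taken from this diff:

package main

import (
	"os"
	"sync"
	"time"
)

// cacheGuard is illustrative only: a cache whose writes become no-ops when disabled.
type cacheGuard struct {
	disabled bool     // set once at startup; the env switch name below is an assumption
	store    sync.Map // txhash -> first seen time
}

func newCacheGuard() *cacheGuard {
	return &cacheGuard{disabled: os.Getenv("DISABLE_CACHE") != ""}
}

func (c *cacheGuard) remember(txhash [32]byte) {
	if c.disabled {
		return // mirrors the cache_disabled guards in the hunks above
	}
	c.store.Store(txhash, time.Now())
}

func main() { newCacheGuard().remember([32]byte{}) }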
@ -130,8 +130,8 @@ func handle_easymenu_post_open_command(l *readline.Instance, line string) (proce
reg_tx := wallet.GetRegistrationTX()

// at this point we must send the registration transaction

fmt.Fprintf(l.Stderr(), "Wallet address : "+color_green+"%s"+color_white+" is going to be registered.Pls wait till the account is registered.\n", wallet.GetAddress())
+fmt.Fprintf(l.Stderr(), "Registration TXID %s\n", reg_tx.GetHash())
err := wallet.SendTransaction(reg_tx)
if err != nil {
fmt.Fprintf(l.Stderr(), "sending registration tx err %s\n", err)
@ -38,9 +38,6 @@ func display_easymenu_pre_open_command(l *readline.Instance) {
io.WriteString(w, "\t\033[1m2\033[0m\tCreate New Wallet\n")
io.WriteString(w, "\t\033[1m3\033[0m\tRecover Wallet using recovery seed (25 words)\n")
io.WriteString(w, "\t\033[1m4\033[0m\tRecover Wallet using recovery key (64 char private spend key hex)\n")
-// io.WriteString(w, "\t\033[1m5\033[0m\tCreate Watch-able Wallet (view only) using wallet view key\n")
-// io.WriteString(w, "\t\033[1m6\033[0m\tRecover Non-deterministic Wallet key\n")

io.WriteString(w, "\n\t\033[1m9\033[0m\tExit menu and start prompt\n")
io.WriteString(w, "\t\033[1m0\033[0m\tExit Wallet\n")
}
@ -355,10 +355,9 @@ func main() {
func update_prompt(l *readline.Instance) {

last_wallet_height := uint64(0)
-last_daemon_height := uint64(0)
+last_daemon_height := int64(0)
daemon_online := false
last_update_time := int64(0)

for {
time.Sleep(30 * time.Millisecond) // give user a smooth running number

@ -385,7 +384,8 @@ func update_prompt(l *readline.Instance) {
}

if wallet == nil {
-l.SetPrompt(fmt.Sprintf("\033[1m\033[32m%s \033[0m"+color_green+"0/%d \033[32m>>>\033[0m ", address_trim, 0))
+l.SetPrompt(fmt.Sprintf("\033[1m\033[32m%s \033[0m"+color_green+"0/%d \033[32m>>>\033[0m ", address_trim, walletapi.Get_Daemon_Height()))
+l.Refresh()
prompt_mutex.Unlock()
continue
}
@ -395,7 +395,7 @@ func update_prompt(l *readline.Instance) {
_ = daemon_online

//fmt.Printf("chekcing if update is required\n")
-if last_wallet_height != wallet.Get_Height() || last_daemon_height != wallet.Get_Daemon_Height() ||
+if last_wallet_height != wallet.Get_Height() || last_daemon_height != walletapi.Get_Daemon_Height() ||
/*daemon_online != wallet.IsDaemonOnlineCached() ||*/ (time.Now().Unix()-last_update_time) >= 1 {
// choose color based on urgency
color := "\033[32m" // default is green color
@ -403,7 +403,7 @@ func update_prompt(l *readline.Instance) {
color = "\033[33m" // make prompt yellow
}

-dheight := wallet.Get_Daemon_Height()
+//dheight := walletapi.Get_Daemon_Height()

/*if wallet.IsDaemonOnlineCached() == false {
color = "\033[33m" // make prompt yellow
@ -427,10 +427,10 @@ func update_prompt(l *readline.Instance) {
testnet_string = "\033[31m TESTNET"
}

-l.SetPrompt(fmt.Sprintf("\033[1m\033[32m%s \033[0m"+color+"%d/%d %s %s\033[32m>>>\033[0m ", address_trim, wallet.Get_Height(), dheight, balance_string, testnet_string))
+l.SetPrompt(fmt.Sprintf("\033[1m\033[32m%s \033[0m"+color+"%d/%d %s %s\033[32m>>>\033[0m ", address_trim, wallet.Get_Height(), walletapi.Get_Daemon_Height(), balance_string, testnet_string))
l.Refresh()
last_wallet_height = wallet.Get_Height()
-last_daemon_height = wallet.Get_Daemon_Height()
+last_daemon_height = walletapi.Get_Daemon_Height()
last_update_time = time.Now().Unix()
//daemon_online = wallet.IsDaemonOnlineCached()
_ = last_update_time
@ -47,6 +47,7 @@ import "gopkg.in/natefinch/lumberjack.v2"
import "github.com/deroproject/derohe/p2p"
import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/block"
+import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/rpc"
import "github.com/deroproject/derohe/blockchain"
@ -199,10 +200,12 @@ func main() {
p2p.Broadcast_Block(cbl, peerid)
}

-chain.P2P_MiniBlock_Relayer = func(mbl block.MiniBlock, peerid uint64) {
+chain.P2P_MiniBlock_Relayer = func(mbl []block.MiniBlock, peerid uint64) {
p2p.Broadcast_MiniBlock(mbl, peerid)
}

+globals.Cron.Start() // start cron jobs

// This tiny goroutine continuously updates status as required
go func() {
last_our_height := int64(0)
@ -226,14 +229,6 @@ func main() {
mempool_tx_count := len(chain.Mempool.Mempool_List_TX())
regpool_tx_count := len(chain.Regpool.Regpool_List_TX())

-/*if our_height < 0 { // somehow the data folder got deleted/renamed/corrupted
-logger.Error(nil, "Somehow the data directory is not accessible. shutting down")
-l.Terminal.ExitRawMode()
-l.Terminal.Print("\n\n")
-os.Exit(-1)
-return
-}*/

// only update prompt if needed
if last_second != time.Now().Unix() || last_our_height != our_height || last_best_height != best_height || last_peer_count != peer_count || last_topo_height != topo_height || last_mempool_tx_count != mempool_tx_count || last_regpool_tx_count != regpool_tx_count {
// choose color based on urgency
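The relayer hook above now takes a slice, so a single callback invocation can hand a whole batch of miniblocks to the p2p layer. A hedged fragment of how a caller might use it; the pending slice and the zero peer id are illustrative assumptions, not taken from this diff:

// illustrative fragment: relaying a batch through the slice-based callback
var pending []block.MiniBlock // gathered by the caller (assumed)
if chain.P2P_MiniBlock_Relayer != nil && len(pending) > 0 {
	chain.P2P_MiniBlock_Relayer(pending, 0) // 0 = not attributed to any remote peer (assumption)
}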
@ -605,8 +600,6 @@ restart_loop:
}

case command == "print_tx":
-/*

if len(line_parts) == 2 && len(line_parts[1]) == 64 {
txid, err := hex.DecodeString(strings.ToLower(line_parts[1]))

@ -617,25 +610,29 @@ restart_loop:
var hash crypto.Hash
copy(hash[:32], []byte(txid))

-tx, err := chain.Load_TX_FROM_ID(nil, hash)
-if err == nil {
-//s_bytes := tx.Serialize()
-//fmt.Printf("tx : %x\n", s_bytes)
-json_bytes, err := json.MarshalIndent(tx, "", " ")
-_ = err
-fmt.Printf("%s\n", string(json_bytes))
+var tx transaction.Transaction
+if tx_bytes, err := chain.Store.Block_tx_store.ReadTX(hash); err != nil {
+fmt.Printf("err while reading txid err %s\n", err)
+continue
+} else if err = tx.Deserialize(tx_bytes); err != nil {
+fmt.Printf("err deserializing tx err %s\n", err)
+continue

-//tx.RctSignature.Message = ringct.Key(tx.GetPrefixHash())
-//ringct.Get_pre_mlsag_hash(tx.RctSignature)
-//chain.Expand_Transaction_v2(tx)

-} else {
-fmt.Printf("Err %s\n", err)
}

+if valid_blid, invalid, valid := chain.IS_TX_Valid(hash); valid {
+fmt.Printf("TX is valid in block %s\n", valid_blid)
+} else if len(invalid) == 0 {
+fmt.Printf("TX is mined in a side chain\n")
+} else {
+fmt.Printf("TX is mined in blocks %+v\n", invalid)
+}
+if tx.IsRegistration() {
+fmt.Printf("Registration TX validity could not be detected\n")
+}

} else {
fmt.Printf("print_tx needs a single transaction id as arugument\n")
}
-*/

case strings.ToLower(line) == "status":
inc, out := p2p.Peer_Direction_Count()
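For reference, the reworked print_tx path boils down to: read the raw bytes from the block/tx store, deserialize, then classify the hash via IS_TX_Valid. A condensed hedged sketch using only the calls that appear in this hunk (error handling trimmed, imports as in the surrounding file):

// illustrative only: the load-then-classify flow behind the new print_tx
func describeTX(chain *blockchain.Blockchain, hash crypto.Hash) error {
	tx_bytes, err := chain.Store.Block_tx_store.ReadTX(hash) // raw serialized tx
	if err != nil {
		return err
	}
	var tx transaction.Transaction
	if err := tx.Deserialize(tx_bytes); err != nil {
		return err
	}
	if valid_blid, invalid, valid := chain.IS_TX_Valid(hash); valid {
		fmt.Printf("TX is valid in block %s\n", valid_blid)
	} else if len(invalid) == 0 {
		fmt.Printf("TX is mined in a side chain\n")
	} else {
		fmt.Printf("TX is mined in blocks %+v\n", invalid)
	}
	return nil
}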
@ -782,8 +779,11 @@ restart_loop:
logger.Error(fmt.Errorf("POP needs argument n to pop this many blocks from the top"), "")
}

+case command == "gc":
+runtime.GC()

case command == "ban":
-/*
if len(line_parts) >= 4 || len(line_parts) == 1 {
fmt.Printf("IP address required to ban\n")
break
@ -805,9 +805,9 @@ restart_loop:
fmt.Printf("err parsing address %s", err)
break
}
-*/
case command == "unban":
-/*
if len(line_parts) >= 3 || len(line_parts) == 1 {
fmt.Printf("IP address required to unban\n")
break
@ -819,9 +819,9 @@ restart_loop:
} else {
fmt.Printf("unbann %s successful", line_parts[1])
}
-*/
case command == "bans":
-//p2p.BanList_Print() // print ban list
+p2p.BanList_Print() // print ban list

case line == "sleep":
logger.Info("console sleeping for 1 second")
@ -948,6 +948,7 @@ func usage(w io.Writer) {
var completer = readline.NewPrefixCompleter(
readline.PcItem("help"),
readline.PcItem("diff"),
+readline.PcItem("gc"),
readline.PcItem("mempool_flush"),
readline.PcItem("mempool_delete_tx"),
readline.PcItem("mempool_print"),
@ -106,7 +106,7 @@ func GetSC(ctx context.Context, p rpc.GetSC_Params) (result rpc.GetSC_Result, er
_ = k
_ = v

-fmt.Printf("key '%x' value '%x'\n", k, v)
+//fmt.Printf("key '%x' value '%x'\n", k, v)
if len(k) == 32 && len(v) == 8 { // it's SC balance
result.Balances[fmt.Sprintf("%x", k)] = binary.BigEndian.Uint64(v)
} else if k[len(k)-1] >= 0x3 && k[len(k)-1] < 0x80 && nil == vark.UnmarshalBinary(k) && nil == varv.UnmarshalBinary(v) {
@ -527,9 +527,13 @@ func mine_block_auto(chain *blockchain.Blockchain, miner_address rpc.Address) {
last_block_time := time.Now()
for {

+bl, _, _, _, err := chain.Create_new_block_template_mining(miner_address)
+if err != nil {
+logger.Error(err, "error while building mining block")
+}

if time.Now().Sub(last_block_time) > time.Duration(config.BLOCK_TIME)*time.Second || // every X secs generate a block
-len(chain.Mempool.Mempool_List_TX_SortedInfo()) >= 1 ||
-len(chain.Regpool.Regpool_List_TX()) >= 1 { //pools have a tx, try to mine them ASAP
+len(bl.Tx_hashes) >= 1 { //pools have a tx, try to mine them ASAP

if err := mine_block_single(chain, miner_address); err != nil {
time.Sleep(time.Second)
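mine_block_auto now builds a fresh block template on every pass and mines as soon as that template actually carries transactions, instead of polling the mempool and regpool separately. The decision reduces to the hedged helper below; names are illustrative and only calls visible in this hunk are used:

// illustrative only: "should the simulator mine now?" after this change
func shouldMine(bl block.Block, last_block_time time.Time) bool {
	return time.Now().Sub(last_block_time) > time.Duration(config.BLOCK_TIME)*time.Second || // regular block interval
		len(bl.Tx_hashes) >= 1 // or the template already carries a tx
}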
@ -56,7 +56,7 @@ const MAINNET_BOOTSTRAP_DIFFICULTY = uint64(80000000) // atlantis mainnet botstr
const MAINNET_MINIMUM_DIFFICULTY = uint64(800000000) // 80 MH/s

// testnet bootstraps at 1 MH
-const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(50000) // testnet bootstrap at 50KH/s
+const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(10000) // testnet bootstrap at 50KH/s
const TESTNET_MINIMUM_DIFFICULTY = uint64(10000) // 10KH/s

// this single parameter controls lots of various parameters
@ -29,5 +29,5 @@ var Mainnet_seed_nodes = []string{

// some seed node for testnet
var Testnet_seed_nodes = []string{
-"212.8.242.60:40401",
+"68.183.12.117:40401",
}
@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"

// right now it has to be manually changed
// do we need to include git commitsha??
-var Version = semver.MustParse("3.4.69-1.DEROHE.STARGATE+15112021")
+var Version = semver.MustParse("3.4.80-1.DEROHE.STARGATE+20112021")
@ -33,6 +33,7 @@ import "go.uber.org/zap"
import "go.uber.org/zap/zapcore"
import "github.com/go-logr/logr"
import "github.com/go-logr/zapr"
+import "github.com/robfig/cron/v3"

import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/rpc"
@ -89,6 +90,10 @@ func GetOffsetP2P() time.Duration {
return ClockOffsetP2P
}

+var Cron = cron.New(cron.WithChain(
+cron.Recover(Logger), // or use cron.DefaultLogger
+))

var Dialer proxy.Dialer = proxy.Direct // for proxy and direct connections
// all outgoing connections , including DNS requests must be made using this

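globals.Cron is a shared robfig/cron v3 scheduler wrapped in a panic-recovering chain; the daemon starts it once (globals.Cron.Start() in the main hunk earlier) and packages can hang periodic jobs on it. A hedged sketch of registering a job; the @every spec, the package placement and the choice of clean_up_valid_cache as the job body are assumptions for illustration, not taken from this diff:

// illustrative only: registering a periodic job on the shared scheduler
package blockchain

import "github.com/deroproject/derohe/globals"

func register_cleanup_job() {
	// robfig/cron v3 understands "@every <duration>" specs; the interval here is an assumption
	if _, err := globals.Cron.AddFunc("@every 1800s", clean_up_valid_cache); err != nil {
		panic(err)
	}
}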
@ -29,6 +29,7 @@ import "net/http"
import "path/filepath"
import "github.com/go-logr/logr"
import "github.com/VictoriaMetrics/metrics"
+import "github.com/xtaci/kcp-go/v5"

// these are exported by the daemon for various analysis
var Version string //this is later converted to metrics format
@ -60,6 +61,33 @@ func writePrometheusMetrics(w io.Writer) {

usage := NewDiskUsage(".")
fmt.Fprintf(w, "free_disk_space_bytes %d\n", usage.Available())

+// write kcp metrics, see https://github.com/xtaci/kcp-go/blob/v5.4.20/snmp.go#L9
+fmt.Fprintf(w, "KCP_BytesSent %d\n", kcp.DefaultSnmp.BytesSent)
+fmt.Fprintf(w, "KCP_BytesReceived %d\n", kcp.DefaultSnmp.BytesReceived)
+fmt.Fprintf(w, "KCP_MaxConn %d\n", kcp.DefaultSnmp.MaxConn)
+fmt.Fprintf(w, "KCP_ActiveOpens %d\n", kcp.DefaultSnmp.ActiveOpens)
+fmt.Fprintf(w, "KCP_PassiveOpens %d\n", kcp.DefaultSnmp.PassiveOpens)
+fmt.Fprintf(w, "KCP_CurrEstab %d\n", kcp.DefaultSnmp.CurrEstab)
+fmt.Fprintf(w, "KCP_InErrs %d\n", kcp.DefaultSnmp.InErrs)
+fmt.Fprintf(w, "KCP_InCsumErrors %d\n", kcp.DefaultSnmp.InCsumErrors)
+fmt.Fprintf(w, "KCP_KCPInErrors %d\n", kcp.DefaultSnmp.KCPInErrors)
+fmt.Fprintf(w, "KCP_InPkts %d\n", kcp.DefaultSnmp.InPkts)
+fmt.Fprintf(w, "KCP_OutPkts %d\n", kcp.DefaultSnmp.OutPkts)
+fmt.Fprintf(w, "KCP_InSegs %d\n", kcp.DefaultSnmp.InSegs)
+fmt.Fprintf(w, "KCP_OutSegs %d\n", kcp.DefaultSnmp.OutSegs)
+fmt.Fprintf(w, "KCP_InBytes %d\n", kcp.DefaultSnmp.InBytes)
+fmt.Fprintf(w, "KCP_OutBytes %d\n", kcp.DefaultSnmp.OutBytes)
+fmt.Fprintf(w, "KCP_RetransSegs %d\n", kcp.DefaultSnmp.RetransSegs)
+fmt.Fprintf(w, "KCP_FastRetransSegs %d\n", kcp.DefaultSnmp.FastRetransSegs)
+fmt.Fprintf(w, "KCP_EarlyRetransSegs %d\n", kcp.DefaultSnmp.EarlyRetransSegs)
+fmt.Fprintf(w, "KCP_LostSegs %d\n", kcp.DefaultSnmp.LostSegs)
+fmt.Fprintf(w, "KCP_RepeatSegs %d\n", kcp.DefaultSnmp.RepeatSegs)
+fmt.Fprintf(w, "KCP_FECRecovered %d\n", kcp.DefaultSnmp.FECRecovered)
+fmt.Fprintf(w, "KCP_FECErrs %d\n", kcp.DefaultSnmp.FECErrs)
+fmt.Fprintf(w, "KCP_FECParityShards %d\n", kcp.DefaultSnmp.FECParityShards)
+fmt.Fprintf(w, "KCP_FECShortShards %d\n", kcp.DefaultSnmp.FECShortShards)

}

func Dump_metrics_data_directly(logger logr.Logger, specificnamei interface{}) {
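The KCP_* values are cumulative counters read straight off kcp.DefaultSnmp, so per-second rates have to be derived by the scraper or by diffing two reads. A hedged, standalone example of turning two reads of BytesSent into a rough rate; the 10-second window is arbitrary:

package main

import (
	"fmt"
	"time"

	kcp "github.com/xtaci/kcp-go/v5"
)

func main() {
	before := kcp.DefaultSnmp.BytesSent // cumulative since process start
	time.Sleep(10 * time.Second)
	after := kcp.DefaultSnmp.BytesSent
	fmt.Printf("kcp send rate: %d bytes/s\n", (after-before)/10)
}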
@ -20,6 +20,7 @@ import "fmt"

//import "net"
import "time"
+import "context"
import "math/big"
import "math/bits"
import "sync/atomic"
@ -40,6 +41,8 @@ import "github.com/deroproject/derohe/cryptography/crypto"
// we are expecting other side to have a heavier PoW chain
// this is for the case when the chain only moves in pruned state
// if after bootstraping the chain can continousky sync for few minutes, this means we have got the job done
+// TODO if during bootstrap error occurs, then we must discard data and restart from scratch
+// resume may be implemented in future
func (connection *Connection) bootstrap_chain() {
defer handle_connection_panic(connection)
var request ChangeList
@ -55,6 +58,8 @@ func (connection *Connection) bootstrap_chain() {
return
}

+var TimeLimit = 10 * time.Second

// we will request top 60 blocks
ctopo := connection.TopoHeight - 50 // last 50 blocks have to be synced, this syncing will help us detect error
var topos []int64
@ -69,7 +74,9 @@ func (connection *Connection) bootstrap_chain() {
}

fill_common(&request.Common) // fill common info
-if err := connection.RConn.Client.Call("Peer.ChangeSet", request, &response); err != nil {
+ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.ChangeSet", request, &response); err != nil {
connection.logger.V(1).Error(err, "Call failed ChangeSet")
return
}
@ -103,8 +110,9 @@ func (connection *Connection) bootstrap_chain() {
ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.BALANCE_TREE), Section: section[:], SectionLength: uint64(path_length)}
var ts_response Response_Tree_Section_Struct
fill_common(&ts_response.Common)
-if err := connection.RConn.Client.Call("Peer.TreeSection", ts_request, &ts_response); err != nil {
-connection.logger.V(2).Error(err, "Call failed TreeSection")
+ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", ts_request, &ts_response); err != nil {
+connection.logger.V(1).Error(err, "Call failed TreeSection")
return
} else {
// now we must write all the state changes to gravition
@ -167,8 +175,9 @@ func (connection *Connection) bootstrap_chain() {
ts_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: []byte(config.SC_META), Section: section[:], SectionLength: uint64(path_length)}
var ts_response Response_Tree_Section_Struct
fill_common(&ts_response.Common)
-if err := connection.RConn.Client.Call("Peer.TreeSection", ts_request, &ts_response); err != nil {
-connection.logger.V(2).Error(err, "Call failed TreeSection")
+ctx, _ = context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", ts_request, &ts_response); err != nil {
+connection.logger.V(1).Error(err, "Call failed TreeSection")
return
} else {
// now we must write all the state changes to gravition
@ -197,8 +206,9 @@ func (connection *Connection) bootstrap_chain() {
sc_request := Request_Tree_Section_Struct{Topo: request.TopoHeights[0], TreeName: ts_response.Keys[j], Section: section[:], SectionLength: uint64(0)}
var sc_response Response_Tree_Section_Struct
fill_common(&sc_response.Common)
-if err := connection.RConn.Client.Call("Peer.TreeSection", sc_request, &sc_response); err != nil {
-connection.logger.V(2).Error(err, "Call failed TreeSection")
+ctx, _ = context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.TreeSection", sc_request, &sc_response); err != nil {
+connection.logger.V(1).Error(err, "Call failed TreeSection")
return
} else {
var sc_data_tree *graviton.Tree
@ -327,7 +337,7 @@ func (connection *Connection) bootstrap_chain() {
}
}

-if err = chain.Store.Block_tx_store.WriteBlock(bl.GetHash(), bl.Serialize(), diff, commit_version); err != nil {
+if err = chain.Store.Block_tx_store.WriteBlock(bl.GetHash(), bl.Serialize(), diff, commit_version, bl.Height); err != nil {
panic(fmt.Sprintf("error while writing block"))
}

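Every peer RPC in the bootstrap path now goes through Client.CallWithContext with a bounded context instead of a plain blocking Call, so a stalled peer can only hold up a sync step for TimeLimit. A hedged fragment of the same pattern with the cancel function kept and released (the diff discards it with "_"; calling it is the more idiomatic form and frees the timer early):

// illustrative fragment: bounded peer RPC, mirroring the CallWithContext calls above
func callPeerBounded(connection *Connection, method string, req, resp interface{}) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // release the timer even on the success path
	return connection.Client.CallWithContext(ctx, method, req, resp)
}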
@ -16,24 +16,17 @@

package p2p

-//import "fmt"
+import "fmt"

-//import "net"
import "time"
+import "context"
import "sync/atomic"

-//import "container/list"

import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/errormsg"
import "github.com/deroproject/derohe/transaction"

-//import "github.com/deroproject/derohe/cryptography/crypto"

-//import "github.com/deroproject/derosuite/blockchain"

// we are expecting other side to have a heavier PoW chain, try to sync now
func (connection *Connection) sync_chain() {

@ -75,8 +68,11 @@ try_again:
request.Block_list = append(request.Block_list, globals.Config.Genesis_Block_Hash)
request.TopoHeights = append(request.TopoHeights, 0)
fill_common(&request.Common) // fill common info
-if err := connection.RConn.Client.Call("Peer.Chain", request, &response); err != nil {
-connection.logger.V(2).Error(err, "Call failed Chain", err)
+var TimeLimit = 10 * time.Second
+ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.Chain", request, &response); err != nil {
+connection.logger.V(2).Error(err, "Call failed Chain")
return
}
// we have a response, see if its valid and try to add to get the blocks
@ -111,7 +107,7 @@ try_again:
connection.logger.V(2).Info("response block list", "count", len(response.Block_list))
for i := range response.Block_list {
our_topo_order := chain.Load_Block_Topological_order(response.Block_list[i])
-if our_topo_order != (int64(i)+response.Start_topoheight) || chain.Load_Block_Topological_order(response.Block_list[i]) == -1 { // if block is not in our chain, add it to request list
+if our_topo_order != (int64(i)+response.Start_topoheight) || our_topo_order == -1 { // if block is not in our chain, add it to request list
//queue_block(request.Block_list[i])
if max_blocks_to_queue >= 0 {
max_blocks_to_queue--
@ -121,7 +117,8 @@ try_again:

orequest.Block_list = append(orequest.Block_list, response.Block_list[i])
fill_common(&orequest.Common)
-if err := connection.RConn.Client.Call("Peer.GetObject", orequest, &oresponse); err != nil {
+ctx, _ := context.WithTimeout(context.Background(), TimeLimit)
+if err := connection.Client.CallWithContext(ctx, "Peer.GetObject", orequest, &oresponse); err != nil {
connection.logger.V(2).Error(err, "Call failed GetObject")
return
} else { // process the response
@ -130,10 +127,10 @@ try_again:
}
}

-//fmt.Printf("Queuing block %x height %d %s", response.Block_list[i], response.Start_height+int64(i), connection.logid)
+// fmt.Printf("Queuing block %x height %d %s", response.Block_list[i], response.Start_height+int64(i), connection.logid)
}
} else {
-connection.logger.V(3).Info("We must have queued but we skipped it at height", "blid", response.Block_list[i], "height", response.Start_height+int64(i))
+connection.logger.V(3).Info("We must have queued but we skipped it at height", "blid", fmt.Sprintf("%x", response.Block_list[i]), "height", response.Start_height+int64(i))
}
}

@ -36,18 +36,13 @@ type Chunks_Per_Block_Data struct {

// cleans up chunks every minute
func chunks_clean_up() {
-for {
-time.Sleep(5 * time.Second) // cleanup every 5 seconds

chunk_map.Range(func(key, value interface{}) bool {

chunks_per_block := value.(*Chunks_Per_Block_Data)
if time.Now().Sub(chunks_per_block.Created) > time.Second*180 {
chunk_map.Delete(key)
}
return true
})
-}
}

// return whether chunk exist
@ -64,6 +59,9 @@ func is_chunk_exist(hhash [32]byte, cid uint8) *Block_Chunk {
// feed a chunk until we are able to fully decode a chunk
func (connection *Connection) feed_chunk(chunk *Block_Chunk, sent int64) error {

+chunk_lock.Lock()
+defer chunk_lock.Unlock()

if chunk.HHash != chunk.HeaderHash() {
connection.logger.V(2).Info("This peer should be banned, since he supplied wrong chunk")
connection.exit()
@ -47,8 +47,7 @@ func fill_common_skip_topoheight(common *Common_Struct) {

// update some common properties quickly
func (connection *Connection) update(common *Common_Struct) {
-//connection.Lock()
-//defer connection.Unlock()
+connection.update_received = time.Now()
var hash crypto.Hash
atomic.StoreInt64(&connection.Height, common.Height) // satify race detector GOD
if common.StableHeight != 0 {
@ -27,7 +27,7 @@ import "sync"
import "sort"
import "time"
import "strings"
-import "math/rand"
+import "context"
import "sync/atomic"
import "runtime/debug"

@ -35,14 +35,14 @@ import "github.com/go-logr/logr"

import "github.com/dustin/go-humanize"

-import "github.com/paulbellamy/ratecounter"

import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/cryptography/crypto"
import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/metrics"
import "github.com/deroproject/derohe/transaction"

+import "github.com/cenkalti/rpc2"

// any connection incoming/outgoing can only be in this state
//type Conn_State uint32

@ -52,32 +52,30 @@ const (
ACTIVE = 2 // "Active"
)

-type Queued_Command struct {
-Command uint64 // we are waiting for this response
-BLID []crypto.Hash
-TXID []crypto.Hash
-Topos []int64
-}

const MAX_CLOCK_DATA_SET = 16

// This structure is used to do book keeping for the connection and keeps other DATA related to peer
// golang restricts 64 bit uint64/int atomic on a 64 bit boundary
// therefore all atomics are on the top
type Connection struct {
+Client *rpc2.Client
+Conn net.Conn // actual object to talk
+ConnTls net.Conn // tls layered conn

Height int64 // last height sent by peer ( first member alignments issues)
StableHeight int64 // last stable height
TopoHeight int64 // topo height, current topo height, this is the only thing we require for syncing
StateHash crypto.Hash // statehash at the top
Pruned int64 // till where chain has been pruned on this node

+Created time.Time // when was object created
LastObjectRequestTime int64 // when was the last item placed in object list
BytesIn uint64 // total bytes in
BytesOut uint64 // total bytes out
Latency int64 // time.Duration // latency to this node when sending timed sync

Incoming bool // is connection incoming or outgoing
-Addr *net.TCPAddr // endpoint on the other end
+Addr net.Addr // endpoint on the other end
Port uint32 // port advertised by other end as its server,if it's 0 server cannot accept connections
Peer_ID uint64 // Remote peer id
SyncNode bool // whether the peer has been added to command line as sync node
@ -85,100 +83,105 @@ type Connection struct {
ProtocolVersion string
Tag string // tag for the other end
DaemonVersion string
-//Exit chan bool // Exit marker that connection needs to be killed
-ExitCounter int32
State uint32 // state of the connection
Top_ID crypto.Hash // top block id of the connection

logger logr.Logger // connection specific logger
-logid string // formatted version of connection
-Requested_Objects [][32]byte // currently unused as we sync up with a single peer at a time
-Conn net.Conn // actual object to talk
-RConn *RPC_Connection // object for communication
-// Command_queue *list.List // New protocol is partly syncronous
-Objects chan Queued_Command // contains all objects that are requested
-SpeedIn *ratecounter.RateCounter // average speed in last 60 seconds
-SpeedOut *ratecounter.RateCounter // average speed in last 60 secs
-request_time atomic.Value //time.Time // used to track latency
-writelock sync.Mutex // used to Serialize writes

-previous_mbl []byte // single slot cache
+Requested_Objects [][32]byte // currently unused as we sync up with a single peer at a time

peer_sent_time time.Time // contains last time when peerlist was sent
+update_received time.Time // last time when upated was received
+ping_in_progress int32 // contains ping pending against this connection

+ping_count int64

clock_index int
clock_offsets [MAX_CLOCK_DATA_SET]time.Duration
delays [MAX_CLOCK_DATA_SET]time.Duration
clock_offset int64 // duration updated on every miniblock

+onceexit sync.Once

Mutex sync.Mutex // used only by connection go routine
}

-func (c *Connection) exit() {
-c.RConn.Session.Close()
+func Address(c *Connection) string {
+if c.Addr == nil {
+return ""
+}
+return ParseIPNoError(c.Addr.String())
+}

+func (c *Connection) exit() {
+defer globals.Recover(0)
+c.onceexit.Do(func() {
+c.ConnTls.Close()
+c.Conn.Close()
+c.Client.Close()
+})

+}

+// add connection to map
+func Connection_Delete(c *Connection) {
+connection_map.Range(func(k, value interface{}) bool {
+v := value.(*Connection)
+if c.Addr.String() == v.Addr.String() {
+connection_map.Delete(Address(v))
+return false
+}
+return true
+})
+}

+func Connection_Pending_Clear() {
+connection_map.Range(func(k, value interface{}) bool {
+v := value.(*Connection)
+if atomic.LoadUint32(&v.State) == HANDSHAKE_PENDING && time.Now().Sub(v.Created) > 10*time.Second { //and skip ourselves
+v.exit()
+v.logger.V(3).Info("Cleaning pending connection")
+}

+if time.Now().Sub(v.update_received).Round(time.Second).Seconds() > 20 {
+v.exit()
+Connection_Delete(v)
+v.logger.Info("Purging connection due since idle")
+}

+if IsAddressInBanList(Address(v)) {
+v.exit()
+Connection_Delete(v)
+v.logger.Info("Purging connection due to ban list")
+}
+return true
+})
}
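Teardown is now funnelled through a sync.Once, so exit() may be called from the ping loop, the purge pass and any error path without double-closing the TLS/TCP objects or the rpc2 client. A small hedged sketch of the same idempotent-close idea on a generic wrapper:

// illustrative only: idempotent close via sync.Once, as Connection.exit() now does
type closer struct {
	once sync.Once
	conn net.Conn
}

func (c *closer) Close() {
	c.once.Do(func() {
		c.conn.Close() // runs exactly once, however many callers race here
	})
}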
var connection_map sync.Map // map[string]*Connection{}
-var connection_per_ip_counter = map[string]int{} // only keeps the counter of counter of connections

-// for incoming connections we use their peer id to assertain uniquenesss
-// for outgoing connections, we use the tcp endpoint address, so as not more than 1 connection is done
-func Key(c *Connection) string {
-if c.Incoming {
-return fmt.Sprintf("%d", c.Peer_ID)
-}
-return string(c.Addr.String()) // Simple []byte => string conversion
-}

// check whether an IP is in the map already
func IsAddressConnected(address string) bool {

if _, ok := connection_map.Load(strings.TrimSpace(address)); ok {
return true
}
return false
}

-// add connection to map
+// add connection to map, only if we are not connected already
// we also check for limits for incoming connections
// same ip max 8 ip ( considering NAT)
//same Peer ID 4
-func Connection_Add(c *Connection) {
-//connection_mutex.Lock()
-//defer connection_mutex.Unlock()
-
-ip_count := 0
-peer_id_count := 0
-
-incoming_ip := c.Addr.IP.String()
-incoming_peer_id := c.Peer_ID
-
-if c.Incoming { // we need extra protection for incoming for various attacks
-
-connection_map.Range(func(k, value interface{}) bool {
-v := value.(*Connection)
-if v.Incoming {
-if incoming_ip == v.Addr.IP.String() {
-ip_count++
-}
-
-if incoming_peer_id == v.Peer_ID {
-peer_id_count++
-}
-}
-return true
-})
-}
-
-if ip_count >= 8 || peer_id_count >= 4 {
-c.logger.V(3).Info("IP address already has too many connections, exiting this connection", "ip", incoming_ip, "count", ip_count, "peerid", incoming_peer_id)
-c.exit()
-return
-}
-
-connection_map.Store(Key(c), c)
+func Connection_Add(c *Connection) bool {
+if dup, ok := connection_map.LoadOrStore(Address(c), c); !ok {
+c.Created = time.Now()
+c.logger.V(3).Info("IP address being added", "ip", c.Addr.String())
+return true
+} else {
+c.logger.V(3).Info("IP address already has one connection, exiting this connection", "ip", c.Addr.String(), "pre", dup.(*Connection).Addr.String())
+c.exit()
+return false
+}
}

// unique connection list
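Connection_Add now keys the map by the parsed peer address and uses sync.Map.LoadOrStore: the first caller stores the connection and wins, while a later duplicate gets the existing entry back and is rejected. A hedged, generic sketch of that claim-or-reject pattern:

// illustrative only: first-writer-wins registration with sync.Map.LoadOrStore
var registry sync.Map // address string -> connection-like value

func register(key string, v interface{}) bool {
	if _, loaded := registry.LoadOrStore(key, v); loaded {
		return false // someone already owns this key; the newcomer should be closed
	}
	return true // we stored v and now own the key
}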
@ -199,21 +202,28 @@ func UniqueConnections() map[uint64]*Connection {

// this function has infinite loop to keep ping every few sec
func ping_loop() {
-for {
-time.Sleep(1 * time.Second)
connection_map.Range(func(k, value interface{}) bool {
c := value.(*Connection)
-if atomic.LoadUint32(&c.State) != HANDSHAKE_PENDING && GetPeerID() != c.Peer_ID {
+if atomic.LoadUint32(&c.State) != HANDSHAKE_PENDING && GetPeerID() != c.Peer_ID /*&& atomic.LoadInt32(&c.ping_in_progress) == 0*/ {
go func() {
defer globals.Recover(3)
+atomic.AddInt32(&c.ping_in_progress, 1)
+defer atomic.AddInt32(&c.ping_in_progress, -1)

var request, response Dummy
fill_common(&request.Common) // fill common info

-if c.peer_sent_time.Add(5 * time.Second).Before(time.Now()) {
-c.peer_sent_time = time.Now()
+c.ping_count++
+if c.ping_count%100 == 1 {
request.Common.PeerList = get_peer_list()
}
-if err := c.RConn.Client.Call("Peer.Ping", request, &response); err != nil {
+ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+defer cancel()

+if err := c.Client.CallWithContext(ctx, "Peer.Ping", request, &response); err != nil {
+c.logger.V(2).Error(err, "ping failed")
+c.exit()
return
}
c.update(&response.Common) // update common information
@ -221,12 +231,6 @@ func ping_loop() {
}
return true
})
-}
-}
-
-// add connection to map
-func Connection_Delete(c *Connection) {
-connection_map.Delete(Key(c))
}
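Each ping now runs in its own goroutine and bumps an atomic ping_in_progress counter on entry and exit; the commented-out clause in the Range callback shows where that counter is meant to stop overlapping pings to a slow peer. A hedged sketch of the guard in isolation, written with a compare-and-swap so the check and the claim happen in one step:

// illustrative only: allow at most one in-flight ping per peer
var ping_in_progress int32

func pingOnce(do func()) {
	if !atomic.CompareAndSwapInt32(&ping_in_progress, 0, 1) {
		return // a previous ping has not returned yet; skip this tick
	}
	defer atomic.StoreInt32(&ping_in_progress, 0)
	do()
}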

// prints all the connection info to screen
@ -239,15 +243,21 @@ func Connection_Print() {
return true
})

-logger.Info("Connection info for peers", "count", len(clist))
-if globals.Arguments["--debug"].(bool) == true {
-fmt.Printf("%-20s %-16s %-5s %-7s %-7s %-7s %23s %3s %5s %s %s %s %s %16s %16s\n", "Remote Addr", "PEER ID", "PORT", " State", "Latency", "Offset", "S/H/T", "DIR", "QUEUE", " IN", " OUT", " IN SPEED", " OUT SPEED", "Version", "Statehash")
-} else {
-fmt.Printf("%-20s %-16s %-5s %-7s %-7s %-7s %17s %3s %5s %s %s %s %s %16s %16s\n", "Remote Addr", "PEER ID", "PORT", " State", "Latency", "Offset", "H/T", "DIR", "QUEUE", " IN", " OUT", " IN SPEED", " OUT SPEED", "Version", "Statehash")
-}
+version, err := chain.ReadBlockSnapshotVersion(chain.Get_Top_ID())
+if err != nil {
+panic(err)
+}

+StateHash, err := chain.Load_Merkle_Hash(version)

+if err != nil {
+panic(err)
+}

+logger.Info("Connection info for peers", "count", len(clist), "our Statehash", StateHash)

+fmt.Printf("%-30s %-16s %-5s %-7s %-7s %-7s %23s %3s %5s %s %s %16s %16s\n", "Remote Addr", "PEER ID", "PORT", " State", "Latency", "Offset", "S/H/T", "DIR", "QUEUE", " IN", " OUT", "Version", "Statehash")

// sort the list
sort.Slice(clist, func(i, j int) bool { return clist[i].Addr.String() < clist[j].Addr.String() })

@ -290,15 +300,10 @@ func Connection_Print() {
fmt.Print(color_yellow)
}

-if globals.Arguments["--debug"].(bool) == true {
+ctime := time.Now().Sub(clist[i].Created).Round(time.Second)

hstring := fmt.Sprintf("%d/%d/%d", clist[i].StableHeight, clist[i].Height, clist[i].TopoHeight)
-fmt.Printf("%-20s %16x %5d %7s %7s %7s %23s %s %5d %7s %7s %8s %9s %16s %s %x\n", clist[i].Addr.IP, clist[i].Peer_ID, clist[i].Port, state, time.Duration(atomic.LoadInt64(&clist[i].Latency)).Round(time.Millisecond).String(), time.Duration(atomic.LoadInt64(&clist[i].clock_offset)).Round(time.Millisecond).String(), hstring, dir, clist[i].isConnectionSyncing(), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesIn)), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesOut)), humanize.Bytes(uint64(clist[i].SpeedIn.Rate()/60)), humanize.Bytes(uint64(clist[i].SpeedOut.Rate()/60)), version, tag, clist[i].StateHash[:])
+fmt.Printf("%-30s %16x %5d %7s %7s %7s %23s %s %5d %7s %7s %16s %s %x\n", Address(clist[i])+" ("+ctime.String()+")", clist[i].Peer_ID, clist[i].Port, state, time.Duration(atomic.LoadInt64(&clist[i].Latency)).Round(time.Millisecond).String(), time.Duration(atomic.LoadInt64(&clist[i].clock_offset)).Round(time.Millisecond).String(), hstring, dir, 0, humanize.Bytes(atomic.LoadUint64(&clist[i].BytesIn)), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesOut)), version, tag, clist[i].StateHash[:])

-} else {
-hstring := fmt.Sprintf("%d/%d", clist[i].Height, clist[i].TopoHeight)
-fmt.Printf("%-20s %16x %5d %7s %7s %7s %17s %s %5d %7s %7s %8s %9s %16s %s %x\n", clist[i].Addr.IP, clist[i].Peer_ID, clist[i].Port, state, time.Duration(atomic.LoadInt64(&clist[i].Latency)).Round(time.Millisecond).String(), time.Duration(atomic.LoadInt64(&clist[i].clock_offset)).Round(time.Millisecond).String(), hstring, dir, clist[i].isConnectionSyncing(), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesIn)), humanize.Bytes(atomic.LoadUint64(&clist[i].BytesOut)), humanize.Bytes(uint64(clist[i].SpeedIn.Rate()/60)), humanize.Bytes(uint64(clist[i].SpeedOut.Rate()/60)), version, tag, clist[i].StateHash[:8])
-}

fmt.Print(color_normal)
}
@ -328,21 +333,6 @@ func Best_Peer_Height() (best_height, best_topo_height int64) {
return
}

-// this function return peer count which have successful handshake
-func Disconnect_All() (Count uint64) {
-return
-/*
-connection_mutex.Lock()
-for _, v := range connection_map {
-// v.Lock()
-close(v.Exit) // close the connection
-//v.Unlock()
-}
-connection_mutex.Unlock()
-return
-*/
-}

// this function return peer count which have successful handshake
func Peer_Count() (Count uint64) {
connection_map.Range(func(k, value interface{}) bool {
@ -355,29 +345,8 @@ func Peer_Count() (Count uint64) {
return
}

-// this function returnw random connection which have successful handshake
-func Random_Connection(height int64) (c *Connection) {
-
-var clist []*Connection
-
-connection_map.Range(func(k, value interface{}) bool {
-v := value.(*Connection)
-if atomic.LoadInt64(&v.Height) >= height {
-clist = append(clist, v)
-}
-return true
-})
-
-if len(clist) > 0 {
-return clist[rand.Int()%len(clist)]
-}
-
-return nil
-}

// this returns count of peers in both directions
func Peer_Direction_Count() (Incoming uint64, Outgoing uint64) {

connection_map.Range(func(k, value interface{}) bool {
v := value.(*Connection)
if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && GetPeerID() != v.Peer_ID {
@ -389,41 +358,9 @@ func Peer_Direction_Count() (Incoming uint64, Outgoing uint64) {
}
return true
})

return
}

-func broadcast_Block_tester(topo int64) (err error) {
-
-blid, err := chain.Load_Block_Topological_order_at_index(topo)
-if err != nil {
-return fmt.Errorf("err occurred topo %d err %s\n", topo, err)
-}
-var cbl block.Complete_Block
-bl, err := chain.Load_BL_FROM_ID(blid)
-if err != nil {
-return err
-}
-
-cbl.Bl = bl
-for j := range bl.Tx_hashes {
-var tx_bytes []byte
-if tx_bytes, err = chain.Store.Block_tx_store.ReadTX(bl.Tx_hashes[j]); err != nil {
-return err
-}
-var tx transaction.Transaction
-if err = tx.Deserialize(tx_bytes); err != nil {
-return err
-}
-
-cbl.Txs = append(cbl.Txs, &tx) // append all the txs
-
-}
-
-Broadcast_Block(&cbl, 0)
-return nil
-}

func Broadcast_Block(cbl *block.Complete_Block, PeerID uint64) {
Broadcast_Block_Coded(cbl, PeerID)
}
@ -491,7 +428,7 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
connection.logger.V(3).Info("Sending erasure coded chunk to peer ", "cid", cid)
var dummy Dummy
fill_common(&peer_specific_list.Common) // fill common info
-if err := connection.RConn.Client.Call("Peer.NotifyINV", peer_specific_list, &dummy); err != nil {
+if err := connection.Client.Call("Peer.NotifyINV", peer_specific_list, &dummy); err != nil {
return
}
connection.update(&dummy.Common) // update common information
@ -518,15 +455,10 @@ done:
// this function is triggerred from 2 points, one when we receive a unknown block which can be successfully added to chain
// second from the blockchain which has to relay locally mined blocks as soon as possible
func broadcast_Chunk(chunk *Block_Chunk, PeerID uint64, first_seen int64) { // if peerid is provided it is skipped

defer globals.Recover(3)

/*if IsSyncing() { // if we are syncing, do NOT broadcast the block
|
|
||||||
return
|
|
||||||
}*/
|
|
||||||
|
|
||||||
our_height := chain.Get_Height()
|
our_height := chain.Get_Height()
|
||||||
// build the request once and dispatch it to all possible peers
|
|
||||||
count := 0
|
count := 0
|
||||||
unique_map := UniqueConnections()
|
unique_map := UniqueConnections()
|
||||||
|
|
||||||
@ -565,7 +497,7 @@ func broadcast_Chunk(chunk *Block_Chunk, PeerID uint64, first_seen int64) { // i
|
|||||||
connection.logger.V(3).Info("Sending erasure coded chunk INV to peer ", "raw", fmt.Sprintf("%x", chunkid), "blid", fmt.Sprintf("%x", chunk.BLID), "cid", chunk.CHUNK_ID, "hhash", fmt.Sprintf("%x", hhash), "exists", nil != is_chunk_exist(hhash, uint8(chunk.CHUNK_ID)))
|
connection.logger.V(3).Info("Sending erasure coded chunk INV to peer ", "raw", fmt.Sprintf("%x", chunkid), "blid", fmt.Sprintf("%x", chunk.BLID), "cid", chunk.CHUNK_ID, "hhash", fmt.Sprintf("%x", hhash), "exists", nil != is_chunk_exist(hhash, uint8(chunk.CHUNK_ID)))
|
||||||
var dummy Dummy
|
var dummy Dummy
|
||||||
fill_common(&peer_specific_list.Common) // fill common info
|
fill_common(&peer_specific_list.Common) // fill common info
|
||||||
if err := connection.RConn.Client.Call("Peer.NotifyINV", peer_specific_list, &dummy); err != nil {
|
if err := connection.Client.Call("Peer.NotifyINV", peer_specific_list, &dummy); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
connection.update(&dummy.Common) // update common information
|
connection.update(&dummy.Common) // update common information
|
||||||
@ -579,17 +511,17 @@ func broadcast_Chunk(chunk *Block_Chunk, PeerID uint64, first_seen int64) { // i

// we can only broadcast a block which is in our db
// we can only broadcast a block which is in our db
// this function is trigger from 2 points, one when we receive a unknown block which can be successfully added to chain
// this function is trigger from 2 points, one when we receive a unknown block which can be successfully added to chain
// second from the blockchain which has to relay locally mined blocks as soon as possible
// second from the blockchain which has to relay locally mined blocks as soon as possible
func Broadcast_MiniBlock(mbl block.MiniBlock, PeerID uint64) { // if peerid is provided it is skipped
func Broadcast_MiniBlock(mbls []block.MiniBlock, PeerID uint64) { // if peerid is provided it is skipped
broadcast_MiniBlock(mbl, PeerID, globals.Time().UTC().UnixMicro())
broadcast_MiniBlock(mbls, PeerID, globals.Time().UTC().UnixMicro())
}
}
func broadcast_MiniBlock(mbl block.MiniBlock, PeerID uint64, first_seen int64) { // if peerid is provided it is skipped
func broadcast_MiniBlock(mbls []block.MiniBlock, PeerID uint64, first_seen int64) { // if peerid is provided it is skipped

defer globals.Recover(3)
defer globals.Recover(3)

miniblock_serialized := mbl.Serialize()

var peer_specific_block Objects
var peer_specific_block Objects
peer_specific_block.MiniBlocks = append(peer_specific_block.MiniBlocks, miniblock_serialized)
for _, mbl := range mbls {
peer_specific_block.MiniBlocks = append(peer_specific_block.MiniBlocks, mbl.Serialize())
}
fill_common(&peer_specific_block.Common) // fill common info
fill_common(&peer_specific_block.Common) // fill common info
peer_specific_block.Sent = first_seen
peer_specific_block.Sent = first_seen
@ -622,7 +554,7 @@ func broadcast_MiniBlock(mbl block.MiniBlock, PeerID uint64, first_seen int64) {
defer globals.Recover(3)
defer globals.Recover(3)

var dummy Dummy
var dummy Dummy
if err := connection.RConn.Client.Call("Peer.NotifyMiniBlock", peer_specific_block, &dummy); err != nil {
if err := connection.Client.Call("Peer.NotifyMiniBlock", peer_specific_block, &dummy); err != nil {
return
return
}
}
connection.update(&dummy.Common) // update common information
connection.update(&dummy.Common) // update common information
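Note: the two hunks above change the miniblock broadcaster to take a batch (mbls []block.MiniBlock) and to serialize each element into one payload before notifying peers. Illustrative sketch of that batching step (not part of the commit; MiniBlock and Objects below are simplified stand-ins for the real types):

package main

import "fmt"

type MiniBlock struct{ data []byte } // stand-in for block.MiniBlock

func (m MiniBlock) Serialize() []byte { return m.data }

type Objects struct {
	MiniBlocks [][]byte // one serialized miniblock per entry
	Sent       int64    // timestamp at which the sender first saw them
}

// buildMiniBlockPayload mirrors the loop in broadcast_MiniBlock above:
// every miniblock in the batch is serialized into a single Objects payload.
func buildMiniBlockPayload(mbls []MiniBlock, firstSeen int64) Objects {
	var payload Objects
	for _, mbl := range mbls {
		payload.MiniBlocks = append(payload.MiniBlocks, mbl.Serialize())
	}
	payload.Sent = firstSeen
	return payload
}

func main() {
	batch := []MiniBlock{{data: []byte{1}}, {data: []byte{2}}}
	fmt.Println(len(buildMiniBlockPayload(batch, 0).MiniBlocks)) // 2
}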
@ -682,7 +614,7 @@ func broadcast_Tx(tx *transaction.Transaction, PeerID uint64, sent int64) (relay
|
|||||||
|
|
||||||
var dummy Dummy
|
var dummy Dummy
|
||||||
fill_common(&dummy.Common) // fill common info
|
fill_common(&dummy.Common) // fill common info
|
||||||
if err := connection.RConn.Client.Call("Peer.NotifyINV", request, &dummy); err != nil {
|
if err := connection.Client.Call("Peer.NotifyINV", request, &dummy); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
connection.update(&dummy.Common) // update common information
connection.update(&dummy.Common) // update common information
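Note: broadcast_Tx, like the chunk broadcaster above, only announces object IDs via Peer.NotifyINV and lets peers pull whatever they are missing through Peer.GetObject. A rough, illustrative sketch of that inventory/fetch flow (not from the commit; all types and helpers here are placeholders and the real handlers differ):

package main

import "fmt"

type ObjectList struct{ Tx_list [][32]byte } // announced transaction ids
type Objects struct{ Txs [][]byte }          // full serialized transactions

type peerStore struct{ txs map[[32]byte][]byte }

func (p *peerStore) have(id [32]byte) bool { _, ok := p.txs[id]; return ok }

// receiver side of NotifyINV: collect unknown ids, then fetch them in one call
func (p *peerStore) onNotifyINV(inv ObjectList, fetch func(ObjectList) Objects) {
	var need ObjectList
	for _, id := range inv.Tx_list {
		if !p.have(id) {
			need.Tx_list = append(need.Tx_list, id)
		}
	}
	if len(need.Tx_list) == 0 {
		return
	}
	resp := fetch(need) // stands in for the Peer.GetObject RPC round-trip
	for i, id := range need.Tx_list {
		p.txs[id] = resp.Txs[i]
	}
}

func main() {
	sender := map[[32]byte][]byte{{1}: []byte("tx-payload")}
	receiver := &peerStore{txs: map[[32]byte][]byte{}}
	receiver.onNotifyINV(ObjectList{Tx_list: [][32]byte{{1}}}, func(need ObjectList) Objects {
		var o Objects
		for _, id := range need.Tx_list {
			o.Txs = append(o.Txs, sender[id])
		}
		return o
	})
	fmt.Println(len(receiver.txs)) // 1
}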
@ -698,33 +630,6 @@ func broadcast_Tx(tx *transaction.Transaction, PeerID uint64, sent int64) (relay
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
//var sync_in_progress bool
|
|
||||||
|
|
||||||
// we can tell whether we are syncing by seeing the pending queue of expected response
|
|
||||||
// if objects response are queued, we are syncing
|
|
||||||
// if even one of the connection is syncing, then we are syncronising
|
|
||||||
// returns a number how many blocks are queued
|
|
||||||
func (connection *Connection) isConnectionSyncing() (count int) {
|
|
||||||
//connection.Lock()
|
|
||||||
//defer connection.Unlock()
|
|
||||||
|
|
||||||
if atomic.LoadUint32(&connection.State) == HANDSHAKE_PENDING { // skip pre-handshake connections
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// check whether 15 secs have passed, if yes close the connection
|
|
||||||
// so we can try some other connection
|
|
||||||
if len(connection.Objects) > 0 {
|
|
||||||
if time.Now().Unix() >= (13 + atomic.LoadInt64(&connection.LastObjectRequestTime)) {
|
|
||||||
connection.exit()
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(connection.Objects)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// trigger a sync with a random peer
|
// trigger a sync with a random peer
|
||||||
func trigger_sync() {
|
func trigger_sync() {
|
||||||
defer globals.Recover(3)
|
defer globals.Recover(3)
|
||||||
@ -800,22 +705,6 @@ func trigger_sync() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//detect if something is queued to any of the peer
|
|
||||||
// is something is queue we are syncing
|
|
||||||
func IsSyncing() (result bool) {
|
|
||||||
|
|
||||||
syncing := false
|
|
||||||
connection_map.Range(func(k, value interface{}) bool {
|
|
||||||
v := value.(*Connection)
|
|
||||||
if v.isConnectionSyncing() != 0 {
|
|
||||||
syncing = true
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
return syncing
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:noinline
|
//go:noinline
|
||||||
func Abs(n int64) int64 {
|
func Abs(n int64) int64 {
|
||||||
if n < 0 {
|
if n < 0 {
|
||||||
@ -826,22 +715,18 @@ func Abs(n int64) int64 {

// detect whether we are behind any of the connected peers and trigger sync ASAP
// detect whether we are behind any of the connected peers and trigger sync ASAP
// randomly with one of the peers
// randomly with one of the peers

var single_sync int32

func syncroniser() {
func syncroniser() {
delay := time.NewTicker(time.Second)
for {
defer atomic.AddInt32(&single_sync, -1)
select {
case <-Exit_Event:
if atomic.AddInt32(&single_sync, 1) != 1 {
return
return
case <-delay.C:
}
}

calculate_network_time() // calculate time every sec
calculate_network_time() // calculate time every sec

if !IsSyncing() {
trigger_sync() // check whether we are out of sync
trigger_sync() // check whether we are out of sync
}

}
}
}
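Note: the rewritten syncroniser is now fired periodically by the scheduler registered in P2P_Init and uses the single_sync counter so overlapping invocations bail out early. Illustrative sketch of that single-flight guard (not part of the commit):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var single_sync int32 // >0 while a sync pass is running

// syncPass uses the same guard shape as syncroniser() above: bump the counter,
// proceed only if we are the sole active invocation, always undo the bump on exit.
func syncPass() {
	defer atomic.AddInt32(&single_sync, -1)
	if atomic.AddInt32(&single_sync, 1) != 1 {
		return // another pass is already in flight, skip this tick
	}
	time.Sleep(50 * time.Millisecond) // pretend to sync
	fmt.Println("sync pass ran")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // three overlapping ticks
		wg.Add(1)
		go func() { defer wg.Done(); syncPass() }()
	}
	wg.Wait() // typically only one "sync pass ran" is printed
}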
// update P2P time
|
// update P2P time
|
||||||
@ -893,58 +778,3 @@ func calculate_network_time() {
|
|||||||
|
|
||||||
globals.ClockOffsetP2P = time.Duration(total / count)
|
globals.ClockOffsetP2P = time.Duration(total / count)
|
||||||
}
|
}
|
||||||
|
|
||||||
// will return nil, if no peers available
|
|
||||||
func random_connection() *Connection {
|
|
||||||
unique_map := UniqueConnections()
|
|
||||||
|
|
||||||
var clist []*Connection
|
|
||||||
|
|
||||||
for _, value := range unique_map {
|
|
||||||
clist = append(clist, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(clist) == 0 {
|
|
||||||
return nil
|
|
||||||
} else if len(clist) == 1 {
|
|
||||||
return clist[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// sort the list random
|
|
||||||
// do random shuffling, can we get away with len/2 random shuffling
|
|
||||||
globals.Global_Random.Shuffle(len(clist), func(i, j int) {
|
|
||||||
clist[i], clist[j] = clist[j], clist[i]
|
|
||||||
})
|
|
||||||
|
|
||||||
return clist[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// this will request a tx
|
|
||||||
func (c *Connection) request_tx(txid [][32]byte, random bool) (err error) {
|
|
||||||
var need ObjectList
|
|
||||||
var oresponse Objects
|
|
||||||
|
|
||||||
need.Tx_list = append(need.Tx_list, txid...)
|
|
||||||
|
|
||||||
connection := c
|
|
||||||
if random {
|
|
||||||
connection = random_connection()
|
|
||||||
}
|
|
||||||
if connection == nil {
|
|
||||||
err = fmt.Errorf("No peer available")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fill_common(&need.Common) // fill common info
|
|
||||||
if err = c.RConn.Client.Call("Peer.GetObject", need, &oresponse); err != nil {
|
|
||||||
c.exit()
|
|
||||||
return
|
|
||||||
} else { // process the response
|
|
||||||
if err = c.process_object_response(oresponse, 0, false); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
|
|
||||||
}
|
|
||||||
|
@ -16,17 +16,18 @@
|
|||||||
|
|
||||||
package p2p
|
package p2p
|
||||||
|
|
||||||
//import "os"
|
|
||||||
import "fmt"
|
import "fmt"
|
||||||
import "net"
|
import "net"
|
||||||
import "net/rpc"
|
|
||||||
|
//import "net/url"
|
||||||
import "time"
|
import "time"
|
||||||
import "sort"
|
import "sort"
|
||||||
|
import "sync"
|
||||||
import "strings"
|
import "strings"
|
||||||
import "math/big"
|
import "math/big"
|
||||||
import "strconv"
|
import "strconv"
|
||||||
|
|
||||||
//import "crypto/rsa"
|
import "crypto/sha1"
|
||||||
import "crypto/ecdsa"
|
import "crypto/ecdsa"
|
||||||
import "crypto/elliptic"
|
import "crypto/elliptic"
|
||||||
|
|
||||||
@ -44,6 +45,14 @@ import "github.com/deroproject/derohe/globals"
|
|||||||
import "github.com/deroproject/derohe/metrics"
|
import "github.com/deroproject/derohe/metrics"
|
||||||
import "github.com/deroproject/derohe/blockchain"
|
import "github.com/deroproject/derohe/blockchain"
|
||||||
|
|
||||||
|
import "github.com/xtaci/kcp-go/v5"
|
||||||
|
import "golang.org/x/crypto/pbkdf2"
|
||||||
|
import "golang.org/x/time/rate"
|
||||||
|
|
||||||
|
import "github.com/cenkalti/rpc2"
|
||||||
|
|
||||||
|
//import "github.com/txthinking/socks5"
|
||||||
|
|
||||||
var chain *blockchain.Blockchain // external reference to chain
|
var chain *blockchain.Blockchain // external reference to chain
|
||||||
|
|
||||||
var P2P_Port int // this will be exported while doing handshake
|
var P2P_Port int // this will be exported while doing handshake
|
||||||
@ -58,6 +67,28 @@ var nonbanlist []string // any ips in this list will never be banned
|
|||||||
|
|
||||||
var ClockOffset time.Duration //Clock Offset related to all the peer2 connected
|
var ClockOffset time.Duration //Clock Offset related to all the peer2 connected

// also backoff is used if we have initiated a connect we will not connect to it again for another 10 secs
var backoff = map[string]int64{} // if server receives a connection, then it will not initiate connection to that ip for another 60 secs
var backoff_mutex = sync.Mutex{}

// return true if we should back off else we can connect
func shouldwebackoff(ip string) bool {
backoff_mutex.Lock()
defer backoff_mutex.Unlock()

now := time.Now().Unix()
for k, v := range backoff { // random backing off
if v < now {
delete(backoff, k)
}
}

if backoff[ip] != 0 { // now lets do the test
return true
}
return false
}
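Note: shouldwebackoff above is consulted before dialing, while the dial and accept paths later in this diff record the hold-off times (10 s after an outgoing attempt, a random 0-200 s after an accepted incoming connection). Illustrative sketch of those write sides (not part of the commit):

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var backoff = map[string]int64{} // ip -> unix time until which we must not redial
var backoff_mutex = sync.Mutex{}

func noteOutgoingAttempt(ip string) {
	backoff_mutex.Lock()
	backoff[ip] = time.Now().Unix() + 10 // don't redial this peer for 10 seconds
	backoff_mutex.Unlock()
}

func noteIncomingConnection(ip string) {
	backoff_mutex.Lock()
	backoff[ip] = time.Now().Unix() + rand.Int63n(200) // random hold-off, as in P2P_Server_v2
	backoff_mutex.Unlock()
}

func main() {
	noteOutgoingAttempt("192.0.2.1")
	noteIncomingConnection("192.0.2.2")
	fmt.Println(backoff) // expiry timestamps that shouldwebackoff() consults
}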
// Initialize P2P subsystem
|
// Initialize P2P subsystem
|
||||||
func P2P_Init(params map[string]interface{}) error {
|
func P2P_Init(params map[string]interface{}) error {
|
||||||
logger = globals.Logger.WithName("P2P") // all components must use this logger
|
logger = globals.Logger.WithName("P2P") // all components must use this logger
|
||||||
@ -107,9 +138,11 @@ func P2P_Init(params map[string]interface{}) error {

go P2P_Server_v2() // start accepting connections
go P2P_Server_v2() // start accepting connections
go P2P_engine() // start outgoing engine
go P2P_engine() // start outgoing engine
go syncroniser() // start sync engine
globals.Cron.AddFunc("@every 2s", syncroniser) // start sync engine
go chunks_clean_up() // clean up chunks
globals.Cron.AddFunc("@every 5s", Connection_Pending_Clear) // clean dead connections
go ping_loop() // ping loop
globals.Cron.AddFunc("@every 10s", ping_loop) // ping every one
globals.Cron.AddFunc("@every 10s", chunks_clean_up) // clean chunks

go time_check_routine() // check whether server time is in sync using ntp
go time_check_routine() // check whether server time is in sync using ntp

metrics.Set.NewGauge("p2p_peer_count", func() float64 { // set a new gauge
metrics.Set.NewGauge("p2p_peer_count", func() float64 { // set a new gauge
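Note: P2P_Init now registers its periodic jobs on globals.Cron instead of spawning dedicated goroutine loops. Illustrative sketch of that pattern, assuming a robfig/cron/v3-style scheduler behind globals.Cron (the actual wrapper is not shown in this diff):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func syncroniser() { fmt.Println("sync tick") }
func ping_loop()   { fmt.Println("ping tick") }

func main() {
	c := cron.New() // the default parser understands "@every ..." descriptors
	c.AddFunc("@every 2s", syncroniser) // same spec strings as the hunk above
	c.AddFunc("@every 10s", ping_loop)
	c.Start()
	time.Sleep(5 * time.Second) // let a few ticks fire
	c.Stop()
}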
@ -215,52 +248,109 @@ func P2P_engine() {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func tunekcp(conn *kcp.UDPSession) {
|
||||||
|
conn.SetACKNoDelay(true)
|
||||||
|
conn.SetNoDelay(1, 10, 2, 1) // tuning paramters for local stack
|
||||||
|
}
|
||||||
|
|
||||||
// will try to connect with given endpoint
|
// will try to connect with given endpoint
|
||||||
// will block until the connection dies or is killed
|
// will block until the connection dies or is killed
|
||||||
func connect_with_endpoint(endpoint string, sync_node bool) {
|
func connect_with_endpoint(endpoint string, sync_node bool) {
|
||||||
|
|
||||||
defer globals.Recover(2)
|
defer globals.Recover(2)
|
||||||
|
|
||||||
remote_ip, err := net.ResolveTCPAddr("tcp", endpoint)
|
remote_ip, err := net.ResolveUDPAddr("udp", endpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.V(3).Error(err, "Resolve address failed:", "endpoint", endpoint)
|
logger.V(3).Error(err, "Resolve address failed:", "endpoint", endpoint)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if IsAddressInBanList(ParseIPNoError(remote_ip.IP.String())) {
|
||||||
|
logger.V(2).Info("Connecting to banned IP is prohibited", "IP", remote_ip.IP.String())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// check whether are already connected to this address if yes, return
|
// check whether are already connected to this address if yes, return
|
||||||
if IsAddressConnected(remote_ip.String()) {
|
if IsAddressConnected(ParseIPNoError(remote_ip.String())) {
|
||||||
|
logger.V(4).Info("outgoing address is already connected", "ip", remote_ip.String())
|
||||||
return //nil, fmt.Errorf("Already connected")
|
return //nil, fmt.Errorf("Already connected")
|
||||||
}
|
}
|
||||||
|
|
||||||
// since we may be connecting through socks, grab the remote ip for our purpose rightnow
|
if shouldwebackoff(ParseIPNoError(remote_ip.String())) {
|
||||||
conn, err := globals.Dialer.Dial("tcp", remote_ip.String())
|
logger.V(1).Info("backing off from this connection", "ip", remote_ip.String())
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
backoff_mutex.Lock()
|
||||||
|
backoff[ParseIPNoError(remote_ip.String())] = time.Now().Unix() + 10
|
||||||
|
backoff_mutex.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
var masterkey = pbkdf2.Key(globals.Config.Network_ID.Bytes(), globals.Config.Network_ID.Bytes(), 1024, 32, sha1.New)
|
||||||
|
var blockcipher, _ = kcp.NewAESBlockCrypt(masterkey)
|
||||||
|
|
||||||
|
var conn *kcp.UDPSession
|
||||||
|
|
||||||
|
// since we may be connecting through socks, grab the remote ip for our purpose rightnow
|
||||||
|
//conn, err := globals.Dialer.Dial("tcp", remote_ip.String())
|
||||||
|
if globals.Arguments["--socks-proxy"] == nil {
|
||||||
|
conn, err = kcp.DialWithOptions(remote_ip.String(), blockcipher, 10, 3)
|
||||||
|
} else { // we must move through a socks 5 UDP ASSOCIATE supporting proxy, ssh implementation is partial
|
||||||
|
err = fmt.Errorf("socks proxying is not supported")
|
||||||
|
logger.V(0).Error(err, "Not suported", "server", globals.Arguments["--socks-proxy"])
|
||||||
|
return
|
||||||
|
/*uri, err := url.Parse("socks5://" + globals.Arguments["--socks-proxy"].(string)) // "socks5://demo:demo@192.168.99.100:1080"
|
||||||
|
if err != nil {
|
||||||
|
logger.V(0).Error(err, "Error parsing socks proxy", "server", globals.Arguments["--socks-proxy"])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = uri
|
||||||
|
sserver := uri.Host
|
||||||
|
if uri.Port() != "" {
|
||||||
|
|
||||||
|
host, _, err := net.SplitHostPort(uri.Host)
|
||||||
|
if err != nil {
|
||||||
|
logger.V(0).Error(err, "Error parsing socks proxy", "server", globals.Arguments["--socks-proxy"])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sserver = host + ":"+ uri.Port()
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("sserver %s host %s port %s\n", sserver, uri.Host, uri.Port())
|
||||||
|
username := ""
|
||||||
|
password := ""
|
||||||
|
if uri.User != nil {
|
||||||
|
username = uri.User.Username()
|
||||||
|
password,_ = uri.User.Password()
|
||||||
|
}
|
||||||
|
tcpTimeout := 10
|
||||||
|
udpTimeout := 10
|
||||||
|
c, err := socks5.NewClient(sserver, username, password, tcpTimeout, udpTimeout)
|
||||||
|
if err != nil {
|
||||||
|
logger.V(0).Error(err, "Error connecting to socks proxy", "server", globals.Arguments["--socks-proxy"])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
udpconn, err := c.Dial("udp", remote_ip.String())
|
||||||
|
if err != nil {
|
||||||
|
logger.V(0).Error(err, "Error connecting to remote host using socks proxy", "socks", globals.Arguments["--socks-proxy"],"remote",remote_ip.String())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
conn,err = kcp.NewConn(remote_ip.String(),blockcipher,10,3,udpconn)
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
//conn, err := tls.DialWithDialer(&globals.Dialer, "tcp", remote_ip.String(),&tls.Config{InsecureSkipVerify: true})
|
|
||||||
//conn, err := tls.Dial("tcp", remote_ip.String(),&tls.Config{InsecureSkipVerify: true})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.V(3).Error(err, "Dial failed", "endpoint", endpoint)
|
logger.V(3).Error(err, "Dial failed", "endpoint", endpoint)
|
||||||
Peer_SetFail(remote_ip.String()) // update peer list as we see
|
Peer_SetFail(ParseIPNoError(remote_ip.String())) // update peer list as we see
|
||||||
|
conn.Close()
|
||||||
return //nil, fmt.Errorf("Dial failed err %s", err.Error())
|
return //nil, fmt.Errorf("Dial failed err %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
tcpc := conn.(*net.TCPConn)
|
tunekcp(conn) // set tunings for low latency
|
||||||
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
|
|
||||||
// default on linux: 30 + 8 * 30
|
|
||||||
// default on osx: 30 + 8 * 75
|
|
||||||
tcpc.SetKeepAlive(true)
|
|
||||||
tcpc.SetKeepAlivePeriod(8 * time.Second)
|
|
||||||
tcpc.SetLinger(0) // discard any pending data
|
|
||||||
|
|
||||||
//conn.SetKeepAlive(true) // set keep alive true
|
|
||||||
//conn.SetKeepAlivePeriod(10*time.Second) // keep alive every 10 secs
|
|
||||||
|
|
||||||
// upgrade connection TO TLS ( tls.Dial does NOT support proxy)
|
|
||||||
// TODO we need to choose fastest cipher here ( so both clients/servers are not loaded)
|
// TODO we need to choose fastest cipher here ( so both clients/servers are not loaded)
|
||||||
conn = tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
|
conntls := tls.Client(conn, &tls.Config{InsecureSkipVerify: true})
|
||||||
|
process_outgoing_connection(conn, conntls, remote_ip, false, sync_node)
|
||||||
|
|
||||||
process_connection(conn, remote_ip, false, sync_node)
|
|
||||||
|
|
||||||
//Handle_Connection(conn, remote_ip, false, sync_node) // handle connection
|
|
||||||
}
}
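Note: the dial path above now resolves a UDP endpoint, derives a 32-byte AES key from the network ID with PBKDF2, opens an encrypted KCP session with 10 data / 3 parity FEC shards, tunes it via tunekcp and only then layers TLS on top. Illustrative sketch of that transport setup (not part of the commit; the endpoint and network-ID bytes are placeholders):

package main

import (
	"crypto/sha1"
	"log"

	"github.com/xtaci/kcp-go/v5"
	"golang.org/x/crypto/pbkdf2"
)

func main() {
	networkID := []byte("placeholder-network-id") // stand-in for globals.Config.Network_ID.Bytes()

	// same derivation as the diff: key == salt, 1024 rounds, 32-byte AES key
	masterkey := pbkdf2.Key(networkID, networkID, 1024, 32, sha1.New)
	blockcipher, err := kcp.NewAESBlockCrypt(masterkey)
	if err != nil {
		log.Fatal(err)
	}

	// 10 data shards + 3 parity shards of Reed-Solomon FEC, as in the diff
	conn, err := kcp.DialWithOptions("127.0.0.1:18089", blockcipher, 10, 3)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	conn.SetACKNoDelay(true)     // ack every packet immediately
	conn.SetNoDelay(1, 10, 2, 1) // aggressive low-latency tuning, mirrors tunekcp
}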
// maintains a persistent connection to endpoint
// maintains a persistent connection to endpoint
|
||||||
@ -322,7 +412,7 @@ func maintain_connection_to_peers() {
|
|||||||
logger.Info("Min outgoing peers", "min-peers", Min_Peers)
|
logger.Info("Min outgoing peers", "min-peers", Min_Peers)
|
||||||
}
|
}
|
||||||
|
|
||||||
delay := time.NewTicker(time.Second)
|
delay := time.NewTicker(200 * time.Millisecond)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@ -339,7 +429,7 @@ func maintain_connection_to_peers() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
peer := find_peer_to_connect(1)
|
peer := find_peer_to_connect(1)
|
||||||
if peer != nil {
|
if peer != nil && !IsAddressConnected(ParseIPNoError(peer.Address)) {
|
||||||
go connect_with_endpoint(peer.Address, false)
|
go connect_with_endpoint(peer.Address, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -347,6 +437,8 @@ func maintain_connection_to_peers() {
|
|||||||
|
|
||||||
func P2P_Server_v2() {
|
func P2P_Server_v2() {
|
||||||
|
|
||||||
|
var accept_limiter = rate.NewLimiter(10.0, 40) // 10 incoming per sec, burst of 40 is okay
|
||||||
|
|
||||||
default_address := "0.0.0.0:0" // be default choose a random port
|
default_address := "0.0.0.0:0" // be default choose a random port
|
||||||
if _, ok := globals.Arguments["--p2p-bind"]; ok && globals.Arguments["--p2p-bind"] != nil {
|
if _, ok := globals.Arguments["--p2p-bind"]; ok && globals.Arguments["--p2p-bind"] != nil {
|
||||||
addr, err := net.ResolveTCPAddr("tcp", globals.Arguments["--p2p-bind"].(string))
|
addr, err := net.ResolveTCPAddr("tcp", globals.Arguments["--p2p-bind"].(string))
|
||||||
@ -363,115 +455,193 @@ func P2P_Server_v2() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
srv := rpc2.NewServer()
|
||||||
|
srv.OnConnect(func(c *rpc2.Client) {
|
||||||
|
remote_addr_interface, _ := c.State.Get("addr")
|
||||||
|
remote_addr := remote_addr_interface.(net.Addr)
|
||||||
|
|
||||||
|
conn_interface, _ := c.State.Get("conn")
|
||||||
|
conn := conn_interface.(net.Conn)
|
||||||
|
|
||||||
|
tlsconn_interface, _ := c.State.Get("tlsconn")
|
||||||
|
tlsconn := tlsconn_interface.(net.Conn)
|
||||||
|
|
||||||
|
connection := &Connection{Client: c, Conn: conn, ConnTls: tlsconn, Addr: remote_addr, State: HANDSHAKE_PENDING, Incoming: true}
|
||||||
|
connection.logger = logger.WithName("incoming").WithName(remote_addr.String())
|
||||||
|
|
||||||
|
c.State.Set("c", connection) // set pointer to connection
|
||||||
|
|
||||||
|
//connection.logger.Info("connected OnConnect")
|
||||||
|
go func() {
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
connection.dispatch_test_handshake()
|
||||||
|
}()
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
set_handlers(srv)
|
||||||
|
|
||||||
tlsconfig := &tls.Config{Certificates: []tls.Certificate{generate_random_tls_cert()}}
|
tlsconfig := &tls.Config{Certificates: []tls.Certificate{generate_random_tls_cert()}}
|
||||||
//l, err := tls.Listen("tcp", default_address, tlsconfig) // listen as TLS server
|
//l, err := tls.Listen("tcp", default_address, tlsconfig) // listen as TLS server
|
||||||
|
|
||||||
|
_ = tlsconfig
|
||||||
|
|
||||||
|
var masterkey = pbkdf2.Key(globals.Config.Network_ID.Bytes(), globals.Config.Network_ID.Bytes(), 1024, 32, sha1.New)
|
||||||
|
var blockcipher, _ = kcp.NewAESBlockCrypt(masterkey)
|
||||||
|
|
||||||
// listen to incoming tcp connections tls style
|
// listen to incoming tcp connections tls style
|
||||||
l, err := net.Listen("tcp", default_address) // listen as simple TCP server
|
l, err := kcp.ListenWithOptions(default_address, blockcipher, 10, 3)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Could not listen", "address", default_address)
|
logger.Error(err, "Could not listen", "address", default_address)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer l.Close()
|
defer l.Close()
|
||||||
P2P_Port = int(l.Addr().(*net.TCPAddr).Port)
|
|
||||||
|
_, P2P_Port_str, _ := net.SplitHostPort(l.Addr().String())
|
||||||
|
P2P_Port, _ = strconv.Atoi(P2P_Port_str)
|
||||||
|
|
||||||
logger.Info("P2P is listening", "address", l.Addr().String())
|
logger.Info("P2P is listening", "address", l.Addr().String())
|
||||||
|
|
||||||
// p2p is shutting down, close the listening socket
|
|
||||||
go func() { <-Exit_Event; l.Close() }()
|
|
||||||
|
|
||||||
// A common pattern is to start a loop to continuously accept connections
// A common pattern is to start a loop to continuously accept connections
for {
|
for {
|
||||||
conn, err := l.Accept() //accept connections using Listener.Accept()
|
conn, err := l.AcceptKCP() //accept connections using Listener.Accept()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
select {
|
select {
|
||||||
case <-Exit_Event:
|
case <-Exit_Event:
|
||||||
|
l.Close() // p2p is shutting down, close the listening socket
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
logger.Error(err, "Err while accepting incoming connection")
|
logger.V(1).Error(err, "Err while accepting incoming connection")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
raddr := conn.RemoteAddr().(*net.TCPAddr)
|
|
||||||
|
|
||||||
//if incoming IP is banned, disconnect now
|
if !accept_limiter.Allow() { // if rate limiter allows, then only add else drop the connection
|
||||||
if IsAddressInBanList(raddr.IP.String()) {
|
|
||||||
logger.Info("Incoming IP is banned, disconnecting now", "IP", raddr.IP.String())
|
|
||||||
conn.Close()
|
conn.Close()
|
||||||
} else {
|
continue
|
||||||
|
|
||||||
tcpc := conn.(*net.TCPConn)
|
|
||||||
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
|
|
||||||
// default on linux: 30 + 8 * 30
|
|
||||||
// default on osx: 30 + 8 * 75
|
|
||||||
tcpc.SetKeepAlive(true)
|
|
||||||
tcpc.SetKeepAlivePeriod(8 * time.Second)
|
|
||||||
tcpc.SetLinger(0) // discard any pending data
|
|
||||||
|
|
||||||
tlsconn := tls.Server(conn, tlsconfig)
|
|
||||||
go process_connection(tlsconn, raddr, true, false) // handle connection in a different go routine
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
raddr := conn.RemoteAddr().(*net.UDPAddr)
|
||||||
|
|
||||||
|
backoff_mutex.Lock()
|
||||||
|
backoff[ParseIPNoError(raddr.String())] = time.Now().Unix() + globals.Global_Random.Int63n(200) // random backing of upto 200 secs
|
||||||
|
backoff_mutex.Unlock()
|
||||||
|
|
||||||
|
logger.V(3).Info("accepting incoming connection", "raddr", raddr.String())
|
||||||
|
|
||||||
|
if IsAddressConnected(ParseIPNoError(raddr.String())) {
|
||||||
|
logger.V(4).Info("incoming address is already connected", "ip", raddr.String())
|
||||||
|
conn.Close()
|
||||||
|
|
||||||
|
} else if IsAddressInBanList(ParseIPNoError(raddr.IP.String())) { //if incoming IP is banned, disconnect now
|
||||||
|
logger.V(2).Info("Incoming IP is banned, disconnecting now", "IP", raddr.IP.String())
|
||||||
|
conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
tunekcp(conn) // tuning paramters for local stack
|
||||||
|
tlsconn := tls.Server(conn, tlsconfig)
|
||||||
|
state := rpc2.NewState()
|
||||||
|
state.Set("addr", raddr)
|
||||||
|
state.Set("conn", conn)
|
||||||
|
state.Set("tlsconn", tlsconn)
|
||||||
|
|
||||||
|
go srv.ServeCodecWithState(NewCBORCodec(tlsconn), state)
}
}

}
}
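Note: the accept loop above now gates every incoming KCP connection with a token-bucket rate limiter plus ban/duplicate checks before serving it. Illustrative sketch of that gate (not part of the commit; isBanned and isConnected stand in for IsAddressInBanList and IsAddressConnected):

package main

import (
	"fmt"
	"net"

	"golang.org/x/time/rate"
)

var acceptLimiter = rate.NewLimiter(10.0, 40) // ~10 accepts per second, bursts of 40

func isBanned(ip string) bool    { return false } // placeholder
func isConnected(ip string) bool { return false } // placeholder

// gateIncoming returns true only if the connection survives the checks;
// otherwise it is closed immediately, as in the accept loop above.
func gateIncoming(conn net.Conn) bool {
	if !acceptLimiter.Allow() { // shedding load: too many accepts right now
		conn.Close()
		return false
	}
	ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
	if isBanned(ip) || isConnected(ip) {
		conn.Close()
		return false
	}
	return true // caller would now do TLS + RPC setup
}

func main() {
	a, b := net.Pipe() // in-memory stand-in for an accepted connection
	defer b.Close()
	fmt.Println(gateIncoming(a)) // true
}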
func handle_connection_panic(c *Connection) {
|
func handle_connection_panic(c *Connection) {
|
||||||
|
defer globals.Recover(2)
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
logger.V(2).Error(nil, "Recovered while handling connection", "r", r, "stack", debug.Stack())
|
logger.V(2).Error(nil, "Recovered while handling connection", "r", r, "stack", debug.Stack())
|
||||||
c.exit()
|
c.exit()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func process_connection(conn net.Conn, remote_addr *net.TCPAddr, incoming, sync_node bool) {
|
func set_handler(base interface{}, methodname string, handler interface{}) {
|
||||||
defer globals.Recover(2)
|
switch o := base.(type) {
|
||||||
|
case *rpc2.Client:
|
||||||
|
o.Handle(methodname, handler)
|
||||||
|
//fmt.Printf("setting client handler %s\n", methodname)
|
||||||
|
case *rpc2.Server:
|
||||||
|
o.Handle(methodname, handler)
|
||||||
|
//fmt.Printf("setting server handler %s\n", methodname)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("object cannot handle such handler %T", base))
|
||||||
|
|
||||||
var rconn *RPC_Connection
|
|
||||||
var err error
|
|
||||||
if incoming {
|
|
||||||
rconn, err = wait_stream_creation_server_side(conn) // do server side processing
|
|
||||||
} else {
|
|
||||||
rconn, err = stream_creation_client_side(conn) // do client side processing
|
|
||||||
}
|
}
|
||||||
if err == nil {
|
}
|
||||||
|
|
||||||
var RPCSERVER = rpc.NewServer()
|
func getc(client *rpc2.Client) *Connection {
|
||||||
c := &Connection{RConn: rconn, Addr: remote_addr, State: HANDSHAKE_PENDING, Incoming: incoming, SyncNode: sync_node}
|
if ci, found := client.State.Get("c"); found {
|
||||||
RPCSERVER.RegisterName("Peer", c) // register the handlers
|
return ci.(*Connection)
|
||||||
|
|
||||||
if incoming {
|
|
||||||
c.logger = logger.WithName("incoming").WithName(remote_addr.String())
|
|
||||||
} else {
|
} else {
|
||||||
|
panic("no connection attached")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// we need the following RPCS to work
|
||||||
|
func set_handlers(o interface{}) {
|
||||||
|
set_handler(o, "Peer.Handshake", func(client *rpc2.Client, args Handshake_Struct, reply *Handshake_Struct) error {
|
||||||
|
return getc(client).Handshake(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.Chain", func(client *rpc2.Client, args Chain_Request_Struct, reply *Chain_Response_Struct) error {
|
||||||
|
return getc(client).Chain(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.ChangeSet", func(client *rpc2.Client, args ChangeList, reply *Changes) error {
|
||||||
|
return getc(client).ChangeSet(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.NotifyINV", func(client *rpc2.Client, args ObjectList, reply *Dummy) error {
|
||||||
|
return getc(client).NotifyINV(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.GetObject", func(client *rpc2.Client, args ObjectList, reply *Objects) error {
|
||||||
|
return getc(client).GetObject(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.TreeSection", func(client *rpc2.Client, args Request_Tree_Section_Struct, reply *Response_Tree_Section_Struct) error {
|
||||||
|
return getc(client).TreeSection(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.NotifyMiniBlock", func(client *rpc2.Client, args Objects, reply *Dummy) error {
|
||||||
|
return getc(client).NotifyMiniBlock(args, reply)
|
||||||
|
})
|
||||||
|
set_handler(o, "Peer.Ping", func(client *rpc2.Client, args Dummy, reply *Dummy) error {
|
||||||
|
return getc(client).Ping(args, reply)
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func process_outgoing_connection(conn net.Conn, tlsconn net.Conn, remote_addr net.Addr, incoming, sync_node bool) {
|
||||||
|
defer globals.Recover(0)
|
||||||
|
|
||||||
|
client := rpc2.NewClientWithCodec(NewCBORCodec(tlsconn))
|
||||||
|
|
||||||
|
c := &Connection{Client: client, Conn: conn, ConnTls: tlsconn, Addr: remote_addr, State: HANDSHAKE_PENDING, Incoming: incoming, SyncNode: sync_node}
|
||||||
|
defer c.exit()
|
||||||
c.logger = logger.WithName("outgoing").WithName(remote_addr.String())
|
c.logger = logger.WithName("outgoing").WithName(remote_addr.String())
|
||||||
}
|
set_handlers(client)
|
||||||
|
|
||||||
|
client.State = rpc2.NewState()
|
||||||
|
client.State.Set("c", c)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
time.Sleep(2 * time.Second)
|
||||||
if r := recover(); r != nil {
|
|
||||||
logger.V(1).Error(nil, "Recovered while handling connection", "r", r, "stack", debug.Stack())
|
|
||||||
conn.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
//RPCSERVER.ServeConn(rconn.ServerConn) // start single threaded rpc server with GOB encoding
|
|
||||||
RPCSERVER.ServeCodec(NewCBORServerCodec(rconn.ServerConn)) // use CBOR encoding on rpc
|
|
||||||
}()
|
|
||||||
|
|
||||||
c.dispatch_test_handshake()
|
c.dispatch_test_handshake()
|
||||||
|
}()
|
||||||
|
|
||||||
<-rconn.Session.CloseChan()
|
// c.logger.V(4).Info("client running loop")
|
||||||
Connection_Delete(c)
|
client.Run() // see the original
|
||||||
//fmt.Printf("closing connection status err: %s\n",err)
|
|
||||||
}
|
|
||||||
conn.Close()
|
|
||||||
|
|
||||||
|
c.logger.V(4).Info("process_connection finished")
|
||||||
}
|
}
|
||||||
|
|
||||||
// shutdown the p2p component
|
// shutdown the p2p component
|
||||||
func P2P_Shutdown() {
|
func P2P_Shutdown() {
|
||||||
close(Exit_Event) // send signal to all connections to exit
|
//close(Exit_Event) // send signal to all connections to exit
|
||||||
save_peer_list() // save peer list
|
save_peer_list() // save peer list
|
||||||
save_ban_list() // save ban list
|
save_ban_list() // save ban list
|
||||||
|
|
||||||
// TODO we must wait for connections to kill themselves
|
// TODO we must wait for connections to kill themselves
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
logger.Info("P2P Shutdown")
|
logger.Info("P2P Shutdown")
|
||||||
atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrement 1 fom subsystem
|
atomic.AddUint32(&globals.Subsystem_Active, ^uint32(0)) // this decrement 1 fom subsystem
|
||||||
|
|
||||||
@ -538,28 +708,21 @@ func generate_random_tls_cert() tls.Certificate {
|
|||||||
return tlsCert
|
return tlsCert
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
func ParseIP(s string) (string, error) {
|
||||||
// register all the handlers
|
ip, _, err := net.SplitHostPort(s)
|
||||||
func register_handlers(){
|
if err == nil {
|
||||||
arpc.DefaultHandler.Handle("/handshake",Handshake_Handler)
|
return ip, nil
|
||||||
arpc.DefaultHandler.Handle("/active",func (ctx *arpc.Context) { // set the connection active
|
}
|
||||||
if c,ok := ctx.Client.Get("connection");ok {
|
|
||||||
connection := c.(*Connection)
|
|
||||||
atomic.StoreUint32(&connection.State, ACTIVE)
|
|
||||||
}} )
|
|
||||||
|
|
||||||
arpc.DefaultHandler.HandleConnected(OnConnected_Handler) // all incoming connections will first processed here
|
ip2 := net.ParseIP(s)
|
||||||
arpc.DefaultHandler.HandleDisconnected(OnDisconnected_Handler) // all disconnected
|
if ip2 == nil {
|
||||||
|
return "", fmt.Errorf("invalid IP")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ip2.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ParseIPNoError(s string) string {
|
||||||
|
ip, _ := ParseIP(s)
|
||||||
// triggers when new clients connect and
|
return ip
|
||||||
func OnConnected_Handler(c *arpc.Client){
|
|
||||||
dispatch_test_handshake(c, c.Conn.RemoteAddr().(*net.TCPAddr) ,true,false) // client connected we must handshake
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func OnDisconnected_Handler(c *arpc.Client){
|
|
||||||
c.Stop()
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
@ -121,14 +121,17 @@ func clean_up() {
|
|||||||
peer_mutex.Lock()
|
peer_mutex.Lock()
|
||||||
defer peer_mutex.Unlock()
|
defer peer_mutex.Unlock()
|
||||||
for k, v := range peer_map {
|
for k, v := range peer_map {
|
||||||
if v.FailCount >= 8 { // roughly 16 tries, 18 hrs before we discard the peer
|
if IsAddressConnected(ParseIPNoError(v.Address)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v.FailCount >= 8 { // roughly 8 tries before we discard the peer
|
||||||
delete(peer_map, k)
|
delete(peer_map, k)
|
||||||
}
|
}
|
||||||
if v.LastConnected == 0 { // if never connected, purge the peer
|
if v.LastConnected == 0 { // if never connected, purge the peer
|
||||||
delete(peer_map, k)
|
delete(peer_map, k)
|
||||||
}
|
}
|
||||||
|
|
||||||
if uint64(time.Now().UTC().Unix()) > (v.LastConnected + 42000) { // purge all peers which were not connected in
|
if uint64(time.Now().UTC().Unix()) > (v.LastConnected + 3600) { // purge all peers which were not connected in
|
||||||
delete(peer_map, k)
|
delete(peer_map, k)
|
||||||
}
|
}
|
||||||
}
}
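Note: clean_up now skips peers that are currently connected and prunes entries with 8 or more failures, entries that never connected, and entries not seen for 3600 s (down from 42000 s). Illustrative sketch of that pruning policy (not part of the commit; Peer is reduced to the fields used here):

package main

import (
	"fmt"
	"time"
)

type Peer struct {
	Address       string
	FailCount     uint64
	LastConnected uint64 // unix seconds of the last successful connection
}

func prune(peers map[string]*Peer, isConnected func(string) bool) {
	now := uint64(time.Now().UTC().Unix())
	for k, v := range peers {
		if isConnected(v.Address) {
			continue // never drop the entry of a live connection
		}
		if v.FailCount >= 8 || v.LastConnected == 0 || now > v.LastConnected+3600 {
			delete(peers, k)
		}
	}
}

func main() {
	peers := map[string]*Peer{
		"a": {Address: "a", FailCount: 9},
		"b": {Address: "b", LastConnected: uint64(time.Now().Unix())},
	}
	prune(peers, func(string) bool { return false })
	fmt.Println(len(peers)) // only "b" survives
}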
@ -139,7 +142,7 @@ func IsPeerInList(address string) bool {
|
|||||||
peer_mutex.Lock()
|
peer_mutex.Lock()
|
||||||
defer peer_mutex.Unlock()
|
defer peer_mutex.Unlock()
|
||||||
|
|
||||||
if _, ok := peer_map[address]; ok {
|
if _, ok := peer_map[ParseIPNoError(address)]; ok {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@ -148,7 +151,7 @@ func GetPeerInList(address string) *Peer {
|
|||||||
peer_mutex.Lock()
|
peer_mutex.Lock()
|
||||||
defer peer_mutex.Unlock()
|
defer peer_mutex.Unlock()
|
||||||
|
|
||||||
if v, ok := peer_map[address]; ok {
|
if v, ok := peer_map[ParseIPNoError(address)]; ok {
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -166,20 +169,20 @@ func Peer_Add(p *Peer) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := peer_map[p.Address]; ok {
|
if v, ok := peer_map[ParseIPNoError(p.Address)]; ok {
|
||||||
v.Lock()
|
v.Lock()
|
||||||
// logger.Infof("Peer already in list adding good count")
|
// logger.Infof("Peer already in list adding good count")
|
||||||
v.GoodCount++
|
v.GoodCount++
|
||||||
v.Unlock()
|
v.Unlock()
|
||||||
} else {
|
} else {
|
||||||
// logger.Infof("Peer adding to list")
|
// logger.Infof("Peer adding to list")
|
||||||
peer_map[p.Address] = p
|
peer_map[ParseIPNoError(p.Address)] = p
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// a peer marked as fail, will only be connected based on exponential back-off based on powers of 2
|
// a peer marked as fail, will only be connected based on exponential back-off based on powers of 2
|
||||||
func Peer_SetFail(address string) {
|
func Peer_SetFail(address string) {
|
||||||
p := GetPeerInList(address)
|
p := GetPeerInList(ParseIPNoError(address))
|
||||||
if p == nil {
|
if p == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -193,7 +196,7 @@ func Peer_SetFail(address string) {
|
|||||||
// we will only distribute peers which have been successfully connected by us
|
// we will only distribute peers which have been successfully connected by us
|
||||||
func Peer_SetSuccess(address string) {
|
func Peer_SetSuccess(address string) {
|
||||||
//logger.Infof("Setting peer as success")
|
//logger.Infof("Setting peer as success")
|
||||||
p := GetPeerInList(address)
|
p := GetPeerInList(ParseIPNoError(address))
|
||||||
if p == nil {
|
if p == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -233,7 +236,7 @@ func Peer_EnableBan(address string) (err error){
|
|||||||
func Peer_Delete(p *Peer) {
|
func Peer_Delete(p *Peer) {
|
||||||
peer_mutex.Lock()
|
peer_mutex.Lock()
|
||||||
defer peer_mutex.Unlock()
|
defer peer_mutex.Unlock()
|
||||||
delete(peer_map, p.Address)
|
delete(peer_map, ParseIPNoError(p.Address))
|
||||||
}
|
}
|
||||||
|
|
||||||
// prints all the connection info to screen
|
// prints all the connection info to screen
|
||||||
@ -258,7 +261,7 @@ func PeerList_Print() {
|
|||||||
|
|
||||||
for i := range list {
|
for i := range list {
|
||||||
connected := ""
|
connected := ""
|
||||||
if IsAddressConnected(list[i].Address) {
|
if IsAddressConnected(ParseIPNoError(list[i].Address)) {
|
||||||
connected = "ACTIVE"
|
connected = "ACTIVE"
|
||||||
}
|
}
|
||||||
fmt.Printf("%-22s %-6s %4d %5d \n", list[i].Address, connected, list[i].GoodCount, list[i].FailCount)
|
fmt.Printf("%-22s %-6s %4d %5d \n", list[i].Address, connected, list[i].GoodCount, list[i].FailCount)
|
||||||
@ -269,7 +272,7 @@ func PeerList_Print() {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// this function return peer count which have successful handshake
|
// this function return peer count which are in our list
|
||||||
func Peer_Counts() (Count uint64) {
|
func Peer_Counts() (Count uint64) {
|
||||||
peer_mutex.Lock()
|
peer_mutex.Lock()
|
||||||
defer peer_mutex.Unlock()
|
defer peer_mutex.Unlock()
|
||||||
@ -289,7 +292,7 @@ func find_peer_to_connect(version int) *Peer {
|
|||||||
for _, v := range peer_map {
|
for _, v := range peer_map {
|
||||||
if uint64(time.Now().Unix()) > v.BlacklistBefore && // if ip is blacklisted skip it
|
if uint64(time.Now().Unix()) > v.BlacklistBefore && // if ip is blacklisted skip it
|
||||||
uint64(time.Now().Unix()) > v.ConnectAfter &&
|
uint64(time.Now().Unix()) > v.ConnectAfter &&
|
||||||
!IsAddressConnected(v.Address) && v.Whitelist && !IsAddressInBanList(v.Address) {
|
!IsAddressConnected(ParseIPNoError(v.Address)) && v.Whitelist && !IsAddressInBanList(ParseIPNoError(v.Address)) {
|
||||||
v.ConnectAfter = uint64(time.Now().UTC().Unix()) + 10 // minimum 10 secs gap
|
v.ConnectAfter = uint64(time.Now().UTC().Unix()) + 10 // minimum 10 secs gap
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
@ -298,7 +301,7 @@ func find_peer_to_connect(version int) *Peer {
|
|||||||
for _, v := range peer_map {
|
for _, v := range peer_map {
|
||||||
if uint64(time.Now().Unix()) > v.BlacklistBefore && // if ip is blacklisted skip it
|
if uint64(time.Now().Unix()) > v.BlacklistBefore && // if ip is blacklisted skip it
|
||||||
uint64(time.Now().Unix()) > v.ConnectAfter &&
|
uint64(time.Now().Unix()) > v.ConnectAfter &&
|
||||||
!IsAddressConnected(v.Address) && !v.Whitelist && !IsAddressInBanList(v.Address) {
|
!IsAddressConnected(ParseIPNoError(v.Address)) && !v.Whitelist && !IsAddressInBanList(ParseIPNoError(v.Address)) {
|
||||||
v.ConnectAfter = uint64(time.Now().UTC().Unix()) + 10 // minimum 10 secs gap
|
v.ConnectAfter = uint64(time.Now().UTC().Unix()) + 10 // minimum 10 secs gap
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
@ -4,30 +4,30 @@ package p2p
|
|||||||
import "fmt"
|
import "fmt"
|
||||||
import "io"
|
import "io"
|
||||||
import "net"
|
import "net"
|
||||||
import "net/rpc"
|
import "sync"
|
||||||
import "bufio"
|
import "time"
|
||||||
|
import "github.com/cenkalti/rpc2"
|
||||||
import "encoding/binary"
|
import "encoding/binary"
|
||||||
import "github.com/fxamacker/cbor/v2"
|
import "github.com/fxamacker/cbor/v2"
|
||||||
|
|
||||||
import "github.com/deroproject/derohe/config" // only used get constants such as max data per frame
|
import "github.com/deroproject/derohe/config" // only used get constants such as max data per frame
|
||||||
|
|
||||||
// used to represent net/rpc structs
|
// it processes both
|
||||||
type Request struct {
|
type RequestResponse struct {
|
||||||
ServiceMethod string `cbor:"M"` // format: "Service.Method"
|
Method string `cbor:"M"` // format: "Service.Method"
|
||||||
Seq uint64 `cbor:"S"` // sequence number chosen by client
|
|
||||||
}
|
|
||||||
|
|
||||||
type Response struct {
|
|
||||||
ServiceMethod string `cbor:"M"` // echoes that of the Request
|
|
||||||
Seq uint64 `cbor:"S"` // echoes that of the request
|
Seq uint64 `cbor:"S"` // echoes that of the request
|
||||||
Error string `cbor:"E"` // error, if any.
|
Error string `cbor:"E"` // error, if any.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const READ_TIMEOUT = 20 * time.Second
|
||||||
|
const WRITE_TIMEOUT = 20 * time.Second
|
||||||
|
|
||||||
// reads our data, length prefix blocks
|
// reads our data, length prefix blocks
|
||||||
func Read_Data_Frame(r io.Reader, obj interface{}) error {
|
func Read_Data_Frame(r net.Conn, obj interface{}) error {
|
||||||
var frame_length_buf [4]byte
|
var frame_length_buf [4]byte
|
||||||
|
|
||||||
//connection.set_timeout()
|
//connection.set_timeout()
|
||||||
|
r.SetReadDeadline(time.Now().Add(READ_TIMEOUT))
|
||||||
nbyte, err := io.ReadFull(r, frame_length_buf[:])
|
nbyte, err := io.ReadFull(r, frame_length_buf[:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -58,7 +58,7 @@ func Read_Data_Frame(r io.Reader, obj interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// reads our data, length prefix blocks
|
// reads our data, length prefix blocks
|
||||||
func Write_Data_Frame(w io.Writer, obj interface{}) error {
|
func Write_Data_Frame(w net.Conn, obj interface{}) error {
|
||||||
var frame_length_buf [4]byte
|
var frame_length_buf [4]byte
|
||||||
data_bytes, err := cbor.Marshal(obj)
|
data_bytes, err := cbor.Marshal(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -66,6 +66,7 @@ func Write_Data_Frame(w io.Writer, obj interface{}) error {
|
|||||||
}
|
}
|
||||||
binary.LittleEndian.PutUint32(frame_length_buf[:], uint32(len(data_bytes)))
|
binary.LittleEndian.PutUint32(frame_length_buf[:], uint32(len(data_bytes)))
|
||||||
|
|
||||||
|
w.SetWriteDeadline(time.Now().Add(WRITE_TIMEOUT))
|
||||||
if _, err = w.Write(frame_length_buf[:]); err != nil {
|
if _, err = w.Write(frame_length_buf[:]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
}
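Note: both codec halves sit on the same framing: every request/response header and body is CBOR-encoded and prefixed with a 4-byte little-endian length, with read/write deadlines set on the underlying net.Conn. Illustrative sketch of that framing in isolation (not part of the commit; deadlines and the config-based frame-size cap are omitted):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/fxamacker/cbor/v2"
)

// writeFrame encodes obj with CBOR and prefixes it with its 4-byte length.
func writeFrame(w io.Writer, obj interface{}) error {
	payload, err := cbor.Marshal(obj)
	if err != nil {
		return err
	}
	var hdr [4]byte
	binary.LittleEndian.PutUint32(hdr[:], uint32(len(payload)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err = w.Write(payload)
	return err
}

// readFrame reads the 4-byte length, then exactly that many bytes, then decodes.
func readFrame(r io.Reader, obj interface{}) error {
	var hdr [4]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return err
	}
	payload := make([]byte, binary.LittleEndian.Uint32(hdr[:]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return err
	}
	return cbor.Unmarshal(payload, obj)
}

func main() {
	var buf bytes.Buffer
	writeFrame(&buf, map[string]string{"M": "Peer.Ping"})
	var out map[string]string
	readFrame(&buf, &out)
	fmt.Println(out["M"]) // Peer.Ping
}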
@ -76,54 +77,68 @@ func Write_Data_Frame(w io.Writer, obj interface{}) error {
|
|||||||
|
|
||||||
// ClientCodec implements the rpc.ClientCodec interface for generic golang objects.
|
// ClientCodec implements the rpc.ClientCodec interface for generic golang objects.
|
||||||
type ClientCodec struct {
|
type ClientCodec struct {
|
||||||
r *bufio.Reader
|
r net.Conn
|
||||||
w io.WriteCloser
|
sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerCodec implements the rpc.ServerCodec interface for generic protobufs.
|
|
||||||
type ServerCodec ClientCodec
|
|
||||||
|
|
||||||
// NewClientCodec returns a ClientCodec for communicating with the ServerCodec
|
// NewClientCodec returns a ClientCodec for communicating with the ServerCodec
|
||||||
// on the other end of the conn.
|
// on the other end of the conn.
|
||||||
func NewCBORClientCodec(conn net.Conn) *ClientCodec {
|
// to support deadlines we use net.conn
|
||||||
return &ClientCodec{bufio.NewReader(conn), conn}
|
func NewCBORCodec(conn net.Conn) *ClientCodec {
|
||||||
}
|
return &ClientCodec{r: conn}
|
||||||
|
|
||||||
// NewServerCodec returns a ServerCodec that communicates with the ClientCodec
|
|
||||||
// on the other end of the given conn.
|
|
||||||
func NewCBORServerCodec(conn net.Conn) *ServerCodec {
|
|
||||||
return &ServerCodec{bufio.NewReader(conn), conn}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteRequest writes the 4 byte length from the connection and encodes that many
|
|
||||||
// subsequent bytes into the given object.
|
|
||||||
func (c *ClientCodec) WriteRequest(req *rpc.Request, obj interface{}) error {
|
|
||||||
// Write the header
|
|
||||||
header := Request{ServiceMethod: req.ServiceMethod, Seq: req.Seq}
|
|
||||||
if err := Write_Data_Frame(c.w, header); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return Write_Data_Frame(c.w, obj)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadResponseHeader reads a 4 byte length from the connection and decodes that many
|
// ReadResponseHeader reads a 4 byte length from the connection and decodes that many
|
||||||
// subsequent bytes into the given object, decodes it, and stores the fields
|
// subsequent bytes into the given object, decodes it, and stores the fields
|
||||||
// in the given request.
|
// in the given request.
|
||||||
func (c *ClientCodec) ReadResponseHeader(resp *rpc.Response) error {
|
func (c *ClientCodec) ReadResponseHeader(resp *rpc2.Response) error {
|
||||||
var header Response
|
var header RequestResponse
|
||||||
if err := Read_Data_Frame(c.r, &header); err != nil {
|
if err := Read_Data_Frame(c.r, &header); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if header.ServiceMethod == "" {
|
//if header.Method == "" {
|
||||||
return fmt.Errorf("header missing method: %s", "no ServiceMethod")
|
// return fmt.Errorf("header missing method: %s", "no Method")
|
||||||
}
|
//}
|
||||||
resp.ServiceMethod = header.ServiceMethod
|
//resp.Method = header.Method
|
||||||
resp.Seq = header.Seq
|
resp.Seq = header.Seq
|
||||||
resp.Error = header.Error
|
resp.Error = header.Error
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close closes the underlying connection.
|
||||||
|
func (c *ClientCodec) Close() error {
|
||||||
|
return c.r.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadRequestHeader reads the header (which is prefixed by a 4 byte lil endian length
|
||||||
|
// indicating its size) from the connection, decodes it, and stores the fields
|
||||||
|
// in the given request.
|
||||||
|
func (s *ClientCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error {
|
||||||
|
var header RequestResponse
|
||||||
|
if err := Read_Data_Frame(s.r, &header); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if header.Method != "" {
|
||||||
|
req.Seq = header.Seq
|
||||||
|
req.Method = header.Method
|
||||||
|
} else {
|
||||||
|
resp.Seq = header.Seq
|
||||||
|
resp.Error = header.Error
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadRequestBody reads a 4 byte length from the connection and decodes that many
|
||||||
|
// subsequent bytes into the object
|
||||||
|
func (s *ClientCodec) ReadRequestBody(obj interface{}) error {
|
||||||
|
if obj == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return Read_Data_Frame(s.r, obj)
|
||||||
|
}
|
||||||
|
|
||||||
// ReadResponseBody reads a 4 byte length from the connection and decodes that many
|
// ReadResponseBody reads a 4 byte length from the connection and decodes that many
|
||||||
// subsequent bytes into the given object (which should be a pointer to a
|
// subsequent bytes into the given object (which should be a pointer to a
|
||||||
// struct).
|
// struct).
|
||||||
@ -134,54 +149,32 @@ func (c *ClientCodec) ReadResponseBody(obj interface{}) error {
    return Read_Data_Frame(c.r, obj)
}

// Close closes the underlying connection.
func (c *ClientCodec) Close() error {
    return c.w.Close()
}

// Close closes the underlying connection.
func (c *ServerCodec) Close() error {
    return c.w.Close()
}

// ReadRequestHeader reads the header (which is prefixed by a 4 byte lil endian length
// indicating its size) from the connection, decodes it, and stores the fields
// in the given request.
func (s *ServerCodec) ReadRequestHeader(req *rpc.Request) error {
    var header Request
    if err := Read_Data_Frame(s.r, &header); err != nil {
        return err
    }
    if header.ServiceMethod == "" {
        return fmt.Errorf("header missing method: %s", "empty ServiceMethod")
    }
    req.ServiceMethod = header.ServiceMethod
    req.Seq = header.Seq
    return nil
}

// ReadRequestBody reads a 4 byte length from the connection and decodes that many
// subsequent bytes into the object
func (s *ServerCodec) ReadRequestBody(obj interface{}) error {
    if obj == nil {
        return nil
    }
    return Read_Data_Frame(s.r, obj)
}

// WriteRequest writes the 4 byte length from the connection and encodes that many
// subsequent bytes into the given object.
func (c *ClientCodec) WriteRequest(req *rpc2.Request, obj interface{}) error {
    c.Lock()
    defer c.Unlock()

    header := RequestResponse{Method: req.Method, Seq: req.Seq}
    if err := Write_Data_Frame(c.r, header); err != nil {
        return err
    }
    return Write_Data_Frame(c.r, obj)
}

// WriteResponse writes the appropriate header. If
// the response was invalid, the size of the body of the resp is reported as
// having size zero and is not sent.
func (s *ServerCodec) WriteResponse(resp *rpc.Response, obj interface{}) error {
    // Write the header
    header := Response{ServiceMethod: resp.ServiceMethod, Seq: resp.Seq, Error: resp.Error}
    if err := Write_Data_Frame(s.w, header); err != nil {
func (c *ClientCodec) WriteResponse(resp *rpc2.Response, obj interface{}) error {
    c.Lock()
    defer c.Unlock()
    header := RequestResponse{Seq: resp.Seq, Error: resp.Error}
    if err := Write_Data_Frame(c.r, header); err != nil {
        return err
    }

    if resp.Error == "" { // only write response object if error is nil
        return Write_Data_Frame(s.w, obj)
        return Write_Data_Frame(c.r, obj)
    }

    return nil
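The codec above delegates all framing to Read_Data_Frame and Write_Data_Frame, which are not part of this hunk; the comments only say a 4 byte length precedes the payload, and a removed comment calls it "lil endian". A minimal sketch of that kind of length-prefixed framing, under those assumptions; the helper names and the raw-bytes payload are illustrative, not the project's actual implementations:

```go
package wire

import (
    "encoding/binary"
    "fmt"
    "io"
)

// writeFrame and readFrame are illustrative stand-ins for Write_Data_Frame /
// Read_Data_Frame: a 4 byte little endian length followed by that many bytes.
// The real helpers also encode/decode obj; here the payload stays as raw bytes.
func writeFrame(w io.Writer, payload []byte) error {
    var length [4]byte
    binary.LittleEndian.PutUint32(length[:], uint32(len(payload)))
    if _, err := w.Write(length[:]); err != nil {
        return err
    }
    _, err := w.Write(payload)
    return err
}

func readFrame(r io.Reader, maxSize uint32) ([]byte, error) {
    var length [4]byte
    if _, err := io.ReadFull(r, length[:]); err != nil {
        return nil, err
    }
    n := binary.LittleEndian.Uint32(length[:])
    if n > maxSize {
        return nil, fmt.Errorf("frame too large: %d bytes", n)
    }
    payload := make([]byte, n)
    _, err := io.ReadFull(r, payload)
    return payload, err
}
```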
@ -17,13 +17,13 @@
package p2p

import "fmt"
import "net"
import "bytes"
import "context"

import "sync/atomic"
import "time"

import "github.com/paulbellamy/ratecounter"

import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/globals"
@ -56,7 +56,9 @@ func (connection *Connection) dispatch_test_handshake() {
    var request, response Handshake_Struct
    request.Fill()

    if err := connection.RConn.Client.Call("Peer.Handshake", request, &response); err != nil {
    ctx, _ := context.WithTimeout(context.Background(), 4*time.Second)
    if err := connection.Client.CallWithContext(ctx, "Peer.Handshake", request, &response); err != nil {
        connection.logger.V(2).Error(err, "cannot handshake")
        connection.exit()
        return
    }
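The handshake now goes through rpc2's CallWithContext with a 4-second deadline instead of a plain Call. A minimal sketch of that pattern on an already-connected client; the helper name is illustrative, and note that context.WithTimeout also returns a cancel function, which the hunk above discards but would normally be deferred:

```go
package p2p

import (
    "context"
    "time"

    "github.com/cenkalti/rpc2"
)

// callWithDeadline is an illustrative helper (not in the codebase): it bounds a
// peer RPC to a fixed deadline so a stalled peer cannot hang the caller forever.
func callWithDeadline(client *rpc2.Client, method string, args, reply interface{}) error {
    ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
    defer cancel() // the hunk above discards cancel; deferring it releases the timer early

    // CallWithContext returns ctx.Err() if the deadline expires before the reply arrives.
    return client.CallWithContext(ctx, method, args, reply)
}
```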
@ -66,15 +68,10 @@ func (connection *Connection) dispatch_test_handshake() {
        connection.exit()
        return
    }

    connection.request_time.Store(time.Now())
    connection.SpeedIn = ratecounter.NewRateCounter(60 * time.Second)
    connection.SpeedOut = ratecounter.NewRateCounter(60 * time.Second)

    connection.update(&response.Common) // update common information

    if !connection.Incoming { // setup success
        Peer_SetSuccess(connection.Addr.String())
    if !Connection_Add(connection) { // add connection to pool
        connection.exit()
        return
    }

    if len(response.ProtocolVersion) < 128 {
@ -98,24 +95,15 @@ func (connection *Connection) dispatch_test_handshake() {
    if connection.Port != 0 && connection.Port <= 65535 { // peer is saying it has an open port, handshake is success so add peer

        var p Peer
        if connection.Addr.IP.To4() != nil { // if ipv4
            p.Address = fmt.Sprintf("%s:%d", connection.Addr.IP.String(), connection.Port)
        if net.ParseIP(Address(connection)).To4() != nil { // if ipv4
            p.Address = fmt.Sprintf("%s:%d", Address(connection), connection.Port)
        } else { // if ipv6
            p.Address = fmt.Sprintf("[%s]:%d", connection.Addr.IP.String(), connection.Port)
            p.Address = fmt.Sprintf("[%s]:%d", Address(connection), connection.Port)
        }
        p.ID = connection.Peer_ID

        p.LastConnected = uint64(time.Now().UTC().Unix())

        // TODO we should add any flags here if necessary, but they are not
        // required, since a peer can only be used if connected and if connected
        // we already have a truly synced view
        for _, k := range response.Flags {
            switch k {
            //case FLAG_MINER:p.Miner = true
            default:
            }
        }
        Peer_Add(&p)
    }
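The ipv4/ipv6 branch above exists only to decide whether the host part needs square brackets. As an alternative design note (not what the commit does), the standard library can make that decision itself; a sketch, assuming Address(connection) yields a bare IP string and the port fits in a uint32:

```go
package p2p

import (
    "net"
    "strconv"
)

// peerAddress is an illustrative helper, not part of the codebase: it formats
// host:port the same way as the branch above, letting net.JoinHostPort add the
// [ ] brackets that IPv6 literals require.
func peerAddress(ip string, port uint32) string {
    return net.JoinHostPort(ip, strconv.FormatUint(uint64(port), 10))
}
```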
@ -127,25 +115,7 @@ func (connection *Connection) dispatch_test_handshake() {
        }
    }

    Connection_Add(connection) // add connection to pool
    atomic.StoreUint32(&connection.State, ACTIVE)

    // mark active
    var r Dummy
    fill_common(&r.Common) // fill common info
    if err := connection.RConn.Client.Call("Peer.Active", r, &r); err != nil {
        connection.exit()
        return
    }

}

// mark connection active
func (c *Connection) Active(req Dummy, dummy *Dummy) error {
    defer handle_connection_panic(c)
    c.update(&req.Common) // update common information
    atomic.StoreUint32(&c.State, ACTIVE)
    fill_common(&dummy.Common) // fill common info
    return nil
}

// used to ping pong
@ -91,7 +91,7 @@ func (c *Connection) NotifyINV(request ObjectList, response *Dummy) (err error)
    if dirty { // request inventory only if we want it
        var oresponse Objects
        fill_common(&need.Common) // fill common info
        if err = c.RConn.Client.Call("Peer.GetObject", need, &oresponse); err != nil {
        if err = c.Client.Call("Peer.GetObject", need, &oresponse); err != nil {
            c.logger.V(2).Error(err, "Call failed GetObject", "need_objects", need)
            c.exit()
            return
@ -113,8 +113,8 @@ func (c *Connection) NotifyINV(request ObjectList, response *Dummy) (err error)
func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err error) {
    defer handle_connection_panic(c)

    if len(request.MiniBlocks) != 1 {
        err = fmt.Errorf("Notify Block can notify single block")
    if len(request.MiniBlocks) >= 5 {
        err = fmt.Errorf("Notify Block can notify max 5 miniblocks")
        c.logger.V(3).Error(err, "Should be banned")
        c.exit()
        return err
@ -122,20 +122,20 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
    fill_common_T1(&request.Common)
    c.update(&request.Common) // update common information

    var mbl_arrays [][]byte
    if len(c.previous_mbl) > 0 {
        mbl_arrays = append(mbl_arrays, c.previous_mbl)
    }
    mbl_arrays = append(mbl_arrays, request.MiniBlocks...)

    for i := range mbl_arrays {
    var mbls []block.MiniBlock
    for i := range request.MiniBlocks {
        var mbl block.MiniBlock
        var ok bool

        if err = mbl.Deserialize(mbl_arrays[i]); err != nil {
        if err = mbl.Deserialize(request.MiniBlocks[i]); err != nil {
            return err
        }
        mbls = append(mbls, mbl)
    }

    var valid_found bool

    for _, mbl := range mbls {
        var ok bool
        if mbl.Timestamp > uint64(globals.Time().UTC().UnixMilli())+50 { // 50 ms passing allowed
            return errormsg.ErrInvalidTimestamp
        }
@ -153,8 +153,7 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
        // first check whether the incoming minblock can be added to sub chains
        if !chain.MiniBlocks.IsConnected(mbl) {
            c.previous_mbl = mbl.Serialize()
            c.logger.V(3).Error(err, "Disconnected miniblock", "mbl", mbl.String())
            //return fmt.Errorf("Disconnected miniblock")
            continue
        }
@ -213,9 +212,13 @@ func (c *Connection) NotifyMiniBlock(request Objects, response *Dummy) (err erro
        if err, ok = chain.InsertMiniBlock(mbl); !ok {
            return err
        } else { // rebroadcast miniblock
            defer broadcast_MiniBlock(mbl, c.Peer_ID, request.Sent) // do not send back to the original peer
            valid_found = true
        }
    }

    if valid_found {
        Peer_SetSuccess(c.Addr.String())
        broadcast_MiniBlock(mbls, c.Peer_ID, request.Sent) // do not send back to the original peer
    }
    fill_common(&response.Common) // fill common info
    fill_common_T0T1T2(&request.Common, &response.Common) // fill time related information
    return nil
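With this change a peer may push several serialized miniblocks in one Peer.NotifyMiniBlock call, and the receiver rebroadcasts the whole batch only if at least one of them was inserted. A sketch of what the sending side looks like under those rules; the helper name and the direct c.Client.Call are illustrative (the real sender goes through broadcast_MiniBlock), but Objects.MiniBlocks, block.MiniBlock.Serialize and the ">= 5" rejection threshold come from the hunks above:

```go
package p2p

import "github.com/deroproject/derohe/block"

// notifyMiniBlocks is an illustrative sender-side helper (not in the codebase):
// it packs at most 4 serialized miniblocks into one Objects request, staying
// under the receiver's ">= 5" rejection threshold shown above.
func notifyMiniBlocks(c *Connection, mbls []block.MiniBlock) error {
    if len(mbls) == 0 {
        return nil
    }
    if len(mbls) > 4 {
        mbls = mbls[:4]
    }

    var request Objects
    for _, mbl := range mbls {
        request.MiniBlocks = append(request.MiniBlocks, mbl.Serialize())
    }
    fill_common(&request.Common) // fill common info, as other senders in this package do

    var response Dummy
    return c.Client.Call("Peer.NotifyMiniBlock", request, &response)
}
```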
@ -55,7 +55,7 @@ func time_check_routine() {
        server := timeservers[random.Int()%len(timeservers)]

        if response, err := ntp.Query(server); err != nil {
            logger.V(2).Error(err, "error while querying time", "server", server)
            //logger.V(2).Error(err, "error while querying time", "server", server)
        } else if response.Validate() == nil {

            if response.ClockOffset.Seconds() > -.05 && response.ClockOffset.Seconds() < .05 {
@ -86,6 +86,7 @@ func time_check_routine() {
            if response.ClockOffset.Seconds() > -1.0 && response.ClockOffset.Seconds() < 1.0 { // chrony can maintain upto 5 ms, ntps can maintain upto 10
                timeinsync = true
            } else {
                timeinsync = false
                logger.V(1).Error(nil, "Your system time deviation is more than 1 secs (%s)."+
                    "\nYou may experience chain sync issues and/or other side-effects."+
                    "\nIf you are mining, your blocks may get rejected."+
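time_check_routine keeps timeinsync in step with the measured NTP offset, and this hunk now also clears the flag when the offset drifts past one second. A compact, standalone sketch of that offset check; the function name, package placement and the beevik/ntp import are assumptions (the routine above only shows ntp.Query, Validate and ClockOffset):

```go
package main

import (
    "fmt"
    "time"

    "github.com/beevik/ntp" // assumption: the NTP client package behind ntp.Query above
)

// clockOffset queries one NTP server and reports whether the local clock is
// within maxDrift of it, mirroring the Validate()/ClockOffset checks above.
func clockOffset(server string, maxDrift time.Duration) (time.Duration, bool, error) {
    response, err := ntp.Query(server)
    if err != nil {
        return 0, false, err
    }
    if err := response.Validate(); err != nil {
        return 0, false, err
    }
    offset := response.ClockOffset
    return offset, offset > -maxDrift && offset < maxDrift, nil
}

func main() {
    offset, inSync, err := clockOffset("pool.ntp.org", time.Second)
    fmt.Println(offset, inSync, err)
}
```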
22  vendor/github.com/cenkalti/hub/.gitignore  generated vendored Normal file
@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
5  vendor/github.com/cenkalti/hub/.travis.yml  generated vendored Normal file
@ -0,0 +1,5 @@
language: go
go: 1.13
arch:
- amd64
- ppc64le
20  vendor/github.com/cenkalti/hub/LICENSE  generated vendored Normal file
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
5  vendor/github.com/cenkalti/hub/README.md  generated vendored Normal file
@ -0,0 +1,5 @@
hub
===

[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub)
[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub)
32  vendor/github.com/cenkalti/hub/example_test.go  generated vendored Normal file
@ -0,0 +1,32 @@
package hub_test

import (
    "fmt"

    "github.com/cenk/hub"
)

// Different event kinds
const (
    happenedA hub.Kind = iota
    happenedB
    happenedC
)

// Our custom event type
type EventA struct {
    arg1, arg2 int
}

// Implement hub.Event interface
func (e EventA) Kind() hub.Kind { return happenedA }

func Example() {
    hub.Subscribe(happenedA, func(e hub.Event) {
        a := e.(EventA) // Cast to concrete type
        fmt.Println(a.arg1 + a.arg2)
    })

    hub.Publish(EventA{2, 3})
    // Output: 5
}
82  vendor/github.com/cenkalti/hub/hub.go  generated vendored Normal file
@ -0,0 +1,82 @@
// Package hub provides a simple event dispatcher for publish/subscribe pattern.
package hub

import "sync"

type Kind int

// Event is an interface for published events.
type Event interface {
    Kind() Kind
}

// Hub is an event dispatcher, publishes events to the subscribers
// which are subscribed for a specific event type.
// Optimized for publish calls.
// The handlers may be called in order different than they are registered.
type Hub struct {
    subscribers map[Kind][]handler
    m           sync.RWMutex
    seq         uint64
}

type handler struct {
    f  func(Event)
    id uint64
}

// Subscribe registers f for the event of a specific kind.
func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) {
    var cancelled bool
    h.m.Lock()
    h.seq++
    id := h.seq
    if h.subscribers == nil {
        h.subscribers = make(map[Kind][]handler)
    }
    h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f})
    h.m.Unlock()
    return func() {
        h.m.Lock()
        if cancelled {
            h.m.Unlock()
            return
        }
        cancelled = true
        a := h.subscribers[kind]
        for i, f := range a {
            if f.id == id {
                a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1]
                break
            }
        }
        if len(a) == 0 {
            delete(h.subscribers, kind)
        }
        h.m.Unlock()
    }
}

// Publish an event to the subscribers.
func (h *Hub) Publish(e Event) {
    h.m.RLock()
    if handlers, ok := h.subscribers[e.Kind()]; ok {
        for _, h := range handlers {
            h.f(e)
        }
    }
    h.m.RUnlock()
}

// DefaultHub is the default Hub used by Publish and Subscribe.
var DefaultHub Hub

// Subscribe registers f for the event of a specific kind in the DefaultHub.
func Subscribe(kind Kind, f func(Event)) (cancel func()) {
    return DefaultHub.Subscribe(kind, f)
}

// Publish an event to the subscribers in DefaultHub.
func Publish(e Event) {
    DefaultHub.Publish(e)
}
40  vendor/github.com/cenkalti/hub/hub_test.go  generated vendored Normal file
@ -0,0 +1,40 @@
package hub

import "testing"

const testKind Kind = 1
const testValue = "foo"

type testEvent string

func (e testEvent) Kind() Kind {
    return testKind
}

func TestPubSub(t *testing.T) {
    var h Hub
    var s string

    h.Subscribe(testKind, func(e Event) { s = string(e.(testEvent)) })
    h.Publish(testEvent(testValue))

    if s != testValue {
        t.Errorf("invalid value: %s", s)
    }
}

func TestCancel(t *testing.T) {
    var h Hub
    var called int
    var f = func(e Event) { called += 1 }

    _ = h.Subscribe(testKind, f)
    cancel := h.Subscribe(testKind, f)
    h.Publish(testEvent(testValue)) // 2 calls to f
    cancel()
    h.Publish(testEvent(testValue)) // 1 call to f

    if called != 3 {
        t.Errorf("unexpected call count: %d", called)
    }
}
23  vendor/github.com/cenkalti/rpc2/.gitignore  generated vendored Normal file
@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
9  vendor/github.com/cenkalti/rpc2/.travis.yml  generated vendored Normal file
@ -0,0 +1,9 @@
language: go

go:
- 1.15
- tip

arch:
- amd64
- ppc64le
21  vendor/github.com/cenkalti/rpc2/LICENSE  generated vendored Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
82  vendor/github.com/cenkalti/rpc2/README.md  generated vendored Normal file
@ -0,0 +1,82 @@
rpc2
====

[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2)
[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2)

rpc2 is a fork of net/rpc package in the standard library.
The main goal is to add bi-directional support to calls.
That means server can call the methods of client.
This is not possible with net/rpc package.
In order to do this it adds a `*Client` argument to method signatures.

Install
--------

    go get github.com/cenkalti/rpc2

Example server
---------------

```go
package main

import (
    "fmt"
    "net"

    "github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
    srv := rpc2.NewServer()
    srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {

        // Reversed call (server to client)
        var rep Reply
        client.Call("mult", Args{2, 3}, &rep)
        fmt.Println("mult result:", rep)

        *reply = Reply(args.A + args.B)
        return nil
    })

    lis, _ := net.Listen("tcp", "127.0.0.1:5000")
    srv.Accept(lis)
}
```

Example Client
---------------

```go
package main

import (
    "fmt"
    "net"

    "github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
    conn, _ := net.Dial("tcp", "127.0.0.1:5000")

    clt := rpc2.NewClient(conn)
    clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error {
        *reply = Reply(args.A * args.B)
        return nil
    })
    go clt.Run()

    var rep Reply
    clt.Call("add", Args{1, 2}, &rep)
    fmt.Println("add result:", rep)
}
```
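The vendored client (see client.go below) also exposes Notify for fire-and-forget messages and CallWithContext for deadline-bound calls, which the README above does not show. A small sketch of both on a connected client, under the assumption that the server has registered an "add" handler (as in the README) and a "set" handler (as in the package's own tests):

```go
package main

import (
    "context"
    "log"
    "net"
    "time"

    "github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
    conn, err := net.Dial("tcp", "127.0.0.1:5000") // address from the README example
    if err != nil {
        log.Fatal(err)
    }
    clt := rpc2.NewClient(conn)
    go clt.Run()
    defer clt.Close()

    // Notify sends a request with Seq == 0 and never waits for a reply.
    if err := clt.Notify("set", 6); err != nil {
        log.Fatal(err)
    }

    // CallWithContext gives up with ctx.Err() if the server does not answer in time.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    var rep Reply
    if err := clt.CallWithContext(ctx, "add", Args{1, 2}, &rep); err != nil {
        log.Fatal(err)
    }
    log.Println("add result:", rep)
}
```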
364  vendor/github.com/cenkalti/rpc2/client.go  generated vendored Normal file
@ -0,0 +1,364 @@
|
|||||||
|
// Package rpc2 provides bi-directional RPC client and server similar to net/rpc.
|
||||||
|
package rpc2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client represents an RPC Client.
|
||||||
|
// There may be multiple outstanding Calls associated
|
||||||
|
// with a single Client, and a Client may be used by
|
||||||
|
// multiple goroutines simultaneously.
|
||||||
|
type Client struct {
|
||||||
|
mutex sync.Mutex // protects pending, seq, request
|
||||||
|
sending sync.Mutex
|
||||||
|
request Request // temp area used in send()
|
||||||
|
seq uint64
|
||||||
|
pending map[uint64]*Call
|
||||||
|
closing bool
|
||||||
|
shutdown bool
|
||||||
|
server bool
|
||||||
|
codec Codec
|
||||||
|
handlers map[string]*handler
|
||||||
|
disconnect chan struct{}
|
||||||
|
State *State // additional information to associate with client
|
||||||
|
blocking bool // whether to block request handling
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a new Client to handle requests to the
|
||||||
|
// set of services at the other end of the connection.
|
||||||
|
// It adds a buffer to the write side of the connection so
|
||||||
|
// the header and payload are sent as a unit.
|
||||||
|
func NewClient(conn io.ReadWriteCloser) *Client {
|
||||||
|
return NewClientWithCodec(NewGobCodec(conn))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClientWithCodec is like NewClient but uses the specified
|
||||||
|
// codec to encode requests and decode responses.
|
||||||
|
func NewClientWithCodec(codec Codec) *Client {
|
||||||
|
return &Client{
|
||||||
|
codec: codec,
|
||||||
|
pending: make(map[uint64]*Call),
|
||||||
|
handlers: make(map[string]*handler),
|
||||||
|
disconnect: make(chan struct{}),
|
||||||
|
seq: 1, // 0 means notification.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBlocking puts the client in blocking mode.
|
||||||
|
// In blocking mode, received requests are processes synchronously.
|
||||||
|
// If you have methods that may take a long time, other subsequent requests may time out.
|
||||||
|
func (c *Client) SetBlocking(blocking bool) {
|
||||||
|
c.blocking = blocking
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the client's read loop.
|
||||||
|
// You must run this method before calling any methods on the server.
|
||||||
|
func (c *Client) Run() {
|
||||||
|
c.readLoop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DisconnectNotify returns a channel that is closed
|
||||||
|
// when the client connection has gone away.
|
||||||
|
func (c *Client) DisconnectNotify() chan struct{} {
|
||||||
|
return c.disconnect
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
|
||||||
|
func (c *Client) Handle(method string, handlerFunc interface{}) {
|
||||||
|
addHandler(c.handlers, method, handlerFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readLoop reads messages from codec.
|
||||||
|
// It reads a reqeust or a response to the previous request.
|
||||||
|
// If the message is request, calls the handler function.
|
||||||
|
// If the message is response, sends the reply to the associated call.
|
||||||
|
func (c *Client) readLoop() {
|
||||||
|
var err error
|
||||||
|
var req Request
|
||||||
|
var resp Response
|
||||||
|
for err == nil {
|
||||||
|
req = Request{}
|
||||||
|
resp = Response{}
|
||||||
|
if err = c.codec.ReadHeader(&req, &resp); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Method != "" {
|
||||||
|
// request comes to server
|
||||||
|
if err = c.readRequest(&req); err != nil {
|
||||||
|
debugln("rpc2: error reading request:", err.Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// response comes to client
|
||||||
|
if err = c.readResponse(&resp); err != nil {
|
||||||
|
debugln("rpc2: error reading response:", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Terminate pending calls.
|
||||||
|
c.sending.Lock()
|
||||||
|
c.mutex.Lock()
|
||||||
|
c.shutdown = true
|
||||||
|
closing := c.closing
|
||||||
|
if err == io.EOF {
|
||||||
|
if closing {
|
||||||
|
err = ErrShutdown
|
||||||
|
} else {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, call := range c.pending {
|
||||||
|
call.Error = err
|
||||||
|
call.done()
|
||||||
|
}
|
||||||
|
c.mutex.Unlock()
|
||||||
|
c.sending.Unlock()
|
||||||
|
if err != io.EOF && !closing && !c.server {
|
||||||
|
debugln("rpc2: client protocol error:", err)
|
||||||
|
}
|
||||||
|
close(c.disconnect)
|
||||||
|
if !closing {
|
||||||
|
c.codec.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) {
|
||||||
|
// Invoke the method, providing a new value for the reply.
|
||||||
|
replyv := reflect.New(method.replyType.Elem())
|
||||||
|
|
||||||
|
returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv})
|
||||||
|
|
||||||
|
// Do not send response if request is a notification.
|
||||||
|
if req.Seq == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// The return value for the method is an error.
|
||||||
|
errInter := returnValues[0].Interface()
|
||||||
|
errmsg := ""
|
||||||
|
if errInter != nil {
|
||||||
|
errmsg = errInter.(error).Error()
|
||||||
|
}
|
||||||
|
resp := &Response{
|
||||||
|
Seq: req.Seq,
|
||||||
|
Error: errmsg,
|
||||||
|
}
|
||||||
|
if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil {
|
||||||
|
debugln("rpc2: error writing response:", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) readRequest(req *Request) error {
|
||||||
|
method, ok := c.handlers[req.Method]
|
||||||
|
if !ok {
|
||||||
|
resp := &Response{
|
||||||
|
Seq: req.Seq,
|
||||||
|
Error: "rpc2: can't find method " + req.Method,
|
||||||
|
}
|
||||||
|
return c.codec.WriteResponse(resp, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the argument value.
|
||||||
|
var argv reflect.Value
|
||||||
|
argIsValue := false // if true, need to indirect before calling.
|
||||||
|
if method.argType.Kind() == reflect.Ptr {
|
||||||
|
argv = reflect.New(method.argType.Elem())
|
||||||
|
} else {
|
||||||
|
argv = reflect.New(method.argType)
|
||||||
|
argIsValue = true
|
||||||
|
}
|
||||||
|
// argv guaranteed to be a pointer now.
|
||||||
|
if err := c.codec.ReadRequestBody(argv.Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if argIsValue {
|
||||||
|
argv = argv.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.blocking {
|
||||||
|
c.handleRequest(*req, method, argv)
|
||||||
|
} else {
|
||||||
|
go c.handleRequest(*req, method, argv)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) readResponse(resp *Response) error {
|
||||||
|
seq := resp.Seq
|
||||||
|
c.mutex.Lock()
|
||||||
|
call := c.pending[seq]
|
||||||
|
delete(c.pending, seq)
|
||||||
|
c.mutex.Unlock()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
switch {
|
||||||
|
case call == nil:
|
||||||
|
// We've got no pending call. That usually means that
|
||||||
|
// WriteRequest partially failed, and call was already
|
||||||
|
// removed; response is a server telling us about an
|
||||||
|
// error reading request body. We should still attempt
|
||||||
|
// to read error body, but there's no one to give it to.
|
||||||
|
err = c.codec.ReadResponseBody(nil)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.New("reading error body: " + err.Error())
|
||||||
|
}
|
||||||
|
case resp.Error != "":
|
||||||
|
// We've got an error response. Give this to the request;
|
||||||
|
// any subsequent requests will get the ReadResponseBody
|
||||||
|
// error if there is one.
|
||||||
|
call.Error = ServerError(resp.Error)
|
||||||
|
err = c.codec.ReadResponseBody(nil)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.New("reading error body: " + err.Error())
|
||||||
|
}
|
||||||
|
call.done()
|
||||||
|
default:
|
||||||
|
err = c.codec.ReadResponseBody(call.Reply)
|
||||||
|
if err != nil {
|
||||||
|
call.Error = errors.New("reading body " + err.Error())
|
||||||
|
}
|
||||||
|
call.done()
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close waits for active calls to finish and closes the codec.
|
||||||
|
func (c *Client) Close() error {
|
||||||
|
c.mutex.Lock()
|
||||||
|
if c.shutdown || c.closing {
|
||||||
|
c.mutex.Unlock()
|
||||||
|
return ErrShutdown
|
||||||
|
}
|
||||||
|
c.closing = true
|
||||||
|
c.mutex.Unlock()
|
||||||
|
return c.codec.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go invokes the function asynchronously. It returns the Call structure representing
|
||||||
|
// the invocation. The done channel will signal when the call is complete by returning
|
||||||
|
// the same Call object. If done is nil, Go will allocate a new channel.
|
||||||
|
// If non-nil, done must be buffered or Go will deliberately crash.
|
||||||
|
func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call {
|
||||||
|
call := new(Call)
|
||||||
|
call.Method = method
|
||||||
|
call.Args = args
|
||||||
|
call.Reply = reply
|
||||||
|
if done == nil {
|
||||||
|
done = make(chan *Call, 10) // buffered.
|
||||||
|
} else {
|
||||||
|
// If caller passes done != nil, it must arrange that
|
||||||
|
// done has enough buffer for the number of simultaneous
|
||||||
|
// RPCs that will be using that channel. If the channel
|
||||||
|
// is totally unbuffered, it's best not to run at all.
|
||||||
|
if cap(done) == 0 {
|
||||||
|
log.Panic("rpc2: done channel is unbuffered")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
call.Done = done
|
||||||
|
c.send(call)
|
||||||
|
return call
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallWithContext invokes the named function, waits for it to complete, and
|
||||||
|
// returns its error status, or an error from Context timeout.
|
||||||
|
func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error {
|
||||||
|
call := c.Go(method, args, reply, make(chan *Call, 1))
|
||||||
|
select {
|
||||||
|
case <-call.Done:
|
||||||
|
return call.Error
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call invokes the named function, waits for it to complete, and returns its error status.
|
||||||
|
func (c *Client) Call(method string, args interface{}, reply interface{}) error {
|
||||||
|
return c.CallWithContext(context.Background(), method, args, reply)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (call *Call) done() {
|
||||||
|
select {
|
||||||
|
case call.Done <- call:
|
||||||
|
// ok
|
||||||
|
default:
|
||||||
|
// We don't want to block here. It is the caller's responsibility to make
|
||||||
|
// sure the channel has enough buffer space. See comment in Go().
|
||||||
|
debugln("rpc2: discarding Call reply due to insufficient Done chan capacity")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerError represents an error that has been returned from
|
||||||
|
// the remote side of the RPC connection.
|
||||||
|
type ServerError string
|
||||||
|
|
||||||
|
func (e ServerError) Error() string {
|
||||||
|
return string(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrShutdown is returned when the connection is closing or closed.
|
||||||
|
var ErrShutdown = errors.New("connection is shut down")
|
||||||
|
|
||||||
|
// Call represents an active RPC.
|
||||||
|
type Call struct {
|
||||||
|
Method string // The name of the service and method to call.
|
||||||
|
Args interface{} // The argument to the function (*struct).
|
||||||
|
Reply interface{} // The reply from the function (*struct).
|
||||||
|
Error error // After completion, the error status.
|
||||||
|
Done chan *Call // Strobes when call is complete.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) send(call *Call) {
|
||||||
|
c.sending.Lock()
|
||||||
|
defer c.sending.Unlock()
|
||||||
|
|
||||||
|
// Register this call.
|
||||||
|
c.mutex.Lock()
|
||||||
|
if c.shutdown || c.closing {
|
||||||
|
call.Error = ErrShutdown
|
||||||
|
c.mutex.Unlock()
|
||||||
|
call.done()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
seq := c.seq
|
||||||
|
c.seq++
|
||||||
|
c.pending[seq] = call
|
||||||
|
c.mutex.Unlock()
|
||||||
|
|
||||||
|
// Encode and send the request.
|
||||||
|
c.request.Seq = seq
|
||||||
|
c.request.Method = call.Method
|
||||||
|
err := c.codec.WriteRequest(&c.request, call.Args)
|
||||||
|
if err != nil {
|
||||||
|
c.mutex.Lock()
|
||||||
|
call = c.pending[seq]
|
||||||
|
delete(c.pending, seq)
|
||||||
|
c.mutex.Unlock()
|
||||||
|
if call != nil {
|
||||||
|
call.Error = err
|
||||||
|
call.done()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notify sends a request to the receiver but does not wait for a return value.
|
||||||
|
func (c *Client) Notify(method string, args interface{}) error {
|
||||||
|
c.sending.Lock()
|
||||||
|
defer c.sending.Unlock()
|
||||||
|
|
||||||
|
if c.shutdown || c.closing {
|
||||||
|
return ErrShutdown
|
||||||
|
}
|
||||||
|
|
||||||
|
c.request.Seq = 0
|
||||||
|
c.request.Method = method
|
||||||
|
return c.codec.WriteRequest(&c.request, args)
|
||||||
|
}
|
125  vendor/github.com/cenkalti/rpc2/codec.go  generated vendored Normal file
@ -0,0 +1,125 @@
|
|||||||
|
package rpc2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/gob"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Codec implements reading and writing of RPC requests and responses.
|
||||||
|
// The client calls ReadHeader to read a message header.
|
||||||
|
// The implementation must populate either Request or Response argument.
|
||||||
|
// Depending on which argument is populated, ReadRequestBody or
|
||||||
|
// ReadResponseBody is called right after ReadHeader.
|
||||||
|
// ReadRequestBody and ReadResponseBody may be called with a nil
|
||||||
|
// argument to force the body to be read and then discarded.
|
||||||
|
type Codec interface {
|
||||||
|
// ReadHeader must read a message and populate either the request
|
||||||
|
// or the response by inspecting the incoming message.
|
||||||
|
ReadHeader(*Request, *Response) error
|
||||||
|
|
||||||
|
// ReadRequestBody into args argument of handler function.
|
||||||
|
ReadRequestBody(interface{}) error
|
||||||
|
|
||||||
|
// ReadResponseBody into reply argument of handler function.
|
||||||
|
ReadResponseBody(interface{}) error
|
||||||
|
|
||||||
|
// WriteRequest must be safe for concurrent use by multiple goroutines.
|
||||||
|
WriteRequest(*Request, interface{}) error
|
||||||
|
|
||||||
|
// WriteResponse must be safe for concurrent use by multiple goroutines.
|
||||||
|
WriteResponse(*Response, interface{}) error
|
||||||
|
|
||||||
|
// Close is called when client/server finished with the connection.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request is a header written before every RPC call.
|
||||||
|
type Request struct {
|
||||||
|
Seq uint64 // sequence number chosen by client
|
||||||
|
Method string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response is a header written before every RPC return.
|
||||||
|
type Response struct {
|
||||||
|
Seq uint64 // echoes that of the request
|
||||||
|
Error string // error, if any.
|
||||||
|
}
|
||||||
|
|
||||||
|
type gobCodec struct {
|
||||||
|
rwc io.ReadWriteCloser
|
||||||
|
dec *gob.Decoder
|
||||||
|
enc *gob.Encoder
|
||||||
|
encBuf *bufio.Writer
|
||||||
|
mutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type message struct {
|
||||||
|
Seq uint64
|
||||||
|
Method string
|
||||||
|
Error string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn.
|
||||||
|
func NewGobCodec(conn io.ReadWriteCloser) Codec {
|
||||||
|
buf := bufio.NewWriter(conn)
|
||||||
|
return &gobCodec{
|
||||||
|
rwc: conn,
|
||||||
|
dec: gob.NewDecoder(conn),
|
||||||
|
enc: gob.NewEncoder(buf),
|
||||||
|
encBuf: buf,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) ReadHeader(req *Request, resp *Response) error {
|
||||||
|
var msg message
|
||||||
|
if err := c.dec.Decode(&msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if msg.Method != "" {
|
||||||
|
req.Seq = msg.Seq
|
||||||
|
req.Method = msg.Method
|
||||||
|
} else {
|
||||||
|
resp.Seq = msg.Seq
|
||||||
|
resp.Error = msg.Error
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) ReadRequestBody(body interface{}) error {
|
||||||
|
return c.dec.Decode(body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) ReadResponseBody(body interface{}) error {
|
||||||
|
return c.dec.Decode(body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) {
|
||||||
|
c.mutex.Lock()
|
||||||
|
defer c.mutex.Unlock()
|
||||||
|
if err = c.enc.Encode(r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = c.enc.Encode(body); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return c.encBuf.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) {
|
||||||
|
c.mutex.Lock()
|
||||||
|
defer c.mutex.Unlock()
|
||||||
|
if err = c.enc.Encode(r); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = c.enc.Encode(body); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return c.encBuf.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *gobCodec) Close() error {
|
||||||
|
return c.rwc.Close()
|
||||||
|
}
|
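codec.go above defines the Codec interface plus the gob-based default; NewClient is just NewClientWithCodec(NewGobCodec(conn)), and the server side accepts a codec through ServeCodec. A sketch of a server spelling that wiring out explicitly; the handler and listen address mirror the README example and are otherwise illustrative:

```go
package main

import (
    "log"
    "net"

    "github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
    lis, err := net.Listen("tcp", "127.0.0.1:5000")
    if err != nil {
        log.Fatal(err)
    }

    srv := rpc2.NewServer()
    srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {
        *reply = Reply(args.A + args.B)
        return nil
    })

    for {
        conn, err := lis.Accept()
        if err != nil {
            log.Fatal(err)
        }
        // srv.Accept(lis) does this loop internally; spelling it out shows where a
        // different Codec (e.g. the JSON-RPC one vendored below) could be swapped in.
        go srv.ServeCodec(rpc2.NewGobCodec(conn))
    }
}
```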
12  vendor/github.com/cenkalti/rpc2/debug.go  generated vendored Normal file
@ -0,0 +1,12 @@
package rpc2

import "log"

// DebugLog controls the printing of internal and I/O errors.
var DebugLog = false

func debugln(v ...interface{}) {
    if DebugLog {
        log.Println(v...)
    }
}
226  vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go  generated vendored Normal file
@ -0,0 +1,226 @@
|
|||||||
|
// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package.
|
||||||
|
//
|
||||||
|
// Beside struct types, JSONCodec allows using positional arguments.
|
||||||
|
// Use []interface{} as the type of argument when sending and receiving methods.
|
||||||
|
//
|
||||||
|
// Positional arguments example:
|
||||||
|
// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error {
|
||||||
|
// *result = args[0].(float64) + args[1].(float64)
|
||||||
|
// return nil
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// var result float64
|
||||||
|
// client.Call("add", []interface{}{1, 2}, &result)
|
||||||
|
//
|
||||||
|
package jsonrpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/cenkalti/rpc2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type jsonCodec struct {
|
||||||
|
dec *json.Decoder // for reading JSON values
|
||||||
|
enc *json.Encoder // for writing JSON values
|
||||||
|
c io.Closer
|
||||||
|
|
||||||
|
// temporary work space
|
||||||
|
msg message
|
||||||
|
serverRequest serverRequest
|
||||||
|
clientResponse clientResponse
|
||||||
|
|
||||||
|
// JSON-RPC clients can use arbitrary json values as request IDs.
|
||||||
|
// Package rpc expects uint64 request IDs.
|
||||||
|
// We assign uint64 sequence numbers to incoming requests
|
||||||
|
// but save the original request ID in the pending map.
|
||||||
|
// When rpc responds, we use the sequence number in
|
||||||
|
// the response to find the original request ID.
|
||||||
|
mutex sync.Mutex // protects seq, pending
|
||||||
|
pending map[uint64]*json.RawMessage
|
||||||
|
seq uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn.
|
||||||
|
func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec {
|
||||||
|
return &jsonCodec{
|
||||||
|
dec: json.NewDecoder(conn),
|
||||||
|
enc: json.NewEncoder(conn),
|
||||||
|
c: conn,
|
||||||
|
pending: make(map[uint64]*json.RawMessage),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// serverRequest and clientResponse combined
|
||||||
|
type message struct {
|
||||||
|
Method string `json:"method"`
|
||||||
|
Params *json.RawMessage `json:"params"`
|
||||||
|
Id *json.RawMessage `json:"id"`
|
||||||
|
Result *json.RawMessage `json:"result"`
|
||||||
|
Error interface{} `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal to
|
||||||
|
type serverRequest struct {
|
||||||
|
Method string `json:"method"`
|
||||||
|
Params *json.RawMessage `json:"params"`
|
||||||
|
Id *json.RawMessage `json:"id"`
|
||||||
|
}
|
||||||
|
type clientResponse struct {
|
||||||
|
Id uint64 `json:"id"`
|
||||||
|
Result *json.RawMessage `json:"result"`
|
||||||
|
Error interface{} `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// to Marshal
|
||||||
|
type serverResponse struct {
|
||||||
|
Id *json.RawMessage `json:"id"`
|
||||||
|
Result interface{} `json:"result"`
|
||||||
|
Error interface{} `json:"error"`
|
||||||
|
}
|
||||||
|
type clientRequest struct {
|
||||||
|
Method string `json:"method"`
|
||||||
|
Params interface{} `json:"params"`
|
||||||
|
Id *uint64 `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error {
|
||||||
|
c.msg = message{}
|
||||||
|
if err := c.dec.Decode(&c.msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.msg.Method != "" {
|
||||||
|
// request comes to server
|
||||||
|
c.serverRequest.Id = c.msg.Id
|
||||||
|
c.serverRequest.Method = c.msg.Method
|
||||||
|
c.serverRequest.Params = c.msg.Params
|
||||||
|
|
||||||
|
req.Method = c.serverRequest.Method
|
||||||
|
|
||||||
|
// JSON request id can be any JSON value;
|
||||||
|
// RPC package expects uint64. Translate to
|
||||||
|
// internal uint64 and save JSON on the side.
|
||||||
|
if c.serverRequest.Id == nil {
|
||||||
|
// Notification
|
||||||
|
} else {
|
||||||
|
c.mutex.Lock()
|
||||||
|
c.seq++
|
||||||
|
c.pending[c.seq] = c.serverRequest.Id
|
||||||
|
c.serverRequest.Id = nil
|
||||||
|
req.Seq = c.seq
|
||||||
|
c.mutex.Unlock()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// response comes to client
|
||||||
|
err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientResponse.Result = c.msg.Result
|
||||||
|
c.clientResponse.Error = c.msg.Error
|
||||||
|
|
||||||
|
resp.Error = ""
|
||||||
|
resp.Seq = c.clientResponse.Id
|
||||||
|
if c.clientResponse.Error != nil || c.clientResponse.Result == nil {
|
||||||
|
x, ok := c.clientResponse.Error.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid error %v", c.clientResponse.Error)
|
||||||
|
}
|
||||||
|
if x == "" {
|
||||||
|
x = "unspecified error"
|
||||||
|
}
|
||||||
|
resp.Error = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var errMissingParams = errors.New("jsonrpc: request body missing params")
|
||||||
|
|
||||||
|
func (c *jsonCodec) ReadRequestBody(x interface{}) error {
|
||||||
|
if x == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if c.serverRequest.Params == nil {
|
||||||
|
return errMissingParams
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Check if x points to a slice of any kind
|
||||||
|
rt := reflect.TypeOf(x)
|
||||||
|
if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice {
|
||||||
|
// If it's a slice, unmarshal as is
|
||||||
|
err = json.Unmarshal(*c.serverRequest.Params, x)
|
||||||
|
} else {
|
||||||
|
// Anything else unmarshal into a slice containing x
|
||||||
|
params := &[]interface{}{x}
|
||||||
|
err = json.Unmarshal(*c.serverRequest.Params, params)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *jsonCodec) ReadResponseBody(x interface{}) error {
|
||||||
|
if x == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return json.Unmarshal(*c.clientResponse.Result, x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error {
|
||||||
|
req := &clientRequest{Method: r.Method}
|
||||||
|
|
||||||
|
// Check if param is a slice of any kind
|
||||||
|
if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice {
|
||||||
|
// If it's a slice, leave as is
|
||||||
|
req.Params = param
|
||||||
|
} else {
|
||||||
|
// Put anything else into a slice
|
||||||
|
req.Params = []interface{}{param}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Seq == 0 {
|
||||||
|
// Notification
|
||||||
|
req.Id = nil
|
||||||
|
} else {
|
||||||
|
seq := r.Seq
|
||||||
|
req.Id = &seq
|
||||||
|
}
|
||||||
|
return c.enc.Encode(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
var null = json.RawMessage([]byte("null"))
|
||||||
|
|
||||||
|
func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error {
|
||||||
|
c.mutex.Lock()
|
||||||
|
b, ok := c.pending[r.Seq]
|
||||||
|
if !ok {
|
||||||
|
c.mutex.Unlock()
|
||||||
|
return errors.New("invalid sequence number in response")
|
||||||
|
}
|
||||||
|
delete(c.pending, r.Seq)
|
||||||
|
c.mutex.Unlock()
|
||||||
|
|
||||||
|
if b == nil {
|
||||||
|
// Invalid request so no id. Use JSON null.
|
||||||
|
b = &null
|
||||||
|
}
|
||||||
|
resp := serverResponse{Id: b}
|
||||||
|
if r.Error == "" {
|
||||||
|
resp.Result = x
|
||||||
|
} else {
|
||||||
|
resp.Error = r.Error
|
||||||
|
}
|
||||||
|
return c.enc.Encode(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *jsonCodec) Close() error {
|
||||||
|
return c.c.Close()
|
||||||
|
}
|
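The JSON codec above plugs into the same Client/Server machinery as the gob codec, and positional parameters travel as a JSON array, exactly as the package comment describes. A minimal client-side sketch using it, assuming the server registered an "add" handler that takes positional arguments (as in the package comment):

```go
package main

import (
    "log"
    "net"

    "github.com/cenkalti/rpc2"
    "github.com/cenkalti/rpc2/jsonrpc"
)

func main() {
    conn, err := net.Dial("tcp", "127.0.0.1:5000")
    if err != nil {
        log.Fatal(err)
    }

    clt := rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn))
    go clt.Run()
    defer clt.Close()

    // Positional arguments go over the wire as a JSON array.
    var result float64
    if err := clt.Call("add", []interface{}{1, 2}, &result); err != nil {
        log.Fatal(err)
    }
    log.Println("add result:", result)
}
```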
182  vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc_test.go  generated vendored Normal file
@ -0,0 +1,182 @@
|
|||||||
|
package jsonrpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/rpc2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
network = "tcp4"
|
||||||
|
addr = "127.0.0.1:5000"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestJSONRPC(t *testing.T) {
|
||||||
|
type Args struct{ A, B int }
|
||||||
|
type Reply int
|
||||||
|
|
||||||
|
lis, err := net.Listen(network, addr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := rpc2.NewServer()
|
||||||
|
srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {
|
||||||
|
*reply = Reply(args.A + args.B)
|
||||||
|
|
||||||
|
var rep Reply
|
||||||
|
err := client.Call("mult", Args{2, 3}, &rep)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rep != 6 {
|
||||||
|
t.Fatalf("not expected: %d", rep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
srv.Handle("addPos", func(client *rpc2.Client, args []interface{}, result *float64) error {
|
||||||
|
*result = args[0].(float64) + args[1].(float64)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
srv.Handle("rawArgs", func(client *rpc2.Client, args []json.RawMessage, reply *[]string) error {
|
||||||
|
for _, p := range args {
|
||||||
|
var str string
|
||||||
|
json.Unmarshal(p, &str)
|
||||||
|
*reply = append(*reply, str)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
srv.Handle("typedArgs", func(client *rpc2.Client, args []int, reply *[]string) error {
|
||||||
|
for _, p := range args {
|
||||||
|
*reply = append(*reply, fmt.Sprintf("%d", p))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
srv.Handle("nilArgs", func(client *rpc2.Client, args []interface{}, reply *[]string) error {
|
||||||
|
for _, v := range args {
|
||||||
|
if v == nil {
|
||||||
|
*reply = append(*reply, "nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
number := make(chan int, 1)
|
||||||
|
srv.Handle("set", func(client *rpc2.Client, i int, _ *struct{}) error {
|
||||||
|
number <- i
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
conn, err := lis.Accept()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
srv.ServeCodec(NewJSONCodec(conn))
|
||||||
|
}()
|
||||||
|
|
||||||
|
conn, err := net.Dial(network, addr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
clt := rpc2.NewClientWithCodec(NewJSONCodec(conn))
|
||||||
|
clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error {
|
||||||
|
*reply = Reply(args.A * args.B)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
go clt.Run()
|
||||||
|
|
||||||
|
// Test Call.
|
||||||
|
var rep Reply
|
||||||
|
err = clt.Call("add", Args{1, 2}, &rep)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if rep != 3 {
|
||||||
|
t.Fatalf("not expected: %d", rep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test notification.
|
||||||
|
err = clt.Notify("set", 6)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case i := <-number:
|
||||||
|
if i != 6 {
|
||||||
|
t.Fatalf("unexpected number: %d", i)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("did not get notification")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test undefined method.
|
||||||
|
err = clt.Call("foo", 1, &rep)
|
||||||
|
if err.Error() != "rpc2: can't find method foo" {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test Positional arguments.
|
||||||
|
var result float64
|
||||||
|
err = clt.Call("addPos", []interface{}{1, 2}, &result)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if result != 3 {
|
||||||
|
t.Fatalf("not expected: %f", result)
|
||||||
|
}
|
||||||
|
|
||||||
|
testArgs := func(expected, reply []string) error {
|
||||||
|
if len(reply) != len(expected) {
|
||||||
|
return fmt.Errorf("incorrect reply length: %d", len(reply))
|
||||||
|
}
|
||||||
|
for i := range expected {
|
||||||
|
if reply[i] != expected[i] {
|
||||||
|
return fmt.Errorf("not expected reply[%d]: %s", i, reply[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test raw arguments (partial unmarshal)
|
||||||
|
var reply []string
|
||||||
|
var expected []string = []string{"arg1", "arg2"}
|
||||||
|
rawArgs := json.RawMessage(`["arg1", "arg2"]`)
|
||||||
|
err = clt.Call("rawArgs", rawArgs, &reply)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = testArgs(expected, reply); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test typed arguments
|
||||||
|
reply = []string{}
|
||||||
|
expected = []string{"1", "2"}
|
||||||
|
typedArgs := []int{1, 2}
|
||||||
|
err = clt.Call("typedArgs", typedArgs, &reply)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err = testArgs(expected, reply); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test nil args
|
||||||
|
reply = []string{}
|
||||||
|
expected = []string{"nil"}
|
||||||
|
err = clt.Call("nilArgs", nil, &reply)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err = testArgs(expected, reply); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
98  vendor/github.com/cenkalti/rpc2/rpc2_test.go  generated vendored Normal file
@ -0,0 +1,98 @@
|
|||||||
|
package rpc2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
network = "tcp4"
|
||||||
|
addr = "127.0.0.1:5000"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTCPGOB(t *testing.T) {
|
||||||
|
type Args struct{ A, B int }
|
||||||
|
type Reply int
|
||||||
|
|
||||||
|
lis, err := net.Listen(network, addr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := NewServer()
|
||||||
|
srv.Handle("add", func(client *Client, args *Args, reply *Reply) error {
|
||||||
|
*reply = Reply(args.A + args.B)
|
||||||
|
|
||||||
|
var rep Reply
|
||||||
|
err := client.Call("mult", Args{2, 3}, &rep)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rep != 6 {
|
||||||
|
t.Fatalf("not expected: %d", rep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
number := make(chan int, 1)
|
||||||
|
srv.Handle("set", func(client *Client, i int, _ *struct{}) error {
|
||||||
|
number <- i
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
go srv.Accept(lis)
|
||||||
|
|
||||||
|
conn, err := net.Dial(network, addr)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
clt := NewClient(conn)
|
||||||
|
clt.Handle("mult", func(client *Client, args *Args, reply *Reply) error {
|
||||||
|
*reply = Reply(args.A * args.B)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
go clt.Run()
|
||||||
|
defer clt.Close()
|
||||||
|
|
||||||
|
// Test Call.
|
||||||
|
var rep Reply
|
||||||
|
err = clt.Call("add", Args{1, 2}, &rep)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if rep != 3 {
|
||||||
|
t.Fatalf("not expected: %d", rep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test notification.
|
||||||
|
err = clt.Notify("set", 6)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case i := <-number:
|
||||||
|
if i != 6 {
|
||||||
|
t.Fatalf("unexpected number: %d", i)
|
||||||
|
}
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Fatal("did not get notification")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test blocked request
|
||||||
|
clt.SetBlocking(true)
|
||||||
|
err = clt.Call("add", Args{1, 2}, &rep)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if rep != 3 {
|
||||||
|
t.Fatalf("not expected: %d", rep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test undefined method.
|
||||||
|
err = clt.Call("foo", 1, &rep)
|
||||||
|
if err.Error() != "rpc2: can't find method foo" {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
181  vendor/github.com/cenkalti/rpc2/server.go  generated  vendored  Normal file
@ -0,0 +1,181 @@
|
|||||||
|
package rpc2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"reflect"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/cenkalti/hub"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Precompute the reflect type for error. Can't use error directly
|
||||||
|
// because Typeof takes an empty interface value. This is annoying.
|
||||||
|
var typeOfError = reflect.TypeOf((*error)(nil)).Elem()
|
||||||
|
var typeOfClient = reflect.TypeOf((*Client)(nil))
|
||||||
|
|
||||||
|
const (
|
||||||
|
clientConnected hub.Kind = iota
|
||||||
|
clientDisconnected
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server responds to RPC requests made by Client.
|
||||||
|
type Server struct {
|
||||||
|
handlers map[string]*handler
|
||||||
|
eventHub *hub.Hub
|
||||||
|
}
|
||||||
|
|
||||||
|
type handler struct {
|
||||||
|
fn reflect.Value
|
||||||
|
argType reflect.Type
|
||||||
|
replyType reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
type connectionEvent struct {
|
||||||
|
Client *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
type disconnectionEvent struct {
|
||||||
|
Client *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (connectionEvent) Kind() hub.Kind { return clientConnected }
|
||||||
|
func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected }
|
||||||
|
|
||||||
|
// NewServer returns a new Server.
|
||||||
|
func NewServer() *Server {
|
||||||
|
return &Server{
|
||||||
|
handlers: make(map[string]*handler),
|
||||||
|
eventHub: &hub.Hub{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
|
||||||
|
func (s *Server) Handle(method string, handlerFunc interface{}) {
|
||||||
|
addHandler(s.handlers, method, handlerFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) {
|
||||||
|
if _, ok := handlers[mname]; ok {
|
||||||
|
panic("rpc2: multiple registrations for " + mname)
|
||||||
|
}
|
||||||
|
|
||||||
|
method := reflect.ValueOf(handlerFunc)
|
||||||
|
mtype := method.Type()
|
||||||
|
// Method needs three ins: *client, *args, *reply.
|
||||||
|
if mtype.NumIn() != 3 {
|
||||||
|
log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn())
|
||||||
|
}
|
||||||
|
// First arg must be a pointer to rpc2.Client.
|
||||||
|
clientType := mtype.In(0)
|
||||||
|
if clientType.Kind() != reflect.Ptr {
|
||||||
|
log.Panicln("method", mname, "client type not a pointer:", clientType)
|
||||||
|
}
|
||||||
|
if clientType != typeOfClient {
|
||||||
|
log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client")
|
||||||
|
}
|
||||||
|
// Second arg need not be a pointer.
|
||||||
|
argType := mtype.In(1)
|
||||||
|
if !isExportedOrBuiltinType(argType) {
|
||||||
|
log.Panicln(mname, "argument type not exported:", argType)
|
||||||
|
}
|
||||||
|
// Third arg must be a pointer.
|
||||||
|
replyType := mtype.In(2)
|
||||||
|
if replyType.Kind() != reflect.Ptr {
|
||||||
|
log.Panicln("method", mname, "reply type not a pointer:", replyType)
|
||||||
|
}
|
||||||
|
// Reply type must be exported.
|
||||||
|
if !isExportedOrBuiltinType(replyType) {
|
||||||
|
log.Panicln("method", mname, "reply type not exported:", replyType)
|
||||||
|
}
|
||||||
|
// Method needs one out.
|
||||||
|
if mtype.NumOut() != 1 {
|
||||||
|
log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut())
|
||||||
|
}
|
||||||
|
// The return type of the method must be error.
|
||||||
|
if returnType := mtype.Out(0); returnType != typeOfError {
|
||||||
|
log.Panicln("method", mname, "returns", returnType.String(), "not error")
|
||||||
|
}
|
||||||
|
handlers[mname] = &handler{
|
||||||
|
fn: method,
|
||||||
|
argType: argType,
|
||||||
|
replyType: replyType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is this type exported or a builtin?
|
||||||
|
func isExportedOrBuiltinType(t reflect.Type) bool {
|
||||||
|
for t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
// PkgPath will be non-empty even for an exported type,
|
||||||
|
// so we need to check the type name as well.
|
||||||
|
return isExported(t.Name()) || t.PkgPath() == ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is this an exported - upper case - name?
|
||||||
|
func isExported(name string) bool {
|
||||||
|
rune, _ := utf8.DecodeRuneInString(name)
|
||||||
|
return unicode.IsUpper(rune)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConnect registers a function to run when a client connects.
|
||||||
|
func (s *Server) OnConnect(f func(*Client)) {
|
||||||
|
s.eventHub.Subscribe(clientConnected, func(e hub.Event) {
|
||||||
|
go f(e.(connectionEvent).Client)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnDisconnect registers a function to run when a client disconnects.
|
||||||
|
func (s *Server) OnDisconnect(f func(*Client)) {
|
||||||
|
s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) {
|
||||||
|
go f(e.(disconnectionEvent).Client)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept accepts connections on the listener and serves requests
|
||||||
|
// for each incoming connection. Accept blocks; the caller typically
|
||||||
|
// invokes it in a go statement.
|
||||||
|
func (s *Server) Accept(lis net.Listener) {
|
||||||
|
for {
|
||||||
|
conn, err := lis.Accept()
|
||||||
|
if err != nil {
|
||||||
|
log.Print("rpc.Serve: accept:", err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
go s.ServeConn(conn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeConn runs the server on a single connection.
|
||||||
|
// ServeConn blocks, serving the connection until the client hangs up.
|
||||||
|
// The caller typically invokes ServeConn in a go statement.
|
||||||
|
// ServeConn uses the gob wire format (see package gob) on the
|
||||||
|
// connection. To use an alternate codec, use ServeCodec.
|
||||||
|
func (s *Server) ServeConn(conn io.ReadWriteCloser) {
|
||||||
|
s.ServeCodec(NewGobCodec(conn))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeCodec is like ServeConn but uses the specified codec to
|
||||||
|
// decode requests and encode responses.
|
||||||
|
func (s *Server) ServeCodec(codec Codec) {
|
||||||
|
s.ServeCodecWithState(codec, NewState())
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeCodecWithState is like ServeCodec but also gives the ability to
|
||||||
|
// associate a state variable with the client that persists across RPC calls.
|
||||||
|
func (s *Server) ServeCodecWithState(codec Codec, state *State) {
|
||||||
|
defer codec.Close()
|
||||||
|
|
||||||
|
// Client also handles the incoming connections.
|
||||||
|
c := NewClientWithCodec(codec)
|
||||||
|
c.server = true
|
||||||
|
c.handlers = s.handlers
|
||||||
|
c.State = state
|
||||||
|
|
||||||
|
s.eventHub.Publish(connectionEvent{c})
|
||||||
|
c.Run()
|
||||||
|
s.eventHub.Publish(disconnectionEvent{c})
|
||||||
|
}
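For reference, the reflection checks above only accept handlers of the shape func(*rpc2.Client, argType, *replyType) error. A minimal sketch of registering one (the method name, argument struct, and listen address are illustrative assumptions, not part of this commit):

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/cenkalti/rpc2"
)

// DivArgs is an illustrative argument struct (exported, as required by addHandler).
type DivArgs struct{ A, B float64 }

func main() {
	srv := rpc2.NewServer()
	// Handlers must look like func(*rpc2.Client, argType, *replyType) error.
	srv.Handle("div", func(c *rpc2.Client, args *DivArgs, reply *float64) error {
		if args.B == 0 {
			return fmt.Errorf("division by zero")
		}
		*reply = args.A / args.B
		return nil
	})
	// Optional connection hooks exposed by the server.
	srv.OnConnect(func(c *rpc2.Client) { log.Println("client connected") })
	srv.OnDisconnect(func(c *rpc2.Client) { log.Println("client disconnected") })

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv.Accept(lis) // blocks, serving each incoming connection
}
```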
|
25  vendor/github.com/cenkalti/rpc2/state.go  generated  vendored  Normal file
@ -0,0 +1,25 @@
package rpc2

import "sync"

type State struct {
	store map[string]interface{}
	m     sync.RWMutex
}

func NewState() *State {
	return &State{store: make(map[string]interface{})}
}

func (s *State) Get(key string) (value interface{}, ok bool) {
	s.m.RLock()
	value, ok = s.store[key]
	s.m.RUnlock()
	return
}

func (s *State) Set(key string, value interface{}) {
	s.m.Lock()
	s.store[key] = value
	s.m.Unlock()
}
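State gives each connection its own small key/value store; the server attaches one per client in ServeCodecWithState. A minimal sketch of using it from handlers (the handler names and keys are illustrative assumptions):

```go
package main

import (
	"net"

	"github.com/cenkalti/rpc2"
)

func main() {
	srv := rpc2.NewServer()
	// Stash a per-connection value; State lives for the life of the connection.
	srv.Handle("login", func(c *rpc2.Client, user string, ok *bool) error {
		c.State.Set("user", user)
		*ok = true
		return nil
	})
	// Read it back from the same connection in a later call.
	srv.Handle("whoami", func(c *rpc2.Client, _ string, reply *string) error {
		if v, found := c.State.Get("user"); found {
			*reply = v.(string)
		}
		return nil
	})

	lis, _ := net.Listen("tcp", "127.0.0.1:0") // illustrative; check errors in real code
	srv.Accept(lis)
}
```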
9  vendor/github.com/patrickmn/go-cache/CONTRIBUTORS  generated  vendored  Normal file
@ -0,0 +1,9 @@
This is a list of people who have contributed code to go-cache. They, or their
employers, are the copyright holders of the contributed code. Contributed code
is subject to the license restrictions listed in LICENSE (as they were when the
code was contributed.)

Dustin Sallings <dustin@spy.net>
Jason Mooberry <jasonmoo@me.com>
Sergey Shepelev <temotor@gmail.com>
Alex Edwards <ajmedwards@gmail.com>
19  vendor/github.com/patrickmn/go-cache/LICENSE  generated  vendored  Normal file
@ -0,0 +1,19 @@
Copyright (c) 2012-2019 Patrick Mylund Nielsen and the go-cache contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
83  vendor/github.com/patrickmn/go-cache/README.md  generated  vendored  Normal file
@ -0,0 +1,83 @@
# go-cache

go-cache is an in-memory key:value store/cache similar to memcached that is
suitable for applications running on a single machine. Its major advantage is
that, being essentially a thread-safe `map[string]interface{}` with expiration
times, it doesn't need to serialize or transmit its contents over the network.

Any object can be stored, for a given duration or forever, and the cache can be
safely used by multiple goroutines.

Although go-cache isn't meant to be used as a persistent datastore, the entire
cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
items map to serialize, and `NewFrom()` to create a cache from a deserialized
one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)

### Installation

`go get github.com/patrickmn/go-cache`

### Usage

```go
import (
	"fmt"
	"github.com/patrickmn/go-cache"
	"time"
)

func main() {
	// Create a cache with a default expiration time of 5 minutes, and which
	// purges expired items every 10 minutes
	c := cache.New(5*time.Minute, 10*time.Minute)

	// Set the value of the key "foo" to "bar", with the default expiration time
	c.Set("foo", "bar", cache.DefaultExpiration)

	// Set the value of the key "baz" to 42, with no expiration time
	// (the item won't be removed until it is re-set, or removed using
	// c.Delete("baz")
	c.Set("baz", 42, cache.NoExpiration)

	// Get the string associated with the key "foo" from the cache
	foo, found := c.Get("foo")
	if found {
		fmt.Println(foo)
	}

	// Since Go is statically typed, and cache values can be anything, type
	// assertion is needed when values are being passed to functions that don't
	// take arbitrary types, (i.e. interface{}). The simplest way to do this for
	// values which will only be used once--e.g. for passing to another
	// function--is:
	foo, found := c.Get("foo")
	if found {
		MyFunction(foo.(string))
	}

	// This gets tedious if the value is used several times in the same function.
	// You might do either of the following instead:
	if x, found := c.Get("foo"); found {
		foo := x.(string)
		// ...
	}
	// or
	var foo string
	if x, found := c.Get("foo"); found {
		foo = x.(string)
	}
	// ...
	// foo can then be passed around freely as a string

	// Want performance? Store pointers!
	c.Set("foo", &MyStruct, cache.DefaultExpiration)
	if x, found := c.Get("foo"); found {
		foo := x.(*MyStruct)
		// ...
	}
}
```

### Reference

`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
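The README mentions persisting the cache with `c.Items()` and restoring it with `NewFrom()` but does not show it. A minimal sketch, assuming gob-encodable values and an illustrative file name:

```go
package main

import (
	"encoding/gob"
	"os"
	"time"

	cache "github.com/patrickmn/go-cache"
)

func main() {
	c := cache.New(5*time.Minute, 10*time.Minute)
	c.Set("greeting", "hello", cache.DefaultExpiration)

	// Item.Object is an interface{}, so concrete types stored in the cache
	// must be registered before gob can encode them.
	gob.Register("")

	// Save: snapshot the items map and gob-encode it to disk.
	f, _ := os.Create("cache.gob") // illustrative path; handle errors in real code
	gob.NewEncoder(f).Encode(c.Items())
	f.Close()

	// Load: decode the items map and build a new cache around it.
	f, _ = os.Open("cache.gob")
	items := map[string]cache.Item{}
	gob.NewDecoder(f).Decode(&items)
	f.Close()
	restored := cache.NewFrom(5*time.Minute, 10*time.Minute, items)
	_ = restored
}
```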
1161  vendor/github.com/patrickmn/go-cache/cache.go  generated  vendored  Normal file
File diff suppressed because it is too large
1771  vendor/github.com/patrickmn/go-cache/cache_test.go  generated  vendored  Normal file
File diff suppressed because it is too large
192  vendor/github.com/patrickmn/go-cache/sharded.go  generated  vendored  Normal file
@ -0,0 +1,192 @@
|
|||||||
|
package cache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
insecurerand "math/rand"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is an experimental and unexported (for now) attempt at making a cache
|
||||||
|
// with better algorithmic complexity than the standard one, namely by
|
||||||
|
// preventing write locks of the entire cache when an item is added. As of the
|
||||||
|
// time of writing, the overhead of selecting buckets results in cache
|
||||||
|
// operations being about twice as slow as for the standard cache with small
|
||||||
|
// total cache sizes, and faster for larger ones.
|
||||||
|
//
|
||||||
|
// See cache_test.go for a few benchmarks.
|
||||||
|
|
||||||
|
type unexportedShardedCache struct {
|
||||||
|
*shardedCache
|
||||||
|
}
|
||||||
|
|
||||||
|
type shardedCache struct {
|
||||||
|
seed uint32
|
||||||
|
m uint32
|
||||||
|
cs []*cache
|
||||||
|
janitor *shardedJanitor
|
||||||
|
}
|
||||||
|
|
||||||
|
// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
|
||||||
|
func djb33(seed uint32, k string) uint32 {
|
||||||
|
var (
|
||||||
|
l = uint32(len(k))
|
||||||
|
d = 5381 + seed + l
|
||||||
|
i = uint32(0)
|
||||||
|
)
|
||||||
|
// Why is all this 5x faster than a for loop?
|
||||||
|
if l >= 4 {
|
||||||
|
for i < l-4 {
|
||||||
|
d = (d * 33) ^ uint32(k[i])
|
||||||
|
d = (d * 33) ^ uint32(k[i+1])
|
||||||
|
d = (d * 33) ^ uint32(k[i+2])
|
||||||
|
d = (d * 33) ^ uint32(k[i+3])
|
||||||
|
i += 4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch l - i {
|
||||||
|
case 1:
|
||||||
|
case 2:
|
||||||
|
d = (d * 33) ^ uint32(k[i])
|
||||||
|
case 3:
|
||||||
|
d = (d * 33) ^ uint32(k[i])
|
||||||
|
d = (d * 33) ^ uint32(k[i+1])
|
||||||
|
case 4:
|
||||||
|
d = (d * 33) ^ uint32(k[i])
|
||||||
|
d = (d * 33) ^ uint32(k[i+1])
|
||||||
|
d = (d * 33) ^ uint32(k[i+2])
|
||||||
|
}
|
||||||
|
return d ^ (d >> 16)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) bucket(k string) *cache {
|
||||||
|
return sc.cs[djb33(sc.seed, k)%sc.m]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
|
||||||
|
sc.bucket(k).Set(k, x, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
|
||||||
|
return sc.bucket(k).Add(k, x, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
|
||||||
|
return sc.bucket(k).Replace(k, x, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Get(k string) (interface{}, bool) {
|
||||||
|
return sc.bucket(k).Get(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Increment(k string, n int64) error {
|
||||||
|
return sc.bucket(k).Increment(k, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) IncrementFloat(k string, n float64) error {
|
||||||
|
return sc.bucket(k).IncrementFloat(k, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Decrement(k string, n int64) error {
|
||||||
|
return sc.bucket(k).Decrement(k, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Delete(k string) {
|
||||||
|
sc.bucket(k).Delete(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) DeleteExpired() {
|
||||||
|
for _, v := range sc.cs {
|
||||||
|
v.DeleteExpired()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the items in the cache. This may include items that have expired,
|
||||||
|
// but have not yet been cleaned up. If this is significant, the Expiration
|
||||||
|
// fields of the items should be checked. Note that explicit synchronization
|
||||||
|
// is needed to use a cache and its corresponding Items() return values at
|
||||||
|
// the same time, as the maps are shared.
|
||||||
|
func (sc *shardedCache) Items() []map[string]Item {
|
||||||
|
res := make([]map[string]Item, len(sc.cs))
|
||||||
|
for i, v := range sc.cs {
|
||||||
|
res[i] = v.Items()
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sc *shardedCache) Flush() {
|
||||||
|
for _, v := range sc.cs {
|
||||||
|
v.Flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type shardedJanitor struct {
|
||||||
|
Interval time.Duration
|
||||||
|
stop chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *shardedJanitor) Run(sc *shardedCache) {
|
||||||
|
j.stop = make(chan bool)
|
||||||
|
tick := time.Tick(j.Interval)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-tick:
|
||||||
|
sc.DeleteExpired()
|
||||||
|
case <-j.stop:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func stopShardedJanitor(sc *unexportedShardedCache) {
|
||||||
|
sc.janitor.stop <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func runShardedJanitor(sc *shardedCache, ci time.Duration) {
|
||||||
|
j := &shardedJanitor{
|
||||||
|
Interval: ci,
|
||||||
|
}
|
||||||
|
sc.janitor = j
|
||||||
|
go j.Run(sc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newShardedCache(n int, de time.Duration) *shardedCache {
|
||||||
|
max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
|
||||||
|
rnd, err := rand.Int(rand.Reader, max)
|
||||||
|
var seed uint32
|
||||||
|
if err != nil {
|
||||||
|
os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
|
||||||
|
seed = insecurerand.Uint32()
|
||||||
|
} else {
|
||||||
|
seed = uint32(rnd.Uint64())
|
||||||
|
}
|
||||||
|
sc := &shardedCache{
|
||||||
|
seed: seed,
|
||||||
|
m: uint32(n),
|
||||||
|
cs: make([]*cache, n),
|
||||||
|
}
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
c := &cache{
|
||||||
|
defaultExpiration: de,
|
||||||
|
items: map[string]Item{},
|
||||||
|
}
|
||||||
|
sc.cs[i] = c
|
||||||
|
}
|
||||||
|
return sc
|
||||||
|
}
|
||||||
|
|
||||||
|
func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
|
||||||
|
if defaultExpiration == 0 {
|
||||||
|
defaultExpiration = -1
|
||||||
|
}
|
||||||
|
sc := newShardedCache(shards, defaultExpiration)
|
||||||
|
SC := &unexportedShardedCache{sc}
|
||||||
|
if cleanupInterval > 0 {
|
||||||
|
runShardedJanitor(sc, cleanupInterval)
|
||||||
|
runtime.SetFinalizer(SC, stopShardedJanitor)
|
||||||
|
}
|
||||||
|
return SC
|
||||||
|
}
|
85  vendor/github.com/patrickmn/go-cache/sharded_test.go  generated  vendored  Normal file
@ -0,0 +1,85 @@
|
|||||||
|
package cache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// func TestDjb33(t *testing.T) {
|
||||||
|
// }
|
||||||
|
|
||||||
|
var shardedKeys = []string{
|
||||||
|
"f",
|
||||||
|
"fo",
|
||||||
|
"foo",
|
||||||
|
"barf",
|
||||||
|
"barfo",
|
||||||
|
"foobar",
|
||||||
|
"bazbarf",
|
||||||
|
"bazbarfo",
|
||||||
|
"bazbarfoo",
|
||||||
|
"foobarbazq",
|
||||||
|
"foobarbazqu",
|
||||||
|
"foobarbazquu",
|
||||||
|
"foobarbazquux",
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShardedCache(t *testing.T) {
|
||||||
|
tc := unexportedNewSharded(DefaultExpiration, 0, 13)
|
||||||
|
for _, v := range shardedKeys {
|
||||||
|
tc.Set(v, "value", DefaultExpiration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkShardedCacheGetExpiring(b *testing.B) {
|
||||||
|
benchmarkShardedCacheGet(b, 5*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkShardedCacheGetNotExpiring(b *testing.B) {
|
||||||
|
benchmarkShardedCacheGet(b, NoExpiration)
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkShardedCacheGet(b *testing.B, exp time.Duration) {
|
||||||
|
b.StopTimer()
|
||||||
|
tc := unexportedNewSharded(exp, 0, 10)
|
||||||
|
tc.Set("foobarba", "zquux", DefaultExpiration)
|
||||||
|
b.StartTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
tc.Get("foobarba")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkShardedCacheGetManyConcurrentExpiring(b *testing.B) {
|
||||||
|
benchmarkShardedCacheGetManyConcurrent(b, 5*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkShardedCacheGetManyConcurrentNotExpiring(b *testing.B) {
|
||||||
|
benchmarkShardedCacheGetManyConcurrent(b, NoExpiration)
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkShardedCacheGetManyConcurrent(b *testing.B, exp time.Duration) {
|
||||||
|
b.StopTimer()
|
||||||
|
n := 10000
|
||||||
|
tsc := unexportedNewSharded(exp, 0, 20)
|
||||||
|
keys := make([]string, n)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
k := "foo" + strconv.Itoa(i)
|
||||||
|
keys[i] = k
|
||||||
|
tsc.Set(k, "bar", DefaultExpiration)
|
||||||
|
}
|
||||||
|
each := b.N / n
|
||||||
|
wg := new(sync.WaitGroup)
|
||||||
|
wg.Add(n)
|
||||||
|
for _, v := range keys {
|
||||||
|
go func(k string) {
|
||||||
|
for j := 0; j < each; j++ {
|
||||||
|
tsc.Get(k)
|
||||||
|
}
|
||||||
|
wg.Done()
|
||||||
|
}(v)
|
||||||
|
}
|
||||||
|
b.StartTimer()
|
||||||
|
wg.Wait()
|
||||||
|
}
|
22  vendor/github.com/robfig/cron/v3/.gitignore  generated  vendored  Normal file
@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
1  vendor/github.com/robfig/cron/v3/.travis.yml  generated  vendored  Normal file
@ -0,0 +1 @@
language: go
21  vendor/github.com/robfig/cron/v3/LICENSE  generated  vendored  Normal file
@ -0,0 +1,21 @@
Copyright (C) 2012 Rob Figueiredo
All Rights Reserved.

MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
125  vendor/github.com/robfig/cron/v3/README.md  generated  vendored  Normal file
@ -0,0 +1,125 @@
[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)

# cron

Cron V3 has been released!

To download the specific tagged release, run:
```bash
go get github.com/robfig/cron/v3@v3.0.0
```
Import it in your program as:
```go
import "github.com/robfig/cron/v3"
```
It requires Go 1.11 or later due to usage of Go Modules.

Refer to the documentation here:
http://godoc.org/github.com/robfig/cron

The rest of this document describes the advances in v3 and a list of
breaking changes for users that wish to upgrade from an earlier version.

## Upgrading to v3 (June 2019)

cron v3 is a major upgrade to the library that addresses all outstanding bugs,
feature requests, and rough edges. It is based on a merge of master which
contains various fixes to issues found over the years and the v2 branch which
contains some backwards-incompatible features like the ability to remove cron
jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
the timezone support, and fixes a number of bugs.

New features:

- Support for Go modules. Callers must now import this library as
  `github.com/robfig/cron/v3`, instead of `gopkg.in/...`

- Fixed bugs:
  - 0f01e6b parser: fix combining of Dow and Dom (#70)
  - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
  - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
  - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
  - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)

- Standard cron spec parsing by default (first field is "minute"), with an easy
  way to opt into the seconds field (quartz-compatible). Although, note that the
  year field (optional in Quartz) is not supported.

- Extensible, key/value logging via an interface that complies with
  the https://github.com/go-logr/logr project.

- The new Chain & JobWrapper types allow you to install "interceptors" to add
  cross-cutting behavior like the following:
  - Recover any panics from jobs
  - Delay a job's execution if the previous run hasn't completed yet
  - Skip a job's execution if the previous run hasn't completed yet
  - Log each job's invocations
  - Notification when jobs are completed

It is backwards incompatible with both v1 and v2. These updates are required:

- The v1 branch accepted an optional seconds field at the beginning of the cron
  spec. This is non-standard and has led to a lot of confusion. The new default
  parser conforms to the standard as described by [the Cron wikipedia page].

  UPDATING: To retain the old behavior, construct your Cron with a custom
  parser:
```go
// Seconds field, required
cron.New(cron.WithSeconds())

// Seconds field, optional
cron.New(cron.WithParser(cron.NewParser(
	cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
)))
```
- The Cron type now accepts functional options on construction rather than the
  previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).

  UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
  updated to provide those values on construction.

- CRON_TZ is now the recommended way to specify the timezone of a single
  schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
  will continue to be supported since it is unambiguous and easy to do so.

  UPDATING: No update is required.

- By default, cron will no longer recover panics in jobs that it runs.
  Recovering can be surprising (see issue #192) and seems to be at odds with
  typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
  has been removed to accommodate the more general JobWrapper type.

  UPDATING: To opt into panic recovery and configure the panic logger:
```go
cron.New(cron.WithChain(
	cron.Recover(logger), // or use cron.DefaultLogger
))
```
- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
  removed, since it is duplicative with the leveled logging.

  UPDATING: Callers should use `WithLogger` and specify a logger that does not
  discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
```go
cron.New(
	cron.WithLogger(cron.VerbosePrintfLogger(logger)))
```

### Background - Cron spec format

There are two cron spec formats in common usage:

- The "standard" cron format, described on [the Cron wikipedia page] and used by
  the cron Linux system utility.

- The cron format used by [the Quartz Scheduler], commonly used for scheduled
  jobs in Java software

[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html

The original version of this package included an optional "seconds" field, which
made it incompatible with both of these formats. Now, the "standard" format is
the default format accepted, and the Quartz format is opt-in.
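As a supplement to the parsing notes above, a minimal sketch of the default five-field specs (including the CRON_TZ prefix) next to the opt-in seconds field; the schedule strings are illustrative assumptions:

```go
package main

import "github.com/robfig/cron/v3"

func main() {
	// Default parser: standard five-field specs, with an optional CRON_TZ prefix.
	c := cron.New()
	c.AddFunc("30 4 * * *", func() { /* runs at 04:30 in the Cron's location */ })
	c.AddFunc("CRON_TZ=Asia/Tokyo 30 4 * * *", func() { /* runs at 04:30 Tokyo time */ })
	c.Start()

	// Opt into the seconds field (Quartz-style, six fields).
	cs := cron.New(cron.WithSeconds())
	cs.AddFunc("*/10 * * * * *", func() { /* runs every 10 seconds */ })
	cs.Start()

	select {} // block so the schedulers keep running (illustrative)
}
```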
92  vendor/github.com/robfig/cron/v3/chain.go  generated  vendored  Normal file
@ -0,0 +1,92 @@
package cron

import (
	"fmt"
	"runtime"
	"sync"
	"time"
)

// JobWrapper decorates the given Job with some behavior.
type JobWrapper func(Job) Job

// Chain is a sequence of JobWrappers that decorates submitted jobs with
// cross-cutting behaviors like logging or synchronization.
type Chain struct {
	wrappers []JobWrapper
}

// NewChain returns a Chain consisting of the given JobWrappers.
func NewChain(c ...JobWrapper) Chain {
	return Chain{c}
}

// Then decorates the given job with all JobWrappers in the chain.
//
// This:
//     NewChain(m1, m2, m3).Then(job)
// is equivalent to:
//     m1(m2(m3(job)))
func (c Chain) Then(j Job) Job {
	for i := range c.wrappers {
		j = c.wrappers[len(c.wrappers)-i-1](j)
	}
	return j
}

// Recover panics in wrapped jobs and log them with the provided logger.
func Recover(logger Logger) JobWrapper {
	return func(j Job) Job {
		return FuncJob(func() {
			defer func() {
				if r := recover(); r != nil {
					const size = 64 << 10
					buf := make([]byte, size)
					buf = buf[:runtime.Stack(buf, false)]
					err, ok := r.(error)
					if !ok {
						err = fmt.Errorf("%v", r)
					}
					logger.Error(err, "panic", "stack", "...\n"+string(buf))
				}
			}()
			j.Run()
		})
	}
}

// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
// previous one is complete. Jobs running after a delay of more than a minute
// have the delay logged at Info.
func DelayIfStillRunning(logger Logger) JobWrapper {
	return func(j Job) Job {
		var mu sync.Mutex
		return FuncJob(func() {
			start := time.Now()
			mu.Lock()
			defer mu.Unlock()
			if dur := time.Since(start); dur > time.Minute {
				logger.Info("delay", "duration", dur)
			}
			j.Run()
		})
	}
}

// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
// still running. It logs skips to the given logger at Info level.
func SkipIfStillRunning(logger Logger) JobWrapper {
	return func(j Job) Job {
		var ch = make(chan struct{}, 1)
		ch <- struct{}{}
		return FuncJob(func() {
			select {
			case v := <-ch:
				defer func() { ch <- v }()
				j.Run()
			default:
				logger.Info("skip")
			}
		})
	}
}
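A minimal sketch of composing these wrappers when constructing a Cron via WithChain (the logger choice and schedule are illustrative assumptions):

```go
package main

import (
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// The first wrapper is the outermost one: recover from panics, and skip a
	// run if the previous invocation of the same job is still in progress.
	c := cron.New(cron.WithChain(
		cron.Recover(cron.DefaultLogger),
		cron.SkipIfStillRunning(cron.DefaultLogger),
	))
	c.AddFunc("@every 1m", func() { time.Sleep(90 * time.Second) }) // overlapping runs get skipped
	c.Start()
	select {} // keep the program alive (illustrative)
}
```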
242  vendor/github.com/robfig/cron/v3/chain_test.go  generated  vendored  Normal file
@ -0,0 +1,242 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func appendingJob(slice *[]int, value int) Job {
|
||||||
|
var m sync.Mutex
|
||||||
|
return FuncJob(func() {
|
||||||
|
m.Lock()
|
||||||
|
*slice = append(*slice, value)
|
||||||
|
m.Unlock()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendingWrapper(slice *[]int, value int) JobWrapper {
|
||||||
|
return func(j Job) Job {
|
||||||
|
return FuncJob(func() {
|
||||||
|
appendingJob(slice, value).Run()
|
||||||
|
j.Run()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChain(t *testing.T) {
|
||||||
|
var nums []int
|
||||||
|
var (
|
||||||
|
append1 = appendingWrapper(&nums, 1)
|
||||||
|
append2 = appendingWrapper(&nums, 2)
|
||||||
|
append3 = appendingWrapper(&nums, 3)
|
||||||
|
append4 = appendingJob(&nums, 4)
|
||||||
|
)
|
||||||
|
NewChain(append1, append2, append3).Then(append4).Run()
|
||||||
|
if !reflect.DeepEqual(nums, []int{1, 2, 3, 4}) {
|
||||||
|
t.Error("unexpected order of calls:", nums)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChainRecover(t *testing.T) {
|
||||||
|
panickingJob := FuncJob(func() {
|
||||||
|
panic("panickingJob panics")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("panic exits job by default", func(t *testing.T) {
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err == nil {
|
||||||
|
t.Errorf("panic expected, but none received")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
NewChain().Then(panickingJob).
|
||||||
|
Run()
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Recovering JobWrapper recovers", func(t *testing.T) {
|
||||||
|
NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))).
|
||||||
|
Then(panickingJob).
|
||||||
|
Run()
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("composed with the *IfStillRunning wrappers", func(t *testing.T) {
|
||||||
|
NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))).
|
||||||
|
Then(panickingJob).
|
||||||
|
Run()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type countJob struct {
|
||||||
|
m sync.Mutex
|
||||||
|
started int
|
||||||
|
done int
|
||||||
|
delay time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *countJob) Run() {
|
||||||
|
j.m.Lock()
|
||||||
|
j.started++
|
||||||
|
j.m.Unlock()
|
||||||
|
time.Sleep(j.delay)
|
||||||
|
j.m.Lock()
|
||||||
|
j.done++
|
||||||
|
j.m.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *countJob) Started() int {
|
||||||
|
defer j.m.Unlock()
|
||||||
|
j.m.Lock()
|
||||||
|
return j.started
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *countJob) Done() int {
|
||||||
|
defer j.m.Unlock()
|
||||||
|
j.m.Lock()
|
||||||
|
return j.done
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChainDelayIfStillRunning(t *testing.T) {
|
||||||
|
|
||||||
|
t.Run("runs immediately", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete.
|
||||||
|
if c := j.Done(); c != 1 {
|
||||||
|
t.Errorf("expected job run once, immediately, got %d", c)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("second run immediate if first done", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go func() {
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
}()
|
||||||
|
time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete.
|
||||||
|
if c := j.Done(); c != 2 {
|
||||||
|
t.Errorf("expected job run twice, immediately, got %d", c)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("second run delayed if first not done", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
j.delay = 10 * time.Millisecond
|
||||||
|
wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go func() {
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// After 5ms, the first job is still in progress, and the second job was
|
||||||
|
// run but should be waiting for it to finish.
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
started, done := j.Started(), j.Done()
|
||||||
|
if started != 1 || done != 0 {
|
||||||
|
t.Error("expected first job started, but not finished, got", started, done)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the second job completes.
|
||||||
|
time.Sleep(25 * time.Millisecond)
|
||||||
|
started, done = j.Started(), j.Done()
|
||||||
|
if started != 2 || done != 2 {
|
||||||
|
t.Error("expected both jobs done, got", started, done)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChainSkipIfStillRunning(t *testing.T) {
|
||||||
|
|
||||||
|
t.Run("runs immediately", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete.
|
||||||
|
if c := j.Done(); c != 1 {
|
||||||
|
t.Errorf("expected job run once, immediately, got %d", c)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("second run immediate if first done", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go func() {
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
}()
|
||||||
|
time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete.
|
||||||
|
if c := j.Done(); c != 2 {
|
||||||
|
t.Errorf("expected job run twice, immediately, got %d", c)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("second run skipped if first not done", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
j.delay = 10 * time.Millisecond
|
||||||
|
wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
go func() {
|
||||||
|
go wrappedJob.Run()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
go wrappedJob.Run()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// After 5ms, the first job is still in progress, and the second job was
|
||||||
|
// already skipped.
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
started, done := j.Started(), j.Done()
|
||||||
|
if started != 1 || done != 0 {
|
||||||
|
t.Error("expected first job started, but not finished, got", started, done)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the first job completes and second does not run.
|
||||||
|
time.Sleep(25 * time.Millisecond)
|
||||||
|
started, done = j.Started(), j.Done()
|
||||||
|
if started != 1 || done != 1 {
|
||||||
|
t.Error("expected second job skipped, got", started, done)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skip 10 jobs on rapid fire", func(t *testing.T) {
|
||||||
|
var j countJob
|
||||||
|
j.delay = 10 * time.Millisecond
|
||||||
|
wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j)
|
||||||
|
for i := 0; i < 11; i++ {
|
||||||
|
go wrappedJob.Run()
|
||||||
|
}
|
||||||
|
time.Sleep(200 * time.Millisecond)
|
||||||
|
done := j.Done()
|
||||||
|
if done != 1 {
|
||||||
|
t.Error("expected 1 jobs executed, 10 jobs dropped, got", done)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("different jobs independent", func(t *testing.T) {
|
||||||
|
var j1, j2 countJob
|
||||||
|
j1.delay = 10 * time.Millisecond
|
||||||
|
j2.delay = 10 * time.Millisecond
|
||||||
|
chain := NewChain(SkipIfStillRunning(DiscardLogger))
|
||||||
|
wrappedJob1 := chain.Then(&j1)
|
||||||
|
wrappedJob2 := chain.Then(&j2)
|
||||||
|
for i := 0; i < 11; i++ {
|
||||||
|
go wrappedJob1.Run()
|
||||||
|
go wrappedJob2.Run()
|
||||||
|
}
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
var (
|
||||||
|
done1 = j1.Done()
|
||||||
|
done2 = j2.Done()
|
||||||
|
)
|
||||||
|
if done1 != 1 || done2 != 1 {
|
||||||
|
t.Error("expected both jobs executed once, got", done1, "and", done2)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
27  vendor/github.com/robfig/cron/v3/constantdelay.go  generated  vendored  Normal file
@ -0,0 +1,27 @@
package cron

import "time"

// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
	Delay time.Duration
}

// Every returns a crontab Schedule that activates once every duration.
// Delays of less than a second are not supported (will round up to 1 second).
// Any fields less than a Second are truncated.
func Every(duration time.Duration) ConstantDelaySchedule {
	if duration < time.Second {
		duration = time.Second
	}
	return ConstantDelaySchedule{
		Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
	}
}

// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
	return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}
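A minimal sketch of scheduling a job on a ConstantDelaySchedule through Cron.Schedule (defined later in this diff); the duration shows the sub-second truncation performed by Every:

```go
package main

import (
	"log"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// Every truncates to whole seconds, so 90.5s becomes a 90s delay.
	c.Schedule(cron.Every(90*time.Second+500*time.Millisecond), cron.FuncJob(func() {
		log.Println("tick")
	}))
	c.Start()
	select {} // keep running (illustrative)
}
```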
54  vendor/github.com/robfig/cron/v3/constantdelay_test.go  generated  vendored  Normal file
@ -0,0 +1,54 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConstantDelayNext(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
time string
|
||||||
|
delay time.Duration
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
// Simple cases
|
||||||
|
{"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"},
|
||||||
|
{"Mon Jul 9 14:59 2012", 15 * time.Minute, "Mon Jul 9 15:14 2012"},
|
||||||
|
{"Mon Jul 9 14:59:59 2012", 15 * time.Minute, "Mon Jul 9 15:14:59 2012"},
|
||||||
|
|
||||||
|
// Wrap around hours
|
||||||
|
{"Mon Jul 9 15:45 2012", 35 * time.Minute, "Mon Jul 9 16:20 2012"},
|
||||||
|
|
||||||
|
// Wrap around days
|
||||||
|
{"Mon Jul 9 23:46 2012", 14 * time.Minute, "Tue Jul 10 00:00 2012"},
|
||||||
|
{"Mon Jul 9 23:45 2012", 35 * time.Minute, "Tue Jul 10 00:20 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", 44*time.Minute + 24*time.Second, "Tue Jul 10 00:20:15 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", 25*time.Hour + 44*time.Minute + 24*time.Second, "Thu Jul 11 01:20:15 2012"},
|
||||||
|
|
||||||
|
// Wrap around months
|
||||||
|
{"Mon Jul 9 23:35 2012", 91*24*time.Hour + 25*time.Minute, "Thu Oct 9 00:00 2012"},
|
||||||
|
|
||||||
|
// Wrap around minute, hour, day, month, and year
|
||||||
|
{"Mon Dec 31 23:59:45 2012", 15 * time.Second, "Tue Jan 1 00:00:00 2013"},
|
||||||
|
|
||||||
|
// Round to nearest second on the delay
|
||||||
|
{"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"},
|
||||||
|
|
||||||
|
// Round up to 1 second if the duration is less.
|
||||||
|
{"Mon Jul 9 14:45:00 2012", 15 * time.Millisecond, "Mon Jul 9 14:45:01 2012"},
|
||||||
|
|
||||||
|
// Round to nearest second when calculating the next time.
|
||||||
|
{"Mon Jul 9 14:45:00.005 2012", 15 * time.Minute, "Mon Jul 9 15:00 2012"},
|
||||||
|
|
||||||
|
// Round to nearest second for both.
|
||||||
|
{"Mon Jul 9 14:45:00.005 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range tests {
|
||||||
|
actual := Every(c.delay).Next(getTime(c.time))
|
||||||
|
expected := getTime(c.expected)
|
||||||
|
if actual != expected {
|
||||||
|
t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.delay, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
355  vendor/github.com/robfig/cron/v3/cron.go  generated  vendored  Normal file
@ -0,0 +1,355 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Cron keeps track of any number of entries, invoking the associated func as
|
||||||
|
// specified by the schedule. It may be started, stopped, and the entries may
|
||||||
|
// be inspected while running.
|
||||||
|
type Cron struct {
|
||||||
|
entries []*Entry
|
||||||
|
chain Chain
|
||||||
|
stop chan struct{}
|
||||||
|
add chan *Entry
|
||||||
|
remove chan EntryID
|
||||||
|
snapshot chan chan []Entry
|
||||||
|
running bool
|
||||||
|
logger Logger
|
||||||
|
runningMu sync.Mutex
|
||||||
|
location *time.Location
|
||||||
|
parser ScheduleParser
|
||||||
|
nextID EntryID
|
||||||
|
jobWaiter sync.WaitGroup
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScheduleParser is an interface for schedule spec parsers that return a Schedule
|
||||||
|
type ScheduleParser interface {
|
||||||
|
Parse(spec string) (Schedule, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Job is an interface for submitted cron jobs.
|
||||||
|
type Job interface {
|
||||||
|
Run()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Schedule describes a job's duty cycle.
|
||||||
|
type Schedule interface {
|
||||||
|
// Next returns the next activation time, later than the given time.
|
||||||
|
// Next is invoked initially, and then each time the job is run.
|
||||||
|
Next(time.Time) time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// EntryID identifies an entry within a Cron instance
|
||||||
|
type EntryID int
|
||||||
|
|
||||||
|
// Entry consists of a schedule and the func to execute on that schedule.
|
||||||
|
type Entry struct {
|
||||||
|
// ID is the cron-assigned ID of this entry, which may be used to look up a
|
||||||
|
// snapshot or remove it.
|
||||||
|
ID EntryID
|
||||||
|
|
||||||
|
// Schedule on which this job should be run.
|
||||||
|
Schedule Schedule
|
||||||
|
|
||||||
|
// Next time the job will run, or the zero time if Cron has not been
|
||||||
|
// started or this entry's schedule is unsatisfiable
|
||||||
|
Next time.Time
|
||||||
|
|
||||||
|
// Prev is the last time this job was run, or the zero time if never.
|
||||||
|
Prev time.Time
|
||||||
|
|
||||||
|
// WrappedJob is the thing to run when the Schedule is activated.
|
||||||
|
WrappedJob Job
|
||||||
|
|
||||||
|
// Job is the thing that was submitted to cron.
|
||||||
|
// It is kept around so that user code that needs to get at the job later,
|
||||||
|
// e.g. via Entries() can do so.
|
||||||
|
Job Job
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid returns true if this is not the zero entry.
|
||||||
|
func (e Entry) Valid() bool { return e.ID != 0 }
|
||||||
|
|
||||||
|
// byTime is a wrapper for sorting the entry array by time
|
||||||
|
// (with zero time at the end).
|
||||||
|
type byTime []*Entry
|
||||||
|
|
||||||
|
func (s byTime) Len() int { return len(s) }
|
||||||
|
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
func (s byTime) Less(i, j int) bool {
|
||||||
|
// Two zero times should return false.
|
||||||
|
// Otherwise, zero is "greater" than any other time.
|
||||||
|
// (To sort it at the end of the list.)
|
||||||
|
if s[i].Next.IsZero() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if s[j].Next.IsZero() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return s[i].Next.Before(s[j].Next)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Cron job runner, modified by the given options.
|
||||||
|
//
|
||||||
|
// Available Settings
|
||||||
|
//
|
||||||
|
// Time Zone
|
||||||
|
// Description: The time zone in which schedules are interpreted
|
||||||
|
// Default: time.Local
|
||||||
|
//
|
||||||
|
// Parser
|
||||||
|
// Description: Parser converts cron spec strings into cron.Schedules.
|
||||||
|
// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron
|
||||||
|
//
|
||||||
|
// Chain
|
||||||
|
// Description: Wrap submitted jobs to customize behavior.
|
||||||
|
// Default: A chain that recovers panics and logs them to stderr.
|
||||||
|
//
|
||||||
|
// See "cron.With*" to modify the default behavior.
|
||||||
|
func New(opts ...Option) *Cron {
|
||||||
|
c := &Cron{
|
||||||
|
entries: nil,
|
||||||
|
chain: NewChain(),
|
||||||
|
add: make(chan *Entry),
|
||||||
|
stop: make(chan struct{}),
|
||||||
|
snapshot: make(chan chan []Entry),
|
||||||
|
remove: make(chan EntryID),
|
||||||
|
running: false,
|
||||||
|
runningMu: sync.Mutex{},
|
||||||
|
logger: DefaultLogger,
|
||||||
|
location: time.Local,
|
||||||
|
parser: standardParser,
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(c)
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// FuncJob is a wrapper that turns a func() into a cron.Job
|
||||||
|
type FuncJob func()
|
||||||
|
|
||||||
|
func (f FuncJob) Run() { f() }
|
||||||
|
|
||||||
|
// AddFunc adds a func to the Cron to be run on the given schedule.
|
||||||
|
// The spec is parsed using the time zone of this Cron instance as the default.
|
||||||
|
// An opaque ID is returned that can be used to later remove it.
|
||||||
|
func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
|
||||||
|
return c.AddJob(spec, FuncJob(cmd))
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddJob adds a Job to the Cron to be run on the given schedule.
|
||||||
|
// The spec is parsed using the time zone of this Cron instance as the default.
|
||||||
|
// An opaque ID is returned that can be used to later remove it.
|
||||||
|
func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
|
||||||
|
schedule, err := c.parser.Parse(spec)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return c.Schedule(schedule, cmd), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Schedule adds a Job to the Cron to be run on the given schedule.
|
||||||
|
// The job is wrapped with the configured Chain.
|
||||||
|
func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
defer c.runningMu.Unlock()
|
||||||
|
c.nextID++
|
||||||
|
entry := &Entry{
|
||||||
|
ID: c.nextID,
|
||||||
|
Schedule: schedule,
|
||||||
|
WrappedJob: c.chain.Then(cmd),
|
||||||
|
Job: cmd,
|
||||||
|
}
|
||||||
|
if !c.running {
|
||||||
|
c.entries = append(c.entries, entry)
|
||||||
|
} else {
|
||||||
|
c.add <- entry
|
||||||
|
}
|
||||||
|
return entry.ID
|
||||||
|
}
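
// Editorial sketch, not part of the upstream library: Schedule accepts any
// value implementing the Schedule interface, so fixed intervals can bypass the
// spec parser entirely. Every and FuncJob are used here exactly as this
// package's own tests use them.
//
//	c := cron.New()
//	id := c.Schedule(cron.Every(10*time.Minute), cron.FuncJob(func() {
//		fmt.Println("runs every 10 minutes, starting 10 minutes after Start")
//	}))
//	c.Start()
//	// ... later, cancel just this job:
//	c.Remove(id)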
|
||||||
|
|
||||||
|
// Entries returns a snapshot of the cron entries.
|
||||||
|
func (c *Cron) Entries() []Entry {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
defer c.runningMu.Unlock()
|
||||||
|
if c.running {
|
||||||
|
replyChan := make(chan []Entry, 1)
|
||||||
|
c.snapshot <- replyChan
|
||||||
|
return <-replyChan
|
||||||
|
}
|
||||||
|
return c.entrySnapshot()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Location gets the time zone location
|
||||||
|
func (c *Cron) Location() *time.Location {
|
||||||
|
return c.location
|
||||||
|
}
|
||||||
|
|
||||||
|
// Entry returns a snapshot of the given entry, or a zero Entry if it couldn't be found.
|
||||||
|
func (c *Cron) Entry(id EntryID) Entry {
|
||||||
|
for _, entry := range c.Entries() {
|
||||||
|
if id == entry.ID {
|
||||||
|
return entry
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Entry{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove an entry from being run in the future.
|
||||||
|
func (c *Cron) Remove(id EntryID) {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
defer c.runningMu.Unlock()
|
||||||
|
if c.running {
|
||||||
|
c.remove <- id
|
||||||
|
} else {
|
||||||
|
c.removeEntry(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the cron scheduler in its own goroutine, or no-op if already started.
|
||||||
|
func (c *Cron) Start() {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
defer c.runningMu.Unlock()
|
||||||
|
if c.running {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.running = true
|
||||||
|
go c.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the cron scheduler, or no-op if already running.
|
||||||
|
func (c *Cron) Run() {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
if c.running {
|
||||||
|
c.runningMu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.running = true
|
||||||
|
c.runningMu.Unlock()
|
||||||
|
c.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
// run the scheduler. This is private just due to the need to synchronize
// access to the 'running' state variable.
|
||||||
|
func (c *Cron) run() {
|
||||||
|
c.logger.Info("start")
|
||||||
|
|
||||||
|
// Figure out the next activation times for each entry.
|
||||||
|
now := c.now()
|
||||||
|
for _, entry := range c.entries {
|
||||||
|
entry.Next = entry.Schedule.Next(now)
|
||||||
|
c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
// Determine the next entry to run.
|
||||||
|
sort.Sort(byTime(c.entries))
|
||||||
|
|
||||||
|
var timer *time.Timer
|
||||||
|
if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
|
||||||
|
// If there are no entries yet, just sleep - it still handles new entries
|
||||||
|
// and stop requests.
|
||||||
|
timer = time.NewTimer(100000 * time.Hour)
|
||||||
|
} else {
|
||||||
|
timer = time.NewTimer(c.entries[0].Next.Sub(now))
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case now = <-timer.C:
|
||||||
|
now = now.In(c.location)
|
||||||
|
c.logger.Info("wake", "now", now)
|
||||||
|
|
||||||
|
// Run every entry whose next time was less than now
|
||||||
|
for _, e := range c.entries {
|
||||||
|
if e.Next.After(now) || e.Next.IsZero() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c.startJob(e.WrappedJob)
|
||||||
|
e.Prev = e.Next
|
||||||
|
e.Next = e.Schedule.Next(now)
|
||||||
|
c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
|
||||||
|
}
|
||||||
|
|
||||||
|
case newEntry := <-c.add:
|
||||||
|
timer.Stop()
|
||||||
|
now = c.now()
|
||||||
|
newEntry.Next = newEntry.Schedule.Next(now)
|
||||||
|
c.entries = append(c.entries, newEntry)
|
||||||
|
c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
|
||||||
|
|
||||||
|
case replyChan := <-c.snapshot:
|
||||||
|
replyChan <- c.entrySnapshot()
|
||||||
|
continue
|
||||||
|
|
||||||
|
case <-c.stop:
|
||||||
|
timer.Stop()
|
||||||
|
c.logger.Info("stop")
|
||||||
|
return
|
||||||
|
|
||||||
|
case id := <-c.remove:
|
||||||
|
timer.Stop()
|
||||||
|
now = c.now()
|
||||||
|
c.removeEntry(id)
|
||||||
|
c.logger.Info("removed", "entry", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startJob runs the given job in a new goroutine.
|
||||||
|
func (c *Cron) startJob(j Job) {
|
||||||
|
c.jobWaiter.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer c.jobWaiter.Done()
|
||||||
|
j.Run()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// now returns current time in c location
|
||||||
|
func (c *Cron) now() time.Time {
|
||||||
|
return time.Now().In(c.location)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
|
||||||
|
// A context is returned so the caller can wait for running jobs to complete.
|
||||||
|
func (c *Cron) Stop() context.Context {
|
||||||
|
c.runningMu.Lock()
|
||||||
|
defer c.runningMu.Unlock()
|
||||||
|
if c.running {
|
||||||
|
c.stop <- struct{}{}
|
||||||
|
c.running = false
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
go func() {
|
||||||
|
c.jobWaiter.Wait()
|
||||||
|
cancel()
|
||||||
|
}()
|
||||||
|
return ctx
|
||||||
|
}
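
// Editorial sketch, not part of the upstream library: using the returned
// context to wait for in-flight jobs during shutdown, with a grace period.
//
//	ctx := c.Stop() // no new jobs are started after this
//	select {
//	case <-ctx.Done():
//		// all running jobs have returned
//	case <-time.After(30 * time.Second):
//		// give up waiting after the grace period
//	}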
|
||||||
|
|
||||||
|
// entrySnapshot returns a copy of the current cron entry list.
|
||||||
|
func (c *Cron) entrySnapshot() []Entry {
|
||||||
|
var entries = make([]Entry, len(c.entries))
|
||||||
|
for i, e := range c.entries {
|
||||||
|
entries[i] = *e
|
||||||
|
}
|
||||||
|
return entries
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cron) removeEntry(id EntryID) {
|
||||||
|
var entries []*Entry
|
||||||
|
for _, e := range c.entries {
|
||||||
|
if e.ID != id {
|
||||||
|
entries = append(entries, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.entries = entries
|
||||||
|
}
702 vendor/github.com/robfig/cron/v3/cron_test.go generated vendored Normal file
@ -0,0 +1,702 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Many tests schedule a job for every second, and then wait at most a second
|
||||||
|
// for it to run. This amount is just slightly larger than 1 second to
|
||||||
|
// compensate for a few milliseconds of runtime.
|
||||||
|
const OneSecond = 1*time.Second + 50*time.Millisecond
|
||||||
|
|
||||||
|
type syncWriter struct {
|
||||||
|
wr bytes.Buffer
|
||||||
|
m sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *syncWriter) Write(data []byte) (n int, err error) {
|
||||||
|
sw.m.Lock()
|
||||||
|
n, err = sw.wr.Write(data)
|
||||||
|
sw.m.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sw *syncWriter) String() string {
|
||||||
|
sw.m.Lock()
|
||||||
|
defer sw.m.Unlock()
|
||||||
|
return sw.wr.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBufLogger(sw *syncWriter) Logger {
|
||||||
|
return PrintfLogger(log.New(sw, "", log.LstdFlags))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFuncPanicRecovery(t *testing.T) {
|
||||||
|
var buf syncWriter
|
||||||
|
cron := New(WithParser(secondParser),
|
||||||
|
WithChain(Recover(newBufLogger(&buf))))
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
cron.AddFunc("* * * * * ?", func() {
|
||||||
|
panic("YOLO")
|
||||||
|
})
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
if !strings.Contains(buf.String(), "YOLO") {
|
||||||
|
t.Error("expected a panic to be logged, got none")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type DummyJob struct{}
|
||||||
|
|
||||||
|
func (d DummyJob) Run() {
|
||||||
|
panic("YOLO")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJobPanicRecovery(t *testing.T) {
|
||||||
|
var job DummyJob
|
||||||
|
|
||||||
|
var buf syncWriter
|
||||||
|
cron := New(WithParser(secondParser),
|
||||||
|
WithChain(Recover(newBufLogger(&buf))))
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
cron.AddJob("* * * * * ?", job)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
if !strings.Contains(buf.String(), "YOLO") {
|
||||||
|
t.Error("expected a panic to be logged, got none")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start and stop cron with no entries.
|
||||||
|
func TestNoEntries(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Fatal("expected cron will be stopped immediately")
|
||||||
|
case <-stop(cron):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start, stop, then add an entry. Verify entry doesn't run.
|
||||||
|
func TestStopCausesJobsToNotRun(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
cron.Stop()
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
// No job ran!
|
||||||
|
case <-wait(wg):
|
||||||
|
t.Fatal("expected stopped cron does not run any job")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a job, start cron, expect it runs.
|
||||||
|
func TestAddBeforeRunning(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
// Give cron 2 seconds to run our job (which is always activated).
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Fatal("expected job runs")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start cron, add a job, expect it runs.
|
||||||
|
func TestAddWhileRunning(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Fatal("expected job runs")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test for #34. Adding a job after calling start results in multiple job invocations
|
||||||
|
func TestAddWhileRunningWithDelay(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
var calls int64
|
||||||
|
cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) })
|
||||||
|
|
||||||
|
<-time.After(OneSecond)
|
||||||
|
if atomic.LoadInt64(&calls) != 1 {
|
||||||
|
t.Errorf("called %d times, expected 1\n", calls)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a job, remove a job, start cron, expect nothing runs.
|
||||||
|
func TestRemoveBeforeRunning(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
cron.Remove(id)
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
// Success, shouldn't run
|
||||||
|
case <-wait(wg):
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start cron, add a job, remove it, expect it doesn't run.
|
||||||
|
func TestRemoveWhileRunning(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
cron.Remove(id)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
case <-wait(wg):
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test timing with Entries.
|
||||||
|
func TestSnapshotEntries(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := New()
|
||||||
|
cron.AddFunc("@every 2s", func() { wg.Done() })
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
// Cron should fire in 2 seconds. After 1 second, call Entries.
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
cron.Entries()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Even though Entries was called, the cron should fire at the 2 second mark.
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Error("expected job runs at 2 second mark")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the entries are correctly sorted.
|
||||||
|
// Add a bunch of long-in-the-future entries, and an immediate entry, and ensure
|
||||||
|
// that the immediate entry runs immediately.
|
||||||
|
// Also: Test that multiple jobs run in the same instant.
|
||||||
|
func TestMultipleEntries(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("0 0 0 1 1 ?", func() {})
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
id1, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() })
|
||||||
|
id2, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() })
|
||||||
|
cron.AddFunc("0 0 0 31 12 ?", func() {})
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
|
||||||
|
cron.Remove(id1)
|
||||||
|
cron.Start()
|
||||||
|
cron.Remove(id2)
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Error("expected job run in proper order")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test running the same job twice.
|
||||||
|
func TestRunningJobTwice(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("0 0 0 1 1 ?", func() {})
|
||||||
|
cron.AddFunc("0 0 0 31 12 ?", func() {})
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(2 * OneSecond):
|
||||||
|
t.Error("expected job fires 2 times")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRunningMultipleSchedules(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("0 0 0 1 1 ?", func() {})
|
||||||
|
cron.AddFunc("0 0 0 31 12 ?", func() {})
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
cron.Schedule(Every(time.Minute), FuncJob(func() {}))
|
||||||
|
cron.Schedule(Every(time.Second), FuncJob(func() { wg.Done() }))
|
||||||
|
cron.Schedule(Every(time.Hour), FuncJob(func() {}))
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(2 * OneSecond):
|
||||||
|
t.Error("expected job fires 2 times")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the cron is run in the local time zone (as opposed to UTC).
|
||||||
|
func TestLocalTimezone(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
// FIX: Issue #205
|
||||||
|
// This calculation doesn't work in seconds 58 or 59.
|
||||||
|
// Take the easy way out and sleep.
|
||||||
|
if now.Second() >= 58 {
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
now = time.Now()
|
||||||
|
}
|
||||||
|
spec := fmt.Sprintf("%d,%d %d %d %d %d ?",
|
||||||
|
now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month())
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc(spec, func() { wg.Done() })
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond * 2):
|
||||||
|
t.Error("expected job fires 2 times")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the cron is run in the given time zone (as opposed to local).
|
||||||
|
func TestNonLocalTimezone(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
loc, err := time.LoadLocation("Atlantic/Cape_Verde")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Failed to load time zone Atlantic/Cape_Verde: %+v", err)
|
||||||
|
t.Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now().In(loc)
|
||||||
|
// FIX: Issue #205
|
||||||
|
// This calculation doesn't work in seconds 58 or 59.
|
||||||
|
// Take the easy way out and sleep.
|
||||||
|
if now.Second() >= 58 {
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
now = time.Now().In(loc)
|
||||||
|
}
|
||||||
|
spec := fmt.Sprintf("%d,%d %d %d %d %d ?",
|
||||||
|
now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month())
|
||||||
|
|
||||||
|
cron := New(WithLocation(loc), WithParser(secondParser))
|
||||||
|
cron.AddFunc(spec, func() { wg.Done() })
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond * 2):
|
||||||
|
t.Error("expected job fires 2 times")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that calling stop before start silently returns without
|
||||||
|
// blocking the stop channel.
|
||||||
|
func TestStopWithoutStart(t *testing.T) {
|
||||||
|
cron := New()
|
||||||
|
cron.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
type testJob struct {
|
||||||
|
wg *sync.WaitGroup
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t testJob) Run() {
|
||||||
|
t.wg.Done()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that adding an invalid job spec returns an error
|
||||||
|
func TestInvalidJobSpec(t *testing.T) {
|
||||||
|
cron := New()
|
||||||
|
_, err := cron.AddJob("this will not parse", nil)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("expected an error with invalid spec, got nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test blocking run method behaves as Start()
|
||||||
|
func TestBlockingRun(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * ?", func() { wg.Done() })
|
||||||
|
|
||||||
|
var unblockChan = make(chan struct{})
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
cron.Run()
|
||||||
|
close(unblockChan)
|
||||||
|
}()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.Error("expected job fires")
|
||||||
|
case <-unblockChan:
|
||||||
|
t.Error("expected that Run() blocks")
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that double-running is a no-op
|
||||||
|
func TestStartNoop(t *testing.T) {
|
||||||
|
var tickChan = make(chan struct{}, 2)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * ?", func() {
|
||||||
|
tickChan <- struct{}{}
|
||||||
|
})
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
// Wait for the first firing to ensure the runner is going
|
||||||
|
<-tickChan
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
|
||||||
|
<-tickChan
|
||||||
|
|
||||||
|
// Fail if this job fires again in a short period, indicating a double-run
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
case <-tickChan:
|
||||||
|
t.Error("expected job fires exactly twice")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simple test using Runnables.
|
||||||
|
func TestJob(t *testing.T) {
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddJob("0 0 0 30 Feb ?", testJob{wg, "job0"})
|
||||||
|
cron.AddJob("0 0 0 1 1 ?", testJob{wg, "job1"})
|
||||||
|
job2, _ := cron.AddJob("* * * * * ?", testJob{wg, "job2"})
|
||||||
|
cron.AddJob("1 0 0 1 1 ?", testJob{wg, "job3"})
|
||||||
|
cron.Schedule(Every(5*time.Second+5*time.Nanosecond), testJob{wg, "job4"})
|
||||||
|
job5 := cron.Schedule(Every(5*time.Minute), testJob{wg, "job5"})
|
||||||
|
|
||||||
|
// Test getting an Entry pre-Start.
|
||||||
|
if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" {
|
||||||
|
t.Error("wrong job retrieved:", actualName)
|
||||||
|
}
|
||||||
|
if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" {
|
||||||
|
t.Error("wrong job retrieved:", actualName)
|
||||||
|
}
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(OneSecond):
|
||||||
|
t.FailNow()
|
||||||
|
case <-wait(wg):
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the entries are in the right order.
|
||||||
|
expecteds := []string{"job2", "job4", "job5", "job1", "job3", "job0"}
|
||||||
|
|
||||||
|
var actuals []string
|
||||||
|
for _, entry := range cron.Entries() {
|
||||||
|
actuals = append(actuals, entry.Job.(testJob).name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, expected := range expecteds {
|
||||||
|
if actuals[i] != expected {
|
||||||
|
t.Fatalf("Jobs not in the right order. (expected) %s != %s (actual)", expecteds, actuals)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test getting Entries.
|
||||||
|
if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" {
|
||||||
|
t.Error("wrong job retrieved:", actualName)
|
||||||
|
}
|
||||||
|
if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" {
|
||||||
|
t.Error("wrong job retrieved:", actualName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Issue #206
|
||||||
|
// Ensure that the next run of a job after removing an entry is accurate.
|
||||||
|
func TestScheduleAfterRemoval(t *testing.T) {
|
||||||
|
var wg1 sync.WaitGroup
|
||||||
|
var wg2 sync.WaitGroup
|
||||||
|
wg1.Add(1)
|
||||||
|
wg2.Add(1)
|
||||||
|
|
||||||
|
// The first time this job is run, set a timer and remove the other job
|
||||||
|
// 750ms later. Correct behavior would be to still run the job again in
|
||||||
|
// 250ms, but the bug would cause it to run instead 1s later.
|
||||||
|
|
||||||
|
var calls int
|
||||||
|
var mu sync.Mutex
|
||||||
|
|
||||||
|
cron := newWithSeconds()
|
||||||
|
hourJob := cron.Schedule(Every(time.Hour), FuncJob(func() {}))
|
||||||
|
cron.Schedule(Every(time.Second), FuncJob(func() {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
switch calls {
|
||||||
|
case 0:
|
||||||
|
wg1.Done()
|
||||||
|
calls++
|
||||||
|
case 1:
|
||||||
|
time.Sleep(750 * time.Millisecond)
|
||||||
|
cron.Remove(hourJob)
|
||||||
|
calls++
|
||||||
|
case 2:
|
||||||
|
calls++
|
||||||
|
wg2.Done()
|
||||||
|
case 3:
|
||||||
|
panic("unexpected 3rd call")
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
|
||||||
|
// the first run might be any length of time 0 - 1s, since the schedule
|
||||||
|
// rounds to the second. wait for the first run to true up.
|
||||||
|
wg1.Wait()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-time.After(2 * OneSecond):
|
||||||
|
t.Error("expected job fires 2 times")
|
||||||
|
case <-wait(&wg2):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ZeroSchedule struct{}
|
||||||
|
|
||||||
|
func (*ZeroSchedule) Next(time.Time) time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that job without time does not run
|
||||||
|
func TestJobWithZeroTimeDoesNotRun(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
var calls int64
|
||||||
|
cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) })
|
||||||
|
cron.Schedule(new(ZeroSchedule), FuncJob(func() { t.Error("expected zero task will not run") }))
|
||||||
|
cron.Start()
|
||||||
|
defer cron.Stop()
|
||||||
|
<-time.After(OneSecond)
|
||||||
|
if atomic.LoadInt64(&calls) != 1 {
|
||||||
|
t.Errorf("called %d times, expected 1\n", calls)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStopAndWait(t *testing.T) {
|
||||||
|
t.Run("nothing running, returns immediately", func(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
ctx := cron.Stop()
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
t.Error("context was not done immediately")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("repeated calls to Stop", func(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.Start()
|
||||||
|
_ = cron.Stop()
|
||||||
|
time.Sleep(time.Millisecond)
|
||||||
|
ctx := cron.Stop()
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
t.Error("context was not done immediately")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("a couple fast jobs added, still returns immediately", func(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
cron.Start()
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
ctx := cron.Stop()
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
t.Error("context was not done immediately")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("a couple fast jobs and a slow job added, waits for slow job", func(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
cron.Start()
|
||||||
|
cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) })
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
|
ctx := cron.Stop()
|
||||||
|
|
||||||
|
// Verify that it is not done for at least 750ms
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Error("context was done too quickly immediately")
|
||||||
|
case <-time.After(750 * time.Millisecond):
|
||||||
|
// expected, because the job sleeping for 1 second is still running
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that it IS done in the next 500ms (giving 250ms buffer)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// expected
|
||||||
|
case <-time.After(1500 * time.Millisecond):
|
||||||
|
t.Error("context not done after job should have completed")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("repeated calls to stop, waiting for completion and after", func(t *testing.T) {
|
||||||
|
cron := newWithSeconds()
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) })
|
||||||
|
cron.Start()
|
||||||
|
cron.AddFunc("* * * * * *", func() {})
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
ctx := cron.Stop()
|
||||||
|
ctx2 := cron.Stop()
|
||||||
|
|
||||||
|
// Verify that it is not done for at least 1500ms
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
t.Error("context was done too quickly immediately")
|
||||||
|
case <-ctx2.Done():
|
||||||
|
t.Error("context2 was done too quickly immediately")
|
||||||
|
case <-time.After(1500 * time.Millisecond):
|
||||||
|
// expected, because the job sleeping for 2 seconds is still running
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that it IS done in the next 1s (giving 500ms buffer)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
// expected
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
t.Error("context not done after job should have completed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that ctx2 is also done.
|
||||||
|
select {
|
||||||
|
case <-ctx2.Done():
|
||||||
|
// expected
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
t.Error("context2 not done even though context1 is")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that a new context retrieved from stop is immediately done.
|
||||||
|
ctx3 := cron.Stop()
|
||||||
|
select {
|
||||||
|
case <-ctx3.Done():
|
||||||
|
// expected
|
||||||
|
case <-time.After(time.Millisecond):
|
||||||
|
t.Error("context not done even when cron Stop is completed")
|
||||||
|
}
|
||||||
|
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMultiThreadedStartAndStop(t *testing.T) {
|
||||||
|
cron := New()
|
||||||
|
go cron.Run()
|
||||||
|
time.Sleep(2 * time.Millisecond)
|
||||||
|
cron.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
func wait(wg *sync.WaitGroup) chan bool {
|
||||||
|
ch := make(chan bool)
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
ch <- true
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
func stop(cron *Cron) chan bool {
|
||||||
|
ch := make(chan bool)
|
||||||
|
go func() {
|
||||||
|
cron.Stop()
|
||||||
|
ch <- true
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// newWithSeconds returns a Cron with the seconds field enabled.
|
||||||
|
func newWithSeconds() *Cron {
|
||||||
|
return New(WithParser(secondParser), WithChain())
|
||||||
|
}
231 vendor/github.com/robfig/cron/v3/doc.go generated vendored Normal file
@ -0,0 +1,231 @@
/*
|
||||||
|
Package cron implements a cron spec parser and job runner.
|
||||||
|
|
||||||
|
Installation
|
||||||
|
|
||||||
|
To download the specific tagged release, run:
|
||||||
|
|
||||||
|
go get github.com/robfig/cron/v3@v3.0.0
|
||||||
|
|
||||||
|
Import it in your program as:
|
||||||
|
|
||||||
|
import "github.com/robfig/cron/v3"
|
||||||
|
|
||||||
|
It requires Go 1.11 or later due to usage of Go Modules.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
|
||||||
|
Callers may register Funcs to be invoked on a given schedule. Cron will run
|
||||||
|
them in their own goroutines.
|
||||||
|
|
||||||
|
c := cron.New()
|
||||||
|
c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
|
||||||
|
c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
|
||||||
|
c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
|
||||||
|
c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
|
||||||
|
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
|
||||||
|
c.Start()
|
||||||
|
..
|
||||||
|
// Funcs are invoked in their own goroutine, asynchronously.
|
||||||
|
...
|
||||||
|
// Funcs may also be added to a running Cron
|
||||||
|
c.AddFunc("@daily", func() { fmt.Println("Every day") })
|
||||||
|
..
|
||||||
|
// Inspect the cron job entries' next and previous run times.
|
||||||
|
inspect(c.Entries())
|
||||||
|
..
|
||||||
|
c.Stop() // Stop the scheduler (does not stop any jobs already running).
|
||||||
|
|
||||||
|
CRON Expression Format
|
||||||
|
|
||||||
|
A cron expression represents a set of times, using 5 space-separated fields.
|
||||||
|
|
||||||
|
Field name | Mandatory? | Allowed values | Allowed special characters
|
||||||
|
---------- | ---------- | -------------- | --------------------------
|
||||||
|
Minutes | Yes | 0-59 | * / , -
|
||||||
|
Hours | Yes | 0-23 | * / , -
|
||||||
|
Day of month | Yes | 1-31 | * / , - ?
|
||||||
|
Month | Yes | 1-12 or JAN-DEC | * / , -
|
||||||
|
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
|
||||||
|
|
||||||
|
Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
|
||||||
|
"sun" are equally accepted.
|
||||||
|
|
||||||
|
The specific interpretation of the format is based on the Cron Wikipedia page:
|
||||||
|
https://en.wikipedia.org/wiki/Cron
|
||||||
|
|
||||||
|
Alternative Formats
|
||||||
|
|
||||||
|
Alternative Cron expression formats support other fields like seconds. You can
|
||||||
|
implement that by creating a custom Parser as follows.
|
||||||
|
|
||||||
|
cron.New(
|
||||||
|
cron.WithParser(
|
||||||
|
cron.NewParser(
|
||||||
|
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
|
||||||
|
|
||||||
|
Since adding Seconds is the most common modification to the standard cron spec,
|
||||||
|
cron provides a builtin function to do that, which is equivalent to the custom
|
||||||
|
parser you saw earlier, except that its seconds field is REQUIRED:
|
||||||
|
|
||||||
|
cron.New(cron.WithSeconds())
|
||||||
|
|
||||||
|
That emulates Quartz, the most popular alternative Cron schedule format:
|
||||||
|
http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
|
||||||
|
|
||||||
|
Special Characters
|
||||||
|
|
||||||
|
Asterisk ( * )
|
||||||
|
|
||||||
|
The asterisk indicates that the cron expression will match for all values of the
|
||||||
|
field; e.g., using an asterisk in the 5th field (month) would indicate every
|
||||||
|
month.
|
||||||
|
|
||||||
|
Slash ( / )
|
||||||
|
|
||||||
|
Slashes are used to describe increments of ranges. For example 3-59/15 in the
|
||||||
|
1st field (minutes) would indicate the 3rd minute of the hour and every 15
|
||||||
|
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
|
||||||
|
that is, an increment over the largest possible range of the field. The form
|
||||||
|
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
|
||||||
|
increment until the end of that specific range. It does not wrap around.
|
||||||
|
|
||||||
|
Comma ( , )
|
||||||
|
|
||||||
|
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
|
||||||
|
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
|
||||||
|
|
||||||
|
Hyphen ( - )
|
||||||
|
|
||||||
|
Hyphens are used to define ranges. For example, 9-17 would indicate every
|
||||||
|
hour between 9am and 5pm inclusive.
|
||||||
|
|
||||||
|
Question mark ( ? )
|
||||||
|
|
||||||
|
Question mark may be used instead of '*' for leaving either day-of-month or
|
||||||
|
day-of-week blank.
|
||||||
|
|
||||||
|
Predefined schedules
|
||||||
|
|
||||||
|
You may use one of several pre-defined schedules in place of a cron expression.
|
||||||
|
|
||||||
|
Entry | Description | Equivalent To
|
||||||
|
----- | ----------- | -------------
|
||||||
|
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
|
||||||
|
@monthly | Run once a month, midnight, first of month | 0 0 1 * *
|
||||||
|
@weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
|
||||||
|
@daily (or @midnight) | Run once a day, midnight | 0 0 * * *
|
||||||
|
@hourly | Run once an hour, beginning of hour | 0 * * * *
|
||||||
|
|
||||||
|
Intervals
|
||||||
|
|
||||||
|
You may also schedule a job to execute at fixed intervals, starting at the time it's added
|
||||||
|
or cron is run. This is supported by formatting the cron spec like this:
|
||||||
|
|
||||||
|
@every <duration>
|
||||||
|
|
||||||
|
where "duration" is a string accepted by time.ParseDuration
|
||||||
|
(http://golang.org/pkg/time/#ParseDuration).
|
||||||
|
|
||||||
|
For example, "@every 1h30m10s" would indicate a schedule that activates after
|
||||||
|
1 hour, 30 minutes, 10 seconds, and then every interval after that.
|
||||||
|
|
||||||
|
Note: The interval does not take the job runtime into account. For example,
|
||||||
|
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
|
||||||
|
it will have only 2 minutes of idle time between each run.
|
||||||
|
|
||||||
|
Time zones
|
||||||
|
|
||||||
|
By default, all interpretation and scheduling is done in the machine's local
|
||||||
|
time zone (time.Local). You can specify a different time zone on construction:
|
||||||
|
|
||||||
|
cron.New(
|
||||||
|
cron.WithLocation(time.UTC))
|
||||||
|
|
||||||
|
Individual cron schedules may also override the time zone they are to be
|
||||||
|
interpreted in by providing an additional space-separated field at the beginning
|
||||||
|
of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
# Runs at 6am in time.Local
|
||||||
|
cron.New().AddFunc("0 6 * * ?", ...)
|
||||||
|
|
||||||
|
# Runs at 6am in America/New_York
|
||||||
|
nyc, _ := time.LoadLocation("America/New_York")
|
||||||
|
c := cron.New(cron.WithLocation(nyc))
|
||||||
|
c.AddFunc("0 6 * * ?", ...)
|
||||||
|
|
||||||
|
# Runs at 6am in Asia/Tokyo
|
||||||
|
cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
||||||
|
|
||||||
|
# Runs at 6am in Asia/Tokyo
|
||||||
|
c := cron.New(cron.WithLocation(nyc))
|
||||||
|
c.SetLocation("America/New_York")
|
||||||
|
c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
|
||||||
|
|
||||||
|
The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
|
||||||
|
|
||||||
|
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
|
||||||
|
not be run!
|
||||||
|
|
||||||
|
Job Wrappers
|
||||||
|
|
||||||
|
A Cron runner may be configured with a chain of job wrappers to add
|
||||||
|
cross-cutting functionality to all submitted jobs. For example, they may be used
|
||||||
|
to achieve the following effects:
|
||||||
|
|
||||||
|
- Recover any panics from jobs (activated by default)
|
||||||
|
- Delay a job's execution if the previous run hasn't completed yet
|
||||||
|
- Skip a job's execution if the previous run hasn't completed yet
|
||||||
|
- Log each job's invocations
|
||||||
|
|
||||||
|
Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
|
||||||
|
|
||||||
|
cron.New(cron.WithChain(
|
||||||
|
cron.SkipIfStillRunning(logger),
|
||||||
|
))
|
||||||
|
|
||||||
|
Install wrappers for individual jobs by explicitly wrapping them:
|
||||||
|
|
||||||
|
job = cron.NewChain(
|
||||||
|
cron.SkipIfStillRunning(logger),
|
||||||
|
).Then(job)
|
||||||
|
|
||||||
|
Thread safety
|
||||||
|
|
||||||
|
Since the Cron service runs concurrently with the calling code, some amount of
|
||||||
|
care must be taken to ensure proper synchronization.
|
||||||
|
|
||||||
|
All cron methods are designed to be correctly synchronized as long as the caller
|
||||||
|
ensures that invocations have a clear happens-before ordering between them.
|
||||||
|
|
||||||
|
Logging
|
||||||
|
|
||||||
|
Cron defines a Logger interface that is a subset of the one defined in
|
||||||
|
github.com/go-logr/logr. It has two logging levels (Info and Error), and
|
||||||
|
parameters are key/value pairs. This makes it possible for cron logging to plug
|
||||||
|
into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
|
||||||
|
to wrap the standard library *log.Logger.
|
||||||
|
|
||||||
|
For additional insight into Cron operations, verbose logging may be activated
|
||||||
|
which will record job runs, scheduling decisions, and added or removed jobs.
|
||||||
|
Activate it with a one-off logger as follows:
|
||||||
|
|
||||||
|
cron.New(
|
||||||
|
cron.WithLogger(
|
||||||
|
cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))
|
||||||
|
|
||||||
|
|
||||||
|
Implementation
|
||||||
|
|
||||||
|
Cron entries are stored in an array, sorted by their next activation time. Cron
|
||||||
|
sleeps until the next job is due to be run.
|
||||||
|
|
||||||
|
Upon waking:
|
||||||
|
- it runs each entry that is active on that second
|
||||||
|
- it calculates the next run times for the jobs that were run
|
||||||
|
- it re-sorts the array of entries by next activation time.
|
||||||
|
- it goes to sleep until the soonest job.
|
||||||
|
*/
|
||||||
|
package cron
3 vendor/github.com/robfig/cron/v3/go.mod generated vendored Normal file
@ -0,0 +1,3 @@
module github.com/robfig/cron/v3
|
||||||
|
|
||||||
|
go 1.12
86 vendor/github.com/robfig/cron/v3/logger.go generated vendored Normal file
@ -0,0 +1,86 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultLogger is used by Cron if none is specified.
|
||||||
|
var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
|
||||||
|
|
||||||
|
// DiscardLogger can be used by callers to discard all log messages.
|
||||||
|
var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
|
||||||
|
|
||||||
|
// Logger is the interface used in this package for logging, so that any backend
|
||||||
|
// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
|
||||||
|
type Logger interface {
|
||||||
|
// Info logs routine messages about cron's operation.
|
||||||
|
Info(msg string, keysAndValues ...interface{})
|
||||||
|
// Error logs an error condition.
|
||||||
|
Error(err error, msg string, keysAndValues ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
|
||||||
|
// into an implementation of the Logger interface which logs errors only.
|
||||||
|
func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||||
|
return printfLogger{l, false}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
|
||||||
|
// "log") into an implementation of the Logger interface which logs everything.
|
||||||
|
func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
|
||||||
|
return printfLogger{l, true}
|
||||||
|
}
|
||||||
|
|
||||||
|
type printfLogger struct {
|
||||||
|
logger interface{ Printf(string, ...interface{}) }
|
||||||
|
logInfo bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
|
||||||
|
if pl.logInfo {
|
||||||
|
keysAndValues = formatTimes(keysAndValues)
|
||||||
|
pl.logger.Printf(
|
||||||
|
formatString(len(keysAndValues)),
|
||||||
|
append([]interface{}{msg}, keysAndValues...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
|
||||||
|
keysAndValues = formatTimes(keysAndValues)
|
||||||
|
pl.logger.Printf(
|
||||||
|
formatString(len(keysAndValues)+2),
|
||||||
|
append([]interface{}{msg, "error", err}, keysAndValues...)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatString returns a logfmt-like format string for the number of
|
||||||
|
// key/values.
|
||||||
|
func formatString(numKeysAndValues int) string {
|
||||||
|
var sb strings.Builder
|
||||||
|
sb.WriteString("%s")
|
||||||
|
if numKeysAndValues > 0 {
|
||||||
|
sb.WriteString(", ")
|
||||||
|
}
|
||||||
|
for i := 0; i < numKeysAndValues/2; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
sb.WriteString(", ")
|
||||||
|
}
|
||||||
|
sb.WriteString("%v=%v")
|
||||||
|
}
|
||||||
|
return sb.String()
|
||||||
|
}
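
// Worked example (editorial, not part of the upstream library):
// formatString(0) returns "%s", and formatString(4) returns
// "%s, %v=%v, %v=%v", i.e. one "%v=%v" pair per key/value pair.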
|
||||||
|
|
||||||
|
// formatTimes formats any time.Time values as RFC3339.
|
||||||
|
func formatTimes(keysAndValues []interface{}) []interface{} {
|
||||||
|
var formattedArgs []interface{}
|
||||||
|
for _, arg := range keysAndValues {
|
||||||
|
if t, ok := arg.(time.Time); ok {
|
||||||
|
arg = t.Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
formattedArgs = append(formattedArgs, arg)
|
||||||
|
}
|
||||||
|
return formattedArgs
|
||||||
|
}
45 vendor/github.com/robfig/cron/v3/option.go generated vendored Normal file
@ -0,0 +1,45 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Option represents a modification to the default behavior of a Cron.
|
||||||
|
type Option func(*Cron)
|
||||||
|
|
||||||
|
// WithLocation overrides the timezone of the cron instance.
|
||||||
|
func WithLocation(loc *time.Location) Option {
|
||||||
|
return func(c *Cron) {
|
||||||
|
c.location = loc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithSeconds overrides the parser used for interpreting job schedules to
|
||||||
|
// include a seconds field as the first one.
|
||||||
|
func WithSeconds() Option {
|
||||||
|
return WithParser(NewParser(
|
||||||
|
Second | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithParser overrides the parser used for interpreting job schedules.
|
||||||
|
func WithParser(p ScheduleParser) Option {
|
||||||
|
return func(c *Cron) {
|
||||||
|
c.parser = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithChain specifies Job wrappers to apply to all jobs added to this cron.
|
||||||
|
// Refer to the Chain* functions in this package for provided wrappers.
|
||||||
|
func WithChain(wrappers ...JobWrapper) Option {
|
||||||
|
return func(c *Cron) {
|
||||||
|
c.chain = NewChain(wrappers...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithLogger uses the provided logger.
|
||||||
|
func WithLogger(logger Logger) Option {
|
||||||
|
return func(c *Cron) {
|
||||||
|
c.logger = logger
|
||||||
|
}
|
||||||
|
}
42 vendor/github.com/robfig/cron/v3/option_test.go generated vendored Normal file
@ -0,0 +1,42 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithLocation(t *testing.T) {
|
||||||
|
c := New(WithLocation(time.UTC))
|
||||||
|
if c.location != time.UTC {
|
||||||
|
t.Errorf("expected UTC, got %v", c.location)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithParser(t *testing.T) {
|
||||||
|
var parser = NewParser(Dow)
|
||||||
|
c := New(WithParser(parser))
|
||||||
|
if c.parser != parser {
|
||||||
|
t.Error("expected provided parser")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithVerboseLogger(t *testing.T) {
|
||||||
|
var buf syncWriter
|
||||||
|
var logger = log.New(&buf, "", log.LstdFlags)
|
||||||
|
c := New(WithLogger(VerbosePrintfLogger(logger)))
|
||||||
|
if c.logger.(printfLogger).logger != logger {
|
||||||
|
t.Error("expected provided logger")
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AddFunc("@every 1s", func() {})
|
||||||
|
c.Start()
|
||||||
|
time.Sleep(OneSecond)
|
||||||
|
c.Stop()
|
||||||
|
out := buf.String()
|
||||||
|
if !strings.Contains(out, "schedule,") ||
|
||||||
|
!strings.Contains(out, "run,") {
|
||||||
|
t.Error("expected to see some actions, got:", out)
|
||||||
|
}
|
||||||
|
}
434 vendor/github.com/robfig/cron/v3/parser.go generated vendored Normal file
@ -0,0 +1,434 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features. If a field is not
// included the parser will assume a default value. These options do not change
// the order fields are parsed in.
|
||||||
|
type ParseOption int
|
||||||
|
|
||||||
|
const (
|
||||||
|
Second ParseOption = 1 << iota // Seconds field, default 0
|
||||||
|
SecondOptional // Optional seconds field, default 0
|
||||||
|
Minute // Minutes field, default 0
|
||||||
|
Hour // Hours field, default 0
|
||||||
|
Dom // Day of month field, default *
|
||||||
|
Month // Month field, default *
|
||||||
|
Dow // Day of week field, default *
|
||||||
|
DowOptional // Optional day of week field, default *
|
||||||
|
Descriptor // Allow descriptors such as @monthly, @weekly, etc.
|
||||||
|
)
|
||||||
|
|
||||||
|
var places = []ParseOption{
|
||||||
|
Second,
|
||||||
|
Minute,
|
||||||
|
Hour,
|
||||||
|
Dom,
|
||||||
|
Month,
|
||||||
|
Dow,
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaults = []string{
|
||||||
|
"0",
|
||||||
|
"0",
|
||||||
|
"0",
|
||||||
|
"*",
|
||||||
|
"*",
|
||||||
|
"*",
|
||||||
|
}
|
||||||
|
|
||||||
|
// A custom Parser that can be configured.
|
||||||
|
type Parser struct {
|
||||||
|
options ParseOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewParser creates a Parser with custom options.
|
||||||
|
//
|
||||||
|
// It panics if more than one Optional is given, since it would be impossible to
|
||||||
|
// correctly infer which optional is provided or missing in general.
|
||||||
|
//
|
||||||
|
// Examples
|
||||||
|
//
|
||||||
|
// // Standard parser without descriptors
|
||||||
|
// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
|
||||||
|
// sched, err := specParser.Parse("0 0 15 */3 *")
|
||||||
|
//
|
||||||
|
// // Same as above, just excludes time fields
|
||||||
|
// specParser := NewParser(Dom | Month | Dow)
|
||||||
|
// sched, err := specParser.Parse("15 */3 *")
|
||||||
|
//
|
||||||
|
// // Same as above, just makes Dow optional
|
||||||
|
// specParser := NewParser(Dom | Month | DowOptional)
|
||||||
|
// sched, err := specParser.Parse("15 */3")
|
||||||
|
//
|
||||||
|
func NewParser(options ParseOption) Parser {
|
||||||
|
optionals := 0
|
||||||
|
if options&DowOptional > 0 {
|
||||||
|
optionals++
|
||||||
|
}
|
||||||
|
if options&SecondOptional > 0 {
|
||||||
|
optionals++
|
||||||
|
}
|
||||||
|
if optionals > 1 {
|
||||||
|
panic("multiple optionals may not be configured")
|
||||||
|
}
|
||||||
|
return Parser{options}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse returns a new crontab schedule representing the given spec.
|
||||||
|
// It returns a descriptive error if the spec is not valid.
|
||||||
|
// It accepts crontab specs and features configured by NewParser.
|
||||||
|
func (p Parser) Parse(spec string) (Schedule, error) {
|
||||||
|
if len(spec) == 0 {
|
||||||
|
return nil, fmt.Errorf("empty spec string")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract timezone if present
|
||||||
|
var loc = time.Local
|
||||||
|
if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
|
||||||
|
var err error
|
||||||
|
i := strings.Index(spec, " ")
|
||||||
|
eq := strings.Index(spec, "=")
|
||||||
|
if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
|
||||||
|
return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
|
||||||
|
}
|
||||||
|
spec = strings.TrimSpace(spec[i:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle named schedules (descriptors), if configured
|
||||||
|
if strings.HasPrefix(spec, "@") {
|
||||||
|
if p.options&Descriptor == 0 {
|
||||||
|
return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
|
||||||
|
}
|
||||||
|
return parseDescriptor(spec, loc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split on whitespace.
|
||||||
|
fields := strings.Fields(spec)
|
||||||
|
|
||||||
|
// Validate & fill in any omitted or optional fields
|
||||||
|
var err error
|
||||||
|
fields, err = normalizeFields(fields, p.options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
field := func(field string, r bounds) uint64 {
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var bits uint64
|
||||||
|
bits, err = getField(field, r)
|
||||||
|
return bits
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
second = field(fields[0], seconds)
|
||||||
|
minute = field(fields[1], minutes)
|
||||||
|
hour = field(fields[2], hours)
|
||||||
|
dayofmonth = field(fields[3], dom)
|
||||||
|
month = field(fields[4], months)
|
||||||
|
dayofweek = field(fields[5], dow)
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: second,
|
||||||
|
Minute: minute,
|
||||||
|
Hour: hour,
|
||||||
|
Dom: dayofmonth,
|
||||||
|
Month: month,
|
||||||
|
Dow: dayofweek,
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeFields takes a subset of the time fields and returns the full set
// with defaults (zeroes) populated for unset fields.
|
||||||
|
//
|
||||||
|
// As part of performing this function, it also validates that the provided
|
||||||
|
// fields are compatible with the configured options.
|
||||||
|
func normalizeFields(fields []string, options ParseOption) ([]string, error) {
|
||||||
|
// Validate optionals & add their field to options
|
||||||
|
optionals := 0
|
||||||
|
if options&SecondOptional > 0 {
|
||||||
|
options |= Second
|
||||||
|
optionals++
|
||||||
|
}
|
||||||
|
if options&DowOptional > 0 {
|
||||||
|
options |= Dow
|
||||||
|
optionals++
|
||||||
|
}
|
||||||
|
if optionals > 1 {
|
||||||
|
return nil, fmt.Errorf("multiple optionals may not be configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Figure out how many fields we need
|
||||||
|
max := 0
|
||||||
|
for _, place := range places {
|
||||||
|
if options&place > 0 {
|
||||||
|
max++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
min := max - optionals
|
||||||
|
|
||||||
|
// Validate number of fields
|
||||||
|
if count := len(fields); count < min || count > max {
|
||||||
|
if min == max {
|
||||||
|
return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Populate the optional field if not provided
|
||||||
|
if min < max && len(fields) == min {
|
||||||
|
switch {
|
||||||
|
case options&DowOptional > 0:
|
||||||
|
fields = append(fields, defaults[5]) // TODO: improve access to default
|
||||||
|
case options&SecondOptional > 0:
|
||||||
|
fields = append([]string{defaults[0]}, fields...)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unknown optional field")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Populate all fields not part of options with their defaults
|
||||||
|
n := 0
|
||||||
|
expandedFields := make([]string, len(places))
|
||||||
|
copy(expandedFields, defaults)
|
||||||
|
for i, place := range places {
|
||||||
|
if options&place > 0 {
|
||||||
|
expandedFields[i] = fields[n]
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return expandedFields, nil
|
||||||
|
}
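
// Worked example (editorial, not part of the upstream library): with the
// options SecondOptional|Minute|Hour|Dom|Month|Dow, the five fields of
// "5 4 * * *" normalize to []string{"0", "5", "4", "*", "*", "*"}, i.e. the
// optional seconds field is filled in with its default "0".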
|
||||||
|
|
||||||
|
var standardParser = NewParser(
|
||||||
|
Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseStandard returns a new crontab schedule representing the given
|
||||||
|
// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
|
||||||
|
// representing: minute, hour, day of month, month and day of week, in that
|
||||||
|
// order. It returns a descriptive error if the spec is not valid.
|
||||||
|
//
|
||||||
|
// It accepts
|
||||||
|
// - Standard crontab specs, e.g. "* * * * ?"
|
||||||
|
// - Descriptors, e.g. "@midnight", "@every 1h30m"
|
||||||
|
func ParseStandard(standardSpec string) (Schedule, error) {
|
||||||
|
return standardParser.Parse(standardSpec)
|
||||||
|
}
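
// Editorial sketch, not part of the upstream library: computing the next
// activation time from a standard five-field spec.
//
//	sched, err := cron.ParseStandard("30 3-6 * * *")
//	if err != nil {
//		// handle the parse error
//	}
//	next := sched.Next(time.Now()) // the next 03:30, 04:30, 05:30 or 06:30 local time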
|
||||||
|
|
||||||
|
// getField returns an Int with the bits set representing all of the times that
|
||||||
|
// the field represents or error parsing field value. A "field" is a comma-separated
|
||||||
|
// list of "ranges".
|
||||||
|
func getField(field string, r bounds) (uint64, error) {
|
||||||
|
var bits uint64
|
||||||
|
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
|
||||||
|
for _, expr := range ranges {
|
||||||
|
bit, err := getRange(expr, r)
|
||||||
|
if err != nil {
|
||||||
|
return bits, err
|
||||||
|
}
|
||||||
|
bits |= bit
|
||||||
|
}
|
||||||
|
return bits, nil
|
||||||
|
}
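
// Worked example (editorial, not part of the upstream library): for the
// minutes field "1,15,30", getField ORs one bit per listed minute, i.e.
// 1<<1 | 1<<15 | 1<<30.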
|
||||||
|
|
||||||
|
// getRange returns the bits indicated by the given expression:
|
||||||
|
// number | number "-" number [ "/" number ]
|
||||||
|
// or error parsing range.
|
||||||
|
func getRange(expr string, r bounds) (uint64, error) {
|
||||||
|
var (
|
||||||
|
start, end, step uint
|
||||||
|
rangeAndStep = strings.Split(expr, "/")
|
||||||
|
lowAndHigh = strings.Split(rangeAndStep[0], "-")
|
||||||
|
singleDigit = len(lowAndHigh) == 1
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
var extra uint64
|
||||||
|
if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
|
||||||
|
start = r.min
|
||||||
|
end = r.max
|
||||||
|
extra = starBit
|
||||||
|
} else {
|
||||||
|
start, err = parseIntOrName(lowAndHigh[0], r.names)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
switch len(lowAndHigh) {
|
||||||
|
case 1:
|
||||||
|
end = start
|
||||||
|
case 2:
|
||||||
|
end, err = parseIntOrName(lowAndHigh[1], r.names)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("too many hyphens: %s", expr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(rangeAndStep) {
|
||||||
|
case 1:
|
||||||
|
step = 1
|
||||||
|
case 2:
|
||||||
|
step, err = mustParseInt(rangeAndStep[1])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special handling: "N/step" means "N-max/step".
|
||||||
|
if singleDigit {
|
||||||
|
end = r.max
|
||||||
|
}
|
||||||
|
if step > 1 {
|
||||||
|
extra = 0
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("too many slashes: %s", expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if start < r.min {
|
||||||
|
return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
|
||||||
|
}
|
||||||
|
if end > r.max {
|
||||||
|
return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
|
||||||
|
}
|
||||||
|
if start > end {
|
||||||
|
return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
|
||||||
|
}
|
||||||
|
if step == 0 {
|
||||||
|
return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return getBits(start, end, step) | extra, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseIntOrName returns the (possibly-named) integer contained in expr.
|
||||||
|
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
|
||||||
|
if names != nil {
|
||||||
|
if namedInt, ok := names[strings.ToLower(expr)]; ok {
|
||||||
|
return namedInt, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mustParseInt(expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustParseInt parses the given expression as an int or returns an error.
|
||||||
|
func mustParseInt(expr string) (uint, error) {
|
||||||
|
num, err := strconv.Atoi(expr)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
|
||||||
|
}
|
||||||
|
if num < 0 {
|
||||||
|
return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return uint(num), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBits sets all bits in the range [min, max], modulo the given step size.
|
||||||
|
func getBits(min, max, step uint) uint64 {
|
||||||
|
var bits uint64
|
||||||
|
|
||||||
|
// If step is 1, use shifts.
|
||||||
|
if step == 1 {
|
||||||
|
return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Else, use a simple loop.
|
||||||
|
for i := min; i <= max; i += step {
|
||||||
|
bits |= 1 << i
|
||||||
|
}
|
||||||
|
return bits
|
||||||
|
}
|
||||||
|
|
||||||
|
// all returns all bits within the given bounds. (plus the star bit)
|
||||||
|
func all(r bounds) uint64 {
|
||||||
|
return getBits(r.min, r.max, 1) | starBit
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
|
||||||
|
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
|
||||||
|
switch descriptor {
|
||||||
|
case "@yearly", "@annually":
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: 1 << hours.min,
|
||||||
|
Dom: 1 << dom.min,
|
||||||
|
Month: 1 << months.min,
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
case "@monthly":
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: 1 << hours.min,
|
||||||
|
Dom: 1 << dom.min,
|
||||||
|
Month: all(months),
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
case "@weekly":
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: 1 << hours.min,
|
||||||
|
Dom: all(dom),
|
||||||
|
Month: all(months),
|
||||||
|
Dow: 1 << dow.min,
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
case "@daily", "@midnight":
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: 1 << hours.min,
|
||||||
|
Dom: all(dom),
|
||||||
|
Month: all(months),
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
case "@hourly":
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: all(hours),
|
||||||
|
Dom: all(dom),
|
||||||
|
Month: all(months),
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: loc,
|
||||||
|
}, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
const every = "@every "
|
||||||
|
if strings.HasPrefix(descriptor, every) {
|
||||||
|
duration, err := time.ParseDuration(descriptor[len(every):])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
|
||||||
|
}
|
||||||
|
return Every(duration), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
|
||||||
|
}
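For orientation, a minimal usage sketch (not part of the vendored file): only ParseStandard and Schedule.Next come from the code above; the spec string and the wrapping main program are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// "*/15 * * * *" is an arbitrary example spec: every 15 minutes.
	sched, err := cron.ParseStandard("*/15 * * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println("next activation:", sched.Next(time.Now()))
}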
383  vendor/github.com/robfig/cron/v3/parser_test.go  generated vendored Normal file
@@ -0,0 +1,383 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var secondParser = NewParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor)
|
||||||
|
|
||||||
|
func TestRange(t *testing.T) {
|
||||||
|
zero := uint64(0)
|
||||||
|
ranges := []struct {
|
||||||
|
expr string
|
||||||
|
min, max uint
|
||||||
|
expected uint64
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{"5", 0, 7, 1 << 5, ""},
|
||||||
|
{"0", 0, 7, 1 << 0, ""},
|
||||||
|
{"7", 0, 7, 1 << 7, ""},
|
||||||
|
|
||||||
|
{"5-5", 0, 7, 1 << 5, ""},
|
||||||
|
{"5-6", 0, 7, 1<<5 | 1<<6, ""},
|
||||||
|
{"5-7", 0, 7, 1<<5 | 1<<6 | 1<<7, ""},
|
||||||
|
|
||||||
|
{"5-6/2", 0, 7, 1 << 5, ""},
|
||||||
|
{"5-7/2", 0, 7, 1<<5 | 1<<7, ""},
|
||||||
|
{"5-7/1", 0, 7, 1<<5 | 1<<6 | 1<<7, ""},
|
||||||
|
|
||||||
|
{"*", 1, 3, 1<<1 | 1<<2 | 1<<3 | starBit, ""},
|
||||||
|
{"*/2", 1, 3, 1<<1 | 1<<3, ""},
|
||||||
|
|
||||||
|
{"5--5", 0, 0, zero, "too many hyphens"},
|
||||||
|
{"jan-x", 0, 0, zero, "failed to parse int from"},
|
||||||
|
{"2-x", 1, 5, zero, "failed to parse int from"},
|
||||||
|
{"*/-12", 0, 0, zero, "negative number"},
|
||||||
|
{"*//2", 0, 0, zero, "too many slashes"},
|
||||||
|
{"1", 3, 5, zero, "below minimum"},
|
||||||
|
{"6", 3, 5, zero, "above maximum"},
|
||||||
|
{"5-3", 3, 5, zero, "beyond end of range"},
|
||||||
|
{"*/0", 0, 0, zero, "should be a positive number"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range ranges {
|
||||||
|
actual, err := getRange(c.expr, bounds{c.min, c.max, nil})
|
||||||
|
if len(c.err) != 0 && (err == nil || !strings.Contains(err.Error(), c.err)) {
|
||||||
|
t.Errorf("%s => expected %v, got %v", c.expr, c.err, err)
|
||||||
|
}
|
||||||
|
if len(c.err) == 0 && err != nil {
|
||||||
|
t.Errorf("%s => unexpected error %v", c.expr, err)
|
||||||
|
}
|
||||||
|
if actual != c.expected {
|
||||||
|
t.Errorf("%s => expected %d, got %d", c.expr, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestField(t *testing.T) {
|
||||||
|
fields := []struct {
|
||||||
|
expr string
|
||||||
|
min, max uint
|
||||||
|
expected uint64
|
||||||
|
}{
|
||||||
|
{"5", 1, 7, 1 << 5},
|
||||||
|
{"5,6", 1, 7, 1<<5 | 1<<6},
|
||||||
|
{"5,6,7", 1, 7, 1<<5 | 1<<6 | 1<<7},
|
||||||
|
{"1,5-7/2,3", 1, 7, 1<<1 | 1<<5 | 1<<7 | 1<<3},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range fields {
|
||||||
|
actual, _ := getField(c.expr, bounds{c.min, c.max, nil})
|
||||||
|
if actual != c.expected {
|
||||||
|
t.Errorf("%s => expected %d, got %d", c.expr, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAll(t *testing.T) {
|
||||||
|
allBits := []struct {
|
||||||
|
r bounds
|
||||||
|
expected uint64
|
||||||
|
}{
|
||||||
|
{minutes, 0xfffffffffffffff}, // 0-59: 60 ones
|
||||||
|
{hours, 0xffffff}, // 0-23: 24 ones
|
||||||
|
{dom, 0xfffffffe}, // 1-31: 31 ones, 1 zero
|
||||||
|
{months, 0x1ffe}, // 1-12: 12 ones, 1 zero
|
||||||
|
{dow, 0x7f}, // 0-6: 7 ones
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range allBits {
|
||||||
|
actual := all(c.r) // all() adds the starBit, so compensate for that..
|
||||||
|
if c.expected|starBit != actual {
|
||||||
|
t.Errorf("%d-%d/%d => expected %b, got %b",
|
||||||
|
c.r.min, c.r.max, 1, c.expected|starBit, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBits(t *testing.T) {
|
||||||
|
bits := []struct {
|
||||||
|
min, max, step uint
|
||||||
|
expected uint64
|
||||||
|
}{
|
||||||
|
{0, 0, 1, 0x1},
|
||||||
|
{1, 1, 1, 0x2},
|
||||||
|
{1, 5, 2, 0x2a}, // 101010
|
||||||
|
{1, 4, 2, 0xa}, // 1010
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range bits {
|
||||||
|
actual := getBits(c.min, c.max, c.step)
|
||||||
|
if c.expected != actual {
|
||||||
|
t.Errorf("%d-%d/%d => expected %b, got %b",
|
||||||
|
c.min, c.max, c.step, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseScheduleErrors(t *testing.T) {
|
||||||
|
var tests = []struct{ expr, err string }{
|
||||||
|
{"* 5 j * * *", "failed to parse int from"},
|
||||||
|
{"@every Xm", "failed to parse duration"},
|
||||||
|
{"@unrecognized", "unrecognized descriptor"},
|
||||||
|
{"* * * *", "expected 5 to 6 fields"},
|
||||||
|
{"", "empty spec string"},
|
||||||
|
}
|
||||||
|
for _, c := range tests {
|
||||||
|
actual, err := secondParser.Parse(c.expr)
|
||||||
|
if err == nil || !strings.Contains(err.Error(), c.err) {
|
||||||
|
t.Errorf("%s => expected %v, got %v", c.expr, c.err, err)
|
||||||
|
}
|
||||||
|
if actual != nil {
|
||||||
|
t.Errorf("expected nil schedule on error, got %v", actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSchedule(t *testing.T) {
|
||||||
|
tokyo, _ := time.LoadLocation("Asia/Tokyo")
|
||||||
|
entries := []struct {
|
||||||
|
parser Parser
|
||||||
|
expr string
|
||||||
|
expected Schedule
|
||||||
|
}{
|
||||||
|
{secondParser, "0 5 * * * *", every5min(time.Local)},
|
||||||
|
{standardParser, "5 * * * *", every5min(time.Local)},
|
||||||
|
{secondParser, "CRON_TZ=UTC 0 5 * * * *", every5min(time.UTC)},
|
||||||
|
{standardParser, "CRON_TZ=UTC 5 * * * *", every5min(time.UTC)},
|
||||||
|
{secondParser, "CRON_TZ=Asia/Tokyo 0 5 * * * *", every5min(tokyo)},
|
||||||
|
{secondParser, "@every 5m", ConstantDelaySchedule{5 * time.Minute}},
|
||||||
|
{secondParser, "@midnight", midnight(time.Local)},
|
||||||
|
{secondParser, "TZ=UTC @midnight", midnight(time.UTC)},
|
||||||
|
{secondParser, "TZ=Asia/Tokyo @midnight", midnight(tokyo)},
|
||||||
|
{secondParser, "@yearly", annual(time.Local)},
|
||||||
|
{secondParser, "@annually", annual(time.Local)},
|
||||||
|
{
|
||||||
|
parser: secondParser,
|
||||||
|
expr: "* 5 * * * *",
|
||||||
|
expected: &SpecSchedule{
|
||||||
|
Second: all(seconds),
|
||||||
|
Minute: 1 << 5,
|
||||||
|
Hour: all(hours),
|
||||||
|
Dom: all(dom),
|
||||||
|
Month: all(months),
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: time.Local,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range entries {
|
||||||
|
actual, err := c.parser.Parse(c.expr)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s => unexpected error %v", c.expr, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(actual, c.expected) {
|
||||||
|
t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOptionalSecondSchedule(t *testing.T) {
|
||||||
|
parser := NewParser(SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor)
|
||||||
|
entries := []struct {
|
||||||
|
expr string
|
||||||
|
expected Schedule
|
||||||
|
}{
|
||||||
|
{"0 5 * * * *", every5min(time.Local)},
|
||||||
|
{"5 5 * * * *", every5min5s(time.Local)},
|
||||||
|
{"5 * * * *", every5min(time.Local)},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range entries {
|
||||||
|
actual, err := parser.Parse(c.expr)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s => unexpected error %v", c.expr, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(actual, c.expected) {
|
||||||
|
t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNormalizeFields(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input []string
|
||||||
|
options ParseOption
|
||||||
|
expected []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"AllFields_NoOptional",
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
Second | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"AllFields_SecondOptional_Provided",
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"AllFields_SecondOptional_NotProvided",
|
||||||
|
[]string{"5", "*", "*", "*", "*"},
|
||||||
|
SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor,
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"SubsetFields_NoOptional",
|
||||||
|
[]string{"5", "15", "*"},
|
||||||
|
Hour | Dom | Month,
|
||||||
|
[]string{"0", "0", "5", "15", "*", "*"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"SubsetFields_DowOptional_Provided",
|
||||||
|
[]string{"5", "15", "*", "4"},
|
||||||
|
Hour | Dom | Month | DowOptional,
|
||||||
|
[]string{"0", "0", "5", "15", "*", "4"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"SubsetFields_DowOptional_NotProvided",
|
||||||
|
[]string{"5", "15", "*"},
|
||||||
|
Hour | Dom | Month | DowOptional,
|
||||||
|
[]string{"0", "0", "5", "15", "*", "*"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"SubsetFields_SecondOptional_NotProvided",
|
||||||
|
[]string{"5", "15", "*"},
|
||||||
|
SecondOptional | Hour | Dom | Month,
|
||||||
|
[]string{"0", "0", "5", "15", "*", "*"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
actual, err := normalizeFields(test.input, test.options)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(actual, test.expected) {
|
||||||
|
t.Errorf("expected %v, got %v", test.expected, actual)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNormalizeFields_Errors(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input []string
|
||||||
|
options ParseOption
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"TwoOptionals",
|
||||||
|
[]string{"0", "5", "*", "*", "*", "*"},
|
||||||
|
SecondOptional | Minute | Hour | Dom | Month | DowOptional,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"TooManyFields",
|
||||||
|
[]string{"0", "5", "*", "*"},
|
||||||
|
SecondOptional | Minute | Hour,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"NoFields",
|
||||||
|
[]string{},
|
||||||
|
SecondOptional | Minute | Hour,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"TooFewFields",
|
||||||
|
[]string{"*"},
|
||||||
|
SecondOptional | Minute | Hour,
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
actual, err := normalizeFields(test.input, test.options)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("expected an error, got none. results: %v", actual)
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), test.err) {
|
||||||
|
t.Errorf("expected error %q, got %q", test.err, err.Error())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStandardSpecSchedule(t *testing.T) {
|
||||||
|
entries := []struct {
|
||||||
|
expr string
|
||||||
|
expected Schedule
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
expr: "5 * * * *",
|
||||||
|
expected: &SpecSchedule{1 << seconds.min, 1 << 5, all(hours), all(dom), all(months), all(dow), time.Local},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expr: "@every 5m",
|
||||||
|
expected: ConstantDelaySchedule{time.Duration(5) * time.Minute},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expr: "5 j * * *",
|
||||||
|
err: "failed to parse int from",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expr: "* * * *",
|
||||||
|
err: "expected exactly 5 fields",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range entries {
|
||||||
|
actual, err := ParseStandard(c.expr)
|
||||||
|
if len(c.err) != 0 && (err == nil || !strings.Contains(err.Error(), c.err)) {
|
||||||
|
t.Errorf("%s => expected %v, got %v", c.expr, c.err, err)
|
||||||
|
}
|
||||||
|
if len(c.err) == 0 && err != nil {
|
||||||
|
t.Errorf("%s => unexpected error %v", c.expr, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(actual, c.expected) {
|
||||||
|
t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNoDescriptorParser(t *testing.T) {
|
||||||
|
parser := NewParser(Minute | Hour)
|
||||||
|
_, err := parser.Parse("@every 1m")
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected an error, got none")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func every5min(loc *time.Location) *SpecSchedule {
|
||||||
|
return &SpecSchedule{1 << 0, 1 << 5, all(hours), all(dom), all(months), all(dow), loc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func every5min5s(loc *time.Location) *SpecSchedule {
|
||||||
|
return &SpecSchedule{1 << 5, 1 << 5, all(hours), all(dom), all(months), all(dow), loc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func midnight(loc *time.Location) *SpecSchedule {
|
||||||
|
return &SpecSchedule{1, 1, 1, all(dom), all(months), all(dow), loc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func annual(loc *time.Location) *SpecSchedule {
|
||||||
|
return &SpecSchedule{
|
||||||
|
Second: 1 << seconds.min,
|
||||||
|
Minute: 1 << minutes.min,
|
||||||
|
Hour: 1 << hours.min,
|
||||||
|
Dom: 1 << dom.min,
|
||||||
|
Month: 1 << months.min,
|
||||||
|
Dow: all(dow),
|
||||||
|
Location: loc,
|
||||||
|
}
|
||||||
|
}
188  vendor/github.com/robfig/cron/v3/spec.go  generated vendored Normal file
@@ -0,0 +1,188 @@
package cron

import "time"

// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
	Second, Minute, Hour, Dom, Month, Dow uint64

	// Override location for this schedule.
	Location *time.Location
}

// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
	min, max uint
	names    map[string]uint
}

// The bounds for each field.
var (
	seconds = bounds{0, 59, nil}
	minutes = bounds{0, 59, nil}
	hours   = bounds{0, 23, nil}
	dom     = bounds{1, 31, nil}
	months  = bounds{1, 12, map[string]uint{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		"oct": 10,
		"nov": 11,
		"dec": 12,
	}}
	dow = bounds{0, 6, map[string]uint{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
	}}
)

const (
	// Set the top bit if a star was included in the expression.
	starBit = 1 << 63
)

// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
	// General approach
	//
	// For Month, Day, Hour, Minute, Second:
	// Check if the time value matches. If yes, continue to the next field.
	// If the field doesn't match the schedule, then increment the field until it matches.
	// While incrementing the field, a wrap-around brings it back to the beginning
	// of the field list (since it is necessary to re-verify previous field
	// values)

	// Convert the given time into the schedule's timezone, if one is specified.
	// Save the original timezone so we can convert back after we find a time.
	// Note that schedules without a time zone specified (time.Local) are treated
	// as local to the time provided.
	origLocation := t.Location()
	loc := s.Location
	if loc == time.Local {
		loc = t.Location()
	}
	if s.Location != time.Local {
		t = t.In(s.Location)
	}

	// Start at the earliest possible time (the upcoming second).
	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)

	// This flag indicates whether a field has been incremented.
	added := false

	// If no time is found within five years, return zero.
	yearLimit := t.Year() + 5

WRAP:
	if t.Year() > yearLimit {
		return time.Time{}
	}

	// Find the first applicable month.
	// If it's this month, then do nothing.
	for 1<<uint(t.Month())&s.Month == 0 {
		// If we have to add a month, reset the other parts to 0.
		if !added {
			added = true
			// Otherwise, set the date at the beginning (since the current time is irrelevant).
			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 1, 0)

		// Wrapped around.
		if t.Month() == time.January {
			goto WRAP
		}
	}

	// Now get a day in that month.
	//
	// NOTE: This causes issues for daylight savings regimes where midnight does
	// not exist. For example: Sao Paulo has DST that transforms midnight on
	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
	for !dayMatches(s, t) {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
		}
		t = t.AddDate(0, 0, 1)
		// Notice if the hour is no longer midnight due to DST.
		// Add an hour if it's 23, subtract an hour if it's 1.
		if t.Hour() != 0 {
			if t.Hour() > 12 {
				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
			} else {
				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
			}
		}

		if t.Day() == 1 {
			goto WRAP
		}
	}

	for 1<<uint(t.Hour())&s.Hour == 0 {
		if !added {
			added = true
			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
		}
		t = t.Add(1 * time.Hour)

		if t.Hour() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Minute())&s.Minute == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Minute)
		}
		t = t.Add(1 * time.Minute)

		if t.Minute() == 0 {
			goto WRAP
		}
	}

	for 1<<uint(t.Second())&s.Second == 0 {
		if !added {
			added = true
			t = t.Truncate(time.Second)
		}
		t = t.Add(1 * time.Second)

		if t.Second() == 0 {
			goto WRAP
		}
	}

	return t.In(origLocation)
}

// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
func dayMatches(s *SpecSchedule, t time.Time) bool {
	var (
		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
	)
	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
		return domMatch && dowMatch
	}
	return domMatch || dowMatch
}
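To make the dayMatches rule concrete, a hedged sketch (the behaviour mirrors the vendored spec_test.go cases that follow; the wrapper program and dates are illustrative assumptions): when both day fields are restricted, matching either one activates the schedule; when one of them is a star, both must match.

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	start := time.Date(2012, time.June, 14, 12, 0, 0, 0, time.UTC) // a Thursday

	// Both day fields restricted ("1,15" and "Sun"): either match fires,
	// so the next activation is Friday June 15 2012 at 00:00.
	both, _ := cron.ParseStandard("0 0 1,15 * Sun")
	fmt.Println(both.Next(start))

	// Day of month left as "*": now both fields must match, so the next
	// activation is the following Sunday (June 17 2012) at 00:00.
	dowOnly, _ := cron.ParseStandard("0 0 * * Sun")
	fmt.Println(dowOnly.Next(start))
}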
300  vendor/github.com/robfig/cron/v3/spec_test.go  generated vendored Normal file
@@ -0,0 +1,300 @@
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestActivation(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
time, spec string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
// Every fifteen minutes.
|
||||||
|
{"Mon Jul 9 15:00 2012", "0/15 * * * *", true},
|
||||||
|
{"Mon Jul 9 15:45 2012", "0/15 * * * *", true},
|
||||||
|
{"Mon Jul 9 15:40 2012", "0/15 * * * *", false},
|
||||||
|
|
||||||
|
// Every fifteen minutes, starting at 5 minutes.
|
||||||
|
{"Mon Jul 9 15:05 2012", "5/15 * * * *", true},
|
||||||
|
{"Mon Jul 9 15:20 2012", "5/15 * * * *", true},
|
||||||
|
{"Mon Jul 9 15:50 2012", "5/15 * * * *", true},
|
||||||
|
|
||||||
|
// Named months
|
||||||
|
{"Sun Jul 15 15:00 2012", "0/15 * * Jul *", true},
|
||||||
|
{"Sun Jul 15 15:00 2012", "0/15 * * Jun *", false},
|
||||||
|
|
||||||
|
// Everything set.
|
||||||
|
{"Sun Jul 15 08:30 2012", "30 08 ? Jul Sun", true},
|
||||||
|
{"Sun Jul 15 08:30 2012", "30 08 15 Jul ?", true},
|
||||||
|
{"Mon Jul 16 08:30 2012", "30 08 ? Jul Sun", false},
|
||||||
|
{"Mon Jul 16 08:30 2012", "30 08 15 Jul ?", false},
|
||||||
|
|
||||||
|
// Predefined schedules
|
||||||
|
{"Mon Jul 9 15:00 2012", "@hourly", true},
|
||||||
|
{"Mon Jul 9 15:04 2012", "@hourly", false},
|
||||||
|
{"Mon Jul 9 15:00 2012", "@daily", false},
|
||||||
|
{"Mon Jul 9 00:00 2012", "@daily", true},
|
||||||
|
{"Mon Jul 9 00:00 2012", "@weekly", false},
|
||||||
|
{"Sun Jul 8 00:00 2012", "@weekly", true},
|
||||||
|
{"Sun Jul 8 01:00 2012", "@weekly", false},
|
||||||
|
{"Sun Jul 8 00:00 2012", "@monthly", false},
|
||||||
|
{"Sun Jul 1 00:00 2012", "@monthly", true},
|
||||||
|
|
||||||
|
// Test interaction of DOW and DOM.
|
||||||
|
// If both are restricted, then only one needs to match.
|
||||||
|
{"Sun Jul 15 00:00 2012", "* * 1,15 * Sun", true},
|
||||||
|
{"Fri Jun 15 00:00 2012", "* * 1,15 * Sun", true},
|
||||||
|
{"Wed Aug 1 00:00 2012", "* * 1,15 * Sun", true},
|
||||||
|
{"Sun Jul 15 00:00 2012", "* * */10 * Sun", true}, // verifies #70
|
||||||
|
|
||||||
|
// However, if one has a star, then both need to match.
|
||||||
|
{"Sun Jul 15 00:00 2012", "* * * * Mon", false},
|
||||||
|
{"Mon Jul 9 00:00 2012", "* * 1,15 * *", false},
|
||||||
|
{"Sun Jul 15 00:00 2012", "* * 1,15 * *", true},
|
||||||
|
{"Sun Jul 15 00:00 2012", "* * */2 * Sun", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
sched, err := ParseStandard(test.spec)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
actual := sched.Next(getTime(test.time).Add(-1 * time.Second))
|
||||||
|
expected := getTime(test.time)
|
||||||
|
if test.expected && expected != actual || !test.expected && expected == actual {
|
||||||
|
t.Errorf("Fail evaluating %s on %s: (expected) %s != %s (actual)",
|
||||||
|
test.spec, test.time, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNext(t *testing.T) {
|
||||||
|
runs := []struct {
|
||||||
|
time, spec string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
// Simple cases
|
||||||
|
{"Mon Jul 9 14:45 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"},
|
||||||
|
{"Mon Jul 9 14:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"},
|
||||||
|
{"Mon Jul 9 14:59:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"},
|
||||||
|
|
||||||
|
// Wrap around hours
|
||||||
|
{"Mon Jul 9 15:45 2012", "0 20-35/15 * * * *", "Mon Jul 9 16:20 2012"},
|
||||||
|
|
||||||
|
// Wrap around days
|
||||||
|
{"Mon Jul 9 23:46 2012", "0 */15 * * * *", "Tue Jul 10 00:00 2012"},
|
||||||
|
{"Mon Jul 9 23:45 2012", "0 20-35/15 * * * *", "Tue Jul 10 00:20 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * * * *", "Tue Jul 10 00:20:15 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 * * *", "Tue Jul 10 01:20:15 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 10-12 * * *", "Tue Jul 10 10:20:15 2012"},
|
||||||
|
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 */2 * *", "Thu Jul 11 01:20:15 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 * *", "Wed Jul 10 00:20:15 2012"},
|
||||||
|
{"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 Jul *", "Wed Jul 10 00:20:15 2012"},
|
||||||
|
|
||||||
|
// Wrap around months
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 9 Apr-Oct ?", "Thu Aug 9 00:00 2012"},
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 */5 Apr,Aug,Oct Mon", "Tue Aug 1 00:00 2012"},
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 */5 Oct Mon", "Mon Oct 1 00:00 2012"},
|
||||||
|
|
||||||
|
// Wrap around years
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon", "Mon Feb 4 00:00 2013"},
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon/2", "Fri Feb 1 00:00 2013"},
|
||||||
|
|
||||||
|
// Wrap around minute, hour, day, month, and year
|
||||||
|
{"Mon Dec 31 23:59:45 2012", "0 * * * * *", "Tue Jan 1 00:00:00 2013"},
|
||||||
|
|
||||||
|
// Leap year
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016"},
|
||||||
|
|
||||||
|
// Daylight savings time 2am EST (-5) -> 3am EDT (-4)
|
||||||
|
{"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 30 2 11 Mar ?", "2013-03-11T02:30:00-0400"},
|
||||||
|
|
||||||
|
// hourly job
|
||||||
|
{"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"},
|
||||||
|
{"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"},
|
||||||
|
{"2012-03-11T03:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"},
|
||||||
|
{"2012-03-11T04:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"},
|
||||||
|
|
||||||
|
// hourly job using CRON_TZ
|
||||||
|
{"2012-03-11T00:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"},
|
||||||
|
{"2012-03-11T01:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"},
|
||||||
|
{"2012-03-11T03:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"},
|
||||||
|
{"2012-03-11T04:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"},
|
||||||
|
|
||||||
|
// 1am nightly job
|
||||||
|
{"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-03-11T01:00:00-0500"},
|
||||||
|
{"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-03-12T01:00:00-0400"},
|
||||||
|
|
||||||
|
// 2am nightly job (skipped)
|
||||||
|
{"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-03-12T02:00:00-0400"},
|
||||||
|
|
||||||
|
// Daylight savings time 2am EDT (-4) => 1am EST (-5)
|
||||||
|
{"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 30 2 04 Nov ?", "2012-11-04T02:30:00-0500"},
|
||||||
|
{"2012-11-04T01:45:00-0400", "TZ=America/New_York 0 30 1 04 Nov ?", "2012-11-04T01:30:00-0500"},
|
||||||
|
|
||||||
|
// hourly job
|
||||||
|
{"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0400"},
|
||||||
|
{"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0500"},
|
||||||
|
{"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T02:00:00-0500"},
|
||||||
|
|
||||||
|
// 1am nightly job (runs twice)
|
||||||
|
{"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0400"},
|
||||||
|
{"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0500"},
|
||||||
|
{"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-11-05T01:00:00-0500"},
|
||||||
|
|
||||||
|
// 2am nightly job
|
||||||
|
{"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 2 * * ?", "2012-11-04T02:00:00-0500"},
|
||||||
|
{"2012-11-04T02:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-11-05T02:00:00-0500"},
|
||||||
|
|
||||||
|
// 3am nightly job
|
||||||
|
{"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 3 * * ?", "2012-11-04T03:00:00-0500"},
|
||||||
|
{"2012-11-04T03:00:00-0500", "TZ=America/New_York 0 0 3 * * ?", "2012-11-05T03:00:00-0500"},
|
||||||
|
|
||||||
|
// hourly job
|
||||||
|
{"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0400"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0500"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 * * * ?", "2012-11-04T02:00:00-0500"},
|
||||||
|
|
||||||
|
// 1am nightly job (runs twice)
|
||||||
|
{"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0400"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0500"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 1 * * ?", "2012-11-05T01:00:00-0500"},
|
||||||
|
|
||||||
|
// 2am nightly job
|
||||||
|
{"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 2 * * ?", "2012-11-04T02:00:00-0500"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T02:00:00-0500", "0 0 2 * * ?", "2012-11-05T02:00:00-0500"},
|
||||||
|
|
||||||
|
// 3am nightly job
|
||||||
|
{"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 * * ?", "2012-11-04T03:00:00-0500"},
|
||||||
|
{"TZ=America/New_York 2012-11-04T03:00:00-0500", "0 0 3 * * ?", "2012-11-05T03:00:00-0500"},
|
||||||
|
|
||||||
|
// Unsatisfiable
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 30 Feb ?", ""},
|
||||||
|
{"Mon Jul 9 23:35 2012", "0 0 0 31 Apr ?", ""},
|
||||||
|
|
||||||
|
// Monthly job
|
||||||
|
{"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 3 * ?", "2012-12-03T03:00:00-0500"},
|
||||||
|
|
||||||
|
// Test the scenario of DST resulting in midnight not being a valid time.
|
||||||
|
// https://github.com/robfig/cron/issues/157
|
||||||
|
{"2018-10-17T05:00:00-0400", "TZ=America/Sao_Paulo 0 0 9 10 * ?", "2018-11-10T06:00:00-0500"},
|
||||||
|
{"2018-02-14T05:00:00-0500", "TZ=America/Sao_Paulo 0 0 9 22 * ?", "2018-02-22T07:00:00-0500"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range runs {
|
||||||
|
sched, err := secondParser.Parse(c.spec)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
actual := sched.Next(getTime(c.time))
|
||||||
|
expected := getTime(c.expected)
|
||||||
|
if !actual.Equal(expected) {
|
||||||
|
t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrors(t *testing.T) {
|
||||||
|
invalidSpecs := []string{
|
||||||
|
"xyz",
|
||||||
|
"60 0 * * *",
|
||||||
|
"0 60 * * *",
|
||||||
|
"0 0 * * XYZ",
|
||||||
|
}
|
||||||
|
for _, spec := range invalidSpecs {
|
||||||
|
_, err := ParseStandard(spec)
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected an error parsing: ", spec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTime(value string) time.Time {
|
||||||
|
if value == "" {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var location = time.Local
|
||||||
|
if strings.HasPrefix(value, "TZ=") {
|
||||||
|
parts := strings.Fields(value)
|
||||||
|
loc, err := time.LoadLocation(parts[0][len("TZ="):])
|
||||||
|
if err != nil {
|
||||||
|
panic("could not parse location:" + err.Error())
|
||||||
|
}
|
||||||
|
location = loc
|
||||||
|
value = parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
var layouts = []string{
|
||||||
|
"Mon Jan 2 15:04 2006",
|
||||||
|
"Mon Jan 2 15:04:05 2006",
|
||||||
|
}
|
||||||
|
for _, layout := range layouts {
|
||||||
|
if t, err := time.ParseInLocation(layout, value, location); err == nil {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if t, err := time.ParseInLocation("2006-01-02T15:04:05-0700", value, location); err == nil {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
panic("could not parse time value " + value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNextWithTz(t *testing.T) {
|
||||||
|
runs := []struct {
|
||||||
|
time, spec string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
// Failing tests
|
||||||
|
{"2016-01-03T13:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"},
|
||||||
|
{"2016-01-03T04:09:03+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"},
|
||||||
|
|
||||||
|
// Passing tests
|
||||||
|
{"2016-01-03T14:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"},
|
||||||
|
{"2016-01-03T14:00:00+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"},
|
||||||
|
}
|
||||||
|
for _, c := range runs {
|
||||||
|
sched, err := ParseStandard(c.spec)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
actual := sched.Next(getTimeTZ(c.time))
|
||||||
|
expected := getTimeTZ(c.expected)
|
||||||
|
if !actual.Equal(expected) {
|
||||||
|
t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTimeTZ(value string) time.Time {
|
||||||
|
if value == "" {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
t, err := time.Parse("Mon Jan 2 15:04 2006", value)
|
||||||
|
if err != nil {
|
||||||
|
t, err = time.Parse("Mon Jan 2 15:04:05 2006", value)
|
||||||
|
if err != nil {
|
||||||
|
t, err = time.Parse("2006-01-02T15:04:05-0700", value)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/robfig/cron/issues/144
|
||||||
|
func TestSlash0NoHang(t *testing.T) {
|
||||||
|
schedule := "TZ=America/New_York 15/0 * * * *"
|
||||||
|
_, err := ParseStandard(schedule)
|
||||||
|
if err == nil {
|
||||||
|
t.Error("expected an error on 0 increment")
|
||||||
|
}
|
||||||
|
}
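The DST cases above also exercise the TZ=/CRON_TZ= prefix. A hedged sketch of that usage (the spec string and wrapper program are assumptions; the prefix handling itself is what the vendored tests cover):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// Pin a 2am-daily schedule to New York time regardless of the host zone.
	sched, err := cron.ParseStandard("CRON_TZ=America/New_York 0 2 * * *")
	if err != nil {
		panic(err)
	}
	// On the 2012 spring-forward night 2am does not exist in New York, so the
	// job is pushed to the next valid 2am (March 12), matching the tests above.
	fmt.Println(sched.Next(time.Date(2012, time.March, 11, 0, 0, 0, 0, time.UTC)))
}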
13  vendor/github.com/templexxx/cpu/.gitignore  generated vendored Normal file
@@ -0,0 +1,13 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.idea/
32  vendor/github.com/templexxx/cpu/LICENSE  generated vendored Normal file
@@ -0,0 +1,32 @@
BSD 3-Clause License

Copyright (c) 2018 Temple3x (temple3x@gmail.com)
Copyright 2017 The Go Authors
Copyright (c) 2015 Klaus Post

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  vendor/github.com/templexxx/cpu/README.md  generated vendored Normal file
@@ -0,0 +1,23 @@
# cpu

internal/cpu (in the Go standard lib) with these detections:

>- AVX512
>
>- Cache Size
>
>- Invariant TSC

It also provides:

>- False sharing range, see `X86FalseSharingRange` for the X86 platform.
>
>- TSC frequency
>
>- Name
>
>- Family & Model

# Acknowledgement

[klauspost/cpuid](https://github.com/klauspost/cpuid)
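A hedged sketch of reading these detections from Go (the field names are taken from the x86 struct in the vendored cpu.go below; the wrapper program itself is an assumption):

package main

import (
	"fmt"

	"github.com/templexxx/cpu"
)

func main() {
	fmt.Println("CPU:", cpu.X86.Name)
	fmt.Println("AVX2:", cpu.X86.HasAVX2, "AVX512F:", cpu.X86.HasAVX512F)
	fmt.Println("L1D/L2/L3 cache:", cpu.X86.Cache.L1D, cpu.X86.Cache.L2, cpu.X86.Cache.L3)
	if cpu.X86.HasInvariantTSC {
		fmt.Println("invariant TSC, frequency (Hz):", cpu.X86.TSCFrequency)
	}
}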
235  vendor/github.com/templexxx/cpu/cpu.go  generated vendored Normal file
@@ -0,0 +1,235 @@
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package cpu implements processor feature detection
|
||||||
|
// used by the Go standard library.
|
||||||
|
package cpu
|
||||||
|
|
||||||
|
// debugOptions is set to true by the runtime if go was compiled with GOEXPERIMENT=debugcpu
|
||||||
|
// and GOOS is Linux or Darwin. This variable is linknamed in runtime/proc.go.
|
||||||
|
var debugOptions bool
|
||||||
|
|
||||||
|
var X86 x86
|
||||||
|
|
||||||
|
// "Loads data or instructions from memory to the second-level cache.
|
||||||
|
// To use the streamer, organize the data or instructions in blocks of 128 bytes,
|
||||||
|
// aligned on 128 bytes."
|
||||||
|
// From <Intel® 64 and IA-32 architectures optimization reference manual>,
|
||||||
|
// in section 3.7.3 "Hardware Prefetching for Second-Level Cache"
|
||||||
|
//
|
||||||
|
// In practice, I have found use 128bytes can gain better performance than 64bytes (one cache line).
|
||||||
|
const X86FalseSharingRange = 128
|
||||||
|
|
||||||
|
// The booleans in x86 contain the correspondingly named cpuid feature bit.
|
||||||
|
// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers
|
||||||
|
// in addition to the cpuid feature bit being set.
|
||||||
|
// The struct is padded to avoid false sharing.
|
||||||
|
type x86 struct {
|
||||||
|
_ [X86FalseSharingRange]byte
|
||||||
|
HasCMPXCHG16B bool
|
||||||
|
HasAES bool
|
||||||
|
HasADX bool
|
||||||
|
HasAVX bool
|
||||||
|
HasAVX2 bool
|
||||||
|
HasAVX512F bool
|
||||||
|
HasAVX512DQ bool
|
||||||
|
HasAVX512BW bool
|
||||||
|
HasAVX512VL bool
|
||||||
|
HasBMI1 bool
|
||||||
|
HasBMI2 bool
|
||||||
|
HasERMS bool
|
||||||
|
HasFMA bool
|
||||||
|
HasOSXSAVE bool
|
||||||
|
HasPCLMULQDQ bool
|
||||||
|
HasPOPCNT bool
|
||||||
|
HasSSE2 bool
|
||||||
|
HasSSE3 bool
|
||||||
|
HasSSSE3 bool
|
||||||
|
HasSSE41 bool
|
||||||
|
HasSSE42 bool
|
||||||
|
// The invariant TSC will run at a constant rate in all ACPI P-, C-, and T-states.
|
||||||
|
// This is the architectural behavior moving forward. On processors with
|
||||||
|
// invariant TSC support, the OS may use the TSC for wall clock timer services (instead of ACPI or HPET timers).
|
||||||
|
HasInvariantTSC bool
|
||||||
|
|
||||||
|
Cache Cache
|
||||||
|
|
||||||
|
// TSCFrequency only meaningful when HasInvariantTSC == true.
|
||||||
|
// Unit: Hz.
|
||||||
|
//
|
||||||
|
// Warn:
|
||||||
|
// 1. If it's 0, means failed to get it from frequency table provided by Intel manual.
|
||||||
|
TSCFrequency uint64
|
||||||
|
|
||||||
|
Name string
|
||||||
|
Signature string // DisplayFamily_DisplayModel.
|
||||||
|
Family uint32 // CPU family number.
|
||||||
|
Model uint32 // CPU model number.
|
||||||
|
SteppingID uint32
|
||||||
|
|
||||||
|
_ [X86FalseSharingRange]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// CPU Cache Size.
|
||||||
|
// -1 if undetected.
|
||||||
|
type Cache struct {
|
||||||
|
L1I int
|
||||||
|
L1D int
|
||||||
|
L2 int
|
||||||
|
L3 int
|
||||||
|
}
|
||||||
|
|
||||||
|
var PPC64 ppc64
|
||||||
|
|
||||||
|
// For ppc64x, it is safe to check only for ISA level starting on ISA v3.00,
|
||||||
|
// since there are no optional categories. There are some exceptions that also
|
||||||
|
// require kernel support to work (darn, scv), so there are feature bits for
|
||||||
|
// those as well. The minimum processor requirement is POWER8 (ISA 2.07), so we
|
||||||
|
// maintain some of the old feature checks for optional categories for
|
||||||
|
// safety.
|
||||||
|
// The struct is padded to avoid false sharing.
|
||||||
|
type ppc64 struct {
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
HasVMX bool // Vector unit (Altivec)
|
||||||
|
HasDFP bool // Decimal Floating Point unit
|
||||||
|
HasVSX bool // Vector-scalar unit
|
||||||
|
HasHTM bool // Hardware Transactional Memory
|
||||||
|
HasISEL bool // Integer select
|
||||||
|
HasVCRYPTO bool // Vector cryptography
|
||||||
|
HasHTMNOSC bool // HTM: kernel-aborted transaction in syscalls
|
||||||
|
HasDARN bool // Hardware random number generator (requires kernel enablement)
|
||||||
|
HasSCV bool // Syscall vectored (requires kernel enablement)
|
||||||
|
IsPOWER8 bool // ISA v2.07 (POWER8)
|
||||||
|
IsPOWER9 bool // ISA v3.00 (POWER9)
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
var ARM64 arm64
|
||||||
|
|
||||||
|
// The booleans in arm64 contain the correspondingly named cpu feature bit.
|
||||||
|
// The struct is padded to avoid false sharing.
|
||||||
|
type arm64 struct {
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
HasFP bool
|
||||||
|
HasASIMD bool
|
||||||
|
HasEVTSTRM bool
|
||||||
|
HasAES bool
|
||||||
|
HasPMULL bool
|
||||||
|
HasSHA1 bool
|
||||||
|
HasSHA2 bool
|
||||||
|
HasCRC32 bool
|
||||||
|
HasATOMICS bool
|
||||||
|
HasFPHP bool
|
||||||
|
HasASIMDHP bool
|
||||||
|
HasCPUID bool
|
||||||
|
HasASIMDRDM bool
|
||||||
|
HasJSCVT bool
|
||||||
|
HasFCMA bool
|
||||||
|
HasLRCPC bool
|
||||||
|
HasDCPOP bool
|
||||||
|
HasSHA3 bool
|
||||||
|
HasSM3 bool
|
||||||
|
HasSM4 bool
|
||||||
|
HasASIMDDP bool
|
||||||
|
HasSHA512 bool
|
||||||
|
HasSVE bool
|
||||||
|
HasASIMDFHM bool
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
var S390X s390x
|
||||||
|
|
||||||
|
type s390x struct {
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
HasZArch bool // z architecture mode is active [mandatory]
|
||||||
|
HasSTFLE bool // store facility list extended [mandatory]
|
||||||
|
HasLDisp bool // long (20-bit) displacements [mandatory]
|
||||||
|
HasEImm bool // 32-bit immediates [mandatory]
|
||||||
|
HasDFP bool // decimal floating point
|
||||||
|
HasETF3Enhanced bool // ETF-3 enhanced
|
||||||
|
HasMSA bool // message security assist (CPACF)
|
||||||
|
HasAES bool // KM-AES{128,192,256} functions
|
||||||
|
HasAESCBC bool // KMC-AES{128,192,256} functions
|
||||||
|
HasAESCTR bool // KMCTR-AES{128,192,256} functions
|
||||||
|
HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
|
||||||
|
HasGHASH bool // KIMD-GHASH function
|
||||||
|
HasSHA1 bool // K{I,L}MD-SHA-1 functions
|
||||||
|
HasSHA256 bool // K{I,L}MD-SHA-256 functions
|
||||||
|
HasSHA512 bool // K{I,L}MD-SHA-512 functions
|
||||||
|
HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records.
|
||||||
|
_ [CacheLineSize]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// initialize examines the processor and sets the relevant variables above.
|
||||||
|
// This is called by the runtime package early in program initialization,
|
||||||
|
// before normal init functions are run. env is set by runtime on Linux and Darwin
|
||||||
|
// if go was compiled with GOEXPERIMENT=debugcpu.
|
||||||
|
func init() {
|
||||||
|
doinit()
|
||||||
|
processOptions("")
|
||||||
|
}
|
||||||
|
|
||||||
|
// options contains the cpu debug options that can be used in GODEBUGCPU.
|
||||||
|
// Options are arch dependent and are added by the arch specific doinit functions.
|
||||||
|
// Features that are mandatory for the specific GOARCH should not be added to options
|
||||||
|
// (e.g. SSE2 on amd64).
|
||||||
|
var options []option
|
||||||
|
|
||||||
|
// Option names should be lower case. e.g. avx instead of AVX.
|
||||||
|
type option struct {
|
||||||
|
Name string
|
||||||
|
Feature *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// processOptions disables CPU feature values based on the parsed env string.
|
||||||
|
// The env string is expected to be of the form feature1=0,feature2=0...
|
||||||
|
// where feature names is one of the architecture specifc list stored in the
|
||||||
|
// cpu packages options variable. If env contains all=0 then all capabilities
|
||||||
|
// referenced through the options variable are disabled. Other feature
|
||||||
|
// names and values other than 0 are silently ignored.
|
||||||
|
func processOptions(env string) {
|
||||||
|
field:
|
||||||
|
for env != "" {
|
||||||
|
field := ""
|
||||||
|
i := indexByte(env, ',')
|
||||||
|
if i < 0 {
|
||||||
|
field, env = env, ""
|
||||||
|
} else {
|
||||||
|
field, env = env[:i], env[i+1:]
|
||||||
|
}
|
||||||
|
i = indexByte(field, '=')
|
||||||
|
if i < 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key, value := field[:i], field[i+1:]
|
||||||
|
|
||||||
|
// Only allow turning off CPU features by specifying '0'.
|
||||||
|
if value == "0" {
|
||||||
|
if key == "all" {
|
||||||
|
for _, v := range options {
|
||||||
|
*v.Feature = false
|
||||||
|
}
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
for _, v := range options {
|
||||||
|
if v.Name == key {
|
||||||
|
*v.Feature = false
|
||||||
|
continue field
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexByte returns the index of the first instance of c in s,
|
||||||
|
// or -1 if c is not present in s.
|
||||||
|
func indexByte(s string, c byte) int {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
if s[i] == c {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
7  vendor/github.com/templexxx/cpu/cpu_386.go  generated vendored Normal file
@@ -0,0 +1,7 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const GOARCH = "386"
7  vendor/github.com/templexxx/cpu/cpu_amd64.go  generated vendored Normal file
@@ -0,0 +1,7 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const GOARCH = "amd64"
7  vendor/github.com/templexxx/cpu/cpu_amd64p32.go  generated vendored Normal file
@@ -0,0 +1,7 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const GOARCH = "amd64p32"
7  vendor/github.com/templexxx/cpu/cpu_arm.go  generated vendored Normal file
@@ -0,0 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32
102  vendor/github.com/templexxx/cpu/cpu_arm64.go  generated vendored Normal file
@@ -0,0 +1,102 @@
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package cpu
|
||||||
|
|
||||||
|
const CacheLineSize = 64
|
||||||
|
|
||||||
|
// arm64 doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2.
|
||||||
|
// These are linknamed in runtime/os_linux_arm64.go and are initialized by
|
||||||
|
// archauxv().
|
||||||
|
var hwcap uint
|
||||||
|
var hwcap2 uint
|
||||||
|
|
||||||
|
// HWCAP/HWCAP2 bits. These are exposed by Linux.
|
||||||
|
const (
|
||||||
|
hwcap_FP = (1 << 0)
|
||||||
|
hwcap_ASIMD = (1 << 1)
|
||||||
|
hwcap_EVTSTRM = (1 << 2)
|
||||||
|
hwcap_AES = (1 << 3)
|
||||||
|
hwcap_PMULL = (1 << 4)
|
||||||
|
hwcap_SHA1 = (1 << 5)
|
||||||
|
hwcap_SHA2 = (1 << 6)
|
||||||
|
hwcap_CRC32 = (1 << 7)
|
||||||
|
hwcap_ATOMICS = (1 << 8)
|
||||||
|
hwcap_FPHP = (1 << 9)
|
||||||
|
hwcap_ASIMDHP = (1 << 10)
|
||||||
|
hwcap_CPUID = (1 << 11)
|
||||||
|
hwcap_ASIMDRDM = (1 << 12)
|
||||||
|
hwcap_JSCVT = (1 << 13)
|
||||||
|
hwcap_FCMA = (1 << 14)
|
||||||
|
hwcap_LRCPC = (1 << 15)
|
||||||
|
hwcap_DCPOP = (1 << 16)
|
||||||
|
hwcap_SHA3 = (1 << 17)
|
||||||
|
hwcap_SM3 = (1 << 18)
|
||||||
|
hwcap_SM4 = (1 << 19)
|
||||||
|
hwcap_ASIMDDP = (1 << 20)
|
||||||
|
hwcap_SHA512 = (1 << 21)
|
||||||
|
hwcap_SVE = (1 << 22)
|
||||||
|
hwcap_ASIMDFHM = (1 << 23)
|
||||||
|
)
|
||||||
|
|
||||||
|
func doinit() {
|
||||||
|
options = []option{
|
||||||
|
{"evtstrm", &ARM64.HasEVTSTRM},
|
||||||
|
{"aes", &ARM64.HasAES},
|
||||||
|
{"pmull", &ARM64.HasPMULL},
|
||||||
|
{"sha1", &ARM64.HasSHA1},
|
||||||
|
{"sha2", &ARM64.HasSHA2},
|
||||||
|
{"crc32", &ARM64.HasCRC32},
|
||||||
|
{"atomics", &ARM64.HasATOMICS},
|
||||||
|
{"fphp", &ARM64.HasFPHP},
|
||||||
|
{"asimdhp", &ARM64.HasASIMDHP},
|
||||||
|
{"cpuid", &ARM64.HasCPUID},
|
||||||
|
{"asimdrdm", &ARM64.HasASIMDRDM},
|
||||||
|
{"jscvt", &ARM64.HasJSCVT},
|
||||||
|
{"fcma", &ARM64.HasFCMA},
|
||||||
|
{"lrcpc", &ARM64.HasLRCPC},
|
||||||
|
{"dcpop", &ARM64.HasDCPOP},
|
||||||
|
{"sha3", &ARM64.HasSHA3},
|
||||||
|
{"sm3", &ARM64.HasSM3},
|
||||||
|
{"sm4", &ARM64.HasSM4},
|
||||||
|
{"asimddp", &ARM64.HasASIMDDP},
|
||||||
|
{"sha512", &ARM64.HasSHA512},
|
||||||
|
{"sve", &ARM64.HasSVE},
|
||||||
|
{"asimdfhm", &ARM64.HasASIMDFHM},
|
||||||
|
|
||||||
|
// These capabilities should always be enabled on arm64:
|
||||||
|
// {"fp", &ARM64.HasFP},
|
||||||
|
// {"asimd", &ARM64.HasASIMD},
|
||||||
|
}
|
||||||
|
|
||||||
|
// HWCAP feature bits
|
||||||
|
ARM64.HasFP = isSet(hwcap, hwcap_FP)
|
||||||
|
ARM64.HasASIMD = isSet(hwcap, hwcap_ASIMD)
|
||||||
|
ARM64.HasEVTSTRM = isSet(hwcap, hwcap_EVTSTRM)
|
||||||
|
ARM64.HasAES = isSet(hwcap, hwcap_AES)
|
||||||
|
ARM64.HasPMULL = isSet(hwcap, hwcap_PMULL)
|
||||||
|
ARM64.HasSHA1 = isSet(hwcap, hwcap_SHA1)
|
||||||
|
ARM64.HasSHA2 = isSet(hwcap, hwcap_SHA2)
|
||||||
|
ARM64.HasCRC32 = isSet(hwcap, hwcap_CRC32)
|
||||||
|
ARM64.HasATOMICS = isSet(hwcap, hwcap_ATOMICS)
|
||||||
|
ARM64.HasFPHP = isSet(hwcap, hwcap_FPHP)
|
||||||
|
ARM64.HasASIMDHP = isSet(hwcap, hwcap_ASIMDHP)
|
||||||
|
ARM64.HasCPUID = isSet(hwcap, hwcap_CPUID)
|
||||||
|
ARM64.HasASIMDRDM = isSet(hwcap, hwcap_ASIMDRDM)
|
||||||
|
ARM64.HasJSCVT = isSet(hwcap, hwcap_JSCVT)
|
||||||
|
ARM64.HasFCMA = isSet(hwcap, hwcap_FCMA)
|
||||||
|
ARM64.HasLRCPC = isSet(hwcap, hwcap_LRCPC)
|
||||||
|
ARM64.HasDCPOP = isSet(hwcap, hwcap_DCPOP)
|
||||||
|
ARM64.HasSHA3 = isSet(hwcap, hwcap_SHA3)
|
||||||
|
ARM64.HasSM3 = isSet(hwcap, hwcap_SM3)
|
||||||
|
ARM64.HasSM4 = isSet(hwcap, hwcap_SM4)
|
||||||
|
ARM64.HasASIMDDP = isSet(hwcap, hwcap_ASIMDDP)
|
||||||
|
ARM64.HasSHA512 = isSet(hwcap, hwcap_SHA512)
|
||||||
|
ARM64.HasSVE = isSet(hwcap, hwcap_SVE)
|
||||||
|
ARM64.HasASIMDFHM = isSet(hwcap, hwcap_ASIMDFHM)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSet(hwc uint, value uint) bool {
|
||||||
|
return hwc&value != 0
|
||||||
|
}
|
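For context only, and not part of this vendored file: a minimal sketch of how downstream code might consume the ARM64 flags that doinit fills in above, assuming the package is imported under its vendor path and that detection has already run at init time.

package main

import (
	"fmt"

	"github.com/templexxx/cpu"
)

func main() {
	// Pick a code path based on the HWCAP-derived flags populated by doinit.
	if cpu.ARM64.HasAES && cpu.ARM64.HasPMULL {
		fmt.Println("using the hardware AES/PMULL path")
	} else {
		fmt.Println("falling back to a generic implementation")
	}
}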
7 vendor/github.com/templexxx/cpu/cpu_mips.go generated vendored Normal file
@ -0,0 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32
7 vendor/github.com/templexxx/cpu/cpu_mips64.go generated vendored Normal file
@ -0,0 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32
7 vendor/github.com/templexxx/cpu/cpu_mips64le.go generated vendored Normal file
@ -0,0 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32
7 vendor/github.com/templexxx/cpu/cpu_mipsle.go generated vendored Normal file
@ -0,0 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32
16 vendor/github.com/templexxx/cpu/cpu_no_init.go generated vendored Normal file
@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !386
// +build !amd64
// +build !amd64p32
// +build !arm64
// +build !ppc64
// +build !ppc64le
// +build !s390x

package cpu

func doinit() {
}
68 vendor/github.com/templexxx/cpu/cpu_ppc64x.go generated vendored Normal file
@ -0,0 +1,68 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64 ppc64le

package cpu

const CacheLineSize = 128

// ppc64x doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2.
// These are linknamed in runtime/os_linux_ppc64x.go and are initialized by
// archauxv().
var hwcap uint
var hwcap2 uint

// HWCAP/HWCAP2 bits. These are exposed by the kernel.
const (
	// ISA Level
	_PPC_FEATURE2_ARCH_2_07 = 0x80000000
	_PPC_FEATURE2_ARCH_3_00 = 0x00800000

	// CPU features
	_PPC_FEATURE_HAS_ALTIVEC = 0x10000000
	_PPC_FEATURE_HAS_DFP = 0x00000400
	_PPC_FEATURE_HAS_VSX = 0x00000080
	_PPC_FEATURE2_HAS_HTM = 0x40000000
	_PPC_FEATURE2_HAS_ISEL = 0x08000000
	_PPC_FEATURE2_HAS_VEC_CRYPTO = 0x02000000
	_PPC_FEATURE2_HTM_NOSC = 0x01000000
	_PPC_FEATURE2_DARN = 0x00200000
	_PPC_FEATURE2_SCV = 0x00100000
)

func doinit() {
	options = []option{
		{"htm", &PPC64.HasHTM},
		{"htmnosc", &PPC64.HasHTMNOSC},
		{"darn", &PPC64.HasDARN},
		{"scv", &PPC64.HasSCV},

		// These capabilities should always be enabled on ppc64 and ppc64le:
		// {"vmx", &PPC64.HasVMX},
		// {"dfp", &PPC64.HasDFP},
		// {"vsx", &PPC64.HasVSX},
		// {"isel", &PPC64.HasISEL},
		// {"vcrypto", &PPC64.HasVCRYPTO},
	}

	// HWCAP feature bits
	PPC64.HasVMX = isSet(hwcap, _PPC_FEATURE_HAS_ALTIVEC)
	PPC64.HasDFP = isSet(hwcap, _PPC_FEATURE_HAS_DFP)
	PPC64.HasVSX = isSet(hwcap, _PPC_FEATURE_HAS_VSX)

	// HWCAP2 feature bits
	PPC64.IsPOWER8 = isSet(hwcap2, _PPC_FEATURE2_ARCH_2_07)
	PPC64.HasHTM = isSet(hwcap2, _PPC_FEATURE2_HAS_HTM)
	PPC64.HasISEL = isSet(hwcap2, _PPC_FEATURE2_HAS_ISEL)
	PPC64.HasVCRYPTO = isSet(hwcap2, _PPC_FEATURE2_HAS_VEC_CRYPTO)
	PPC64.HasHTMNOSC = isSet(hwcap2, _PPC_FEATURE2_HTM_NOSC)
	PPC64.IsPOWER9 = isSet(hwcap2, _PPC_FEATURE2_ARCH_3_00)
	PPC64.HasDARN = isSet(hwcap2, _PPC_FEATURE2_DARN)
	PPC64.HasSCV = isSet(hwcap2, _PPC_FEATURE2_SCV)
}

func isSet(hwc uint, value uint) bool {
	return hwc&value != 0
}
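As a worked illustration, with a hypothetical auxv value that is not from the commit: if the kernel reported hwcap2 = 0x80000000, only the _PPC_FEATURE2_ARCH_2_07 bit is set, so the isSet tests above would mark the CPU as POWER8 but not POWER9.

package main

import "fmt"

func main() {
	hwcap2 := uint(0x80000000)         // hypothetical value for illustration
	isPOWER8 := hwcap2&0x80000000 != 0 // _PPC_FEATURE2_ARCH_2_07
	isPOWER9 := hwcap2&0x00800000 != 0 // _PPC_FEATURE2_ARCH_3_00
	fmt.Println(isPOWER8, isPOWER9)    // true false
}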
10 vendor/github.com/templexxx/cpu/cpu_riscv64.go generated vendored Normal file
@ -0,0 +1,10 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 32

func doinit() {
}
153 vendor/github.com/templexxx/cpu/cpu_s390x.go generated vendored Normal file
@ -0,0 +1,153 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 256

// bitIsSet reports whether the bit at index is set. The bit index
// is in big endian order, so bit index 0 is the leftmost bit.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

// function is the function code for the named function.
type function uint8

const (
	// KM{,A,C,CTR} function codes
	aes128 function = 18 // AES-128
	aes192 = 19 // AES-192
	aes256 = 20 // AES-256

	// K{I,L}MD function codes
	sha1 = 1 // SHA-1
	sha256 = 2 // SHA-256
	sha512 = 3 // SHA-512

	// KLMD function codes
	ghash = 65 // GHASH
)

// queryResult contains the result of a Query function
// call. Bits are numbered in big endian order so the
// leftmost bit (the MSB) is at index 0.
type queryResult struct {
	bits [2]uint64
}

// Has reports whether the given functions are present.
func (q *queryResult) Has(fns ...function) bool {
	if len(fns) == 0 {
		panic("no function codes provided")
	}
	for _, f := range fns {
		if !bitIsSet(q.bits[:], uint(f)) {
			return false
		}
	}
	return true
}

// facility is a bit index for the named facility.
type facility uint8

const (
	// mandatory facilities
	zarch facility = 1 // z architecture mode is active
	stflef = 7 // store-facility-list-extended
	ldisp = 18 // long-displacement
	eimm = 21 // extended-immediate

	// miscellaneous facilities
	dfp = 42 // decimal-floating-point
	etf3eh = 30 // extended-translation 3 enhancement

	// cryptography facilities
	msa = 17 // message-security-assist
	msa3 = 76 // message-security-assist extension 3
	msa4 = 77 // message-security-assist extension 4
	msa5 = 57 // message-security-assist extension 5
	msa8 = 146 // message-security-assist extension 8

	// Note: vx and highgprs are excluded because they require
	// kernel support and so must be fetched from HWCAP.
)

// facilityList contains the result of an STFLE call.
// Bits are numbered in big endian order so the
// leftmost bit (the MSB) is at index 0.
type facilityList struct {
	bits [4]uint64
}

// Has reports whether the given facilities are present.
func (s *facilityList) Has(fs ...facility) bool {
	if len(fs) == 0 {
		panic("no facility bits provided")
	}
	for _, f := range fs {
		if !bitIsSet(s.bits[:], uint(f)) {
			return false
		}
	}
	return true
}

// The following feature detection functions are defined in cpu_s390x.s.
// They are likely to be expensive to call so the results should be cached.
func stfle() facilityList
func kmQuery() queryResult
func kmcQuery() queryResult
func kmctrQuery() queryResult
func kmaQuery() queryResult
func kimdQuery() queryResult
func klmdQuery() queryResult

func doinit() {
	options = []option{
		{"zarch", &S390X.HasZArch},
		{"stfle", &S390X.HasSTFLE},
		{"ldisp", &S390X.HasLDisp},
		{"msa", &S390X.HasMSA},
		{"eimm", &S390X.HasEImm},
		{"dfp", &S390X.HasDFP},
		{"etf3eh", &S390X.HasETF3Enhanced},
		{"vx", &S390X.HasVX},
	}

	aes := []function{aes128, aes192, aes256}
	facilities := stfle()

	S390X.HasZArch = facilities.Has(zarch)
	S390X.HasSTFLE = facilities.Has(stflef)
	S390X.HasLDisp = facilities.Has(ldisp)
	S390X.HasEImm = facilities.Has(eimm)
	S390X.HasDFP = facilities.Has(dfp)
	S390X.HasETF3Enhanced = facilities.Has(etf3eh)
	S390X.HasMSA = facilities.Has(msa)

	if S390X.HasMSA {
		// cipher message
		km, kmc := kmQuery(), kmcQuery()
		S390X.HasAES = km.Has(aes...)
		S390X.HasAESCBC = kmc.Has(aes...)
		if facilities.Has(msa4) {
			kmctr := kmctrQuery()
			S390X.HasAESCTR = kmctr.Has(aes...)
		}
		if facilities.Has(msa8) {
			kma := kmaQuery()
			S390X.HasAESGCM = kma.Has(aes...)
		}

		// compute message digest
		kimd := kimdQuery() // intermediate (no padding)
		klmd := klmdQuery() // last (padding)
		S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
		S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
		S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
		S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
	}
}
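To make the big-endian bit numbering used by queryResult and facilityList concrete, here is a small self-contained sketch (illustrative values only, not part of the commit) showing that index 0 refers to the leftmost bit of the first doubleword.

package main

import "fmt"

// Same helper as in the file above: bit 0 is the most significant bit of bits[0].
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	// Hypothetical facility list with only facility 7 (store-facility-list-extended) set.
	bits := []uint64{1 << 56, 0, 0, 0}
	fmt.Println(bitIsSet(bits, 7))  // true
	fmt.Println(bitIsSet(bits, 42)) // false
}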
55 vendor/github.com/templexxx/cpu/cpu_s390x.s generated vendored Normal file
@ -0,0 +1,55 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// func stfle() facilityList
TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
	MOVD $ret+0(FP), R1
	MOVD $3, R0 // last doubleword index to store
	XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
	WORD $0xb2b01000 // store facility list extended (STFLE)
	RET

// func kmQuery() queryResult
TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KM-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xB92E0024 // cipher message (KM)
	RET

// func kmcQuery() queryResult
TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KMC-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xB92F0024 // cipher message with chaining (KMC)
	RET

// func kmctrQuery() queryResult
TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KMCTR-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xB92D4024 // cipher message with counter (KMCTR)
	RET

// func kmaQuery() queryResult
TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KMA-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xb9296024 // cipher message with authentication (KMA)
	RET

// func kimdQuery() queryResult
TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KIMD-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xB93E0024 // compute intermediate message digest (KIMD)
	RET

// func klmdQuery() queryResult
TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
	MOVD $0, R0 // set function code to 0 (KLMD-Query)
	MOVD $ret+0(FP), R1 // address of 16-byte return value
	WORD $0xB93F0024 // compute last message digest (KLMD)
	RET
7 vendor/github.com/templexxx/cpu/cpu_wasm.go generated vendored Normal file
@ -0,0 +1,7 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cpu

const CacheLineSize = 64
433 vendor/github.com/templexxx/cpu/cpu_x86.go generated vendored Normal file
@ -0,0 +1,433 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build 386 amd64 amd64p32

package cpu

import (
	"fmt"
	"strings"
)

const CacheLineSize = 64

// cpuid is implemented in cpu_x86.s.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)

// xgetbv with ecx = 0 is implemented in cpu_x86.s.
func xgetbv() (eax, edx uint32)

const (
	// edx bits
	cpuid_SSE2 = 1 << 26

	// ecx bits
	cpuid_SSE3 = 1 << 0
	cpuid_PCLMULQDQ = 1 << 1
	cpuid_SSSE3 = 1 << 9
	cpuid_FMA = 1 << 12
	cpuid_SSE41 = 1 << 19
	cpuid_SSE42 = 1 << 20
	cpuid_POPCNT = 1 << 23
	cpuid_AES = 1 << 25
	cpuid_OSXSAVE = 1 << 27
	cpuid_AVX = 1 << 28
	cpuid_CMPXCHG16B = 1 << 13

	// ebx bits
	cpuid_BMI1 = 1 << 3
	cpuid_AVX2 = 1 << 5
	cpuid_BMI2 = 1 << 8
	cpuid_ERMS = 1 << 9
	cpuid_ADX = 1 << 19
	cpuid_AVX512F = 1 << 16
	cpuid_AVX512DQ = 1 << 17
	cpuid_AVX512BW = 1 << 30
	cpuid_AVX512VL = 1 << 31

	// edx bits
	cpuid_Invariant_TSC = 1 << 8
)

func doinit() {
	options = []option{
		{"adx", &X86.HasADX},
		{"aes", &X86.HasAES},
		{"avx", &X86.HasAVX},
		{"avx2", &X86.HasAVX2},
		{"bmi1", &X86.HasBMI1},
		{"bmi2", &X86.HasBMI2},
		{"erms", &X86.HasERMS},
		{"fma", &X86.HasFMA},
		{"pclmulqdq", &X86.HasPCLMULQDQ},
		{"popcnt", &X86.HasPOPCNT},
		{"sse3", &X86.HasSSE3},
		{"sse41", &X86.HasSSE41},
		{"sse42", &X86.HasSSE42},
		{"ssse3", &X86.HasSSSE3},
		{"avx512f", &X86.HasAVX512F},
		{"avx512dq", &X86.HasAVX512DQ},
		{"avx512bw", &X86.HasAVX512BW},
		{"avx512vl", &X86.HasAVX512VL},
		{"invariant_tsc", &X86.HasInvariantTSC},

		// sse2 set as last element so it can easily be removed again. See code below.
		{"sse2", &X86.HasSSE2},
	}

	// Remove sse2 from options on amd64(p32) because SSE2 is a mandatory feature for these GOARCHs.
	if GOARCH == "amd64" || GOARCH == "amd64p32" {
		options = options[:len(options)-1]
	}

	maxID, _, _, _ := cpuid(0, 0)

	if maxID < 1 {
		return
	}

	_, _, ecx1, edx1 := cpuid(1, 0)
	X86.HasSSE2 = isSet(edx1, cpuid_SSE2)

	X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
	X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
	X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
	X86.HasFMA = isSet(ecx1, cpuid_FMA)
	X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
	X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
	X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
	X86.HasAES = isSet(ecx1, cpuid_AES)
	X86.HasCMPXCHG16B = isSet(ecx1, cpuid_CMPXCHG16B)
	X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)

	osSupportsAVX := false
	osSupportsAVX512 := false
	// For XGETBV, OSXSAVE bit is required and sufficient.
	if X86.HasOSXSAVE {
		eax, _ := xgetbv()
		// Check if XMM and YMM registers have OS support.
		osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
		// Check is ZMM registers have OS support.
		osSupportsAVX512 = isSet(eax>>5, 7) && isSet(eax>>1, 3)
	}

	X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX

	if maxID < 7 {
		return
	}

	_, ebx7, _, _ := cpuid(7, 0)
	X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
	X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
	X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512
	X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) && osSupportsAVX512
	X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) && osSupportsAVX512
	X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) && osSupportsAVX512
	X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
	X86.HasERMS = isSet(ebx7, cpuid_ERMS)
	X86.HasADX = isSet(ebx7, cpuid_ADX)

	X86.Cache = getCacheSize()

	X86.HasInvariantTSC = hasInvariantTSC()

	X86.Family, X86.Model, X86.SteppingID = getVersionInfo()

	X86.Signature = makeSignature(X86.Family, X86.Model)

	X86.Name = getName()

	X86.TSCFrequency = getNativeTSCFrequency(X86.Name, X86.Signature, X86.SteppingID)
}

func isSet(hwc uint32, value uint32) bool {
	return hwc&value != 0
}

func hasInvariantTSC() bool {
	if maxExtendedFunction() < 0x80000007 {
		return false
	}
	_, _, _, edx := cpuid(0x80000007, 0)
	return isSet(edx, cpuid_Invariant_TSC)
}

func getName() string {
	if maxExtendedFunction() >= 0x80000004 {
		v := make([]uint32, 0, 48)
		for i := uint32(0); i < 3; i++ {
			a, b, c, d := cpuid(0x80000002+i, 0)
			v = append(v, a, b, c, d)
		}
		return strings.Trim(string(valAsString(v...)), " ")
	}
	return "unknown"
}

// getNativeTSCFrequency gets TSC frequency from CPUID,
// only supports Intel (Skylake or later microarchitecture) & key information is from Intel manual & kernel codes
// (especially this commit: https://github.com/torvalds/linux/commit/604dc9170f2435d27da5039a3efd757dceadc684).
func getNativeTSCFrequency(name, sign string, steppingID uint32) uint64 {

	if vendorID() != Intel {
		return 0
	}

	if maxFunctionID() < 0x15 {
		return 0
	}

	// ApolloLake, GeminiLake, CannonLake (and presumably all new chipsets
	// from this point) report the crystal frequency directly via CPUID.0x15.
	// That's definitive data that we can rely upon.
	eax, ebx, ecx, _ := cpuid(0x15, 0)

	// If ebx is 0, the TSC/”core crystal clock” ratio is not enumerated.
	// We won't provide TSC frequency detection in this situation.
	if eax == 0 || ebx == 0 {
		return 0
	}

	// Skylake, Kabylake and all variants of those two chipsets report a
	// crystal frequency of zero.
	if ecx == 0 { // Crystal clock frequency is not enumerated.
		ecx = getCrystalClockFrequency(sign, steppingID)
	}

	// TSC frequency = “core crystal clock frequency” * EBX/EAX.
	return uint64(ecx) * (uint64(ebx) / uint64(eax))
}

// Copied from: CPUID Signature values of DisplayFamily and DisplayModel,
// in Intel® 64 and IA-32 Architectures Software Developer’s Manual
// Volume 4: Model-Specific Registers
// & https://github.com/torvalds/linux/blob/master/arch/x86/include/asm/intel-family.h
const (
	IntelFam6SkylakeL = "06_4EH"
	IntelFam6Skylake = "06_5EH"
	IntelFam6XeonScalable = "06_55H"
	IntelFam6KabylakeL = "06_8EH"
	IntelFam6Kabylake = "06_9EH"
)

// getCrystalClockFrequency gets crystal clock frequency
// for Intel processors in which CPUID.15H.EBX[31:0] ÷ CPUID.0x15.EAX[31:0] is enumerated
// but CPUID.15H.ECX is not enumerated using this function to get nominal core crystal clock frequency.
//
// Actually these crystal clock frequencies provided by Intel hardcoded tables are not so accurate in some cases,
// e.g. SkyLake server CPU may have issue (All SKX subject the crystal to an EMI reduction circuit that
// reduces its actual frequency by (approximately) -0.25%):
// see https://lore.kernel.org/lkml/ff6dcea166e8ff8f2f6a03c17beab2cb436aa779.1513920414.git.len.brown@intel.com/
// for more details.
// With this report, I set a coefficient (0.9975) for IntelFam6SkyLakeX.
//
// Unlike the kernel way (mentioned in https://github.com/torvalds/linux/commit/604dc9170f2435d27da5039a3efd757dceadc684),
// I prefer the Intel hardcoded tables, (in <Intel® 64 and IA-32 Architectures Software Developer’s Manual, Volume 3>
// 18.7.3 Determining the Processor Base Frequency, Table 18-85. Nominal Core Crystal Clock Frequency)
// because after some testing (comparing with wall clock, see https://github.com/templexxx/tsc/tsc_test.go for more details),
// I found hardcoded tables are more accurate.
func getCrystalClockFrequency(sign string, steppingID uint32) uint32 {

	if maxFunctionID() < 0x16 {
		return 0
	}

	switch sign {
	case IntelFam6SkylakeL:
		return 24 * 1000 * 1000
	case IntelFam6Skylake:
		return 24 * 1000 * 1000
	case IntelFam6XeonScalable:
		// SKL-SP.
		// see: https://community.intel.com/t5/Software-Tuning-Performance/How-to-detect-microarchitecture-on-Xeon-Scalable/m-p/1205162#M7633.
		if steppingID == 0x2 || steppingID == 0x3 || steppingID == 0x4 {
			return 25 * 1000 * 1000 * 0.9975
		}
		return 25 * 1000 * 1000 // TODO check other Xeon Scalable has no slow down issue.
	case IntelFam6KabylakeL:
		return 24 * 1000 * 1000
	case IntelFam6Kabylake:
		return 24 * 1000 * 1000
	}

	return 0
}

func getVersionInfo() (uint32, uint32, uint32) {
	if maxFunctionID() < 0x1 {
		return 0, 0, 0
	}
	eax, _, _, _ := cpuid(1, 0)
	family := (eax >> 8) & 0xf
	displayFamily := family
	if family == 0xf {
		displayFamily = ((eax >> 20) & 0xff) + family
	}
	model := (eax >> 4) & 0xf
	displayModel := model
	if family == 0x6 || family == 0xf {
		displayModel = ((eax >> 12) & 0xf0) + model
	}
	return displayFamily, displayModel, eax & 0x7
}

// signature format: XX_XXH
func makeSignature(family, model uint32) string {
	signature := strings.ToUpper(fmt.Sprintf("0%x_0%xH", family, model))
	ss := strings.Split(signature, "_")
	for i, s := range ss {
		// Maybe insert too more `0`, drop it.
		if len(s) > 2 {
			s = s[1:]
			ss[i] = s
		}
	}
	return strings.Join(ss, "_")
}

// getCacheSize is from
// https://github.com/klauspost/cpuid/blob/5a626f7029c910cc8329dae5405ee4f65034bce5/cpuid.go#L723
func getCacheSize() Cache {
	c := Cache{
		L1I: -1,
		L1D: -1,
		L2: -1,
		L3: -1,
	}

	vendor := vendorID()
	switch vendor {
	case Intel:
		if maxFunctionID() < 4 {
			return c
		}
		for i := uint32(0); ; i++ {
			eax, ebx, ecx, _ := cpuid(4, i)
			cacheType := eax & 15
			if cacheType == 0 {
				break
			}
			cacheLevel := (eax >> 5) & 7
			coherency := int(ebx&0xfff) + 1
			partitions := int((ebx>>12)&0x3ff) + 1
			associativity := int((ebx>>22)&0x3ff) + 1
			sets := int(ecx) + 1
			size := associativity * partitions * coherency * sets
			switch cacheLevel {
			case 1:
				if cacheType == 1 {
					// 1 = Data Cache
					c.L1D = size
				} else if cacheType == 2 {
					// 2 = Instruction Cache
					c.L1I = size
				} else {
					if c.L1D < 0 {
						c.L1I = size
					}
					if c.L1I < 0 {
						c.L1I = size
					}
				}
			case 2:
				c.L2 = size
			case 3:
				c.L3 = size
			}
		}
	case AMD, Hygon:
		// Untested.
		if maxExtendedFunction() < 0x80000005 {
			return c
		}
		_, _, ecx, edx := cpuid(0x80000005, 0)
		c.L1D = int(((ecx >> 24) & 0xFF) * 1024)
		c.L1I = int(((edx >> 24) & 0xFF) * 1024)

		if maxExtendedFunction() < 0x80000006 {
			return c
		}
		_, _, ecx, _ = cpuid(0x80000006, 0)
		c.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
	}

	return c
}

func maxFunctionID() uint32 {
	a, _, _, _ := cpuid(0, 0)
	return a
}

func maxExtendedFunction() uint32 {
	eax, _, _, _ := cpuid(0x80000000, 0)
	return eax
}

const (
	Other = iota
	Intel
	AMD
	VIA
	Transmeta
	NSC
	KVM // Kernel-based Virtual Machine
	MSVM // Microsoft Hyper-V or Windows Virtual PC
	VMware
	XenHVM
	Bhyve
	Hygon
)

// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
var vendorMapping = map[string]int{
	"AMDisbetter!": AMD,
	"AuthenticAMD": AMD,
	"CentaurHauls": VIA,
	"GenuineIntel": Intel,
	"TransmetaCPU": Transmeta,
	"GenuineTMx86": Transmeta,
	"Geode by NSC": NSC,
	"VIA VIA VIA ": VIA,
	"KVMKVMKVMKVM": KVM,
	"Microsoft Hv": MSVM,
	"VMwareVMware": VMware,
	"XenVMMXenVMM": XenHVM,
	"bhyve bhyve ": Bhyve,
	"HygonGenuine": Hygon,
}

func vendorID() int {
	_, b, c, d := cpuid(0, 0)
	v := valAsString(b, d, c)
	vend, ok := vendorMapping[string(v)]
	if !ok {
		return Other
	}
	return vend
}

func valAsString(values ...uint32) []byte {
	r := make([]byte, 4*len(values))
	for i, v := range values {
		dst := r[i*4:]
		dst[0] = byte(v & 0xff)
		dst[1] = byte((v >> 8) & 0xff)
		dst[2] = byte((v >> 16) & 0xff)
		dst[3] = byte((v >> 24) & 0xff)
		switch {
		case dst[0] == 0:
			return r[:i*4]
		case dst[1] == 0:
			return r[:i*4+1]
		case dst[2] == 0:
			return r[:i*4+2]
		case dst[3] == 0:
			return r[:i*4+3]
		}
	}
	return r
}
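To make the TSC formula above concrete, a short sketch with hypothetical CPUID.0x15 values (a 24 MHz crystal in ECX and a TSC/crystal ratio of EBX/EAX = 216/2), mirroring the integer arithmetic in getNativeTSCFrequency; the values are illustrative and not taken from the commit.

package main

import "fmt"

func main() {
	var eax, ebx, ecx uint32 = 2, 216, 24 * 1000 * 1000 // hypothetical CPUID.0x15 output
	tsc := uint64(ecx) * (uint64(ebx) / uint64(eax))    // same formula as in the file above
	fmt.Println(tsc)                                    // 2592000000, i.e. a 2.592 GHz TSC
}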
32 vendor/github.com/templexxx/cpu/cpu_x86.s generated vendored Normal file
@ -0,0 +1,32 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build 386 amd64 amd64p32

#include "textflag.h"

// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuid(SB), NOSPLIT, $0-24
	MOVL eaxArg+0(FP), AX
	MOVL ecxArg+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func xgetbv() (eax, edx uint32)
TEXT ·xgetbv(SB),NOSPLIT,$0-8
#ifdef GOOS_nacl
	// nacl does not support XGETBV.
	MOVL $0, eax+0(FP)
	MOVL $0, edx+4(FP)
#else
	MOVL $0, CX
	XGETBV
	MOVL AX, eax+0(FP)
	MOVL DX, edx+4(FP)
#endif
	RET
1 vendor/github.com/templexxx/xorsimd/.gitattributes generated vendored Normal file
@ -0,0 +1 @@
*.s linguist-language=go:x
Some files were not shown because too many files have changed in this diff.