DERO Stargate improved

This commit is contained in:
Captain 2021-08-08 14:53:55 +00:00
parent 42c1129137
commit 16fad834a1
97 changed files with 4911 additions and 396 deletions

View File

@ -49,7 +49,6 @@ import hashicorp_lru "github.com/hashicorp/golang-lru"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
import "github.com/deroproject/derohe/errormsg" import "github.com/deroproject/derohe/errormsg"
import "github.com/prometheus/client_golang/prometheus"
//import "github.com/deroproject/derosuite/address" //import "github.com/deroproject/derosuite/address"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
@ -58,6 +57,7 @@ import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/blockchain/mempool" import "github.com/deroproject/derohe/blockchain/mempool"
import "github.com/deroproject/derohe/blockchain/regpool" import "github.com/deroproject/derohe/blockchain/regpool"
import "github.com/deroproject/derohe/rpc" import "github.com/deroproject/derohe/rpc"
import "github.com/deroproject/derohe/metrics"
/* /*
import "github.com/deroproject/derosuite/emission" import "github.com/deroproject/derosuite/emission"
@ -67,7 +67,7 @@ import "github.com/deroproject/derosuite/crypto/ringct"
import "github.com/deroproject/derosuite/checkpoints" import "github.com/deroproject/derosuite/checkpoints"
import "github.com/deroproject/derosuite/metrics"
import "github.com/deroproject/derosuite/blockchain/inputmaturity" import "github.com/deroproject/derosuite/blockchain/inputmaturity"
*/ */
@ -276,16 +276,7 @@ func Blockchain_Start(params map[string]interface{}) (*Blockchain, error) {
*/ */
/*
// register the metrics with the metrics registry
metrics.Registry.MustRegister(blockchain_tx_counter)
metrics.Registry.MustRegister(mempool_tx_counter)
metrics.Registry.MustRegister(mempool_tx_count)
metrics.Registry.MustRegister(block_size)
metrics.Registry.MustRegister(transaction_size)
metrics.Registry.MustRegister(block_tx_count)
metrics.Registry.MustRegister(block_processing_time)
*/
atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem atomic.AddUint32(&globals.Subsystem_Active, 1) // increment subsystem
return &chain, nil return &chain, nil
@ -334,20 +325,24 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
} }
}() }()
blockchain_tx_counter.Add(float64(len(cbl.Bl.Tx_hashes))) metrics.Blockchain_tx_counter.Add(len(cbl.Bl.Tx_hashes))
block_tx_count.Observe(float64(len(cbl.Bl.Tx_hashes))) metrics.Block_tx.Update(float64(len(cbl.Bl.Tx_hashes)))
block_processing_time.Observe(float64(time.Now().Sub(processing_start).Round(time.Millisecond) / 1000000)) metrics.Block_processing_time.UpdateDuration(processing_start)
// tracks counters for tx_size
// tracks counters for tx internals, do we need to serialize everytime, just for stats
{ {
complete_block_size := 0 complete_block_size := 0
for i := 0; i < len(cbl.Txs); i++ { for i := 0; i < len(cbl.Txs); i++ {
tx_size := len(cbl.Txs[i].Serialize()) tx_size := len(cbl.Txs[i].Serialize())
complete_block_size += tx_size complete_block_size += tx_size
transaction_size.Observe(float64(tx_size)) metrics.Transaction_size.Update(float64(tx_size))
metrics.Transaction_outputs.Update(float64(len(cbl.Txs[i].Payloads)))
if len(cbl.Txs[i].Payloads) >= 1 {
metrics.Transaction_ring_size.Update(float64(cbl.Txs[i].Payloads[0].Statement.RingSize))
}
} }
block_size.Observe(float64(complete_block_size)) metrics.Block_size.Update(float64(complete_block_size))
} }
}() }()
@ -586,49 +581,73 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
} }
} }
// another check, whether the tx is build with the latest snapshot of balance tree
// another check, whether the tx is build with the correct height reference
{ {
for i := 0; i < len(cbl.Txs); i++ { for i := 0; i < len(cbl.Txs); i++ {
if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX { if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX {
if cbl.Txs[i].Height+1 != cbl.Bl.Height { if cbl.Txs[i].Height % config.BLOCK_BATCH_SIZE != 0 {
block_logger.Warnf("invalid tx mined %s tx height not a multiple of %d", cbl.Txs[i].GetHash(), config.BLOCK_BATCH_SIZE)
return errormsg.ErrTXDoubleSpend, false
}
if cbl.Txs[i].Height >= cbl.Bl.Height { // chain height should be more than tx
block_logger.Warnf("invalid tx mined %s", cbl.Txs[i].GetHash()) block_logger.Warnf("invalid tx mined %s", cbl.Txs[i].GetHash())
return errormsg.ErrTXDoubleSpend, false return errormsg.ErrTXDoubleSpend, false
} }
if !Verify_Transaction_NonCoinbase_Height(cbl.Txs[i],cbl.Bl.Height ){
block_logger.Warnf("invalid tx mined height issue %s %d %d", cbl.Txs[i].GetHash(),cbl.Txs[i].Height,cbl.Bl.Height )
return errormsg.ErrTXDoubleSpend, false
}
} }
} }
} }
// another check, whether the tx contains any duplicate nonces within the block // another check, whether the tx contains any duplicate nonces within the block
// block wide duplicate input detector // block wide duplicate input detector
{ {
nonce_map := map[crypto.Hash]bool{} nonce_map := map[crypto.Hash]bool{}
for i := 0; i < len(cbl.Txs); i++ { for i := 0; i < len(cbl.Txs); i++ {
if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX { if cbl.Txs[i].TransactionType == transaction.NORMAL || cbl.Txs[i].TransactionType == transaction.BURN_TX || cbl.Txs[i].TransactionType == transaction.SC_TX {
if _, ok := nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce()]; ok { if _, ok := nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce1()]; ok {
block_logger.Warnf("Double Spend attack within block %s", cbl.Txs[i].GetHash()) block_logger.Warnf("Double Spend attack within block nonce1 %s", cbl.Txs[i].GetHash())
return errormsg.ErrTXDoubleSpend, false return errormsg.ErrTXDoubleSpend, false
} }
nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce()] = true nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce1()] = true
if _, ok := nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce2()]; ok {
block_logger.Warnf("Double Spend attack within block nonce2 %s", cbl.Txs[i].GetHash())
return errormsg.ErrTXDoubleSpend, false
}
nonce_map[cbl.Txs[i].Payloads[0].Proof.Nonce2()] = true
} }
} }
} }
// we also need to reject if the the immediately reachable history, has spent the nonce // we also need to reject if the the immediately reachable history, has spent the nonce
// both the checks works on the basis of nonces and not on the basis of txhash // both the checks works on the basis of nonces and not on the basis of txhash
/*
{ {
reachable_nonces := chain.BuildReachabilityNonces(bl) reachable_nonces,err := chain.BuildNonces(bl.Tips)
if err != nil{
return err,false
}
for i := 0; i < len(cbl.Txs); i++ { // loop through all the TXs for i := 0; i < len(cbl.Txs); i++ { // loop through all the TXs
if cbl.Txs[i].TransactionType == transaction.NORMAL { switch cbl.Txs[i].TransactionType {
if _, ok := reachable_nonces[cbl.Txs[i].Proof.Nonce()]; ok { case transaction.PREMINE,transaction.REGISTRATION, transaction.COINBASE: // these donot have nonces
case transaction.NORMAL, transaction.BURN_TX, transaction.SC_TX: // these have nonces
if reachable_nonces[cbl.Txs[i].Payloads[0].Proof.Nonce1()] || reachable_nonces[cbl.Txs[i].Payloads[0].Proof.Nonce1()] {
block_logger.Warnf("Double spend attack tx %s is already mined, rejecting ", cbl.Txs[i].GetHash()) block_logger.Warnf("Double spend attack tx %s is already mined, rejecting ", cbl.Txs[i].GetHash())
return errormsg.ErrTXDoubleSpend, false return errormsg.ErrTXDoubleSpend, false
} }
default:
} }
} }
}*/ }
// we need to anyways verify the TXS since proofs are not covered by checksum // we need to anyways verify the TXS since proofs are not covered by checksum
{ {
@ -729,11 +748,53 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
base_topo_index = 0 base_topo_index = 0
} }
nonce_map := map[crypto.Hash]bool{}
// any blocks which have not changed their topo will be skipped using graviton trick // any blocks which have not changed their topo will be skipped using graviton trick
skip := true skip := true
for i := int64(0); i < int64(len(full_order)); i++ { for i := int64(0); i < int64(len(full_order)); i++ {
if i == 0 {
bl_prev, err1 := chain.Load_BL_FROM_ID(full_order[0])
if err1 != nil {
block_logger.Debugf("Cannot load block %s for client protocol,probably DB/disk corruption", full_order[0])
return errormsg.ErrInvalidBlock, false
}
nonce_map,err = chain.BuildNonces(bl_prev.Tips)
if err != nil {
panic(err)
}
}
if i >= 1 { // load nonces for previous blocks
bl_prev_hash := full_order[i-1]
bl_prev, err1 := chain.Load_BL_FROM_ID(full_order[i-1])
if err1 != nil {
block_logger.Debugf("Cannot load block %s for client protocol,probably DB/disk corruption", bl_prev_hash)
return errormsg.ErrInvalidBlock, false
}
for i := 0; i < len(bl_prev.Tx_hashes); i++ { // load all tx one by one, no skipping
tx_bytes, err := chain.Store.Block_tx_store.ReadTX(bl_prev.Tx_hashes[i])
if err != nil {
panic(fmt.Errorf("Cannot load tx for %s err %s", bl_prev.Tx_hashes[i], err))
}
var tx transaction.Transaction
if err = tx.DeserializeHeader(tx_bytes); err != nil {
panic(err)
}
if !tx.IsRegistration(){
// tx has been loaded, now lets get the nonce
nonce_map[tx.Payloads[0].Proof.Nonce1()] = true // add element to map for next check
nonce_map[tx.Payloads[0].Proof.Nonce2()] = true // add element to map for next check
}
}
}
// check whether the new block is at the same position at the last position // check whether the new block is at the same position at the last position
current_topo_block := i + base_topo_index current_topo_block := i + base_topo_index
previous_topo_block := current_topo_block - 1 previous_topo_block := current_topo_block - 1
@ -761,7 +822,7 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
bl_current_hash := full_order[i] bl_current_hash := full_order[i]
bl_current, err1 := chain.Load_BL_FROM_ID(bl_current_hash) bl_current, err1 := chain.Load_BL_FROM_ID(bl_current_hash)
if err1 != nil { if err1 != nil {
block_logger.Debugf("Cannot load block %s for client protocol,probably DB corruption", bl_current_hash) block_logger.Debugf("Cannot load block %s for client protocol,probably DB/disk corruption", bl_current_hash)
return errormsg.ErrInvalidBlock, false return errormsg.ErrInvalidBlock, false
} }
@ -770,9 +831,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
height_current := chain.Calculate_Height_At_Tips(bl_current.Tips) height_current := chain.Calculate_Height_At_Tips(bl_current.Tips)
hard_fork_version_current := chain.Get_Current_Version_at_Height(height_current) hard_fork_version_current := chain.Get_Current_Version_at_Height(height_current)
// this version does not require client protocol as of now
// run full client protocol and find valid transactions
// rlog.Debugf("running client protocol for %s minertx %s topo %d", bl_current_hash, bl_current.Miner_TX.GetHash(), highest_topo)
// generate miner TX rewards as per client protocol // generate miner TX rewards as per client protocol
if hard_fork_version_current == 1 { if hard_fork_version_current == 1 {
@ -780,7 +838,6 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
} }
var balance_tree, sc_meta *graviton.Tree var balance_tree, sc_meta *graviton.Tree
_ = sc_meta
var ss *graviton.Snapshot var ss *graviton.Snapshot
if bl_current.Height == 0 { // if it's genesis block if bl_current.Height == 0 { // if it's genesis block
@ -818,14 +875,14 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
fees_collected := uint64(0) fees_collected := uint64(0)
// side blocks only represent chain strenth , else they are are ignored // side blocks not only represent chain strenth , they also represent extra data
// this means they donot get any reward , 0 reward // this means they donot get any reward , 0 reward
// their transactions are ignored // their transactions are ignored
//chain.Store.Topo_store.Write(i+base_topo_index, full_order[i],0, int64(bl_current.Height)) // write entry so as sideblock could work //chain.Store.Topo_store.Write(i+base_topo_index, full_order[i],0, int64(bl_current.Height)) // write entry so as sideblock could work
var data_trees []*graviton.Tree var data_trees []*graviton.Tree
if !chain.isblock_SideBlock_internal(full_order[i], current_topo_block, int64(bl_current.Height)) { {
sc_change_cache := map[crypto.Hash]*graviton.Tree{} // cache entire changes for entire block sc_change_cache := map[crypto.Hash]*graviton.Tree{} // cache entire changes for entire block
@ -837,12 +894,23 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
if err = tx.DeserializeHeader(tx_bytes); err != nil { if err = tx.DeserializeHeader(tx_bytes); err != nil {
panic(err) panic(err)
} }
if tx.TransactionType != transaction.REGISTRATION {
if nonce_map[tx.Payloads[0].Proof.Nonce1()] || nonce_map[tx.Payloads[0].Proof.Nonce1()] {
continue // skip this tx, since it has been processed earlier
}else{
nonce_map[tx.Payloads[0].Proof.Nonce1()] = true
nonce_map[tx.Payloads[0].Proof.Nonce2()] = true
}
for t := range tx.Payloads { for t := range tx.Payloads {
if !tx.Payloads[t].SCID.IsZero() { if !tx.Payloads[t].SCID.IsZero() {
tree, _ := ss.GetTree(string(tx.Payloads[t].SCID[:])) tree, _ := ss.GetTree(string(tx.Payloads[t].SCID[:]))
sc_change_cache[tx.Payloads[t].SCID] = tree sc_change_cache[tx.Payloads[t].SCID] = tree
} }
} }
}
// we have loaded a tx successfully, now lets execute it // we have loaded a tx successfully, now lets execute it
tx_fees := chain.process_transaction(sc_change_cache, tx, balance_tree) tx_fees := chain.process_transaction(sc_change_cache, tx, balance_tree)
@ -886,11 +954,17 @@ func (chain *Blockchain) Add_Complete_Block(cbl *block.Complete_Block) (err erro
} }
chain.process_miner_transaction(bl_current.Miner_TX, bl_current.Height == 0, balance_tree, fees_collected, bl_current.Height)
} else {
rlog.Debugf("this block is a side block block height %d blid %s ", chain.Load_Block_Height(full_order[i]), full_order[i])
} // if block is side block, pass all reward to devs, else pass it to original miner
side_block := chain.isblock_SideBlock_internal(full_order[i], current_topo_block, int64(bl_current.Height))
if side_block{
rlog.Debugf("this block is a side block block height %d blid %s ", chain.Load_Block_Height(full_order[i]), full_order[i])
}
chain.process_miner_transaction(bl_current.Miner_TX, bl_current.Height == 0, balance_tree, fees_collected, bl_current.Height,side_block)
}
// we are here, means everything is okay, lets commit the update balance tree // we are here, means everything is okay, lets commit the update balance tree
@ -1135,53 +1209,6 @@ func (chain *Blockchain) BlockCheckSum(cbl *block.Complete_Block) []byte {
return h.Sum(nil) return h.Sum(nil)
} }
// various counters/gauges which track a numer of metrics
// such as number of txs, number of inputs, number of outputs
// mempool total addition, current mempool size
// block processing time etcs
// Try it once more, this time with a help string.
var blockchain_tx_counter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "blockchain_tx_counter",
Help: "Number of tx mined",
})
var mempool_tx_counter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "mempool_tx_counter",
Help: "Total number of tx added in mempool",
})
var mempool_tx_count = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "mempool_tx_count",
Help: "Number of tx in mempool at this point",
})
// track block size about 2 MB
var block_size = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "block_size_byte",
Help: "Block size in byte (complete)",
Buckets: prometheus.LinearBuckets(0, 102400, 10), // start block size 0, each 1 KB step, 2048 such buckets .
})
// track transaction size upto 500 KB
var transaction_size = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "tx_size_byte",
Help: "TX size in byte",
Buckets: prometheus.LinearBuckets(0, 10240, 16), // start 0 byte, each 1024 byte, 512 such buckets.
})
// number of tx per block
var block_tx_count = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "block_tx_count",
Help: "Number of TX in the block",
Buckets: prometheus.LinearBuckets(0, 20, 25), // start 0 byte, each 1024 byte, 1024 such buckets.
})
//
var block_processing_time = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "block_processing_time_ms",
Help: "Block processing time milliseconds",
Buckets: prometheus.LinearBuckets(0, 100, 20), // start 0 ms, each 100 ms, 200 such buckets.
})
// this is the only entrypoint for new txs in the chain // this is the only entrypoint for new txs in the chain
// add a transaction to MEMPOOL, // add a transaction to MEMPOOL,
@ -1222,7 +1249,7 @@ func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) error {
} }
// track counter for the amount of mempool tx // track counter for the amount of mempool tx
defer mempool_tx_count.Set(float64(len(chain.Mempool.Mempool_List_TX()))) defer metrics.Mempool_tx_count.Set(uint64(len(chain.Mempool.Mempool_List_TX())))
txhash := tx.GetHash() txhash := tx.GetHash()
@ -1233,9 +1260,9 @@ func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) error {
} }
chain_height := uint64(chain.Get_Height()) chain_height := uint64(chain.Get_Height())
if chain_height > tx.Height { if !Verify_Transaction_NonCoinbase_Height(tx,chain_height) {
rlog.Tracef(2, "TX %s rejected since chain has already progressed", txhash) rlog.Tracef(2, "TX %s rejected since tx is too recent or too old", txhash)
return fmt.Errorf("TX %s rejected since chain has already progressed", txhash) return fmt.Errorf("TX %s rejected since tx is too recent or too old", txhash)
} }
// quick check without calculating everything whether tx is in pool, if yes we do nothing // quick check without calculating everything whether tx is in pool, if yes we do nothing
@ -1285,11 +1312,11 @@ func (chain *Blockchain) Add_TX_To_Pool(tx *transaction.Transaction) error {
if chain.Mempool.Mempool_Add_TX(tx, 0) { // new tx come with 0 marker if chain.Mempool.Mempool_Add_TX(tx, 0) { // new tx come with 0 marker
rlog.Tracef(2, "Successfully added tx %s to pool", txhash) rlog.Tracef(2, "Successfully added tx %s to pool", txhash)
mempool_tx_counter.Inc() metrics.Mempool_tx_counter.Inc()
return nil return nil
} else { } else {
rlog.Tracef(2, "TX %s rejected by pool", txhash) rlog.Tracef(2, "TX %s rejected by pool by mempool", txhash)
return fmt.Errorf("TX %s rejected by pool", txhash) return fmt.Errorf("TX %s rejected by pool by mempool", txhash)
} }
} }
@ -1394,14 +1421,18 @@ func (chain *Blockchain) isblock_SideBlock_internal(blid crypto.Hash, block_topo
return false return false
} }
// this will return the tx combination as valid/invalid
// this will return the block ids in which tx has been mined
// this is not used as core consensus but reports only to user that his tx though in the blockchain is invalid // this is not used as core consensus but reports only to user that his tx though in the blockchain is invalid
// a tx is valid, if it exist in a block which is not a side block // a tx is valid, if it exist in a block which is not a side block
func (chain *Blockchain) IS_TX_Valid(txhash crypto.Hash) (valid_blid crypto.Hash, invalid_blid []crypto.Hash, valid bool) { // this is not part of consensus but only for support of explorer
func (chain *Blockchain) IS_TX_Mined(txhash crypto.Hash) (mined_blocks []crypto.Hash, state_block crypto.Hash, state_block_topo int64) {
var tx_bytes []byte var tx_bytes []byte
var err error var err error
state_found := false
if tx_bytes, err = chain.Store.Block_tx_store.ReadTX(txhash); err != nil { if tx_bytes, err = chain.Store.Block_tx_store.ReadTX(txhash); err != nil {
return return
} }
@ -1411,11 +1442,24 @@ func (chain *Blockchain) IS_TX_Valid(txhash crypto.Hash) (valid_blid crypto.Hash
return return
} }
blids, _ := chain.Store.Topo_store.binarySearchHeight(int64(tx.Height + 1)) for i := int64(0); i < 2 *config.BLOCK_BATCH_SIZE;i++ {
blids, topos := chain.Store.Topo_store.binarySearchHeight(int64(tx.Height) + i )
var exist_list []crypto.Hash for j, blid := range blids {
if !state_found{ // keep finding state
merkle_hash, err := chain.Load_Merkle_Hash(topos[j])
if err != nil {
panic(err)
}
if len(tx.Payloads) >= 1 && merkle_hash == tx.Payloads[0].Statement.Roothash {
state_block = blid
state_block_topo = topos[j]
state_found = true
}
}
for _, blid := range blids {
bl, err := chain.Load_BL_FROM_ID(blid) bl, err := chain.Load_BL_FROM_ID(blid)
if err != nil { if err != nil {
return return
@ -1423,123 +1467,19 @@ func (chain *Blockchain) IS_TX_Valid(txhash crypto.Hash) (valid_blid crypto.Hash
for _, bltxhash := range bl.Tx_hashes { for _, bltxhash := range bl.Tx_hashes {
if bltxhash == txhash { if bltxhash == txhash {
exist_list = append(exist_list, blid) mined_blocks = append(mined_blocks, blid)
break break
} }
} }
} }
for _, blid := range exist_list {
if chain.Isblock_SideBlock(blid) {
invalid_blid = append(invalid_blid, blid)
} else {
valid_blid = blid
valid = true
}
} }
return return
} }
/*
// runs the client protocol which includes the following operations
// if any TX are being duplicate or double-spend ignore them
// mark all the valid transactions as valid
// mark all invalid transactions as invalid
// calculate total fees based on valid TX
// we need NOT check ranges/ring signatures here, as they have been done already by earlier steps
func (chain *Blockchain) client_protocol(dbtx storage.DBTX, bl *block.Block, blid crypto.Hash, height int64, topoheight int64) (total_fees uint64) {
// run client protocol for all TXs
for i := range bl.Tx_hashes {
tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
if err != nil {
panic(fmt.Errorf("Cannot load tx for %x err %s ", bl.Tx_hashes[i], err))
}
// mark TX found in this block also for explorer
chain.store_TX_in_Block(dbtx, blid, bl.Tx_hashes[i])
// check all key images as double spend, if double-spend detected mark invalid, else consider valid
if chain.Verify_Transaction_NonCoinbase_DoubleSpend_Check(dbtx, tx) {
chain.consume_keyimages(dbtx, tx, height) // mark key images as consumed
total_fees += tx.RctSignature.Get_TX_Fee()
chain.Store_TX_Height(dbtx, bl.Tx_hashes[i], topoheight) // link the tx with the topo height
//mark tx found in this block is valid
chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], true)
} else { // TX is double spend or reincluded by 2 blocks simultaneously
rlog.Tracef(1,"Double spend TX is being ignored %s %s", blid, bl.Tx_hashes[i])
chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], false)
}
}
return total_fees
}
// this undoes everything that is done by client protocol
// NOTE: this will have any effect, only if client protocol has been run on this block earlier
func (chain *Blockchain) client_protocol_reverse(dbtx storage.DBTX, bl *block.Block, blid crypto.Hash) {
// run client protocol for all TXs
for i := range bl.Tx_hashes {
tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
if err != nil {
panic(fmt.Errorf("Cannot load tx for %x err %s ", bl.Tx_hashes[i], err))
}
// only the valid TX must be revoked
if chain.IS_TX_Valid(dbtx, blid, bl.Tx_hashes[i]) {
chain.revoke_keyimages(dbtx, tx) // mark key images as not used
chain.Store_TX_Height(dbtx, bl.Tx_hashes[i], -1) // unlink the tx with the topo height
//mark tx found in this block is invalid
chain.mark_TX(dbtx, blid, bl.Tx_hashes[i], false)
} else { // TX is double spend or reincluded by 2 blocks simultaneously
// invalid tx is related
}
}
return
}
// scavanger for transactions from rusty/stale tips to reinsert them into pool
func (chain *Blockchain) transaction_scavenger(dbtx storage.DBTX, blid crypto.Hash) {
defer func() {
if r := recover(); r != nil {
logger.Warnf("Recovered while transaction scavenging, Stack trace below ")
logger.Warnf("Stack trace \n%s", debug.Stack())
}
}()
logger.Debugf("scavenging transactions from blid %s", blid)
reachable_blocks := chain.BuildReachableBlocks(dbtx, []crypto.Hash{blid})
reachable_blocks[blid] = true // add self
for k, _ := range reachable_blocks {
if chain.Is_Block_Orphan(k) {
bl, err := chain.Load_BL_FROM_ID(dbtx, k)
if err == nil {
for i := range bl.Tx_hashes {
tx, err := chain.Load_TX_FROM_ID(dbtx, bl.Tx_hashes[i])
if err != nil {
rlog.Warnf("err while scavenging blid %s txid %s err %s", k, bl.Tx_hashes[i], err)
} else {
// add tx to pool, it will do whatever is necessarry
chain.Add_TX_To_Pool(tx)
}
}
} else {
rlog.Warnf("err while scavenging blid %s err %s", k, err)
}
}
}
}
*/
// Finds whether a block is orphan // Finds whether a block is orphan
// since we donot store any fields, we need to calculate/find the block as orphan // since we donot store any fields, we need to calculate/find the block as orphan
// using an algorithm // using an algorithm
@ -1548,14 +1488,6 @@ func (chain *Blockchain) Is_Block_Orphan(hash crypto.Hash) bool {
return !chain.Is_Block_Topological_order(hash) return !chain.Is_Block_Topological_order(hash)
} }
// this is used to find if a tx is orphan, YES orphan TX
// these can occur during when they lie only in a side block
// so the TX becomes orphan ( chances are less may be less that .000001 % but they are there)
// if a tx is not valid in any of the blocks, it has been mined it is orphan
func (chain *Blockchain) Is_TX_Orphan(hash crypto.Hash) (result bool) {
_, _, result = chain.IS_TX_Valid(hash)
return !result
}
// verifies whether we are lagging // verifies whether we are lagging
// return true if we need resync // return true if we need resync
@ -1628,6 +1560,74 @@ func (chain *Blockchain) Rewind_Chain(rewind_count int) (result bool) {
return true return true
} }
// used to check for nonce duplication, so as txs can avoid reexecution
// note that the tips might not be part of chain
func (chain *Blockchain) BuildNonces(tips []crypto.Hash) (map[crypto.Hash]bool, error) {
nonce_list := map[crypto.Hash]bool{} // contains a list of all reachable blocks
var blocks_map = map[crypto.Hash]bool{}
var blocks_pending []crypto.Hash
for i := range tips {
blocks_pending = append(blocks_pending,tips[i])
}
height := chain.Calculate_Height_At_Tips(tips) // we are 1 higher than previous highest tip
for ; len(blocks_pending) > 0; {
blid := blocks_pending[len(blocks_pending)-1]
blocks_pending = blocks_pending[:len(blocks_pending)-1]
if _, processed := blocks_map[blid]; processed {
continue
}
blocks_map[blid] = true
bl, err := chain.Load_BL_FROM_ID(blid)
if err != nil {
return nil,err
}
if bl.Height == 0 { // if we reach genesis, we skip
continue
}
if int64(bl.Height + (config.BLOCK_BATCH_SIZE*4)) < height { // if we are too deep, skip past
continue
}
blocks_pending = append(blocks_pending,bl.Tips...)
for i := 0; i < len(bl.Tx_hashes); i++ { // load all tx one by one, no skipping
tx_bytes, err := chain.Store.Block_tx_store.ReadTX(bl.Tx_hashes[i])
if err != nil {
panic(fmt.Errorf("Cannot load tx for %s err %s", bl.Tx_hashes[i], err))
}
var tx transaction.Transaction
if err = tx.DeserializeHeader(tx_bytes); err != nil {
panic(err)
}
if !tx.IsRegistration(){
// tx has been loaded, now lets get the nonce
nonce_list[tx.Payloads[0].Proof.Nonce1()] = true // add element to map for next check
nonce_list[tx.Payloads[0].Proof.Nonce2()] = true // add element to map for next check
}
}
}
return nonce_list,nil
}
// build reachability graph upto 2*config deeps to answer reachability queries // build reachability graph upto 2*config deeps to answer reachability queries
func (chain *Blockchain) buildReachability_internal(reachmap map[crypto.Hash]bool, blid crypto.Hash, level int) { func (chain *Blockchain) buildReachability_internal(reachmap map[crypto.Hash]bool, blid crypto.Hash, level int) {
bl, err := chain.Load_BL_FROM_ID(blid) bl, err := chain.Load_BL_FROM_ID(blid)
@ -1731,7 +1731,8 @@ func (chain *Blockchain) BuildReachabilityNonces(bl *block.Block) map[crypto.Has
} }
// tx has been loaded, now lets get the nonce // tx has been loaded, now lets get the nonce
nonce_reach_map[tx.Payloads[0].Proof.Nonce()] = true // add element to map for next check nonce_reach_map[tx.Payloads[0].Proof.Nonce1()] = true // add element to map for next check
nonce_reach_map[tx.Payloads[0].Proof.Nonce2()] = true // add element to map for next check
} }
} }
return nonce_reach_map return nonce_reach_map

View File

@ -31,6 +31,7 @@ import log "github.com/sirupsen/logrus"
import "github.com/deroproject/derohe/transaction" import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/globals" import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
// this is only used for sorting and nothing else // this is only used for sorting and nothing else
@ -202,7 +203,7 @@ func (pool *Mempool) HouseKeeping(height uint64) {
pool.txs.Range(func(k, value interface{}) bool { pool.txs.Range(func(k, value interface{}) bool {
txhash := k.(crypto.Hash) txhash := k.(crypto.Hash)
v := value.(*mempool_object) v := value.(*mempool_object)
if height >= (v.Tx.Height + 1) { // if we have moved 1 heights, chances are reorg are almost nil if height >= (v.Tx.Height + 3* config.BLOCK_BATCH_SIZE + 1) { // if we have moved 1 heights, chances are reorg are almost nil
delete_list = append(delete_list, txhash) delete_list = append(delete_list, txhash)
} }
return true return true
@ -283,7 +284,12 @@ func (pool *Mempool) Mempool_Add_TX(tx *transaction.Transaction, Height uint64)
var object mempool_object var object mempool_object
tx_hash := crypto.Hash(tx.GetHash()) tx_hash := crypto.Hash(tx.GetHash())
if pool.Mempool_Keyimage_Spent(tx.Payloads[0].Proof.Nonce()) { if pool.Mempool_Keyimage_Spent(tx.Payloads[0].Proof.Nonce1()) {
rlog.Debugf("Rejecting TX, since nonce already seen %x", tx_hash)
return false
}
if pool.Mempool_Keyimage_Spent(tx.Payloads[0].Proof.Nonce2()) {
rlog.Debugf("Rejecting TX, since nonce already seen %x", tx_hash) rlog.Debugf("Rejecting TX, since nonce already seen %x", tx_hash)
return false return false
} }
@ -300,7 +306,8 @@ func (pool *Mempool) Mempool_Add_TX(tx *transaction.Transaction, Height uint64)
// pool.key_images.Store(tx.Vin[i].(transaction.Txin_to_key).K_image,true) // add element to map for next check // pool.key_images.Store(tx.Vin[i].(transaction.Txin_to_key).K_image,true) // add element to map for next check
// } // }
pool.key_images.Store(tx.Payloads[0].Proof.Nonce(), true) pool.key_images.Store(tx.Payloads[0].Proof.Nonce1(), true)
pool.key_images.Store(tx.Payloads[0].Proof.Nonce2(), true)
// we are here means we can add it to pool // we are here means we can add it to pool
object.Tx = tx object.Tx = tx
@ -367,7 +374,8 @@ func (pool *Mempool) Mempool_Delete_TX(txid crypto.Hash) (tx *transaction.Transa
// for i := 0; i < len(object.Tx.Vin); i++ { // for i := 0; i < len(object.Tx.Vin); i++ {
// pool.key_images.Delete(object.Tx.Vin[i].(transaction.Txin_to_key).K_image) // pool.key_images.Delete(object.Tx.Vin[i].(transaction.Txin_to_key).K_image)
// } // }
pool.key_images.Delete(tx.Payloads[0].Proof.Nonce()) pool.key_images.Delete(tx.Payloads[0].Proof.Nonce1())
pool.key_images.Delete(tx.Payloads[0].Proof.Nonce2())
//pool.sort_list() // sort and update pool list //pool.sort_list() // sort and update pool list
pool.modified = true // pool has been modified pool.modified = true // pool has been modified

View File

@ -74,7 +74,6 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
if err != nil { if err != nil {
panic(err) panic(err)
} }
} }
topoheight := chain.Load_TOPO_HEIGHT() topoheight := chain.Load_TOPO_HEIGHT()
@ -116,6 +115,15 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
sizeoftxs := uint64(0) // size of all non coinbase tx included within this block sizeoftxs := uint64(0) // size of all non coinbase tx included within this block
//fees_collected := uint64(0) //fees_collected := uint64(0)
nonce_map,err := chain.BuildNonces(bl.Tips)
if err != nil {
panic(err)
}
local_nonce_map := map[crypto.Hash]bool{}
_ = sizeoftxs _ = sizeoftxs
// add upto 100 registration tx each registration tx is 99 bytes, so 100 tx will take 9900 bytes or 10KB // add upto 100 registration tx each registration tx is 99 bytes, so 100 tx will take 9900 bytes or 10KB
@ -129,9 +137,8 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
_, err = balance_tree.Get(tx.MinerAddress[:]) _, err = balance_tree.Get(tx.MinerAddress[:])
if err != nil { if err != nil {
if xerrors.Is(err, graviton.ErrNotFound) { // address needs registration if xerrors.Is(err, graviton.ErrNotFound) { // address needs registration
cbl.Txs = append(cbl.Txs, tx) cbl.Txs = append(cbl.Txs, tx)
tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i]) tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i])
} else { } else {
panic(err) panic(err)
} }
@ -159,7 +166,7 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
} }
tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash) tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash)
if tx != nil && int64(tx.Height)+1 == height { if tx != nil && Verify_Transaction_NonCoinbase_Height(tx,uint64(height)) {
/* /*
// skip and delete any mempool tx // skip and delete any mempool tx
@ -180,10 +187,22 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
continue continue
} }
*/ */
rlog.Tracef(1, "Adding Top Sorted tx %s to Complete_Block current size %.2f KB max possible %.2f KB\n", tx_hash_list_sorted[i].Hash, float32(sizeoftxs+tx_hash_list_sorted[i].Size)/1024.0, float32(config.STARGATE_HE_MAX_BLOCK_SIZE)/1024.0)
sizeoftxs += tx_hash_list_sorted[i].Size
cbl.Txs = append(cbl.Txs, tx) if nonce_map[tx.Payloads[0].Proof.Nonce1()] || nonce_map[tx.Payloads[0].Proof.Nonce1()] ||
tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i].Hash) local_nonce_map[tx.Payloads[0].Proof.Nonce1()] || local_nonce_map[tx.Payloads[0].Proof.Nonce1()] {
continue // skip this tx
}
cbl.Txs = append(cbl.Txs, tx)
tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i].Hash)
local_nonce_map[tx.Payloads[0].Proof.Nonce1()] = true
local_nonce_map[tx.Payloads[0].Proof.Nonce2()] = true
rlog.Tracef(1, "Adding Top Sorted tx %s to Complete_Block current size %.2f KB max possible %.2f KB\n", tx_hash_list_sorted[i].Hash, float32(sizeoftxs+tx_hash_list_sorted[i].Size)/1024.0, float32(config.STARGATE_HE_MAX_BLOCK_SIZE)/1024.0)
sizeoftxs += tx_hash_list_sorted[i].Size
} }
} }
// any left over transactions, should be randomly selected // any left over transactions, should be randomly selected
@ -204,12 +223,21 @@ func (chain *Blockchain) Create_new_miner_block(miner_address rpc.Address, tx *t
} }
tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash) tx := chain.Mempool.Mempool_Get_TX(tx_hash_list_sorted[i].Hash)
if tx != nil && int64(tx.Height)+1 == height { if tx != nil && Verify_Transaction_NonCoinbase_Height(tx, uint64(height)){
rlog.Tracef(1, "Adding Random tx %s to Complete_Block current size %.2f KB max possible %.2f KB\n", tx_hash_list_sorted[i].Hash, float32(sizeoftxs+tx_hash_list_sorted[i].Size)/1024.0, float32(config.STARGATE_HE_MAX_BLOCK_SIZE)/1024.0)
sizeoftxs += tx_hash_list_sorted[i].Size if nonce_map[tx.Payloads[0].Proof.Nonce1()] || nonce_map[tx.Payloads[0].Proof.Nonce1()] ||
cbl.Txs = append(cbl.Txs, tx) local_nonce_map[tx.Payloads[0].Proof.Nonce1()] || local_nonce_map[tx.Payloads[0].Proof.Nonce1()] {
tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i].Hash) continue // skip this tx
}
cbl.Txs = append(cbl.Txs, tx)
tx_hash_list_included = append(tx_hash_list_included, tx_hash_list_sorted[i].Hash)
local_nonce_map[tx.Payloads[0].Proof.Nonce1()] = true
local_nonce_map[tx.Payloads[0].Proof.Nonce2()] = true
rlog.Tracef(1, "Adding Random tx %s to Complete_Block current size %.2f KB max possible %.2f KB\n", tx_hash_list_sorted[i].Hash, float32(sizeoftxs+tx_hash_list_sorted[i].Size)/1024.0, float32(config.STARGATE_HE_MAX_BLOCK_SIZE)/1024.0)
sizeoftxs += tx_hash_list_sorted[i].Size
} }
} }

View File

@ -54,7 +54,7 @@ func CalcBlockReward(height uint64) uint64 {
} }
// process the miner tx, giving fees, miner rewatd etc // process the miner tx, giving fees, miner rewatd etc
func (chain *Blockchain) process_miner_transaction(tx transaction.Transaction, genesis bool, balance_tree *graviton.Tree, fees uint64, height uint64) { func (chain *Blockchain) process_miner_transaction(tx transaction.Transaction, genesis bool, balance_tree *graviton.Tree, fees uint64, height uint64,sideblock bool) {
var acckey crypto.Point var acckey crypto.Point
if err := acckey.DecodeCompressed(tx.MinerAddress[:]); err != nil { if err := acckey.DecodeCompressed(tx.MinerAddress[:]); err != nil {
panic(err) panic(err)
@ -69,29 +69,26 @@ func (chain *Blockchain) process_miner_transaction(tx transaction.Transaction, g
// general coin base transaction // general coin base transaction
base_reward := CalcBlockReward(uint64(height)) base_reward := CalcBlockReward(uint64(height))
full_reward := base_reward + fees full_reward := base_reward+ fees
dev_reward := (full_reward * config.DEVSHARE) / 10000 // take % from reward
miner_reward := full_reward - dev_reward // it's value, do subtraction
{ // giver miner reward if sideblock {// give devs reward
balance_serialized, err := balance_tree.Get(tx.MinerAddress[:])
if err != nil {
panic(err)
}
balance := new(crypto.ElGamal).Deserialize(balance_serialized)
balance = balance.Plus(new(big.Int).SetUint64(miner_reward)) // add miners reward to miners balance homomorphically
balance_tree.Put(tx.MinerAddress[:], balance.Serialize()) // reserialize and store
}
{ // give devs reward
balance_serialized, err := balance_tree.Get(chain.Dev_Address_Bytes[:]) balance_serialized, err := balance_tree.Get(chain.Dev_Address_Bytes[:])
if err != nil { if err != nil {
panic(err) panic(err)
} }
balance := new(crypto.ElGamal).Deserialize(balance_serialized) balance := new(crypto.ElGamal).Deserialize(balance_serialized)
balance = balance.Plus(new(big.Int).SetUint64(dev_reward)) // add devs reward to devs balance homomorphically balance = balance.Plus(new(big.Int).SetUint64(full_reward)) // add devs reward to devs balance homomorphically
balance_tree.Put(chain.Dev_Address_Bytes[:], balance.Serialize()) // reserialize and store balance_tree.Put(chain.Dev_Address_Bytes[:], balance.Serialize()) // reserialize and store
}else{ // giver miner reward
balance_serialized, err := balance_tree.Get(tx.MinerAddress[:])
if err != nil {
panic(err)
}
balance := new(crypto.ElGamal).Deserialize(balance_serialized)
balance = balance.Plus(new(big.Int).SetUint64(full_reward)) // add miners reward to miners balance homomorphically
balance_tree.Put(tx.MinerAddress[:], balance.Serialize()) // reserialize and store
} }
return return
@ -106,6 +103,9 @@ func (chain *Blockchain) process_transaction(changed map[crypto.Hash]*graviton.T
switch tx.TransactionType { switch tx.TransactionType {
case transaction.REGISTRATION: case transaction.REGISTRATION:
if _, err := balance_tree.Get(tx.MinerAddress[:]); err == nil {
return 0
}
if _, err := balance_tree.Get(tx.MinerAddress[:]); err != nil { if _, err := balance_tree.Get(tx.MinerAddress[:]); err != nil {
if !xerrors.Is(err, graviton.ErrNotFound) { // any other err except not found panic if !xerrors.Is(err, graviton.ErrNotFound) { // any other err except not found panic
panic(err) panic(err)

View File

@ -113,6 +113,34 @@ func (chain *Blockchain) Verify_Transaction_Coinbase(cbl *block.Complete_Block,
return nil // success comes last return nil // success comes last
} }
// only verifies height whether all height checks are good
func Verify_Transaction_NonCoinbase_Height(tx *transaction.Transaction, chain_height uint64) bool {
return Verify_Transaction_Height(tx.Height, chain_height)
}
func Verify_Transaction_Height(tx_height, chain_height uint64) bool{
if tx_height % config.BLOCK_BATCH_SIZE != 0 {
return false
}
if tx_height >= chain_height {
return false
}
if chain_height-tx_height <= 5 { // we should be atleast 5 steps from top
return false
}
comp := (chain_height / config.BLOCK_BATCH_SIZE) - (tx_height / config.BLOCK_BATCH_SIZE)
if comp ==0 || comp ==1 {
return true
}else{
return false
}
}
// all non miner tx must be non-coinbase tx // all non miner tx must be non-coinbase tx
// each check is placed in a separate block of code, to avoid ambigous code or faulty checks // each check is placed in a separate block of code, to avoid ambigous code or faulty checks
// all check are placed and not within individual functions ( so as we cannot skip a check ) // all check are placed and not within individual functions ( so as we cannot skip a check )
@ -329,7 +357,7 @@ func (chain *Blockchain) Verify_Transaction_NonCoinbase(hf_version int64, tx *tr
// at this point has been completely expanded, verify the tx statement // at this point has been completely expanded, verify the tx statement
for t := range tx.Payloads { for t := range tx.Payloads {
if !tx.Payloads[t].Proof.Verify(&tx.Payloads[t].Statement, tx.GetHash(), tx.Payloads[t].BurnValue) { if !tx.Payloads[t].Proof.Verify(&tx.Payloads[t].Statement, tx.GetHash(), tx.Height, tx.Payloads[t].BurnValue) {
fmt.Printf("Statement %+v\n", tx.Payloads[t].Statement) fmt.Printf("Statement %+v\n", tx.Payloads[t].Statement)
fmt.Printf("Proof %+v\n", tx.Payloads[t].Proof) fmt.Printf("Proof %+v\n", tx.Payloads[t].Proof)

View File

@ -27,6 +27,7 @@ import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/errormsg" import "github.com/deroproject/derohe/errormsg"
import "github.com/deroproject/derohe/rpc" import "github.com/deroproject/derohe/rpc"
import "github.com/deroproject/derohe/blockchain"
//import "github.com/deroproject/derohe/dvm" //import "github.com/deroproject/derohe/dvm"
//import "github.com/deroproject/derohe/cryptography/crypto" //import "github.com/deroproject/derohe/cryptography/crypto"
@ -52,6 +53,37 @@ func (DERO_RPC_APIS) GetEncryptedBalance(ctx context.Context, p rpc.GetEncrypted
topoheight = p.TopoHeight topoheight = p.TopoHeight
} }
switch p.TopoHeight {
case rpc.RECENT_BATCH_BLOCK: // give data of specific point from where tx could be built
chain_height := chain.Get_Height()
var topo_list []int64
for ;topoheight > 0; {
toporecord, err := chain.Store.Topo_store.Read(topoheight)
if err != nil {
panic(err)
}
if blockchain.Verify_Transaction_Height(uint64(toporecord.Height), uint64(chain_height)){
if chain_height - toporecord.Height <= (3*config.BLOCK_BATCH_SIZE)/2 { // give us enough leeway
topo_list=append(topo_list, topoheight)
}
}
if chain_height-toporecord.Height >= 2 * config.BLOCK_BATCH_SIZE {
break;
}
topoheight--
}
topoheight = topo_list[len(topo_list)-1]
case rpc.RECENT_BLOCK : fallthrough
default:
}
toporecord, err := chain.Store.Topo_store.Read(topoheight) toporecord, err := chain.Store.Topo_store.Read(topoheight)
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -94,20 +94,17 @@ func (DERO_RPC_APIS) GetTransaction(ctx context.Context, p rpc.GetTransaction_Pa
} }
// also fill where the tx is found and in which block is valid and in which it is invalid // also fill where the tx is found and in which block is valid and in which it is invalid
blid_list,state_block,state_block_topo := chain.IS_TX_Mined(hash)
valid_blid, invalid_blid, valid := chain.IS_TX_Valid(hash)
//logger.Infof(" tx %s related info valid_blid %s invalid_blid %+v valid %v ",hash, valid_blid, invalid_blid, valid) //logger.Infof(" tx %s related info valid_blid %s invalid_blid %+v valid %v ",hash, valid_blid, invalid_blid, valid)
if valid { if state_block_topo > 0 {
related.ValidBlock = valid_blid.String() related.StateBlock = state_block.String()
// topo height at which it was mined related.Block_Height = state_block_topo
topo_height := int64(chain.Load_Block_Topological_order(valid_blid))
related.Block_Height = topo_height
if tx.TransactionType != transaction.REGISTRATION { if tx.TransactionType != transaction.REGISTRATION {
// we must now fill in compressed ring members // we must now fill in compressed ring members
if toporecord, err := chain.Store.Topo_store.Read(topo_height); err == nil { if toporecord, err := chain.Store.Topo_store.Read(state_block_topo); err == nil {
if ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version); err == nil { if ss, err := chain.Store.Balance_store.LoadSnapshot(toporecord.State_Version); err == nil {
if tx.TransactionType == transaction.SC_TX { if tx.TransactionType == transaction.SC_TX {
@ -141,7 +138,6 @@ func (DERO_RPC_APIS) GetTransaction(ctx context.Context, p rpc.GetTransaction_Pa
if tx.Payloads[t].SCID.IsZero() { if tx.Payloads[t].SCID.IsZero() {
tree, err = ss.GetTree(config.BALANCE_TREE) tree, err = ss.GetTree(config.BALANCE_TREE)
} else { } else {
tree, err = ss.GetTree(string(tx.Payloads[t].SCID[:])) tree, err = ss.GetTree(string(tx.Payloads[t].SCID[:]))
} }
@ -166,8 +162,8 @@ func (DERO_RPC_APIS) GetTransaction(ctx context.Context, p rpc.GetTransaction_Pa
} }
} }
} }
for i := range invalid_blid { for i := range blid_list {
related.InvalidBlock = append(related.InvalidBlock, invalid_blid[i].String()) related.MinedBlock = append(related.MinedBlock, blid_list[i].String())
} }
result.Txs_as_hex = append(result.Txs_as_hex, hex.EncodeToString(tx.Serialize())) result.Txs_as_hex = append(result.Txs_as_hex, hex.EncodeToString(tx.Serialize()))

View File

@ -64,8 +64,8 @@ func (DERO_RPC_APIS) SendRawTransaction(ctx context.Context, p rpc.SendRawTransa
result.Status = "OK" result.Status = "OK"
rlog.Debugf("Incoming TXID %s from RPC Server successfully accepted by MEMPOOL", tx.GetHash()) rlog.Debugf("Incoming TXID %s from RPC Server successfully accepted by MEMPOOL", tx.GetHash())
} else { } else {
err = fmt.Errorf("Transaction %s rejected by daemon err '%s'", tx.GetHash(), err) rlog.Warnf("Incoming TXID %s from RPC Server rejected by POOL err '%s'", tx.GetHash(),err)
rlog.Warnf("Incoming TXID %s from RPC Server rejected by POOL", tx.GetHash()) err = fmt.Errorf("Transaction %s rejected by daemon err '%s'", tx.GetHash(), err)
} }
return return
} }

View File

@ -26,11 +26,13 @@ import "sync/atomic"
import "context" import "context"
import "strings" import "strings"
import "runtime/debug" import "runtime/debug"
import "net/http/pprof"
import "github.com/romana/rlog" import "github.com/romana/rlog"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/globals" import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/metrics"
import "github.com/deroproject/derohe/blockchain" import "github.com/deroproject/derohe/blockchain"
import "github.com/deroproject/derohe/glue/rwc" import "github.com/deroproject/derohe/glue/rwc"
@ -164,12 +166,15 @@ func (r *RPCServer) Run() {
r.Unlock() r.Unlock()
r.mux.HandleFunc("/json_rpc", translate_http_to_jsonrpc_and_vice_versa) r.mux.HandleFunc("/json_rpc", translate_http_to_jsonrpc_and_vice_versa)
r.mux.HandleFunc("/metrics", metrics.WritePrometheus) // write all the metrics
r.mux.HandleFunc("/ws", ws_handler) r.mux.HandleFunc("/ws", ws_handler)
r.mux.HandleFunc("/", hello) r.mux.HandleFunc("/", hello)
//r.mux.Handle("/json_rpc", mr) r.mux.HandleFunc("/debug/pprof/", pprof.Index)
r.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
// handle nasty http requests r.mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
//r.mux.HandleFunc("/getheight", getheight) r.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
r.mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
//if DEBUG_MODE { //if DEBUG_MODE {
// r.mux.HandleFunc("/debug/pprof/", pprof.Index) // r.mux.HandleFunc("/debug/pprof/", pprof.Index)
@ -182,21 +187,8 @@ func (r *RPCServer) Run() {
r.mux.HandleFunc("/debug/pprof/trace", pprof.Trace) r.mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
*/ */
/*
// Register pprof handlers individually if required
r.mux.HandleFunc("/cdebug/pprof/", pprof.Index)
r.mux.HandleFunc("/cdebug/pprof/cmdline", pprof.Cmdline)
r.mux.HandleFunc("/cdebug/pprof/profile", pprof.Profile)
r.mux.HandleFunc("/cdebug/pprof/symbol", pprof.Symbol)
r.mux.HandleFunc("/cdebug/pprof/trace", pprof.Trace)
*/
// register metrics handler
// r.mux.HandleFunc("/metrics", prometheus.InstrumentHandler("dero", promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{})))
//}
//r.mux.HandleFunc("/json_rpc/debug", mr.ServeDebug)
go Notify_Block_Addition() // process all blocks go Notify_Block_Addition() // process all blocks
go Notify_Height_Changes() // gives notification of changed height go Notify_Height_Changes() // gives notification of changed height

View File

@ -259,8 +259,8 @@ type txinfo struct {
OutAddress []string // contains output secret key OutAddress []string // contains output secret key
OutOffset []uint64 // contains index offsets OutOffset []uint64 // contains index offsets
Type string // ringct or ruffct ( bulletproof) Type string // ringct or ruffct ( bulletproof)
ValidBlock string // the tx is valid in which block StateBlock string // the tx is valid with reference to this block
InvalidBlock []string // the tx is invalid in which block MinedBlock []string // the tx is mined in which block
Skipped bool // this is only valid, when a block is being listed Skipped bool // this is only valid, when a block is being listed
Ring_size int Ring_size int
Ring [][][]byte // contains entire ring in raw form Ring [][][]byte // contains entire ring in raw form
@ -399,9 +399,6 @@ func load_block_from_rpc(info *block_info, block_hash string, recursive bool) (e
var tx txinfo var tx txinfo
err = load_tx_from_rpc(&tx, bl.Tx_hashes[i].String()) //TODO handle error err = load_tx_from_rpc(&tx, bl.Tx_hashes[i].String()) //TODO handle error
fmt.Printf("loading tx %s err %s\n", bl.Tx_hashes[i].String(), err) fmt.Printf("loading tx %s err %s\n", bl.Tx_hashes[i].String(), err)
if tx.ValidBlock != bresult.Block_Header.Hash { // track skipped status
tx.Skipped = true
}
info.Txs = append(info.Txs, tx) info.Txs = append(info.Txs, tx)
fees += tx.Feeuint64 fees += tx.Feeuint64
size += tx.Sizeuint64 size += tx.Sizeuint64
@ -552,8 +549,8 @@ func load_tx_from_rpc(info *txinfo, txhash string) (err error) {
info.Amount = fmt.Sprintf("%.05f", float64(uint64(tx_result.Txs[0].Reward))/100000) info.Amount = fmt.Sprintf("%.05f", float64(uint64(tx_result.Txs[0].Reward))/100000)
} }
info.ValidBlock = tx_result.Txs[0].ValidBlock info.StateBlock = tx_result.Txs[0].StateBlock
info.InvalidBlock = tx_result.Txs[0].InvalidBlock info.MinedBlock = tx_result.Txs[0].MinedBlock
info.Ring = tx_result.Txs[0].Ring info.Ring = tx_result.Txs[0].Ring

View File

@ -308,9 +308,9 @@ var tx_template string = `{{define "tx"}}
<H4 style="margin:5px; color: red">Burns: {{.info.Burn_Value }} DERO</H4> <H4 style="margin:5px; color: red">Burns: {{.info.Burn_Value }} DERO</H4>
{{end}} {{end}}
<H5>Block: <a href="/block/{{.info.ValidBlock}}">{{.info.ValidBlock}}</a> (VALID) </H5> <H5>Block: <a href="/block/{{.info.StateBlock}}">{{.info.StateBlock}}</a> (Reference) </H5>
{{range $i, $e := .info.InvalidBlock}} {{range $i, $e := .info.MinedBlock}}
<H5>Block: <a href="/block/{{$e}}">{{$e}}</a></H5> <H5>Block: <a href="/block/{{$e}}">{{$e}}</a></H5>
{{end}} {{end}}

View File

@ -58,14 +58,16 @@ const MAINNET_MINIMUM_DIFFICULTY = uint64(800 * BLOCK_TIME) // 5 KH/s
const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(800 * BLOCK_TIME) // testnet bootstrap at 800 H/s const TESTNET_BOOTSTRAP_DIFFICULTY = uint64(800 * BLOCK_TIME) // testnet bootstrap at 800 H/s
const TESTNET_MINIMUM_DIFFICULTY = uint64(800 * BLOCK_TIME) // 800 H const TESTNET_MINIMUM_DIFFICULTY = uint64(800 * BLOCK_TIME) // 800 H
//this controls the batch size which controls till how many blocks incoming funds cannot be spend
const BLOCK_BATCH_SIZE = crypto.BLOCK_BATCH_SIZE
// this single parameter controls lots of various parameters // this single parameter controls lots of various parameters
// within the consensus, it should never go below 7 // within the consensus, it should never go below 7
// if changed responsibly, we can have one second or lower blocks (ignoring chain bloat/size issues) // if changed responsibly, we can have one second or lower blocks (ignoring chain bloat/size issues)
// gives immense scalability, // gives immense scalability,
const STABLE_LIMIT = int64(8) const STABLE_LIMIT = int64(8)
// reward percent that is shared between miners/dev
const DEVSHARE = uint64(600) // it's out of 10000, 600*100/10000 = 6%, 3% dev, 3% foundation
// we can have number of chains running for testing reasons // we can have number of chains running for testing reasons
type CHAIN_CONFIG struct { type CHAIN_CONFIG struct {
@ -104,7 +106,7 @@ var Mainnet = CHAIN_CONFIG{Name: "mainnet",
} }
var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0 var Testnet = CHAIN_CONFIG{Name: "testnet", // testnet will always have last 3 bytes 0
Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x26, 0x00, 0x00, 0x00}), Network_ID: uuid.FromBytesOrNil([]byte{0x59, 0xd7, 0xf7, 0xe9, 0xdd, 0x48, 0xd5, 0xfd, 0x13, 0x0a, 0xf6, 0xe0, 0x26, 0x00, 0x02, 0x00}),
P2P_Default_Port: 40401, P2P_Default_Port: 40401,
RPC_Default_Port: 40402, RPC_Default_Port: 40402,
Wallet_RPC_Default_Port: 40403, Wallet_RPC_Default_Port: 40403,

View File

@ -20,4 +20,4 @@ import "github.com/blang/semver"
// right now it has to be manually changed // right now it has to be manually changed
// do we need to include git commitsha?? // do we need to include git commitsha??
var Version = semver.MustParse("3.2.14-1.DEROHE.STARGATE+28022021") var Version = semver.MustParse("3.2.15-1.DEROHE.STARGATE+08082021")

View File

@ -22,7 +22,7 @@ import "math/big"
import "bytes" import "bytes"
//import "crypto/rand" //import "crypto/rand"
//import "encoding/hex" import "encoding/binary"
import "github.com/deroproject/derohe/cryptography/bn256" import "github.com/deroproject/derohe/cryptography/bn256"
@ -30,6 +30,10 @@ import "github.com/deroproject/derohe/cryptography/bn256"
//import "github.com/kubernetes/klog" //import "github.com/kubernetes/klog"
// see comment in config package
const BLOCK_BATCH_SIZE = 10
type Proof struct { type Proof struct {
BA *bn256.G1 BA *bn256.G1
BS *bn256.G1 BS *bn256.G1
@ -39,6 +43,7 @@ type Proof struct {
CLnG, CRnG, C_0G, DG, y_0G, gG, C_XG, y_XG []*bn256.G1 CLnG, CRnG, C_0G, DG, y_0G, gG, C_XG, y_XG []*bn256.G1
u *bn256.G1 u *bn256.G1
u1 *bn256.G1
f *FieldVector f *FieldVector
@ -67,9 +72,12 @@ type IPWitness struct {
} }
// this is based on roothash and user's secret key and thus is the basis of protection from a number of double spending attacks // this is based on roothash and user's secret key and thus is the basis of protection from a number of double spending attacks
func (p *Proof) Nonce() Hash { func (p *Proof) Nonce1() Hash {
return Keccak256(p.u.EncodeCompressed()) return Keccak256(p.u.EncodeCompressed())
} }
func (p *Proof) Nonce2() Hash {
return Keccak256(p.u1.EncodeCompressed())
}
func (p *Proof) Serialize(w *bytes.Buffer) { func (p *Proof) Serialize(w *bytes.Buffer) {
if p == nil { if p == nil {
@ -95,6 +103,7 @@ func (p *Proof) Serialize(w *bytes.Buffer) {
} }
w.Write(p.u.EncodeCompressed()) w.Write(p.u.EncodeCompressed())
w.Write(p.u1.EncodeCompressed())
if len(p.CLnG) != len(p.f.vector) { if len(p.CLnG) != len(p.f.vector) {
/// panic(fmt.Sprintf("different size %d %d", len(p.CLnG), len(p.f.vector))) /// panic(fmt.Sprintf("different size %d %d", len(p.CLnG), len(p.f.vector)))
@ -278,6 +287,16 @@ func (proof *Proof) Deserialize(r *bytes.Reader, length int) error {
return err return err
} }
if n, err := r.Read(bufp[:]); n == 33 && err == nil {
var p bn256.G1
if err = p.DecodeCompressed(bufp[:]); err != nil {
return err
}
proof.u1 = &p
} else {
return err
}
proof.f = &FieldVector{} proof.f = &FieldVector{}
//fmt.Printf("flen %d\n", flen ) //fmt.Printf("flen %d\n", flen )
@ -438,12 +457,27 @@ func reverse(s string) string {
return string(rns) return string(rns)
} }
func HeightToPoint(height uint64) *bn256.G1 {
var input []byte
var h [8]byte
input = append(input, []byte(PROTOCOL_CONSTANT)...)
binary.BigEndian.PutUint64(h[:], height)
input = append(input,h[:]...)
point := HashToPoint(HashtoNumber(input))
return point
}
var params = NewGeneratorParams(128) // these can be pregenerated similarly as in DERO project var params = NewGeneratorParams(128) // these can be pregenerated similarly as in DERO project
func GenerateProof(s *Statement, witness *Witness, u *bn256.G1, txid Hash, burn_value uint64) *Proof { func GenerateProof(s *Statement, witness *Witness, u,u1 *bn256.G1, height uint64, txid Hash, burn_value uint64) *Proof {
var proof Proof var proof Proof
proof.u = u proof.u = u
proof.u1 = u1
statementhash := reducedhash(txid[:]) statementhash := reducedhash(txid[:])
@ -1106,17 +1140,9 @@ func GenerateProof(s *Statement, witness *Witness, u *bn256.G1, txid Hash, burn_
A_t := new(bn256.G1).ScalarMult(params.G, new(big.Int).Mod(new(big.Int).Neg(k_b), bn256.Order)) A_t := new(bn256.G1).ScalarMult(params.G, new(big.Int).Mod(new(big.Int).Neg(k_b), bn256.Order))
A_t = new(bn256.G1).Add(A_t, new(bn256.G1).ScalarMult(params.H, k_tau)) A_t = new(bn256.G1).Add(A_t, new(bn256.G1).ScalarMult(params.H, k_tau))
A_u := new(bn256.G1) A_u := new(bn256.G1).ScalarMult(HeightToPoint(height), k_sk)
A_u1 := new(bn256.G1).ScalarMult(HeightToPoint(height + BLOCK_BATCH_SIZE), k_sk)
{
var input []byte
input = append(input, []byte(PROTOCOL_CONSTANT)...)
input = append(input, s.Roothash[:]...)
point := HashToPoint(HashtoNumber(input))
A_u = new(bn256.G1).ScalarMult(point, k_sk)
}
// klog.V(2).Infof("A_y %s\n", A_y.String()) // klog.V(2).Infof("A_y %s\n", A_y.String())
// klog.V(2).Infof("A_D %s\n", A_D.String()) // klog.V(2).Infof("A_D %s\n", A_D.String())
@ -1134,6 +1160,7 @@ func GenerateProof(s *Statement, witness *Witness, u *bn256.G1, txid Hash, burn_
input = append(input, A_X.Marshal()...) input = append(input, A_X.Marshal()...)
input = append(input, A_t.Marshal()...) input = append(input, A_t.Marshal()...)
input = append(input, A_u.Marshal()...) input = append(input, A_u.Marshal()...)
input = append(input, A_u1.Marshal()...)
proof.c = reducedhash(input) proof.c = reducedhash(input)
} }

View File

@ -64,7 +64,7 @@ type ProtocolSupport struct {
// sigma protocol // sigma protocol
type SigmaSupport struct { type SigmaSupport struct {
c *big.Int c *big.Int
A_y, A_D, A_b, A_X, A_t, A_u *bn256.G1 A_y, A_D, A_b, A_X, A_t, A_u, A_u1 *bn256.G1
} }
// support structures are those which // support structures are those which
@ -96,7 +96,7 @@ var gparams = NewGeneratorParams(128) // these can be pregenerated similarly as
// verify proof // verify proof
// first generate supporting structures // first generate supporting structures
func (proof *Proof) Verify(s *Statement, txid Hash, extra_value uint64) bool { func (proof *Proof) Verify(s *Statement, txid Hash, height uint64, extra_value uint64) bool {
var anonsupport AnonSupport var anonsupport AnonSupport
var protsupport ProtocolSupport var protsupport ProtocolSupport
@ -377,14 +377,14 @@ func (proof *Proof) Verify(s *Statement, txid Hash, extra_value uint64) bool {
// klog.V(2).Infof("protsupport.tEval %s\n", protsupport.tEval.String()) // klog.V(2).Infof("protsupport.tEval %s\n", protsupport.tEval.String())
{ {
var input []byte point := HeightToPoint(height)
input = append(input, []byte(PROTOCOL_CONSTANT)...)
input = append(input, s.Roothash[:]...)
point := HashToPoint(HashtoNumber(input))
sigmasupport.A_u = new(bn256.G1).ScalarMult(point, proof.s_sk) sigmasupport.A_u = new(bn256.G1).ScalarMult(point, proof.s_sk)
sigmasupport.A_u.Add(new(bn256.G1).Set(sigmasupport.A_u), new(bn256.G1).ScalarMult(proof.u, proof_c_neg)) sigmasupport.A_u.Add(new(bn256.G1).Set(sigmasupport.A_u), new(bn256.G1).ScalarMult(proof.u, proof_c_neg))
point = HeightToPoint(height+BLOCK_BATCH_SIZE)
sigmasupport.A_u1 = new(bn256.G1).ScalarMult(point, proof.s_sk)
sigmasupport.A_u1.Add(new(bn256.G1).Set(sigmasupport.A_u1), new(bn256.G1).ScalarMult(proof.u1, proof_c_neg))
} }
// klog.V(2).Infof("A_y %s\n", sigmasupport.A_y.String()) // klog.V(2).Infof("A_y %s\n", sigmasupport.A_y.String())
@ -403,6 +403,7 @@ func (proof *Proof) Verify(s *Statement, txid Hash, extra_value uint64) bool {
input = append(input, sigmasupport.A_X.Marshal()...) input = append(input, sigmasupport.A_X.Marshal()...)
input = append(input, sigmasupport.A_t.Marshal()...) input = append(input, sigmasupport.A_t.Marshal()...)
input = append(input, sigmasupport.A_u.Marshal()...) input = append(input, sigmasupport.A_u.Marshal()...)
input = append(input, sigmasupport.A_u1.Marshal()...)
// fmt.Printf("C calculation expected %s actual %s\n",proof.c.Text(16), reducedhash(input).Text(16) ) // fmt.Printf("C calculation expected %s actual %s\n",proof.c.Text(16), reducedhash(input).Text(16) )
if reducedhash(input).Text(16) != proof.c.Text(16) { // we must fail here if reducedhash(input).Text(16) != proof.c.Text(16) { // we must fail here

View File

@ -18,14 +18,28 @@
package metrics package metrics
import "github.com/prometheus/client_golang/prometheus" import "net/http"
import "github.com/VictoriaMetrics/metrics"
var Registry = prometheus.NewRegistry() // these are exported by the daemon for various analysis
var DefaultRegisterer prometheus.Registerer = Registry
var DefaultGatherer prometheus.Gatherer = Registry
// register some default go collectors var Blockchain_tx_counter = metrics.NewCounter(`blockchain_tx_counter`)
func init() { var Mempool_tx_counter = metrics.NewCounter(`mempool_tx_counter`)
Registry.MustRegister(prometheus.NewGoCollector()) var Mempool_tx_count = metrics.NewCounter(`mempool_tx_count`) // its actually a gauge
} var Block_size = metrics.NewHistogram(`block_size`)
var Block_tx = metrics.NewHistogram(`block_tx`)
var Block_processing_time = metrics.NewHistogram(`block_processing_time`)
var Transaction_size = metrics.NewHistogram(`transaction_size`)
var Transaction_ring_size = metrics.NewHistogram(`transaction_ring_size`)
var Transaction_outputs = metrics.NewHistogram("transaction_outputs") // a single tx will give to so many people
// we may need to expose various p2p stats, but currently they can be skipped
var Block_propagation = metrics.NewHistogram(`block_propagation`)
var Transaction_propagation = metrics.NewHistogram(`transaction_propagation`)
func WritePrometheus(w http.ResponseWriter, req *http.Request){
metrics.WritePrometheus(w, true)
}

View File

@ -56,7 +56,7 @@ func (connection *Connection) bootstrap_chain() {
// we will request top 60 blocks // we will request top 60 blocks
ctopo := connection.TopoHeight ctopo := connection.TopoHeight
var topos []int64 var topos []int64
for i := ctopo - 20; i < ctopo; i++ { for i := ctopo - 200; i < ctopo; i++ {
topos = append(topos, i) topos = append(topos, i)
} }

View File

@ -39,7 +39,6 @@ import "github.com/romana/rlog"
import "github.com/dustin/go-humanize" import "github.com/dustin/go-humanize"
import log "github.com/sirupsen/logrus" import log "github.com/sirupsen/logrus"
import "github.com/paulbellamy/ratecounter" import "github.com/paulbellamy/ratecounter"
import "github.com/prometheus/client_golang/prometheus"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/cryptography/crypto" import "github.com/deroproject/derohe/cryptography/crypto"
@ -115,19 +114,6 @@ func (c *Connection) exit() {
} }
// 300 such buckets can be used to track block propagation accuratly upto a minute
var block_propagation = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "block_propagation_ms",
Help: "Block Propagation time milliseconds as detected by daemon",
Buckets: prometheus.LinearBuckets(0, 1000, 20), // start 0 ms, each 1000 ms, 20 such buckets.
})
// 300 such buckets can be used to track transaction propagation accurately upto a minute
var transaction_propagation = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "tx_propagation_ms",
Help: "TX Propagation time milliseconds as detected by daemon",
Buckets: prometheus.LinearBuckets(0, 1000, 20), // start 0 ms, each 1000 ms, 20 such buckets.
})
var block_propagation_map sync.Map var block_propagation_map sync.Map
var tx_propagation_map sync.Map var tx_propagation_map sync.Map

View File

@ -43,7 +43,7 @@ import log "github.com/sirupsen/logrus"
import "github.com/deroproject/derohe/config" import "github.com/deroproject/derohe/config"
import "github.com/deroproject/derohe/globals" import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/blockchain" import "github.com/deroproject/derohe/blockchain"
import "github.com/deroproject/derohe/metrics"
var chain *blockchain.Blockchain // external reference to chain var chain *blockchain.Blockchain // external reference to chain
@ -105,9 +105,6 @@ func P2P_Init(params map[string]interface{}) error {
} }
} }
// register the metrics with the metrics registry
metrics.Registry.MustRegister(block_propagation)
metrics.Registry.MustRegister(transaction_propagation)
go P2P_Server_v2() // start accepting connections go P2P_Server_v2() // start accepting connections
go P2P_engine() // start outgoing engine go P2P_engine() // start outgoing engine

View File

@ -26,6 +26,7 @@ import "github.com/deroproject/derohe/globals"
import "github.com/deroproject/derohe/block" import "github.com/deroproject/derohe/block"
import "github.com/deroproject/derohe/errormsg" import "github.com/deroproject/derohe/errormsg"
import "github.com/deroproject/derohe/transaction" import "github.com/deroproject/derohe/transaction"
import "github.com/deroproject/derohe/metrics"
// notifies inventory // notifies inventory
func (c *Connection) NotifyINV(request ObjectList, response *Dummy) (err error) { func (c *Connection) NotifyINV(request ObjectList, response *Dummy) (err error) {
@ -104,9 +105,7 @@ func (c *Connection) NotifyTx(request Objects, response *Dummy) error {
// track transaction propagation // track transaction propagation
if first_time, ok := tx_propagation_map.Load(tx.GetHash()); ok { if first_time, ok := tx_propagation_map.Load(tx.GetHash()); ok {
// block already has a reference, take the time and observe the value metrics.Transaction_propagation.UpdateDuration(first_time.(time.Time))
diff := time.Now().Sub(first_time.(time.Time)).Round(time.Millisecond)
transaction_propagation.Observe(float64(diff / 1000000))
} else { } else {
tx_propagation_map.Store(tx.GetHash(), time.Now()) // if this is the first time, store the tx time tx_propagation_map.Store(tx.GetHash(), time.Now()) // if this is the first time, store the tx time
} }
@ -153,10 +152,8 @@ func (c *Connection) NotifyBlock(request Objects, response *Dummy) error {
rlog.Infof("Incoming block Notification hash %s %s ", blid, globals.CTXString(c.logger)) rlog.Infof("Incoming block Notification hash %s %s ", blid, globals.CTXString(c.logger))
// track block propagation // track block propagation
if first_time, ok := block_propagation_map.Load(blid); ok { if first_time, ok := block_propagation_map.Load(blid); ok { // block already has a reference, take the time and observe the value
// block already has a reference, take the time and observe the value metrics.Block_propagation.UpdateDuration(first_time.(time.Time))
diff := time.Now().Sub(first_time.(time.Time)).Round(time.Millisecond)
block_propagation.Observe(float64(diff / 1000000))
} else { } else {
block_propagation_map.Store(blid, time.Now()) // if this is the first time, store the block block_propagation_map.Store(blid, time.Now()) // if this is the first time, store the block
} }

View File

@ -123,6 +123,9 @@ type (
} }
) )
const RECENT_BLOCK = int64(-1) // will give most recent data
const RECENT_BATCH_BLOCK = int64(-2) // will give data from recent block batch for tx building
//get encrypted balance call //get encrypted balance call
type ( type (
GetEncryptedBalance_Params struct { GetEncryptedBalance_Params struct {
@ -193,8 +196,8 @@ type (
In_pool bool `json:"in_pool"` In_pool bool `json:"in_pool"`
Output_Indices []uint64 `json:"output_indices"` Output_Indices []uint64 `json:"output_indices"`
Tx_hash string `json:"tx_hash"` Tx_hash string `json:"tx_hash"`
ValidBlock string `json:"valid_block"` // TX is valid in this block StateBlock string `json:"state_block"` // TX is built in reference to this block
InvalidBlock []string `json:"invalid_block"` // TX is invalid in this block, 0 or more MinedBlock []string `json:"mined_block"` // TX is mined in this block, 1 or more
Ring [][][]byte `json:"ring"` // ring members completed, since tx contains compressed Ring [][][]byte `json:"ring"` // ring members completed, since tx contains compressed
Balance uint64 `json:"balance"` // if tx is SC, give SC balance at start Balance uint64 `json:"balance"` // if tx is SC, give SC balance at start
Code string `json:"code"` // smart contract code at start Code string `json:"code"` // smart contract code at start

View File

@ -170,7 +170,7 @@ func (a *AssetPayload) UnmarshalProofs(r *bytes.Reader) (err error) {
type Transaction struct { type Transaction struct {
Transaction_Prefix // same as Transaction_Prefix Transaction_Prefix // same as Transaction_Prefix
Payloads []AssetPayload // each transaction can have a number os payloads Payloads []AssetPayload // each transaction can have a number of payloads
} }
// this excludes the proof part, so it can pruned // this excludes the proof part, so it can pruned

View File

@ -0,0 +1,33 @@
name: main
on:
- push
- pull_request
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Setup Go
uses: actions/setup-go@v1
with:
go-version: 1.13
id: go
- name: Code checkout
uses: actions/checkout@v1
- name: Test
run: |
go test -v ./... -coverprofile=coverage.txt -covermode=atomic
go test -v ./... -race
- name: Build
run: |
GOOS=linux go build
GOOS=darwin go build
GOOS=freebsd go build
GOOS=windows go build
GOARCH=386 go build
- name: Publish coverage
uses: codecov/codecov-action@v1.0.6
with:
token: ${{secrets.CODECOV_TOKEN}}
file: ./coverage.txt

22
vendor/github.com/VictoriaMetrics/metrics/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2019 VictoriaMetrics
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

104
vendor/github.com/VictoriaMetrics/metrics/README.md generated vendored Normal file
View File

@ -0,0 +1,104 @@
[![Build Status](https://github.com/VictoriaMetrics/metrics/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/metrics/actions)
[![GoDoc](https://godoc.org/github.com/VictoriaMetrics/metrics?status.svg)](http://godoc.org/github.com/VictoriaMetrics/metrics)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/metrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/metrics)
[![codecov](https://codecov.io/gh/VictoriaMetrics/metrics/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/metrics)
# metrics - lightweight package for exporting metrics in Prometheus format
### Features
* Lightweight. Has minimal number of third-party dependencies and all these deps are small.
See [this article](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) for details.
* Easy to use. See the [API docs](http://godoc.org/github.com/VictoriaMetrics/metrics).
* Fast.
* Allows exporting distinct metric sets via distinct endpoints. See [Set](http://godoc.org/github.com/VictoriaMetrics/metrics#Set).
* Supports [easy-to-use histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram), which just work without any tuning.
Read more about VictoriaMetrics histograms at [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350).
### Limitations
* It doesn't implement advanced functionality from [github.com/prometheus/client_golang](https://godoc.org/github.com/prometheus/client_golang).
### Usage
```go
import "github.com/VictoriaMetrics/metrics"
// Register various time series.
// Time series name may contain labels in Prometheus format - see below.
var (
// Register counter without labels.
requestsTotal = metrics.NewCounter("requests_total")
// Register summary with a single label.
requestDuration = metrics.NewSummary(`requests_duration_seconds{path="/foobar/baz"}`)
// Register gauge with two labels.
queueSize = metrics.NewGauge(`queue_size{queue="foobar",topic="baz"}`, func() float64 {
return float64(foobarQueue.Len())
})
// Register histogram with a single label.
responseSize = metrics.NewHistogram(`response_size{path="/foo/bar"}`)
)
// ...
func requestHandler() {
// Increment requestTotal counter.
requestsTotal.Inc()
startTime := time.Now()
processRequest()
// Update requestDuration summary.
requestDuration.UpdateDuration(startTime)
// Update responseSize histogram.
responseSize.Update(responseSize)
}
// Expose the registered metrics at `/metrics` path.
http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
metrics.WritePrometheus(w, true)
})
```
See [docs](http://godoc.org/github.com/VictoriaMetrics/metrics) for more info.
### Users
* `Metrics` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources.
See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
for more info about `VictoriaMetrics`.
### FAQ
#### Why the `metrics` API isn't compatible with `github.com/prometheus/client_golang`?
Because the `github.com/prometheus/client_golang` is too complex and is hard to use.
#### Why the `metrics.WritePrometheus` doesn't expose documentation for each metric?
Because this documentation is ignored by Prometheus. The documentation is for users.
Just give meaningful names to the exported metrics or add comments in the source code
or in other suitable place explaining each metric exposed from your application.
#### How to implement [CounterVec](https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec) in `metrics`?
Just use [GetOrCreateCounter](http://godoc.org/github.com/VictoriaMetrics/metrics#GetOrCreateCounter)
instead of `CounterVec.With`. See [this example](https://pkg.go.dev/github.com/VictoriaMetrics/metrics#example-Counter-Vec) for details.
#### Why [Histogram](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) buckets contain `vmrange` labels instead of `le` labels like in Prometheus histograms?
Buckets with `vmrange` labels occupy less disk space compared to Promethes-style buckets with `le` labels,
because `vmrange` buckets don't include counters for the previous ranges. [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) provides `prometheus_buckets`
function, which converts `vmrange` buckets to Prometheus-style buckets with `le` labels. This is useful for building heatmaps in Grafana.
Additionally, its' `histogram_quantile` function transparently handles histogram buckets with `vmrange` labels.

77
vendor/github.com/VictoriaMetrics/metrics/counter.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
package metrics
import (
"fmt"
"io"
"sync/atomic"
)
// NewCounter registers and returns new counter with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
func NewCounter(name string) *Counter {
return defaultSet.NewCounter(name)
}
// Counter is a counter.
//
// It may be used as a gauge if Dec and Set are called.
type Counter struct {
n uint64
}
// Inc increments c.
func (c *Counter) Inc() {
atomic.AddUint64(&c.n, 1)
}
// Dec decrements c.
func (c *Counter) Dec() {
atomic.AddUint64(&c.n, ^uint64(0))
}
// Add adds n to c.
func (c *Counter) Add(n int) {
atomic.AddUint64(&c.n, uint64(n))
}
// Get returns the current value for c.
func (c *Counter) Get() uint64 {
return atomic.LoadUint64(&c.n)
}
// Set sets c value to n.
func (c *Counter) Set(n uint64) {
atomic.StoreUint64(&c.n, n)
}
// marshalTo marshals c with the given prefix to w.
func (c *Counter) marshalTo(prefix string, w io.Writer) {
v := c.Get()
fmt.Fprintf(w, "%s %d\n", prefix, v)
}
// GetOrCreateCounter returns registered counter with the given name
// or creates new counter if the registry doesn't contain counter with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
func GetOrCreateCounter(name string) *Counter {
return defaultSet.GetOrCreateCounter(name)
}

View File

@ -0,0 +1,41 @@
package metrics_test
import (
"fmt"
"github.com/VictoriaMetrics/metrics"
)
func ExampleCounter() {
// Define a counter in global scope.
var c = metrics.NewCounter(`metric_total{label1="value1", label2="value2"}`)
// Increment the counter when needed.
for i := 0; i < 10; i++ {
c.Inc()
}
n := c.Get()
fmt.Println(n)
// Output:
// 10
}
func ExampleCounter_vec() {
for i := 0; i < 3; i++ {
// Dynamically construct metric name and pass it to GetOrCreateCounter.
name := fmt.Sprintf(`metric_total{label1=%q, label2="%d"}`, "value1", i)
metrics.GetOrCreateCounter(name).Add(i + 1)
}
// Read counter values.
for i := 0; i < 3; i++ {
name := fmt.Sprintf(`metric_total{label1=%q, label2="%d"}`, "value1", i)
n := metrics.GetOrCreateCounter(name).Get()
fmt.Println(n)
}
// Output:
// 1
// 2
// 3
}

View File

@ -0,0 +1,76 @@
package metrics
import (
"fmt"
"testing"
)
func TestCounterSerial(t *testing.T) {
name := "CounterSerial"
c := NewCounter(name)
c.Inc()
if n := c.Get(); n != 1 {
t.Fatalf("unexpected counter value; got %d; want 1", n)
}
c.Set(123)
if n := c.Get(); n != 123 {
t.Fatalf("unexpected counter value; got %d; want 123", n)
}
c.Dec()
if n := c.Get(); n != 122 {
t.Fatalf("unexpected counter value; got %d; want 122", n)
}
c.Add(3)
if n := c.Get(); n != 125 {
t.Fatalf("unexpected counter value; got %d; want 125", n)
}
// Verify MarshalTo
testMarshalTo(t, c, "foobar", "foobar 125\n")
}
func TestCounterConcurrent(t *testing.T) {
name := "CounterConcurrent"
c := NewCounter(name)
err := testConcurrent(func() error {
nPrev := c.Get()
for i := 0; i < 10; i++ {
c.Inc()
if n := c.Get(); n <= nPrev {
return fmt.Errorf("counter value must be greater than %d; got %d", nPrev, n)
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateCounterSerial(t *testing.T) {
name := "GetOrCreateCounterSerial"
if err := testGetOrCreateCounter(name); err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateCounterConcurrent(t *testing.T) {
name := "GetOrCreateCounterConcurrent"
err := testConcurrent(func() error {
return testGetOrCreateCounter(name)
})
if err != nil {
t.Fatal(err)
}
}
func testGetOrCreateCounter(name string) error {
c1 := GetOrCreateCounter(name)
for i := 0; i < 10; i++ {
c2 := GetOrCreateCounter(name)
if c1 != c2 {
return fmt.Errorf("unexpected counter returned; got %p; want %p", c2, c1)
}
}
return nil
}

View File

@ -0,0 +1,82 @@
package metrics
import (
"fmt"
"io"
"sync"
)
// NewFloatCounter registers and returns new counter of float64 type with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
func NewFloatCounter(name string) *FloatCounter {
return defaultSet.NewFloatCounter(name)
}
// FloatCounter is a float64 counter guarded by RWmutex.
//
// It may be used as a gauge if Add and Sub are called.
type FloatCounter struct {
mu sync.Mutex
n float64
}
// Add adds n to fc.
func (fc *FloatCounter) Add(n float64) {
fc.mu.Lock()
fc.n += n
fc.mu.Unlock()
}
// Sub substracts n from fc.
func (fc *FloatCounter) Sub(n float64) {
fc.mu.Lock()
fc.n -= n
fc.mu.Unlock()
}
// Get returns the current value for fc.
func (fc *FloatCounter) Get() float64 {
fc.mu.Lock()
n := fc.n
fc.mu.Unlock()
return n
}
// Set sets fc value to n.
func (fc *FloatCounter) Set(n float64) {
fc.mu.Lock()
fc.n = n
fc.mu.Unlock()
}
// marshalTo marshals fc with the given prefix to w.
func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) {
v := fc.Get()
fmt.Fprintf(w, "%s %g\n", prefix, v)
}
// GetOrCreateFloatCounter returns registered FloatCounter with the given name
// or creates new FloatCounter if the registry doesn't contain FloatCounter with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned FloatCounter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter.
func GetOrCreateFloatCounter(name string) *FloatCounter {
return defaultSet.GetOrCreateFloatCounter(name)
}

View File

@ -0,0 +1,41 @@
package metrics_test
import (
"fmt"
"github.com/VictoriaMetrics/metrics"
)
func ExampleFloatCounter() {
// Define a float64 counter in global scope.
var fc = metrics.NewFloatCounter(`float_metric_total{label1="value1", label2="value2"}`)
// Add to the counter when needed.
for i := 0; i < 10; i++ {
fc.Add(1.01)
}
n := fc.Get()
fmt.Println(n)
// Output:
// 10.1
}
func ExampleFloatCounter_vec() {
for i := 0; i < 3; i++ {
// Dynamically construct metric name and pass it to GetOrCreateFloatCounter.
name := fmt.Sprintf(`float_metric_total{label1=%q, label2="%d"}`, "value1", i)
metrics.GetOrCreateFloatCounter(name).Add(float64(i) + 1.01)
}
// Read counter values.
for i := 0; i < 3; i++ {
name := fmt.Sprintf(`float_metric_total{label1=%q, label2="%d"}`, "value1", i)
n := metrics.GetOrCreateFloatCounter(name).Get()
fmt.Println(n)
}
// Output:
// 1.01
// 2.01
// 3.01
}

View File

@ -0,0 +1,76 @@
package metrics
import (
"fmt"
"testing"
)
func TestFloatCounterSerial(t *testing.T) {
name := "FloatCounterSerial"
c := NewFloatCounter(name)
c.Add(0.1)
if n := c.Get(); n != 0.1 {
t.Fatalf("unexpected counter value; got %f; want 0.1", n)
}
c.Set(123.00001)
if n := c.Get(); n != 123.00001 {
t.Fatalf("unexpected counter value; got %f; want 123.00001", n)
}
c.Sub(0.00001)
if n := c.Get(); n != 123 {
t.Fatalf("unexpected counter value; got %f; want 123", n)
}
c.Add(2.002)
if n := c.Get(); n != 125.002 {
t.Fatalf("unexpected counter value; got %f; want 125.002", n)
}
// Verify MarshalTo
testMarshalTo(t, c, "foobar", "foobar 125.002\n")
}
func TestFloatCounterConcurrent(t *testing.T) {
name := "FloatCounterConcurrent"
c := NewFloatCounter(name)
err := testConcurrent(func() error {
nPrev := c.Get()
for i := 0; i < 10; i++ {
c.Add(1.001)
if n := c.Get(); n <= nPrev {
return fmt.Errorf("counter value must be greater than %f; got %f", nPrev, n)
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateFloatCounterSerial(t *testing.T) {
name := "GetOrCreateFloatCounterSerial"
if err := testGetOrCreateCounter(name); err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateFloatCounterConcurrent(t *testing.T) {
name := "GetOrCreateFloatCounterConcurrent"
err := testConcurrent(func() error {
return testGetOrCreateFloatCounter(name)
})
if err != nil {
t.Fatal(err)
}
}
func testGetOrCreateFloatCounter(name string) error {
c1 := GetOrCreateFloatCounter(name)
for i := 0; i < 10; i++ {
c2 := GetOrCreateFloatCounter(name)
if c1 != c2 {
return fmt.Errorf("unexpected counter returned; got %p; want %p", c2, c1)
}
}
return nil
}

67
vendor/github.com/VictoriaMetrics/metrics/gauge.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package metrics
import (
"fmt"
"io"
)
// NewGauge registers and returns gauge with the given name, which calls f
// to obtain gauge value.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines.
//
// See also FloatCounter for working with floating-point values.
func NewGauge(name string, f func() float64) *Gauge {
return defaultSet.NewGauge(name, f)
}
// Gauge is a float64 gauge.
//
// See also Counter, which could be used as a gauge with Set and Dec calls.
type Gauge struct {
f func() float64
}
// Get returns the current value for g.
func (g *Gauge) Get() float64 {
return g.f()
}
func (g *Gauge) marshalTo(prefix string, w io.Writer) {
v := g.f()
if float64(int64(v)) == v {
// Marshal integer values without scientific notation
fmt.Fprintf(w, "%s %d\n", prefix, int64(v))
} else {
fmt.Fprintf(w, "%s %g\n", prefix, v)
}
}
// GetOrCreateGauge returns registered gauge with the given name
// or creates new gauge if the registry doesn't contain gauge with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
//
// See also FloatCounter for working with floating-point values.
func GetOrCreateGauge(name string, f func() float64) *Gauge {
return defaultSet.GetOrCreateGauge(name, f)
}

View File

@ -0,0 +1,41 @@
package metrics_test
import (
"fmt"
"runtime"
"github.com/VictoriaMetrics/metrics"
)
func ExampleGauge() {
// Define a gauge exporting the number of goroutines.
var g = metrics.NewGauge(`goroutines_count`, func() float64 {
return float64(runtime.NumGoroutine())
})
// Obtain gauge value.
fmt.Println(g.Get())
}
func ExampleGauge_vec() {
for i := 0; i < 3; i++ {
// Dynamically construct metric name and pass it to GetOrCreateGauge.
name := fmt.Sprintf(`metric{label1=%q, label2="%d"}`, "value1", i)
iLocal := i
metrics.GetOrCreateGauge(name, func() float64 {
return float64(iLocal + 1)
})
}
// Read counter values.
for i := 0; i < 3; i++ {
name := fmt.Sprintf(`metric{label1=%q, label2="%d"}`, "value1", i)
n := metrics.GetOrCreateGauge(name, func() float64 { return 0 }).Get()
fmt.Println(n)
}
// Output:
// 1
// 2
// 3
}

View File

@ -0,0 +1,64 @@
package metrics
import (
"fmt"
"sync"
"testing"
)
func TestGaugeError(t *testing.T) {
expectPanic(t, "NewGauge_nil_callback", func() {
NewGauge("NewGauge_nil_callback", nil)
})
expectPanic(t, "GetOrCreateGauge_nil_callback", func() {
GetOrCreateGauge("GetOrCreateGauge_nil_callback", nil)
})
}
func TestGaugeSerial(t *testing.T) {
name := "GaugeSerial"
n := 1.23
var nLock sync.Mutex
g := NewGauge(name, func() float64 {
nLock.Lock()
defer nLock.Unlock()
n++
return n
})
for i := 0; i < 10; i++ {
if nn := g.Get(); nn != n {
t.Fatalf("unexpected gauge value; got %v; want %v", nn, n)
}
}
// Verify marshalTo
testMarshalTo(t, g, "foobar", "foobar 12.23\n")
// Verify big numbers marshaling
n = 1234567899
testMarshalTo(t, g, "prefix", "prefix 1234567900\n")
}
func TestGaugeConcurrent(t *testing.T) {
name := "GaugeConcurrent"
var n int
var nLock sync.Mutex
g := NewGauge(name, func() float64 {
nLock.Lock()
defer nLock.Unlock()
n++
return float64(n)
})
err := testConcurrent(func() error {
nPrev := g.Get()
for i := 0; i < 10; i++ {
if n := g.Get(); n <= nPrev {
return fmt.Errorf("gauge value must be greater than %v; got %v", nPrev, n)
}
}
return nil
})
if err != nil {
t.Fatal(err)
}
}

5
vendor/github.com/VictoriaMetrics/metrics/go.mod generated vendored Normal file
View File

@ -0,0 +1,5 @@
module github.com/VictoriaMetrics/metrics
require github.com/valyala/histogram v1.1.2
go 1.12

4
vendor/github.com/VictoriaMetrics/metrics/go.sum generated vendored Normal file
View File

@ -0,0 +1,4 @@
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/histogram v1.1.2 h1:vOk5VrGjMBIoPR5k6wA8vBaC8toeJ8XO0yfRjFEc1h8=
github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg=

View File

@ -0,0 +1,64 @@
package metrics
import (
"fmt"
"io"
"runtime"
"github.com/valyala/histogram"
)
func writeGoMetrics(w io.Writer) {
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
fmt.Fprintf(w, "go_memstats_alloc_bytes %d\n", ms.Alloc)
fmt.Fprintf(w, "go_memstats_alloc_bytes_total %d\n", ms.TotalAlloc)
fmt.Fprintf(w, "go_memstats_buck_hash_sys_bytes %d\n", ms.BuckHashSys)
fmt.Fprintf(w, "go_memstats_frees_total %d\n", ms.Frees)
fmt.Fprintf(w, "go_memstats_gc_cpu_fraction %g\n", ms.GCCPUFraction)
fmt.Fprintf(w, "go_memstats_gc_sys_bytes %d\n", ms.GCSys)
fmt.Fprintf(w, "go_memstats_heap_alloc_bytes %d\n", ms.HeapAlloc)
fmt.Fprintf(w, "go_memstats_heap_idle_bytes %d\n", ms.HeapIdle)
fmt.Fprintf(w, "go_memstats_heap_inuse_bytes %d\n", ms.HeapInuse)
fmt.Fprintf(w, "go_memstats_heap_objects %d\n", ms.HeapObjects)
fmt.Fprintf(w, "go_memstats_heap_released_bytes %d\n", ms.HeapReleased)
fmt.Fprintf(w, "go_memstats_heap_sys_bytes %d\n", ms.HeapSys)
fmt.Fprintf(w, "go_memstats_last_gc_time_seconds %g\n", float64(ms.LastGC)/1e9)
fmt.Fprintf(w, "go_memstats_lookups_total %d\n", ms.Lookups)
fmt.Fprintf(w, "go_memstats_mallocs_total %d\n", ms.Mallocs)
fmt.Fprintf(w, "go_memstats_mcache_inuse_bytes %d\n", ms.MCacheInuse)
fmt.Fprintf(w, "go_memstats_mcache_sys_bytes %d\n", ms.MCacheSys)
fmt.Fprintf(w, "go_memstats_mspan_inuse_bytes %d\n", ms.MSpanInuse)
fmt.Fprintf(w, "go_memstats_mspan_sys_bytes %d\n", ms.MSpanSys)
fmt.Fprintf(w, "go_memstats_next_gc_bytes %d\n", ms.NextGC)
fmt.Fprintf(w, "go_memstats_other_sys_bytes %d\n", ms.OtherSys)
fmt.Fprintf(w, "go_memstats_stack_inuse_bytes %d\n", ms.StackInuse)
fmt.Fprintf(w, "go_memstats_stack_sys_bytes %d\n", ms.StackSys)
fmt.Fprintf(w, "go_memstats_sys_bytes %d\n", ms.Sys)
fmt.Fprintf(w, "go_cgo_calls_count %d\n", runtime.NumCgoCall())
fmt.Fprintf(w, "go_cpu_count %d\n", runtime.NumCPU())
gcPauses := histogram.NewFast()
for _, pauseNs := range ms.PauseNs[:] {
gcPauses.Update(float64(pauseNs) / 1e9)
}
phis := []float64{0, 0.25, 0.5, 0.75, 1}
quantiles := make([]float64, 0, len(phis))
for i, q := range gcPauses.Quantiles(quantiles[:0], phis) {
fmt.Fprintf(w, `go_gc_duration_seconds{quantile="%g"} %g`+"\n", phis[i], q)
}
fmt.Fprintf(w, `go_gc_duration_seconds_sum %g`+"\n", float64(ms.PauseTotalNs)/1e9)
fmt.Fprintf(w, `go_gc_duration_seconds_count %d`+"\n", ms.NumGC)
fmt.Fprintf(w, `go_gc_forced_count %d`+"\n", ms.NumForcedGC)
fmt.Fprintf(w, `go_gomaxprocs %d`+"\n", runtime.GOMAXPROCS(0))
fmt.Fprintf(w, `go_goroutines %d`+"\n", runtime.NumGoroutine())
numThread, _ := runtime.ThreadCreateProfile(nil)
fmt.Fprintf(w, `go_threads %d`+"\n", numThread)
// Export build details.
fmt.Fprintf(w, "go_info{version=%q} 1\n", runtime.Version())
fmt.Fprintf(w, "go_info_ext{compiler=%q, GOARCH=%q, GOOS=%q, GOROOT=%q} 1\n",
runtime.Compiler, runtime.GOARCH, runtime.GOOS, runtime.GOROOT())
}

230
vendor/github.com/VictoriaMetrics/metrics/histogram.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
package metrics
import (
"fmt"
"io"
"math"
"sync"
"time"
)
const (
e10Min = -9
e10Max = 18
bucketsPerDecimal = 18
decimalBucketsCount = e10Max - e10Min
bucketsCount = decimalBucketsCount * bucketsPerDecimal
)
var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
// Histogram is a histogram for non-negative values with automatically created buckets.
//
// See https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350
//
// Each bucket contains a counter for values in the given range.
// Each non-empty bucket is exposed via the following metric:
//
// <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
//
// Where:
//
// - <metric_name> is the metric name passed to NewHistogram
// - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
// - <start> and <end> - start and end values for the given bucket
// - <counter> - the number of hits to the given bucket during Update* calls
//
// Histogram buckets can be converted to Prometheus-like buckets with `le` labels
// with `prometheus_buckets(<metric_name>_bucket)` function from PromQL extensions in VictoriaMetrics.
// (see https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL ):
//
// prometheus_buckets(request_duration_bucket)
//
// Time series produced by the Histogram have better compression ratio comparing to
// Prometheus histogram buckets with `le` labels, since they don't include counters
// for all the previous buckets.
//
// Zero histogram is usable.
type Histogram struct {
// Mu gurantees synchronous update for all the counters and sum.
mu sync.Mutex
decimalBuckets [decimalBucketsCount]*[bucketsPerDecimal]uint64
lower uint64
upper uint64
sum float64
}
// Reset resets the given histogram.
func (h *Histogram) Reset() {
h.mu.Lock()
for _, db := range h.decimalBuckets[:] {
if db == nil {
continue
}
for i := range db[:] {
db[i] = 0
}
}
h.lower = 0
h.upper = 0
h.sum = 0
h.mu.Unlock()
}
// Update updates h with v.
//
// Negative values and NaNs are ignored.
func (h *Histogram) Update(v float64) {
if math.IsNaN(v) || v < 0 {
// Skip NaNs and negative values.
return
}
bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
h.mu.Lock()
h.sum += v
if bucketIdx < 0 {
h.lower++
} else if bucketIdx >= bucketsCount {
h.upper++
} else {
idx := uint(bucketIdx)
if bucketIdx == float64(idx) && idx > 0 {
// Edge case for 10^n values, which must go to the lower bucket
// according to Prometheus logic for `le`-based histograms.
idx--
}
decimalBucketIdx := idx / bucketsPerDecimal
offset := idx % bucketsPerDecimal
db := h.decimalBuckets[decimalBucketIdx]
if db == nil {
var b [bucketsPerDecimal]uint64
db = &b
h.decimalBuckets[decimalBucketIdx] = db
}
db[offset]++
}
h.mu.Unlock()
}
// VisitNonZeroBuckets calls f for all buckets with non-zero counters.
//
// vmrange contains "<start>...<end>" string with bucket bounds. The lower bound
// isn't included in the bucket, while the upper bound is included.
// This is required to be compatible with Prometheus-style histogram buckets
// with `le` (less or equal) labels.
func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
h.mu.Lock()
if h.lower > 0 {
f(lowerBucketRange, h.lower)
}
for decimalBucketIdx, db := range h.decimalBuckets[:] {
if db == nil {
continue
}
for offset, count := range db[:] {
if count > 0 {
bucketIdx := decimalBucketIdx*bucketsPerDecimal + offset
vmrange := getVMRange(bucketIdx)
f(vmrange, count)
}
}
}
if h.upper > 0 {
f(upperBucketRange, h.upper)
}
h.mu.Unlock()
}
// NewHistogram creates and returns new histogram with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
func NewHistogram(name string) *Histogram {
return defaultSet.NewHistogram(name)
}
// GetOrCreateHistogram returns registered histogram with the given name
// or creates new histogram if the registry doesn't contain histogram with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram.
func GetOrCreateHistogram(name string) *Histogram {
return defaultSet.GetOrCreateHistogram(name)
}
// UpdateDuration updates request duration based on the given startTime.
func (h *Histogram) UpdateDuration(startTime time.Time) {
d := time.Since(startTime).Seconds()
h.Update(d)
}
func getVMRange(bucketIdx int) string {
bucketRangesOnce.Do(initBucketRanges)
return bucketRanges[bucketIdx]
}
func initBucketRanges() {
v := math.Pow10(e10Min)
start := fmt.Sprintf("%.3e", v)
for i := 0; i < bucketsCount; i++ {
v *= bucketMultiplier
end := fmt.Sprintf("%.3e", v)
bucketRanges[i] = start + "..." + end
start = end
}
}
var (
lowerBucketRange = fmt.Sprintf("0...%.3e", math.Pow10(e10Min))
upperBucketRange = fmt.Sprintf("%.3e...+Inf", math.Pow10(e10Max))
bucketRanges [bucketsCount]string
bucketRangesOnce sync.Once
)
func (h *Histogram) marshalTo(prefix string, w io.Writer) {
countTotal := uint64(0)
h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
tag := fmt.Sprintf("vmrange=%q", vmrange)
metricName := addTag(prefix, tag)
name, labels := splitMetricName(metricName)
fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, count)
countTotal += count
})
if countTotal == 0 {
return
}
name, labels := splitMetricName(prefix)
sum := h.getSum()
if float64(int64(sum)) == sum {
fmt.Fprintf(w, "%s_sum%s %d\n", name, labels, int64(sum))
} else {
fmt.Fprintf(w, "%s_sum%s %g\n", name, labels, sum)
}
fmt.Fprintf(w, "%s_count%s %d\n", name, labels, countTotal)
}
func (h *Histogram) getSum() float64 {
h.mu.Lock()
sum := h.sum
h.mu.Unlock()
return sum
}

View File

@ -0,0 +1,27 @@
package metrics_test
import (
"fmt"
"time"
"github.com/VictoriaMetrics/metrics"
)
func ExampleHistogram() {
// Define a histogram in global scope.
var h = metrics.NewHistogram(`request_duration_seconds{path="/foo/bar"}`)
// Update the histogram with the duration of processRequest call.
startTime := time.Now()
processRequest()
h.UpdateDuration(startTime)
}
func ExampleHistogram_vec() {
for i := 0; i < 3; i++ {
// Dynamically construct metric name and pass it to GetOrCreateHistogram.
name := fmt.Sprintf(`response_size_bytes{path=%q}`, "/foo/bar")
response := processRequest()
metrics.GetOrCreateHistogram(name).Update(float64(len(response)))
}
}

View File

@ -0,0 +1,200 @@
package metrics
import (
"bytes"
"fmt"
"math"
"reflect"
"strings"
"testing"
"time"
)
func TestGetVMRange(t *testing.T) {
f := func(bucketIdx int, vmrangeExpected string) {
t.Helper()
vmrange := getVMRange(bucketIdx)
if vmrange != vmrangeExpected {
t.Fatalf("unexpected vmrange for bucketIdx=%d; got %s; want %s", bucketIdx, vmrange, vmrangeExpected)
}
}
f(0, "1.000e-09...1.136e-09")
f(1, "1.136e-09...1.292e-09")
f(bucketsPerDecimal-1, "8.799e-09...1.000e-08")
f(bucketsPerDecimal, "1.000e-08...1.136e-08")
f(bucketsPerDecimal*(-e10Min)-1, "8.799e-01...1.000e+00")
f(bucketsPerDecimal*(-e10Min), "1.000e+00...1.136e+00")
f(bucketsPerDecimal*(e10Max-e10Min)-1, "8.799e+17...1.000e+18")
}
func TestHistogramSerial(t *testing.T) {
name := `TestHistogramSerial`
h := NewHistogram(name)
// Verify that the histogram is invisible in the output of WritePrometheus when it has no data.
var bb bytes.Buffer
WritePrometheus(&bb, false)
result := bb.String()
if strings.Contains(result, name) {
t.Fatalf("histogram %s shouldn't be visible in the WritePrometheus output; got\n%s", name, result)
}
// Write data to histogram
for i := 98; i < 218; i++ {
h.Update(float64(i))
}
// Make sure the histogram prints <prefix>_bucket on marshalTo call
testMarshalTo(t, h, "prefix", `prefix_bucket{vmrange="8.799e+01...1.000e+02"} 3
prefix_bucket{vmrange="1.000e+02...1.136e+02"} 13
prefix_bucket{vmrange="1.136e+02...1.292e+02"} 16
prefix_bucket{vmrange="1.292e+02...1.468e+02"} 17
prefix_bucket{vmrange="1.468e+02...1.668e+02"} 20
prefix_bucket{vmrange="1.668e+02...1.896e+02"} 23
prefix_bucket{vmrange="1.896e+02...2.154e+02"} 26
prefix_bucket{vmrange="2.154e+02...2.448e+02"} 2
prefix_sum 18900
prefix_count 120
`)
testMarshalTo(t, h, ` m{foo="bar"}`, ` m_bucket{foo="bar",vmrange="8.799e+01...1.000e+02"} 3
m_bucket{foo="bar",vmrange="1.000e+02...1.136e+02"} 13
m_bucket{foo="bar",vmrange="1.136e+02...1.292e+02"} 16
m_bucket{foo="bar",vmrange="1.292e+02...1.468e+02"} 17
m_bucket{foo="bar",vmrange="1.468e+02...1.668e+02"} 20
m_bucket{foo="bar",vmrange="1.668e+02...1.896e+02"} 23
m_bucket{foo="bar",vmrange="1.896e+02...2.154e+02"} 26
m_bucket{foo="bar",vmrange="2.154e+02...2.448e+02"} 2
m_sum{foo="bar"} 18900
m_count{foo="bar"} 120
`)
// Verify Reset
h.Reset()
bb.Reset()
WritePrometheus(&bb, false)
result = bb.String()
if strings.Contains(result, name) {
t.Fatalf("unexpected histogram %s in the WritePrometheus output; got\n%s", name, result)
}
// Verify supported ranges
for e10 := -100; e10 < 100; e10++ {
for offset := 0; offset < bucketsPerDecimal; offset++ {
m := 1 + math.Pow(bucketMultiplier, float64(offset))
f1 := m * math.Pow10(e10)
h.Update(f1)
f2 := (m + 0.5*bucketMultiplier) * math.Pow10(e10)
h.Update(f2)
f3 := (m + 2*bucketMultiplier) * math.Pow10(e10)
h.Update(f3)
}
}
h.UpdateDuration(time.Now().Add(-time.Minute))
// Verify edge cases
h.Update(0)
h.Update(math.Inf(1))
h.Update(math.Inf(-1))
h.Update(math.NaN())
h.Update(-123)
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1096
h.Update(math.Float64frombits(0x3e112e0be826d695))
// Make sure the histogram becomes visible in the output of WritePrometheus,
// since now it contains values.
bb.Reset()
WritePrometheus(&bb, false)
result = bb.String()
if !strings.Contains(result, name) {
t.Fatalf("missing histogram %s in the WritePrometheus output; got\n%s", name, result)
}
}
func TestHistogramConcurrent(t *testing.T) {
name := "HistogramConcurrent"
h := NewHistogram(name)
err := testConcurrent(func() error {
for f := 0.6; f < 1.4; f += 0.1 {
h.Update(f)
}
return nil
})
if err != nil {
t.Fatal(err)
}
testMarshalTo(t, h, "prefix", `prefix_bucket{vmrange="5.995e-01...6.813e-01"} 5
prefix_bucket{vmrange="6.813e-01...7.743e-01"} 5
prefix_bucket{vmrange="7.743e-01...8.799e-01"} 5
prefix_bucket{vmrange="8.799e-01...1.000e+00"} 10
prefix_bucket{vmrange="1.000e+00...1.136e+00"} 5
prefix_bucket{vmrange="1.136e+00...1.292e+00"} 5
prefix_bucket{vmrange="1.292e+00...1.468e+00"} 5
prefix_sum 38
prefix_count 40
`)
var labels []string
var counts []uint64
h.VisitNonZeroBuckets(func(label string, count uint64) {
labels = append(labels, label)
counts = append(counts, count)
})
labelsExpected := []string{
"5.995e-01...6.813e-01",
"6.813e-01...7.743e-01",
"7.743e-01...8.799e-01",
"8.799e-01...1.000e+00",
"1.000e+00...1.136e+00",
"1.136e+00...1.292e+00",
"1.292e+00...1.468e+00",
}
if !reflect.DeepEqual(labels, labelsExpected) {
t.Fatalf("unexpected labels; got %v; want %v", labels, labelsExpected)
}
countsExpected := []uint64{5, 5, 5, 10, 5, 5, 5}
if !reflect.DeepEqual(counts, countsExpected) {
t.Fatalf("unexpected counts; got %v; want %v", counts, countsExpected)
}
}
func TestHistogramWithTags(t *testing.T) {
name := `TestHistogram{tag="foo"}`
h := NewHistogram(name)
h.Update(123)
var bb bytes.Buffer
WritePrometheus(&bb, false)
result := bb.String()
namePrefixWithTag := `TestHistogram_bucket{tag="foo",vmrange="1.136e+02...1.292e+02"} 1` + "\n"
if !strings.Contains(result, namePrefixWithTag) {
t.Fatalf("missing histogram %s in the WritePrometheus output; got\n%s", namePrefixWithTag, result)
}
}
func TestGetOrCreateHistogramSerial(t *testing.T) {
name := "GetOrCreateHistogramSerial"
if err := testGetOrCreateHistogram(name); err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateHistogramConcurrent(t *testing.T) {
name := "GetOrCreateHistogramConcurrent"
err := testConcurrent(func() error {
return testGetOrCreateHistogram(name)
})
if err != nil {
t.Fatal(err)
}
}
func testGetOrCreateHistogram(name string) error {
h1 := GetOrCreateHistogram(name)
for i := 0; i < 10; i++ {
h2 := GetOrCreateHistogram(name)
if h1 != h2 {
return fmt.Errorf("unexpected histogram returned; got %p; want %p", h2, h1)
}
}
return nil
}

View File

@ -0,0 +1,17 @@
package metrics
import (
"testing"
)
func BenchmarkHistogramUpdate(b *testing.B) {
h := GetOrCreateHistogram("BenchmarkHistogramUpdate")
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
h.Update(float64(i))
i++
}
})
}

112
vendor/github.com/VictoriaMetrics/metrics/metrics.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
// Package metrics implements Prometheus-compatible metrics for applications.
//
// This package is lightweight alternative to https://github.com/prometheus/client_golang
// with simpler API and smaller dependencies.
//
// Usage:
//
// 1. Register the required metrics via New* functions.
// 2. Expose them to `/metrics` page via WritePrometheus.
// 3. Update the registered metrics during application lifetime.
//
// The package has been extracted from https://victoriametrics.com/
package metrics
import (
"io"
)
type namedMetric struct {
name string
metric metric
}
type metric interface {
marshalTo(prefix string, w io.Writer)
}
var defaultSet = NewSet()
// WritePrometheus writes all the registered metrics in Prometheus format to w.
//
// If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics
// are exposed for the current process.
//
// The WritePrometheus func is usually called inside "/metrics" handler:
//
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
// metrics.WritePrometheus(w, true)
// })
//
func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
defaultSet.WritePrometheus(w)
if exposeProcessMetrics {
WriteProcessMetrics(w)
}
}
// WriteProcessMetrics writes additional process metrics in Prometheus format to w.
//
// The following `go_*` and `process_*` metrics are exposed for the currently
// running process. Below is a short description for the exposed `process_*` metrics:
//
// - process_cpu_seconds_system_total - CPU time spent in syscalls
// - process_cpu_seconds_user_total - CPU time spent in userspace
// - process_cpu_seconds_total - CPU time spent by the process
// - process_major_pagefaults_total - page faults resulted in disk IO
// - process_minor_pagefaults_total - page faults resolved without disk IO
// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
// - process_resident_memory_peak_bytes - the maximum RSS memory usage
// - process_resident_memory_anon_bytes - RSS for memory-mapped files
// - process_resident_memory_file_bytes - RSS for memory allocated by the process
// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
// - process_virtual_memory_bytes - virtual memory usage
// - process_virtual_memory_peak_bytes - the maximum virtual memory usage
// - process_num_threads - the number of threads
// - process_start_time_seconds - process start time as unix timestamp
//
// - process_io_read_bytes_total - the number of bytes read via syscalls
// - process_io_written_bytes_total - the number of bytes written via syscalls
// - process_io_read_syscalls_total - the number of read syscalls
// - process_io_write_syscalls_total - the number of write syscalls
// - process_io_storage_read_bytes_total - the number of bytes actually read from disk
// - process_io_storage_written_bytes_total - the number of bytes actually written to disk
//
// - go_memstats_alloc_bytes - memory usage for Go objects in the heap
// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
// - go_memstats_frees_total - the cumulative counter for number of freed Go objects
// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
// - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
// - go_memstats_heap_objects - the number of Go objects in the heap
// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
// - go_memstats_mallocs_total - the number of allocations for Go objects
// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
// - go_memstats_stack_sys_bytes - memory requested fromthe OS for goroutine stacks
// - go_memstats_sys_bytes - memory requested by Go runtime from the OS
//
// The WriteProcessMetrics func is usually called in combination with writing Set metrics
// inside "/metrics" handler:
//
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
// mySet.WritePrometheus(w)
// metrics.WriteProcessMetrics(w)
// })
//
// See also WrteFDMetrics.
func WriteProcessMetrics(w io.Writer) {
writeGoMetrics(w)
writeProcessMetrics(w)
}
// WriteFDMetrics writes `process_max_fds` and `process_open_fds` metrics to w.
func WriteFDMetrics(w io.Writer) {
writeFDMetrics(w)
}
// UnregisterMetric removes metric with the given name from default set.
func UnregisterMetric(name string) bool {
return defaultSet.UnregisterMetric(name)
}

View File

@ -0,0 +1,14 @@
package metrics_test
import (
"net/http"
"github.com/VictoriaMetrics/metrics"
)
func ExampleWritePrometheus() {
// Export all the registered metrics in Prometheus format at `/metrics` http path.
http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
metrics.WritePrometheus(w, true)
})
}

View File

@ -0,0 +1,146 @@
package metrics
import (
"bytes"
"fmt"
"testing"
"time"
)
func TestInvalidName(t *testing.T) {
f := func(name string) {
t.Helper()
expectPanic(t, fmt.Sprintf("NewCounter(%q)", name), func() { NewCounter(name) })
expectPanic(t, fmt.Sprintf("NewGauge(%q)", name), func() { NewGauge(name, func() float64 { return 0 }) })
expectPanic(t, fmt.Sprintf("NewSummary(%q)", name), func() { NewSummary(name) })
expectPanic(t, fmt.Sprintf("GetOrCreateCounter(%q)", name), func() { GetOrCreateCounter(name) })
expectPanic(t, fmt.Sprintf("GetOrCreateGauge(%q)", name), func() { GetOrCreateGauge(name, func() float64 { return 0 }) })
expectPanic(t, fmt.Sprintf("GetOrCreateSummary(%q)", name), func() { GetOrCreateSummary(name) })
expectPanic(t, fmt.Sprintf("GetOrCreateHistogram(%q)", name), func() { GetOrCreateHistogram(name) })
}
f("")
f("foo{")
f("foo}")
f("foo{bar")
f("foo{bar=")
f(`foo{bar="`)
f(`foo{bar="baz`)
f(`foo{bar="baz"`)
f(`foo{bar="baz",`)
f(`foo{bar="baz",}`)
}
func TestDoubleRegister(t *testing.T) {
t.Run("NewCounter", func(t *testing.T) {
name := "NewCounterDoubleRegister"
NewCounter(name)
expectPanic(t, name, func() { NewCounter(name) })
})
t.Run("NewGauge", func(t *testing.T) {
name := "NewGaugeDoubleRegister"
NewGauge(name, func() float64 { return 0 })
expectPanic(t, name, func() { NewGauge(name, func() float64 { return 0 }) })
})
t.Run("NewSummary", func(t *testing.T) {
name := "NewSummaryDoubleRegister"
NewSummary(name)
expectPanic(t, name, func() { NewSummary(name) })
})
t.Run("NewHistogram", func(t *testing.T) {
name := "NewHistogramDoubleRegister"
NewHistogram(name)
expectPanic(t, name, func() { NewSummary(name) })
})
}
func TestGetOrCreateNotCounter(t *testing.T) {
name := "GetOrCreateNotCounter"
NewSummary(name)
expectPanic(t, name, func() { GetOrCreateCounter(name) })
}
func TestGetOrCreateNotGauge(t *testing.T) {
name := "GetOrCreateNotGauge"
NewCounter(name)
expectPanic(t, name, func() { GetOrCreateGauge(name, func() float64 { return 0 }) })
}
func TestGetOrCreateNotSummary(t *testing.T) {
name := "GetOrCreateNotSummary"
NewCounter(name)
expectPanic(t, name, func() { GetOrCreateSummary(name) })
}
func TestGetOrCreateNotHistogram(t *testing.T) {
name := "GetOrCreateNotHistogram"
NewCounter(name)
expectPanic(t, name, func() { GetOrCreateHistogram(name) })
}
func TestWritePrometheusSerial(t *testing.T) {
if err := testWritePrometheus(); err != nil {
t.Fatal(err)
}
}
func TestWritePrometheusConcurrent(t *testing.T) {
if err := testConcurrent(testWritePrometheus); err != nil {
t.Fatal(err)
}
}
func testWritePrometheus() error {
var bb bytes.Buffer
WritePrometheus(&bb, false)
resultWithoutProcessMetrics := bb.String()
bb.Reset()
WritePrometheus(&bb, true)
resultWithProcessMetrics := bb.String()
if len(resultWithProcessMetrics) <= len(resultWithoutProcessMetrics) {
return fmt.Errorf("result with process metrics must contain more data than the result without process metrics; got\n%q\nvs\n%q",
resultWithProcessMetrics, resultWithoutProcessMetrics)
}
return nil
}
func expectPanic(t *testing.T, context string, f func()) {
t.Helper()
defer func() {
t.Helper()
if r := recover(); r == nil {
t.Fatalf("expecting panic in %s", context)
}
}()
f()
}
func testConcurrent(f func() error) error {
const concurrency = 5
resultsCh := make(chan error, concurrency)
for i := 0; i < concurrency; i++ {
go func() {
resultsCh <- f()
}()
}
for i := 0; i < concurrency; i++ {
select {
case err := <-resultsCh:
if err != nil {
return fmt.Errorf("unexpected error: %s", err)
}
case <-time.After(time.Second * 5):
return fmt.Errorf("timeout")
}
}
return nil
}
func testMarshalTo(t *testing.T, m metric, prefix, resultExpected string) {
t.Helper()
var bb bytes.Buffer
m.marshalTo(prefix, &bb)
result := bb.String()
if result != resultExpected {
t.Fatalf("unexpected marshaled metric;\ngot\n%q\nwant\n%q", result, resultExpected)
}
}


@ -0,0 +1,265 @@
package metrics
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"time"
)
// See https://github.com/prometheus/procfs/blob/a4ac0826abceb44c40fc71daed2b301db498b93e/proc_stat.go#L40 .
const userHZ = 100
// See http://man7.org/linux/man-pages/man5/proc.5.html
type procStat struct {
State byte
Ppid int
Pgrp int
Session int
TtyNr int
Tpgid int
Flags uint
Minflt uint
Cminflt uint
Majflt uint
Cmajflt uint
Utime uint
Stime uint
Cutime int
Cstime int
Priority int
Nice int
NumThreads int
ItrealValue int
Starttime uint64
Vsize uint
Rss int
}
func writeProcessMetrics(w io.Writer) {
statFilepath := "/proc/self/stat"
data, err := ioutil.ReadFile(statFilepath)
if err != nil {
log.Printf("ERROR: cannot open %s: %s", statFilepath, err)
return
}
// Search for the end of command.
n := bytes.LastIndex(data, []byte(") "))
if n < 0 {
log.Printf("ERROR: cannot find command in parentheses in %q read from %s", data, statFilepath)
return
}
data = data[n+2:]
var p procStat
bb := bytes.NewBuffer(data)
_, err = fmt.Fscanf(bb, "%c %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
&p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt,
&p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss)
if err != nil {
log.Printf("ERROR: cannot parse %q read from %s: %s", data, statFilepath, err)
return
}
// Obtaining `process_open_fds` is expensive when a large number of file descriptors is open, so don't do it here.
// See writeFDMetrics instead.
utime := float64(p.Utime) / userHZ
stime := float64(p.Stime) / userHZ
fmt.Fprintf(w, "process_cpu_seconds_system_total %g\n", stime)
fmt.Fprintf(w, "process_cpu_seconds_total %g\n", utime+stime)
fmt.Fprintf(w, "process_cpu_seconds_user_total %g\n", utime)
fmt.Fprintf(w, "process_major_pagefaults_total %d\n", p.Majflt)
fmt.Fprintf(w, "process_minor_pagefaults_total %d\n", p.Minflt)
fmt.Fprintf(w, "process_num_threads %d\n", p.NumThreads)
fmt.Fprintf(w, "process_resident_memory_bytes %d\n", p.Rss*4096)
fmt.Fprintf(w, "process_start_time_seconds %d\n", startTimeSeconds)
fmt.Fprintf(w, "process_virtual_memory_bytes %d\n", p.Vsize)
writeProcessMemMetrics(w)
writeIOMetrics(w)
}
func writeIOMetrics(w io.Writer) {
ioFilepath := "/proc/self/io"
data, err := ioutil.ReadFile(ioFilepath)
if err != nil {
log.Printf("ERROR: cannot open %q: %s", ioFilepath, err)
return
}
getInt := func(s string) int64 {
n := strings.IndexByte(s, ' ')
if n < 0 {
log.Printf("ERROR: cannot find whitespace in %q at %q", s, ioFilepath)
return 0
}
v, err := strconv.ParseInt(s[n+1:], 10, 64)
if err != nil {
log.Printf("ERROR: cannot parse %q at %q: %s", s, ioFilepath, err)
return 0
}
return v
}
var rchar, wchar, syscr, syscw, readBytes, writeBytes int64
lines := strings.Split(string(data), "\n")
for _, s := range lines {
s = strings.TrimSpace(s)
switch {
case strings.HasPrefix(s, "rchar: "):
rchar = getInt(s)
case strings.HasPrefix(s, "wchar: "):
wchar = getInt(s)
case strings.HasPrefix(s, "syscr: "):
syscr = getInt(s)
case strings.HasPrefix(s, "syscw: "):
syscw = getInt(s)
case strings.HasPrefix(s, "read_bytes: "):
readBytes = getInt(s)
case strings.HasPrefix(s, "write_bytes: "):
writeBytes = getInt(s)
}
}
fmt.Fprintf(w, "process_io_read_bytes_total %d\n", rchar)
fmt.Fprintf(w, "process_io_written_bytes_total %d\n", wchar)
fmt.Fprintf(w, "process_io_read_syscalls_total %d\n", syscr)
fmt.Fprintf(w, "process_io_write_syscalls_total %d\n", syscw)
fmt.Fprintf(w, "process_io_storage_read_bytes_total %d\n", readBytes)
fmt.Fprintf(w, "process_io_storage_written_bytes_total %d\n", writeBytes)
}
var startTimeSeconds = time.Now().Unix()
// writeFDMetrics writes process_max_fds and process_open_fds metrics to w.
func writeFDMetrics(w io.Writer) {
totalOpenFDs, err := getOpenFDsCount("/proc/self/fd")
if err != nil {
log.Printf("ERROR: cannot determine open file descriptors count: %s", err)
return
}
maxOpenFDs, err := getMaxFilesLimit("/proc/self/limits")
if err != nil {
log.Printf("ERROR: cannot determine the limit on open file descritors: %s", err)
return
}
fmt.Fprintf(w, "process_max_fds %d\n", maxOpenFDs)
fmt.Fprintf(w, "process_open_fds %d\n", totalOpenFDs)
}
func getOpenFDsCount(path string) (uint64, error) {
f, err := os.Open(path)
if err != nil {
return 0, err
}
defer f.Close()
var totalOpenFDs uint64
for {
names, err := f.Readdirnames(512)
if err == io.EOF {
break
}
if err != nil {
return 0, fmt.Errorf("unexpected error at Readdirnames: %s", err)
}
totalOpenFDs += uint64(len(names))
}
return totalOpenFDs, nil
}
func getMaxFilesLimit(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
lines := strings.Split(string(data), "\n")
const prefix = "Max open files"
for _, s := range lines {
if !strings.HasPrefix(s, prefix) {
continue
}
text := strings.TrimSpace(s[len(prefix):])
// Extract soft limit.
n := strings.IndexByte(text, ' ')
if n < 0 {
return 0, fmt.Errorf("cannot extract soft limit from %q", s)
}
text = text[:n]
if text == "unlimited" {
return 1<<64 - 1, nil
}
limit, err := strconv.ParseUint(text, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse soft limit from %q: %s", s, err)
}
return limit, nil
}
return 0, fmt.Errorf("cannot find max open files limit")
}
// https://man7.org/linux/man-pages/man5/procfs.5.html
type memStats struct {
vmPeak uint64
rssPeak uint64
rssAnon uint64
rssFile uint64
rssShmem uint64
}
func writeProcessMemMetrics(w io.Writer) {
ms, err := getMemStats("/proc/self/status")
if err != nil {
log.Printf("ERROR: cannot determine memory status: %s", err)
return
}
fmt.Fprintf(w, "process_virtual_memory_peak_bytes %d\n", ms.vmPeak)
fmt.Fprintf(w, "process_resident_memory_peak_bytes %d\n", ms.rssPeak)
fmt.Fprintf(w, "process_resident_memory_anon_bytes %d\n", ms.rssAnon)
fmt.Fprintf(w, "process_resident_memory_file_bytes %d\n", ms.rssFile)
fmt.Fprintf(w, "process_resident_memory_shared_bytes %d\n", ms.rssShmem)
}
func getMemStats(path string) (*memStats, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var ms memStats
lines := strings.Split(string(data), "\n")
for _, s := range lines {
if !strings.HasPrefix(s, "Vm") && !strings.HasPrefix(s, "Rss") {
continue
}
// Extract key value.
line := strings.Fields(s)
if len(line) != 3 {
return nil, fmt.Errorf("unexpected number of fields found in %q; got %d; want %d", s, len(line), 3)
}
memStatName := line[0]
memStatValue := line[1]
value, err := strconv.ParseUint(memStatValue, 10, 64)
if err != nil {
return nil, fmt.Errorf("cannot parse number from %q: %w", s, err)
}
if line[2] != "kB" {
return nil, fmt.Errorf("expecting kB value in %q; got %q", s, line[2])
}
value *= 1024
switch memStatName {
case "VmPeak:":
ms.vmPeak = value
case "VmHWM:":
ms.rssPeak = value
case "RssAnon:":
ms.rssAnon = value
case "RssFile:":
ms.rssFile = value
case "RssShmem:":
ms.rssShmem = value
}
}
return &ms, nil
}


@ -0,0 +1,51 @@
package metrics
import "testing"
func TestGetMaxFilesLimit(t *testing.T) {
f := func(want uint64, path string, wantErr bool) {
t.Helper()
got, err := getMaxFilesLimit(path)
if err != nil && !wantErr {
t.Fatalf("unexpected error: %v", err)
}
if got != want {
t.Fatalf("unexpected result: %d, want: %d at getMaxFilesLimit", got, want)
}
}
f(1024, "testdata/limits", false)
f(0, "testdata/bad_path", true)
f(0, "testdata/limits_bad", true)
}
func TestGetOpenFDsCount(t *testing.T) {
f := func(want uint64, path string, wantErr bool) {
t.Helper()
got, err := getOpenFDsCount(path)
if (err != nil && !wantErr) || (err == nil && wantErr) {
t.Fatalf("unexpected error: %v", err)
}
if got != want {
t.Fatalf("unexpected result: %d, want: %d at getOpenFDsCount", got, want)
}
}
f(5, "testdata/fd/", false)
f(0, "testdata/fd/0", true)
f(0, "testdata/limits", true)
}
func TestGetMemStats(t *testing.T) {
f := func(want memStats, path string, wantErr bool) {
t.Helper()
got, err := getMemStats(path)
if (err != nil && !wantErr) || (err == nil && wantErr) {
t.Fatalf("unexpected error: %v", err)
}
if got != nil && *got != want {
t.Fatalf("unexpected result: %d, want: %d at getMemStats", *got, want)
}
}
f(memStats{vmPeak: 2130489344, rssPeak: 200679424, rssAnon: 121602048, rssFile: 11362304}, "testdata/status", false)
f(memStats{}, "testdata/status_bad", true)
}


@ -0,0 +1,15 @@
// +build !linux
package metrics
import (
"io"
)
func writeProcessMetrics(w io.Writer) {
// TODO: implement it
}
func writeFDMetrics(w io.Writer) {
// TODO: implement it.
}

vendor/github.com/VictoriaMetrics/metrics/set.go generated vendored Normal file

@ -0,0 +1,519 @@
package metrics
import (
"bytes"
"fmt"
"io"
"sort"
"sync"
"time"
)
// Set is a set of metrics.
//
// Metrics belonging to a set are exported separately from global metrics.
//
// Set.WritePrometheus must be called for exporting metrics from the set.
type Set struct {
mu sync.Mutex
a []*namedMetric
m map[string]*namedMetric
summaries []*Summary
}
// NewSet creates new set of metrics.
func NewSet() *Set {
return &Set{
m: make(map[string]*namedMetric),
}
}
// WritePrometheus writes all the metrics from s to w in Prometheus format.
func (s *Set) WritePrometheus(w io.Writer) {
// Collect all the metrics into an in-memory buffer in order to prevent long locking due to a slow w.
var bb bytes.Buffer
lessFunc := func(i, j int) bool {
return s.a[i].name < s.a[j].name
}
s.mu.Lock()
for _, sm := range s.summaries {
sm.updateQuantiles()
}
if !sort.SliceIsSorted(s.a, lessFunc) {
sort.Slice(s.a, lessFunc)
}
sa := append([]*namedMetric(nil), s.a...)
s.mu.Unlock()
// Call marshalTo without the global lock, since certain metric types such as Gauge
// can call a callback, which, in turn, can try calling s.mu.Lock again.
for _, nm := range sa {
nm.metric.marshalTo(nm.name, &bb)
}
w.Write(bb.Bytes())
}
// NewHistogram creates and returns new histogram in s with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
func (s *Set) NewHistogram(name string) *Histogram {
h := &Histogram{}
s.registerMetric(name, h)
return h
}
// GetOrCreateHistogram returns registered histogram in s with the given name
// or creates new histogram if s doesn't contain histogram with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram.
func (s *Set) GetOrCreateHistogram(name string) *Histogram {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
// Slow path - create and register missing histogram.
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
nmNew := &namedMetric{
name: name,
metric: &Histogram{},
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
h, ok := nm.metric.(*Histogram)
if !ok {
panic(fmt.Errorf("BUG: metric %q isn't a Histogram. It is %T", name, nm.metric))
}
return h
}
// NewCounter registers and returns new counter with the given name in the s.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
func (s *Set) NewCounter(name string) *Counter {
c := &Counter{}
s.registerMetric(name, c)
return c
}
// GetOrCreateCounter returns registered counter in s with the given name
// or creates new counter if s doesn't contain counter with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
func (s *Set) GetOrCreateCounter(name string) *Counter {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
// Slow path - create and register missing counter.
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
nmNew := &namedMetric{
name: name,
metric: &Counter{},
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
c, ok := nm.metric.(*Counter)
if !ok {
panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric))
}
return c
}
// NewFloatCounter registers and returns new FloatCounter with the given name in the s.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned FloatCounter is safe to use from concurrent goroutines.
func (s *Set) NewFloatCounter(name string) *FloatCounter {
c := &FloatCounter{}
s.registerMetric(name, c)
return c
}
// GetOrCreateFloatCounter returns registered FloatCounter in s with the given name
// or creates new FloatCounter if s doesn't contain FloatCounter with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned FloatCounter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter.
func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
// Slow path - create and register missing counter.
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
nmNew := &namedMetric{
name: name,
metric: &FloatCounter{},
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
c, ok := nm.metric.(*FloatCounter)
if !ok {
panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric))
}
return c
}
// NewGauge registers and returns gauge with the given name in s, which calls f
// to obtain gauge value.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// f must be safe for concurrent calls.
//
// The returned gauge is safe to use from concurrent goroutines.
func (s *Set) NewGauge(name string, f func() float64) *Gauge {
if f == nil {
panic(fmt.Errorf("BUG: f cannot be nil"))
}
g := &Gauge{
f: f,
}
s.registerMetric(name, g)
return g
}
// GetOrCreateGauge returns registered gauge with the given name in s
// or creates new gauge if s doesn't contain gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
// Slow path - create and register missing gauge.
if f == nil {
panic(fmt.Errorf("BUG: f cannot be nil"))
}
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
nmNew := &namedMetric{
name: name,
metric: &Gauge{
f: f,
},
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
}
s.mu.Unlock()
}
g, ok := nm.metric.(*Gauge)
if !ok {
panic(fmt.Errorf("BUG: metric %q isn't a Gauge. It is %T", name, nm.metric))
}
return g
}
// NewSummary creates and returns new summary with the given name in s.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
func (s *Set) NewSummary(name string) *Summary {
return s.NewSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
}
// NewSummaryExt creates and returns new summary in s with the given name,
// window and quantiles.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
sm := newSummary(window, quantiles)
s.mu.Lock()
// defer unlocks even in case of panic; this is verified in tests
defer s.mu.Unlock()
s.mustRegisterLocked(name, sm)
registerSummaryLocked(sm)
s.registerSummaryQuantilesLocked(name, sm)
s.summaries = append(s.summaries, sm)
return sm
}
// GetOrCreateSummary returns registered summary with the given name in s
// or creates new summary if s doesn't contain summary with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
func (s *Set) GetOrCreateSummary(name string) *Summary {
return s.GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
}
// GetOrCreateSummaryExt returns registered summary with the given name,
// window and quantiles in s or creates new summary if s doesn't
// contain summary with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt.
func (s *Set) GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
s.mu.Lock()
nm := s.m[name]
s.mu.Unlock()
if nm == nil {
// Slow path - create and register missing summary.
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
sm := newSummary(window, quantiles)
nmNew := &namedMetric{
name: name,
metric: sm,
}
s.mu.Lock()
nm = s.m[name]
if nm == nil {
nm = nmNew
s.m[name] = nm
s.a = append(s.a, nm)
registerSummaryLocked(sm)
s.registerSummaryQuantilesLocked(name, sm)
s.summaries = append(s.summaries, sm)
}
s.mu.Unlock()
}
sm, ok := nm.metric.(*Summary)
if !ok {
panic(fmt.Errorf("BUG: metric %q isn't a Summary. It is %T", name, nm.metric))
}
if sm.window != window {
panic(fmt.Errorf("BUG: invalid window requested for the summary %q; requested %s; need %s", name, window, sm.window))
}
if !isEqualQuantiles(sm.quantiles, quantiles) {
panic(fmt.Errorf("BUG: invalid quantiles requested from the summary %q; requested %v; need %v", name, quantiles, sm.quantiles))
}
return sm
}
func (s *Set) registerSummaryQuantilesLocked(name string, sm *Summary) {
for i, q := range sm.quantiles {
quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q))
qv := &quantileValue{
sm: sm,
idx: i,
}
s.mustRegisterLocked(quantileValueName, qv)
}
}
func (s *Set) registerMetric(name string, m metric) {
if err := validateMetric(name); err != nil {
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
}
s.mu.Lock()
// defer unlocks even in case of panic; this is verified in tests
defer s.mu.Unlock()
s.mustRegisterLocked(name, m)
}
// mustRegisterLocked registers the given metric under the given name.
// It panics if a metric with that name is already registered.
func (s *Set) mustRegisterLocked(name string, m metric) {
if _, ok := s.m[name]; ok {
panic(fmt.Errorf("BUG: metric %q is already registered", name))
}
nm := &namedMetric{
name: name,
metric: m,
}
s.m[name] = nm
s.a = append(s.a, nm)
}
// UnregisterMetric removes metric with the given name from s.
//
// True is returned if the metric has been removed.
// False is returned if the given metric is missing in s.
func (s *Set) UnregisterMetric(name string) bool {
s.mu.Lock()
defer s.mu.Unlock()
nm, ok := s.m[name]
if !ok {
return false
}
m := nm.metric
delete(s.m, name)
deleteFromList := func(metricName string) {
for i, nm := range s.a {
if nm.name == metricName {
s.a = append(s.a[:i], s.a[i+1:]...)
return
}
}
panic(fmt.Errorf("BUG: cannot find metric %q in the list of registered metrics", name))
}
// remove metric from s.a
deleteFromList(name)
sm, ok := m.(*Summary)
if !ok {
// Not a summary, so there is no additional cleanup to do.
return true
}
// Clean up the per-quantile metrics from the registry.
for _, q := range sm.quantiles {
quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q))
delete(s.m, quantileValueName)
deleteFromList(quantileValueName)
}
// Remove sm from s.summaries
found := false
for i, xsm := range s.summaries {
if xsm == sm {
s.summaries = append(s.summaries[:i], s.summaries[i+1:]...)
found = true
break
}
}
if !found {
panic(fmt.Errorf("BUG: cannot find summary %q in the list of registered summaries", name))
}
unregisterSummary(sm)
return true
}
// ListMetricNames returns the names of all the metrics in s.
func (s *Set) ListMetricNames() []string {
var list []string
for name := range s.m {
list = append(list, name)
}
return list
}


@ -0,0 +1,25 @@
package metrics_test
import (
"bytes"
"fmt"
"github.com/VictoriaMetrics/metrics"
)
func ExampleSet() {
// Create a set with a counter
s := metrics.NewSet()
sc := s.NewCounter("set_counter")
sc.Inc()
s.NewGauge(`set_gauge{foo="bar"}`, func() float64 { return 42 })
// Dump metrics from s.
var bb bytes.Buffer
s.WritePrometheus(&bb)
fmt.Printf("set metrics:\n%s\n", bb.String())
// Output:
// set metrics:
// set_counter 1
// set_gauge{foo="bar"} 42
}

vendor/github.com/VictoriaMetrics/metrics/set_test.go generated vendored Normal file

@ -0,0 +1,152 @@
package metrics
import (
"fmt"
"sync"
"testing"
"time"
)
func TestNewSet(t *testing.T) {
var ss []*Set
for i := 0; i < 10; i++ {
s := NewSet()
ss = append(ss, s)
}
for i := 0; i < 10; i++ {
s := ss[i]
for j := 0; j < 10; j++ {
c := s.NewCounter(fmt.Sprintf("counter_%d", j))
c.Inc()
if n := c.Get(); n != 1 {
t.Fatalf("unexpected counter value; got %d; want %d", n, 1)
}
g := s.NewGauge(fmt.Sprintf("gauge_%d", j), func() float64 { return 123 })
if v := g.Get(); v != 123 {
t.Fatalf("unexpected gauge value; got %v; want %v", v, 123)
}
sm := s.NewSummary(fmt.Sprintf("summary_%d", j))
if sm == nil {
t.Fatalf("NewSummary returned nil")
}
h := s.NewHistogram(fmt.Sprintf("histogram_%d", j))
if h == nil {
t.Fatalf("NewHistogram returned nil")
}
}
}
}
func TestSetListMetricNames(t *testing.T) {
s := NewSet()
expect := []string{"cnt1", "cnt2", "cnt3"}
// Initialize a few counters
for _, n := range expect {
c := s.NewCounter(n)
c.Inc()
}
list := s.ListMetricNames()
if len(list) != len(expect) {
t.Fatalf("Metrics count is wrong for listing")
}
for _, e := range expect {
found := false
for _, n := range list {
if e == n {
found = true
}
}
if !found {
t.Fatalf("Metric %s not found in listing", e)
}
}
}
func TestSetUnregisterMetric(t *testing.T) {
s := NewSet()
const cName, smName = "counter_1", "summary_1"
// Initialize a few metrics
c := s.NewCounter(cName)
c.Inc()
sm := s.NewSummary(smName)
sm.Update(1)
// Unregister existing metrics
if !s.UnregisterMetric(cName) {
t.Fatalf("UnregisterMetric(%s) must return true", cName)
}
if !s.UnregisterMetric(smName) {
t.Fatalf("UnregisterMetric(%s) must return true", smName)
}
// Unregister twice must return false
if s.UnregisterMetric(cName) {
t.Fatalf("UnregisterMetric(%s) must return false on unregistered metric", cName)
}
if s.UnregisterMetric(smName) {
t.Fatalf("UnregisterMetric(%s) must return false on unregistered metric", smName)
}
// verify that registry is empty
if len(s.m) != 0 {
t.Fatalf("expected metrics map to be empty; got %d elements", len(s.m))
}
if len(s.a) != 0 {
t.Fatalf("expected metrics list to be empty; got %d elements", len(s.a))
}
// Validate metrics are removed
ok := false
for _, n := range s.ListMetricNames() {
if n == cName || n == smName {
ok = true
}
}
if ok {
t.Fatalf("Metric counter_1 and summary_1 must not be listed anymore after unregister")
}
// re-register with the same names supposed
// to be successful
s.NewCounter(cName).Inc()
s.NewSummary(smName).Update(float64(1))
}
// TestRegisterUnregister tests concurrent access to
// metrics during registering and unregistering.
// Should be tested specifically with `-race` enabled.
func TestRegisterUnregister(t *testing.T) {
const (
workers = 16
iterations = 1e3
)
wg := sync.WaitGroup{}
wg.Add(workers)
for n := 0; n < workers; n++ {
go func() {
defer wg.Done()
now := time.Now()
for i := 0; i < iterations; i++ {
iteration := i % 5
counter := fmt.Sprintf(`counter{iteration="%d"}`, iteration)
GetOrCreateCounter(counter).Add(i)
UnregisterMetric(counter)
histogram := fmt.Sprintf(`histogram{iteration="%d"}`, iteration)
GetOrCreateHistogram(histogram).UpdateDuration(now)
UnregisterMetric(histogram)
gauge := fmt.Sprintf(`gauge{iteration="%d"}`, iteration)
GetOrCreateGauge(gauge, func() float64 { return 1 })
UnregisterMetric(gauge)
summary := fmt.Sprintf(`summary{iteration="%d"}`, iteration)
GetOrCreateSummary(summary).Update(float64(i))
UnregisterMetric(summary)
}
}()
}
wg.Wait()
}

vendor/github.com/VictoriaMetrics/metrics/summary.go generated vendored Normal file

@ -0,0 +1,254 @@
package metrics
import (
"fmt"
"io"
"math"
"strings"
"sync"
"time"
"github.com/valyala/histogram"
)
const defaultSummaryWindow = 5 * time.Minute
var defaultSummaryQuantiles = []float64{0.5, 0.9, 0.97, 0.99, 1}
// Summary implements summary.
type Summary struct {
mu sync.Mutex
curr *histogram.Fast
next *histogram.Fast
quantiles []float64
quantileValues []float64
sum float64
count uint64
window time.Duration
}
// NewSummary creates and returns new summary with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
func NewSummary(name string) *Summary {
return defaultSet.NewSummary(name)
}
// NewSummaryExt creates and returns new summary with the given name,
// window and quantiles.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
return defaultSet.NewSummaryExt(name, window, quantiles)
}
func newSummary(window time.Duration, quantiles []float64) *Summary {
// Make a copy of quantiles in order to prevent their modification by the caller.
quantiles = append([]float64{}, quantiles...)
validateQuantiles(quantiles)
sm := &Summary{
curr: histogram.NewFast(),
next: histogram.NewFast(),
quantiles: quantiles,
quantileValues: make([]float64, len(quantiles)),
window: window,
}
return sm
}
func validateQuantiles(quantiles []float64) {
for _, q := range quantiles {
if q < 0 || q > 1 {
panic(fmt.Errorf("BUG: quantile must be in the range [0..1]; got %v", q))
}
}
}
// Update updates the summary.
func (sm *Summary) Update(v float64) {
sm.mu.Lock()
sm.curr.Update(v)
sm.next.Update(v)
sm.sum += v
sm.count++
sm.mu.Unlock()
}
// UpdateDuration updates request duration based on the given startTime.
func (sm *Summary) UpdateDuration(startTime time.Time) {
d := time.Since(startTime).Seconds()
sm.Update(d)
}
func (sm *Summary) marshalTo(prefix string, w io.Writer) {
// Marshal only *_sum and *_count values.
// Quantile values should be already updated by the caller via sm.updateQuantiles() call.
// sm.quantileValues will be marshaled later via quantileValue.marshalTo.
sm.mu.Lock()
sum := sm.sum
count := sm.count
sm.mu.Unlock()
if count > 0 {
name, filters := splitMetricName(prefix)
if float64(int64(sum)) == sum {
// Marshal integer sum without scientific notation
fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum))
} else {
fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum)
}
fmt.Fprintf(w, "%s_count%s %d\n", name, filters, count)
}
}
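// splitMetricName splits a full metric name such as `foo{bar="baz"}` into
// the bare name `foo` and the label part `{bar="baz"}`; the label part is
// empty when the name carries no labels.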
func splitMetricName(name string) (string, string) {
n := strings.IndexByte(name, '{')
if n < 0 {
return name, ""
}
return name[:n], name[n:]
}
func (sm *Summary) updateQuantiles() {
sm.mu.Lock()
sm.quantileValues = sm.curr.Quantiles(sm.quantileValues[:0], sm.quantiles)
sm.mu.Unlock()
}
// GetOrCreateSummary returns registered summary with the given name
// or creates new summary if the registry doesn't contain summary with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
func GetOrCreateSummary(name string) *Summary {
return defaultSet.GetOrCreateSummary(name)
}
// GetOrCreateSummaryExt returns registered summary with the given name,
// window and quantiles or creates new summary if the registry doesn't
// contain summary with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// * foo
// * foo{bar="baz"}
// * foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt.
func GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
return defaultSet.GetOrCreateSummaryExt(name, window, quantiles)
}
func isEqualQuantiles(a, b []float64) bool {
// Do not use reflect.DeepEqual, since it is slower than the direct comparison.
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
type quantileValue struct {
sm *Summary
idx int
}
func (qv *quantileValue) marshalTo(prefix string, w io.Writer) {
qv.sm.mu.Lock()
v := qv.sm.quantileValues[qv.idx]
qv.sm.mu.Unlock()
if !math.IsNaN(v) {
fmt.Fprintf(w, "%s %g\n", prefix, v)
}
}
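// addTag appends the given tag to the metric name. For example,
// addTag("foo", `quantile="0.5"`) yields `foo{quantile="0.5"}`, while
// addTag(`foo{bar="baz"}`, `quantile="0.5"`) yields `foo{bar="baz",quantile="0.5"}`.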
func addTag(name, tag string) string {
if len(name) == 0 || name[len(name)-1] != '}' {
return fmt.Sprintf("%s{%s}", name, tag)
}
return fmt.Sprintf("%s,%s}", name[:len(name)-1], tag)
}
func registerSummaryLocked(sm *Summary) {
window := sm.window
summariesLock.Lock()
summaries[window] = append(summaries[window], sm)
if len(summaries[window]) == 1 {
go summariesSwapCron(window)
}
summariesLock.Unlock()
}
func unregisterSummary(sm *Summary) {
window := sm.window
summariesLock.Lock()
sms := summaries[window]
found := false
for i, xsm := range sms {
if xsm == sm {
sms = append(sms[:i], sms[i+1:]...)
found = true
break
}
}
if !found {
panic(fmt.Errorf("BUG: cannot find registered summary %p", sm))
}
summaries[window] = sms
summariesLock.Unlock()
}
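// summariesSwapCron rotates the curr/next histograms of all summaries sharing
// the same window once per window/2. Since Update writes each sample to both
// curr and next, and next is reset on every swap, a sample influences the
// reported quantiles for between window/2 and window.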
func summariesSwapCron(window time.Duration) {
for {
time.Sleep(window / 2)
summariesLock.Lock()
for _, sm := range summaries[window] {
sm.mu.Lock()
tmp := sm.curr
sm.curr = sm.next
sm.next = tmp
sm.next.Reset()
sm.mu.Unlock()
}
summariesLock.Unlock()
}
}
var (
summaries = map[time.Duration][]*Summary{}
summariesLock sync.Mutex
)


@ -0,0 +1,31 @@
package metrics_test
import (
"fmt"
"time"
"github.com/VictoriaMetrics/metrics"
)
func ExampleSummary() {
// Define a summary in global scope.
var s = metrics.NewSummary(`request_duration_seconds{path="/foo/bar"}`)
// Update the summary with the duration of processRequest call.
startTime := time.Now()
processRequest()
s.UpdateDuration(startTime)
}
func ExampleSummary_vec() {
for i := 0; i < 3; i++ {
// Dynamically construct metric name and pass it to GetOrCreateSummary.
name := fmt.Sprintf(`response_size_bytes{path=%q}`, "/foo/bar")
response := processRequest()
metrics.GetOrCreateSummary(name).Update(float64(len(response)))
}
}
func processRequest() string {
return "foobar"
}


@ -0,0 +1,155 @@
package metrics
import (
"bytes"
"fmt"
"strings"
"testing"
"time"
)
func TestSummarySerial(t *testing.T) {
name := `TestSummarySerial`
s := NewSummary(name)
// Verify that the summary isn't visible in the output of WritePrometheus,
// since it doesn't contain any values yet.
var bb bytes.Buffer
WritePrometheus(&bb, false)
result := bb.String()
if strings.Contains(result, name) {
t.Fatalf("summary %s shouldn't be visible in the WritePrometheus output; got\n%s", name, result)
}
// Write data to summary
for i := 0; i < 2000; i++ {
s.Update(float64(i))
t := time.Now()
s.UpdateDuration(t.Add(-time.Millisecond * time.Duration(i)))
}
// Make sure the summary prints <prefix>_sum and <prefix>_count on marshalTo call
testMarshalTo(t, s, "prefix", fmt.Sprintf("prefix_sum %g\nprefix_count %d\n", s.sum, s.count))
testMarshalTo(t, s, `m{foo="bar"}`, fmt.Sprintf("m_sum{foo=\"bar\"} %g\nm_count{foo=\"bar\"} %d\n", s.sum, s.count))
// Verify s.quantileValues
s.updateQuantiles()
if s.quantileValues[len(s.quantileValues)-1] != 1999 {
t.Fatalf("unexpected quantileValues[last]; got %v; want %v", s.quantileValues[len(s.quantileValues)-1], 1999)
}
// Make sure the summary becomes visible in the output of WritePrometheus,
// since now it contains values.
bb.Reset()
WritePrometheus(&bb, false)
result = bb.String()
if !strings.Contains(result, name) {
t.Fatalf("missing summary %s in the WritePrometheus output; got\n%s", name, result)
}
}
func TestSummaryConcurrent(t *testing.T) {
name := "SummaryConcurrent"
s := NewSummary(name)
err := testConcurrent(func() error {
for i := 0; i < 10; i++ {
s.Update(float64(i))
}
return nil
})
if err != nil {
t.Fatal(err)
}
testMarshalTo(t, s, "prefix", "prefix_sum 225\nprefix_count 50\n")
}
func TestSummaryWithTags(t *testing.T) {
name := `TestSummary{tag="foo"}`
s := NewSummary(name)
s.Update(123)
var bb bytes.Buffer
WritePrometheus(&bb, false)
result := bb.String()
namePrefixWithTag := `TestSummary{tag="foo",quantile="`
if !strings.Contains(result, namePrefixWithTag) {
t.Fatalf("missing summary prefix %s in the WritePrometheus output; got\n%s", namePrefixWithTag, result)
}
}
func TestSummaryInvalidQuantiles(t *testing.T) {
name := "SummaryInvalidQuantiles"
expectPanic(t, name, func() {
NewSummaryExt(name, time.Minute, []float64{123, -234})
})
}
func TestSummarySmallWindow(t *testing.T) {
name := "SummarySmallWindow"
window := time.Millisecond * 20
quantiles := []float64{0.1, 0.2, 0.3}
s := NewSummaryExt(name, window, quantiles)
for i := 0; i < 2000; i++ {
s.Update(123)
}
// Wait for window update and verify that the summary has been cleared.
time.Sleep(2 * window)
var bb bytes.Buffer
WritePrometheus(&bb, false)
result := bb.String()
// <name>_sum and <name>_count are present in the output.
// Only <name>{quantile} shouldn't be present.
name += "{"
if strings.Contains(result, name) {
t.Fatalf("summary %s cannot be present in the WritePrometheus output; got\n%s", name, result)
}
}
func TestGetOrCreateSummaryInvalidWindow(t *testing.T) {
name := "GetOrCreateSummaryInvalidWindow"
GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
expectPanic(t, name, func() {
GetOrCreateSummaryExt(name, defaultSummaryWindow/2, defaultSummaryQuantiles)
})
}
func TestGetOrCreateSummaryInvalidQuantiles(t *testing.T) {
name := "GetOrCreateSummaryInvalidQuantiles"
GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
expectPanic(t, name, func() {
GetOrCreateSummaryExt(name, defaultSummaryWindow, []float64{0.1, 0.2})
})
quantiles := append([]float64{}, defaultSummaryQuantiles...)
quantiles[len(quantiles)-1] /= 2
expectPanic(t, name, func() {
GetOrCreateSummaryExt(name, defaultSummaryWindow, quantiles)
})
}
func TestGetOrCreateSummarySerial(t *testing.T) {
name := "GetOrCreateSummarySerial"
if err := testGetOrCreateSummary(name); err != nil {
t.Fatal(err)
}
}
func TestGetOrCreateSummaryConcurrent(t *testing.T) {
name := "GetOrCreateSummaryConcurrent"
err := testConcurrent(func() error {
return testGetOrCreateSummary(name)
})
if err != nil {
t.Fatal(err)
}
}
func testGetOrCreateSummary(name string) error {
s1 := GetOrCreateSummary(name)
for i := 0; i < 10; i++ {
s2 := GetOrCreateSummary(name)
if s1 != s2 {
return fmt.Errorf("unexpected summary returned; got %p; want %p", s2, s1)
}
}
return nil
}

@ -0,0 +1,17 @@
Limit Soft Limit Hard Limit Units
Max cpu time unlimited unlimited seconds
Max file size unlimited unlimited bytes
Max data size unlimited unlimited bytes
Max stack size 8388608 unlimited bytes
Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 127458 127458 processes
Max open files 1024 1048576 files
Max locked memory 67108864 67108864 bytes
Max address space unlimited unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 127458 127458 signals
Max msgqueue size 819200 819200 bytes
Max nice priority 0 0
Max realtime priority 0 0
Max realtime timeout unlimited unlimited us


@ -0,0 +1 @@
Limit Soft Limit Hard Limit Units


@ -0,0 +1,115 @@
Name: victoria-metric
Umask: 0022
State: S (sleeping)
Tgid: 1
Ngid: 0
Pid: 1
PPid: 0
TracerPid: 0
Uid: 0 0 0 0
Gid: 0 0 0 0
FDSize: 256
Groups: 1 2 3 4 6 10 11 20 26 27
NStgid: 1
NSpid: 1
NSpgid: 1
NSsid: 1
VmPeak: 2080548 kB
VmSize: 2080464 kB
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 195976 kB
VmRSS: 105212 kB
RssAnon: 94092 kB
RssFile: 11120 kB
RssShmem: 0 kB
VmData: 632076 kB
VmStk: 132 kB
VmExe: 7004 kB
VmLib: 8 kB
VmPTE: 940 kB
VmSwap: 0 kB
HugetlbPages: 0 kB
CoreDumping: 0
THP_enabled: 1
Threads: 14
SigQ: 1/127458
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: fffffffc3bfa3a00
SigIgn: 0000000000000000
SigCgt: fffffffdffc1feff
CapInh: 00000000a80425fb
CapPrm: 00000000a80425fb
CapEff: 00000000a80425fb
CapBnd: 00000000a80425fb
CapAmb: 0000000000000000
NoNewPrivs: 0
Seccomp: 0
Speculation_Store_Bypass: thread vulnerable
Cpus_allowed: ff
Cpus_allowed_list: 0-7
Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 82
nonvoluntary_ctxt_switches: 21
/ # cat /proc/1/stat
stat statm status
/ # cat /proc/1/statm
520122 27057 2780 1751 0 158052 0
/ # cat /proc/1/status
Name: victoria-metric
Umask: 0022
State: S (sleeping)
Tgid: 1
Ngid: 0
Pid: 1
PPid: 0
TracerPid: 0
Uid: 0 0 0 0
Gid: 0 0 0 0
FDSize: 256
Groups: 1 2 3 4 6 10 11 20 26 27
NStgid: 1
NSpid: 1
NSpgid: 1
NSsid: 1
VmPeak: 2080556 kB
VmSize: 2080520 kB
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 195976 kB
VmRSS: 129848 kB
RssAnon: 118752 kB
RssFile: 11096 kB
RssShmem: 0 kB
VmData: 633020 kB
VmStk: 132 kB
VmExe: 7004 kB
VmLib: 8 kB
VmPTE: 984 kB
VmSwap: 0 kB
HugetlbPages: 0 kB
CoreDumping: 0
THP_enabled: 1
Threads: 14
SigQ: 1/127458
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: fffffffc3bfa3a00
SigIgn: 0000000000000000
SigCgt: fffffffdffc1feff
CapInh: 00000000a80425fb
CapPrm: 00000000a80425fb
CapEff: 00000000a80425fb
CapBnd: 00000000a80425fb
CapAmb: 0000000000000000
NoNewPrivs: 0
Seccomp: 0
Speculation_Store_Bypass: thread vulnerable
Cpus_allowed: ff
Cpus_allowed_list: 0-7
Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 82
nonvoluntary_ctxt_switches: 21


@ -0,0 +1,115 @@
Name: victoria-metric
Umask: 0022
State: S (sleeping)
Tgid: 1
Ngid: 0
Pid: 1
PPid: 0
TracerPid: 0
Uid: 0 0 0 0
Gid: 0 0 0 0
FDSize: 256
Groups: 1 2 3 4 6 10 11 20 26 27
NStgid: 1
NSpid: 1
NSpgid: 1
NSsid: 1
VmPeak: 2080548 kB
VmSize: 2080464 kB
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 195976 kB
VmRSS: 105212 kB
RssAnon: 94092 kB
RssFile: 11120 kB
RssShmem: 0 kB
VmData: 632076 kB
VmStk: 132 kB
VmExe: 7004 kB
VmLib: 8 kB
VmPTE: 940 kB
VmSwap: 0 kB
HugetlbPages: 0 kB
CoreDumping: 0
THP_enabled: 1
Threads: 14
SigQ: 1/127458
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: fffffffc3bfa3a00
SigIgn: 0000000000000000
SigCgt: fffffffdffc1feff
CapInh: 00000000a80425fb
CapPrm: 00000000a80425fb
CapEff: 00000000a80425fb
CapBnd: 00000000a80425fb
CapAmb: 0000000000000000
NoNewPrivs: 0
Seccomp: 0
Speculation_Store_Bypass: thread vulnerable
Cpus_allowed: ff
Cpus_allowed_list: 0-7
Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 82
nonvoluntary_ctxt_switches: 21
/ # cat /proc/1/stat
stat statm status
/ # cat /proc/1/statm
520122 27057 2780 1751 0 158052 0
/ # cat /proc/1/status
Name: victoria-metric
Umask: 0022
State: S (sleeping)
Tgid: 1
Ngid: 0
Pid: 1
PPid: 0
TracerPid: 0
Uid: 0 0 0 0
Gid: 0 0 0 0
FDSize: 256
Groups: 1 2 3 4 6 10 11 20 26 27
NStgid: 1
NSpid: 1
NSpgid: 1
NSsid: 1
VmPeak: 2080556 kB
VmSize: 2080520 kB as
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 195976 kB
VmRSS: 129848 kB
RssAnon: 118752 kB
RssFile: 11096 kB
RssShmem: 0 kB
VmData: 633020 kB
VmStk: 132 kB
VmExe: 7004 kB
VmLib: 8 kB
VmPTE: 984 kB
VmSwap: 0 kB
HugetlbPages: 0 kB
CoreDumping: 0
THP_enabled: 1
Threads: 14
SigQ: 1/127458
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: fffffffc3bfa3a00 fsa
SigIgn: 0000000000000000
SigCgt: fffffffdffc1feff
CapInh: 00000000a80425fb
CapPrm: 00000000a80425fb
CapEff: 00000000a80425fb
CapBnd: 00000000a80425fb
CapAmb: 0000000000000000
NoNewPrivs: 0
Seccomp: 0
Speculation_Store_Bypass: thread vulnerable
Cpus_allowed: ff
Cpus_allowed_list: 0-7
Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 82
nonvoluntary_ctxt_switches: 21

vendor/github.com/VictoriaMetrics/metrics/validator.go generated vendored Normal file

@ -0,0 +1,84 @@
package metrics
import (
"fmt"
"regexp"
"strings"
)
func validateMetric(s string) error {
if len(s) == 0 {
return fmt.Errorf("metric cannot be empty")
}
n := strings.IndexByte(s, '{')
if n < 0 {
return validateIdent(s)
}
ident := s[:n]
s = s[n+1:]
if err := validateIdent(ident); err != nil {
return err
}
if len(s) == 0 || s[len(s)-1] != '}' {
return fmt.Errorf("missing closing curly brace at the end of %q", ident)
}
return validateTags(s[:len(s)-1])
}
func validateTags(s string) error {
if len(s) == 0 {
return nil
}
for {
n := strings.IndexByte(s, '=')
if n < 0 {
return fmt.Errorf("missing `=` after %q", s)
}
ident := s[:n]
s = s[n+1:]
if err := validateIdent(ident); err != nil {
return err
}
if len(s) == 0 || s[0] != '"' {
return fmt.Errorf("missing starting `\"` for %q value; tail=%q", ident, s)
}
s = s[1:]
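// Scan for the closing `"`, skipping over quotes escaped by an odd number
// of preceding backslashes.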
again:
n = strings.IndexByte(s, '"')
if n < 0 {
return fmt.Errorf("missing trailing `\"` for %q value; tail=%q", ident, s)
}
m := n
for m > 0 && s[m-1] == '\\' {
m--
}
if (n-m)%2 == 1 {
s = s[n+1:]
goto again
}
s = s[n+1:]
if len(s) == 0 {
return nil
}
if !strings.HasPrefix(s, ",") {
return fmt.Errorf("missing `,` after %q value; tail=%q", ident, s)
}
s = skipSpace(s[1:])
}
}
func skipSpace(s string) string {
for len(s) > 0 && s[0] == ' ' {
s = s[1:]
}
return s
}
func validateIdent(s string) error {
if !identRegexp.MatchString(s) {
return fmt.Errorf("invalid identifier %q", s)
}
return nil
}
var identRegexp = regexp.MustCompile("^[a-zA-Z_:.][a-zA-Z0-9_:.]*$")


@ -0,0 +1,61 @@
package metrics
import (
"testing"
)
func TestValidateMetricSuccess(t *testing.T) {
f := func(s string) {
t.Helper()
if err := validateMetric(s); err != nil {
t.Fatalf("cannot validate %q: %s", s, err)
}
}
f("a")
f("_9:8")
f("a{}")
f(`a{foo="bar"}`)
f(`foo{bar="baz", x="y\"z"}`)
f(`foo{bar="b}az"}`)
f(`:foo:bar{bar="a",baz="b"}`)
f(`some.foo{bar="baz"}`)
}
func TestValidateMetricError(t *testing.T) {
f := func(s string) {
t.Helper()
if err := validateMetric(s); err == nil {
t.Fatalf("expecting non-nil error when validating %q", s)
}
}
f("")
f("{}")
// superfluous space
f("a ")
f(" a")
f(" a ")
f("a {}")
f("a{} ")
f("a{ }")
f(`a{foo ="bar"}`)
f(`a{ foo="bar"}`)
f(`a{foo= "bar"}`)
f(`a{foo="bar" }`)
f(`a{foo="bar" ,baz="a"}`)
// invalid tags
f("a{foo}")
f("a{=}")
f(`a{=""}`)
f(`a{`)
f(`a}`)
f(`a{foo=}`)
f(`a{foo="`)
f(`a{foo="}`)
f(`a{foo="bar",}`)
f(`a{foo="bar", x`)
f(`a{foo="bar", x=`)
f(`a{foo="bar", x="`)
f(`a{foo="bar", x="}`)
}


@ -0,0 +1,16 @@
language: go
go:
- 1.7
- 1.8
script:
# build test for supported platforms
- GOOS=linux go build
- GOOS=darwin go build
- GOOS=freebsd go build
- GOARCH=386 go build
# run tests on a standard platform
- go test -v ./...


@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2017 Aliaksandr Valialkin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,76 @@
[![Build Status](https://travis-ci.org/valyala/fastrand.svg)](https://travis-ci.org/valyala/fastrand)
[![GoDoc](https://godoc.org/github.com/valyala/fastrand?status.svg)](http://godoc.org/github.com/valyala/fastrand)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/fastrand)](https://goreportcard.com/report/github.com/valyala/fastrand)
# fastrand
Fast pseudorandom number generator.
# Features
- Optimized for speed.
- Performance scales on multiple CPUs.
# How does it work?
It abuses [sync.Pool](https://golang.org/pkg/sync/#Pool) for maintaining
"per-CPU" pseudorandom number generators.
TODO: figure out how to use real per-CPU pseudorandom number generators.
# Benchmark results
```
$ GOMAXPROCS=1 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n 50000000 29.7 ns/op
BenchmarkRNGUint32n 200000000 6.50 ns/op
BenchmarkRNGUint32nWithLock 100000000 21.5 ns/op
BenchmarkMathRandInt31n 50000000 31.8 ns/op
BenchmarkMathRandRNGInt31n 100000000 17.9 ns/op
BenchmarkMathRandRNGInt31nWithLock 50000000 30.2 ns/op
PASS
ok github.com/valyala/fastrand 10.634s
```
```
$ GOMAXPROCS=2 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n-2 100000000 17.6 ns/op
BenchmarkRNGUint32n-2 500000000 3.36 ns/op
BenchmarkRNGUint32nWithLock-2 50000000 32.0 ns/op
BenchmarkMathRandInt31n-2 20000000 51.2 ns/op
BenchmarkMathRandRNGInt31n-2 100000000 11.0 ns/op
BenchmarkMathRandRNGInt31nWithLock-2 20000000 91.0 ns/op
PASS
ok github.com/valyala/fastrand 9.543s
```
```
$ GOMAXPROCS=4 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n-4 100000000 14.2 ns/op
BenchmarkRNGUint32n-4 500000000 3.30 ns/op
BenchmarkRNGUint32nWithLock-4 20000000 88.7 ns/op
BenchmarkMathRandInt31n-4 10000000 145 ns/op
BenchmarkMathRandRNGInt31n-4 200000000 8.35 ns/op
BenchmarkMathRandRNGInt31nWithLock-4 20000000 102 ns/op
PASS
ok github.com/valyala/fastrand 11.534s
```
As you can see, [fastrand.Uint32n](https://godoc.org/github.com/valyala/fastrand#Uint32n)
scales on multiple CPUs, while [rand.Int31n](https://golang.org/pkg/math/rand/#Int31n)
doesn't scale. Their performance is comparable on `GOMAXPROCS=1`,
but `fastrand.Uint32n` runs 3x faster than `rand.Int31n` on `GOMAXPROCS=2`
and 10x faster than `rand.Int31n` on `GOMAXPROCS=4`.
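# Usage
A minimal usage sketch; `Uint32n` and `RNG` are the APIs defined in `fastrand.go` in this package:

```go
package main

import (
	"fmt"

	"github.com/valyala/fastrand"
)

func main() {
	// The package-level functions are safe for concurrent use.
	fmt.Println(fastrand.Uint32n(10)) // pseudorandom value in [0..10)

	// A dedicated RNG skips the internal sync.Pool round-trip, but its
	// methods must not be called from concurrent goroutines.
	var r fastrand.RNG
	fmt.Println(r.Uint32n(10))
}
```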


@ -0,0 +1,74 @@
// Package fastrand implements a fast pseudorandom number generator
// that should scale well on multi-CPU systems.
//
// Use crypto/rand instead of this package for generating
// cryptographically secure random numbers.
package fastrand
import (
"sync"
"time"
)
// Uint32 returns pseudorandom uint32.
//
// It is safe calling this function from concurrent goroutines.
func Uint32() uint32 {
v := rngPool.Get()
if v == nil {
v = &RNG{}
}
r := v.(*RNG)
x := r.Uint32()
rngPool.Put(r)
return x
}
var rngPool sync.Pool
// Uint32n returns pseudorandom uint32 in the range [0..maxN).
//
// It is safe calling this function from concurrent goroutines.
func Uint32n(maxN uint32) uint32 {
x := Uint32()
// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
return uint32((uint64(x) * uint64(maxN)) >> 32)
}
// RNG is a pseudorandom number generator.
//
// It is unsafe to call RNG methods from concurrent goroutines.
type RNG struct {
x uint32
}
// Uint32 returns pseudorandom uint32.
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32() uint32 {
for r.x == 0 {
r.x = getRandomUint32()
}
// See https://en.wikipedia.org/wiki/Xorshift
x := r.x
x ^= x << 13
x ^= x >> 17
x ^= x << 5
r.x = x
return x
}
// Uint32n returns pseudorandom uint32 in the range [0..maxN).
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32n(maxN uint32) uint32 {
x := r.Uint32()
// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
return uint32((uint64(x) * uint64(maxN)) >> 32)
}
func getRandomUint32() uint32 {
x := time.Now().UnixNano()
return uint32((x >> 32) ^ x)
}


@ -0,0 +1 @@
module github.com/valyala/fastrand


@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2019 Aliaksandr Valialkin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,9 @@
[![GoDoc](https://godoc.org/github.com/valyala/histogram?status.svg)](http://godoc.org/github.com/valyala/histogram)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/histogram)](https://goreportcard.com/report/github.com/valyala/histogram)
# histogram
Fast histograms for Go.
See [docs](https://godoc.org/github.com/valyala/histogram).
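# Usage
A minimal sketch of the `Fast` reservoir histogram defined in `histogram.go` in this package:

```go
package main

import (
	"fmt"

	"github.com/valyala/histogram"
)

func main() {
	f := histogram.NewFast()
	for i := 0; i < 100; i++ {
		f.Update(float64(i))
	}
	// Quantile returns the estimated value at the given phi in [0..1].
	fmt.Println(f.Quantile(0.5))
}
```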


@ -0,0 +1,5 @@
module github.com/valyala/histogram
go 1.12
require github.com/valyala/fastrand v1.0.0


@ -0,0 +1,2 @@
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=


@ -0,0 +1,127 @@
// Package histogram provides building blocks for fast histograms.
package histogram
import (
"math"
"sort"
"sync"
"github.com/valyala/fastrand"
)
var (
infNeg = math.Inf(-1)
infPos = math.Inf(1)
nan = math.NaN()
)
// Fast is a fast histogram.
//
// It cannot be used from concurrently running goroutines without
// external synchronization.
type Fast struct {
max float64
min float64
count uint64
a []float64
tmp []float64
rng fastrand.RNG
}
// NewFast returns new fast histogram.
func NewFast() *Fast {
f := &Fast{}
f.Reset()
return f
}
// Reset resets the histogram.
func (f *Fast) Reset() {
f.max = infNeg
f.min = infPos
f.count = 0
if len(f.a) > 0 {
f.a = f.a[:0]
f.tmp = f.tmp[:0]
} else {
// Free up memory occupied by unused histogram.
f.a = nil
f.tmp = nil
}
}
// Update updates the f with v.
func (f *Fast) Update(v float64) {
if v > f.max {
f.max = v
}
if v < f.min {
f.min = v
}
f.count++
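// Reservoir sampling: once maxSamples values have been collected, replace a
// random existing sample so that each observed value is retained with equal
// probability len(f.a)/count.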
if len(f.a) < maxSamples {
f.a = append(f.a, v)
return
}
if n := int(f.rng.Uint32n(uint32(f.count))); n < len(f.a) {
f.a[n] = v
}
}
const maxSamples = 1000
// Quantile returns the quantile value for the given phi.
func (f *Fast) Quantile(phi float64) float64 {
f.tmp = append(f.tmp[:0], f.a...)
sort.Float64s(f.tmp)
return f.quantile(phi)
}
// Quantiles appends quantile values to dst for the given phis.
func (f *Fast) Quantiles(dst, phis []float64) []float64 {
f.tmp = append(f.tmp[:0], f.a...)
sort.Float64s(f.tmp)
for _, phi := range phis {
q := f.quantile(phi)
dst = append(dst, q)
}
return dst
}
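// quantile returns the nearest-rank sample from the sorted reservoir in f.tmp.
// phi <= 0 and phi >= 1 map to the tracked min and max, which may fall outside
// the retained samples once the reservoir is full.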
func (f *Fast) quantile(phi float64) float64 {
if len(f.tmp) == 0 || math.IsNaN(phi) {
return nan
}
if phi <= 0 {
return f.min
}
if phi >= 1 {
return f.max
}
idx := uint(phi*float64(len(f.tmp)-1) + 0.5)
if idx >= uint(len(f.tmp)) {
idx = uint(len(f.tmp) - 1)
}
return f.tmp[idx]
}
// GetFast returns a histogram from a pool.
func GetFast() *Fast {
v := fastPool.Get()
if v == nil {
return NewFast()
}
return v.(*Fast)
}
// PutFast puts hf to the pool.
//
// hf cannot be used after this call.
func PutFast(f *Fast) {
f.Reset()
fastPool.Put(f)
}
var fastPool sync.Pool


@ -0,0 +1,4 @@
# github.com/valyala/fastrand v1.0.0
github.com/valyala/fastrand
# github.com/valyala/histogram v1.1.2
github.com/valyala/histogram

16
vendor/github.com/valyala/fastrand/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
language: go
go:
- 1.7
- 1.8
script:
# build test for supported platforms
- GOOS=linux go build
- GOOS=darwin go build
- GOOS=freebsd go build
- GOARCH=386 go build
# run tests on a standard platform
- go test -v ./...

21
vendor/github.com/valyala/fastrand/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2017 Aliaksandr Valialkin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

76
vendor/github.com/valyala/fastrand/README.md generated vendored Normal file
View File

@ -0,0 +1,76 @@
[![Build Status](https://travis-ci.org/valyala/fastrand.svg)](https://travis-ci.org/valyala/fastrand)
[![GoDoc](https://godoc.org/github.com/valyala/fastrand?status.svg)](http://godoc.org/github.com/valyala/fastrand)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/fastrand)](https://goreportcard.com/report/github.com/valyala/fastrand)
# fastrand
Fast pseudorandom number generator.
# Features
- Optimized for speed.
- Performance scales on multiple CPUs.
# How does it work?
It abuses [sync.Pool](https://golang.org/pkg/sync/#Pool) for maintaining
"per-CPU" pseudorandom number generators.
TODO: figure out how to use real per-CPU pseudorandom number generators.
# Benchmark results
```
$ GOMAXPROCS=1 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n 50000000 29.7 ns/op
BenchmarkRNGUint32n 200000000 6.50 ns/op
BenchmarkRNGUint32nWithLock 100000000 21.5 ns/op
BenchmarkMathRandInt31n 50000000 31.8 ns/op
BenchmarkMathRandRNGInt31n 100000000 17.9 ns/op
BenchmarkMathRandRNGInt31nWithLock 50000000 30.2 ns/op
PASS
ok github.com/valyala/fastrand 10.634s
```
```
$ GOMAXPROCS=2 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n-2 100000000 17.6 ns/op
BenchmarkRNGUint32n-2 500000000 3.36 ns/op
BenchmarkRNGUint32nWithLock-2 50000000 32.0 ns/op
BenchmarkMathRandInt31n-2 20000000 51.2 ns/op
BenchmarkMathRandRNGInt31n-2 100000000 11.0 ns/op
BenchmarkMathRandRNGInt31nWithLock-2 20000000 91.0 ns/op
PASS
ok github.com/valyala/fastrand 9.543s
```
```
$ GOMAXPROCS=4 go test -bench=. github.com/valyala/fastrand
goos: linux
goarch: amd64
pkg: github.com/valyala/fastrand
BenchmarkUint32n-4 100000000 14.2 ns/op
BenchmarkRNGUint32n-4 500000000 3.30 ns/op
BenchmarkRNGUint32nWithLock-4 20000000 88.7 ns/op
BenchmarkMathRandInt31n-4 10000000 145 ns/op
BenchmarkMathRandRNGInt31n-4 200000000 8.35 ns/op
BenchmarkMathRandRNGInt31nWithLock-4 20000000 102 ns/op
PASS
ok github.com/valyala/fastrand 11.534s
```
As you can see, [fastrand.Uint32n](https://godoc.org/github.com/valyala/fastrand#Uint32n)
scales on multiple CPUs, while [rand.Int31n](https://golang.org/pkg/math/rand/#Int31n)
doesn't scale. Their performance is comparable on `GOMAXPROCS=1`,
but `fastrand.Uint32n` runs 3x faster than `rand.Int31n` on `GOMAXPROCS=2`
and 10x faster than `rand.Int31n` on `GOMAXPROCS=4`.
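
For orientation, a minimal usage sketch against the API shown in fastrand.go below (our example, not part of the vendored files):

```go
package main

import (
	"fmt"

	"github.com/valyala/fastrand"
)

func main() {
	// Package-level helpers are safe for concurrent use: each call borrows
	// an RNG from an internal sync.Pool and puts it back afterwards.
	fmt.Println(fastrand.Uint32())    // pseudorandom uint32
	fmt.Println(fastrand.Uint32n(10)) // pseudorandom value in [0..10)

	// A dedicated RNG skips the pool round-trip, but must not be shared
	// across goroutines without external locking.
	var r fastrand.RNG
	fmt.Println(r.Uint32n(100))
}
```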

74
vendor/github.com/valyala/fastrand/fastrand.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
// Package fastrand implements a fast pseudorandom number generator
// that should scale well on multi-CPU systems.
//
// Use crypto/rand instead of this package for generating
// cryptographically secure random numbers.
package fastrand
import (
"sync"
"time"
)
// Uint32 returns pseudorandom uint32.
//
// It is safe to call this function from concurrent goroutines.
func Uint32() uint32 {
v := rngPool.Get()
if v == nil {
v = &RNG{}
}
r := v.(*RNG)
x := r.Uint32()
rngPool.Put(r)
return x
}
var rngPool sync.Pool
// Uint32n returns pseudorandom uint32 in the range [0..maxN).
//
// It is safe to call this function from concurrent goroutines.
func Uint32n(maxN uint32) uint32 {
x := Uint32()
// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
return uint32((uint64(x) * uint64(maxN)) >> 32)
}
// RNG is a pseudorandom number generator.
//
// It is unsafe to call RNG methods from concurrent goroutines.
type RNG struct {
x uint32
}
// Uint32 returns pseudorandom uint32.
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32() uint32 {
for r.x == 0 {
r.x = getRandomUint32()
}
// See https://en.wikipedia.org/wiki/Xorshift
x := r.x
x ^= x << 13
x ^= x >> 17
x ^= x << 5
r.x = x
return x
}
// Uint32n returns pseudorandom uint32 in the range [0..maxN).
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32n(maxN uint32) uint32 {
x := r.Uint32()
// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
return uint32((uint64(x) * uint64(maxN)) >> 32)
}
func getRandomUint32() uint32 {
x := time.Now().UnixNano()
return uint32((x >> 32) ^ x)
}
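
The multiply-shift in Uint32n is Lemire's alternative to a modulo: since uint64(x)*uint64(maxN) is below 2^32*maxN, shifting right by 32 bits always yields a value in [0..maxN) without a division instruction. A small self-contained check (our example, not part of the vendored package):

```go
package main

import "fmt"

// reduce mirrors the multiply-shift range reduction used by Uint32n.
func reduce(x, maxN uint32) uint32 {
	return uint32((uint64(x) * uint64(maxN)) >> 32)
}

func main() {
	const maxN = 10
	for _, x := range []uint32{0, 1, 1 << 31, 0xFFFFFFFF} {
		fmt.Println(reduce(x, maxN)) // prints 0, 0, 5, 9: always within [0..10)
	}
}
```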

1
vendor/github.com/valyala/fastrand/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/valyala/fastrand

21
vendor/github.com/valyala/histogram/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2019 Aliaksandr Valialkin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

9
vendor/github.com/valyala/histogram/README.md generated vendored Normal file
View File

@ -0,0 +1,9 @@
[![GoDoc](https://godoc.org/github.com/valyala/histogram?status.svg)](http://godoc.org/github.com/valyala/histogram)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/histogram)](https://goreportcard.com/report/github.com/valyala/histogram)
# histogram
Fast histograms for Go.
See [docs](https://godoc.org/github.com/valyala/histogram).

5
vendor/github.com/valyala/histogram/go.mod generated vendored Normal file
View File

@ -0,0 +1,5 @@
module github.com/valyala/histogram
go 1.12
require github.com/valyala/fastrand v1.0.0

2
vendor/github.com/valyala/histogram/go.sum generated vendored Normal file
View File

@ -0,0 +1,2 @@
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=

127
vendor/github.com/valyala/histogram/histogram.go generated vendored Normal file
View File

@ -0,0 +1,127 @@
// Package histogram provides building blocks for fast histograms.
package histogram
import (
"math"
"sort"
"sync"
"github.com/valyala/fastrand"
)
var (
infNeg = math.Inf(-1)
infPos = math.Inf(1)
nan = math.NaN()
)
// Fast is a fast histogram.
//
// It cannot be used from concurrently running goroutines without
// external synchronization.
type Fast struct {
max float64
min float64
count uint64
a []float64
tmp []float64
rng fastrand.RNG
}
// NewFast returns a new fast histogram.
func NewFast() *Fast {
f := &Fast{}
f.Reset()
return f
}
// Reset resets the histogram.
func (f *Fast) Reset() {
f.max = infNeg
f.min = infPos
f.count = 0
if len(f.a) > 0 {
f.a = f.a[:0]
f.tmp = f.tmp[:0]
} else {
// Free up memory occupied by unused histogram.
f.a = nil
f.tmp = nil
}
}
// Update updates f with v.
func (f *Fast) Update(v float64) {
if v > f.max {
f.max = v
}
if v < f.min {
f.min = v
}
f.count++
if len(f.a) < maxSamples {
f.a = append(f.a, v)
return
}
if n := int(f.rng.Uint32n(uint32(f.count))); n < len(f.a) {
f.a[n] = v
}
}
const maxSamples = 1000
// Quantile returns the quantile value for the given phi.
func (f *Fast) Quantile(phi float64) float64 {
f.tmp = append(f.tmp[:0], f.a...)
sort.Float64s(f.tmp)
return f.quantile(phi)
}
// Quantiles appends quantile values to dst for the given phis.
func (f *Fast) Quantiles(dst, phis []float64) []float64 {
f.tmp = append(f.tmp[:0], f.a...)
sort.Float64s(f.tmp)
for _, phi := range phis {
q := f.quantile(phi)
dst = append(dst, q)
}
return dst
}
func (f *Fast) quantile(phi float64) float64 {
if len(f.tmp) == 0 || math.IsNaN(phi) {
return nan
}
if phi <= 0 {
return f.min
}
if phi >= 1 {
return f.max
}
idx := uint(phi*float64(len(f.tmp)-1) + 0.5)
if idx >= uint(len(f.tmp)) {
idx = uint(len(f.tmp) - 1)
}
return f.tmp[idx]
}
// GetFast returns a histogram from a pool.
func GetFast() *Fast {
v := fastPool.Get()
if v == nil {
return NewFast()
}
return v.(*Fast)
}
// PutFast puts f to the pool.
//
// f cannot be used after this call.
func PutFast(f *Fast) {
f.Reset()
fastPool.Put(f)
}
var fastPool sync.Pool
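
A minimal usage sketch (our example, not part of the vendored files). Note that after maxSamples (1000) observations Update keeps a uniform reservoir sample, so mid-range quantiles are approximations, while Quantile(0) and Quantile(1) stay exact because min and max are tracked separately:

```go
package main

import (
	"fmt"

	"github.com/valyala/histogram"
)

func main() {
	f := histogram.GetFast() // borrow a histogram from the pool
	defer histogram.PutFast(f)

	for i := 0; i < 10000; i++ {
		f.Update(float64(i))
	}

	fmt.Println(f.Quantile(0.5))                   // roughly 5000 (sampled)
	fmt.Println(f.Quantiles(nil, []float64{0, 1})) // exactly [0 9999]
}
```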

59
vendor/github.com/valyala/histogram/histogram_test.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
package histogram
import (
"math"
"testing"
)
func TestFastUnderflow(t *testing.T) {
f := GetFast()
defer PutFast(f)
q := f.Quantile(0.5)
if !math.IsNaN(q) {
t.Fatalf("unexpected quantile for empty histogram; got %v; want %v", q, nan)
}
for i := 0; i < maxSamples; i++ {
f.Update(float64(i))
}
qs := f.Quantiles(nil, []float64{0, 0.5, 1})
if qs[0] != 0 {
t.Fatalf("unexpected quantile value for phi=0; got %v; want %v", qs[0], 0)
}
if qs[1] != maxSamples/2 {
t.Fatalf("unexpected quantile value for phi=0.5; got %v; want %v", qs[1], maxSamples/2)
}
if qs[2] != maxSamples-1 {
t.Fatalf("unexpected quantile value for phi=1; got %v; want %v", qs[2], maxSamples-1)
}
}
func TestFastOverflow(t *testing.T) {
f := GetFast()
defer PutFast(f)
for i := 0; i < maxSamples*10; i++ {
f.Update(float64(i))
}
qs := f.Quantiles(nil, []float64{0, 0.5, 0.9999, 1})
if qs[0] != 0 {
t.Fatalf("unexpected quantile value for phi=0; got %v; want %v", qs[0], 0)
}
median := float64(maxSamples*10-1) / 2
if qs[1] < median*0.9 || qs[1] > median*1.1 {
t.Fatalf("unexpected quantile value for phi=0.5; got %v; want %v", qs[1], median)
}
if qs[2] < maxSamples*10*0.9 {
t.Fatalf("unexpected quantile value for phi=0.9999; got %v; want %v", qs[2], maxSamples*10*0.9)
}
if qs[3] != maxSamples*10-1 {
t.Fatalf("unexpected quantile value for phi=1; got %v; want %v", qs[3], maxSamples*10-1)
}
q := f.Quantile(nan)
if !math.IsNaN(q) {
t.Fatalf("unexpected value for phi=NaN; got %v; want %v", q, nan)
}
}

View File

@ -0,0 +1,25 @@
package histogram
import (
"sync"
"testing"
)
func BenchmarkFastUpdate(b *testing.B) {
b.ReportAllocs()
b.SetBytes(1)
b.RunParallel(func(pb *testing.PB) {
f := NewFast()
var v float64
for pb.Next() {
f.Update(v)
v += 1.5
}
SinkLock.Lock()
Sink += f.Quantile(0.5)
SinkLock.Unlock()
})
}
var Sink float64
var SinkLock sync.Mutex
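
The exported Sink and SinkLock exist so the compiler cannot treat the benchmarked work as dead code: each parallel worker folds its median into the package-level Sink under the lock, which keeps the results of the Update loop observable.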


View File

@ -157,7 +157,7 @@ func (w *Wallet_Memory) Sync_Wallet_Memory_With_Daemon() {
     rlog.Debugf("wallet topo height %d daemon online topo height %d\n", w.account.TopoHeight, w.Daemon_TopoHeight)
     previous := w.account.Balance_Result.Data
     var scid crypto.Hash
-    if _, _, err := w.GetEncryptedBalanceAtTopoHeight(scid, -1, w.GetAddress().String()); err == nil {
+    if _, _,_,_,_, err := w.GetEncryptedBalanceAtTopoHeight(scid, -1, w.GetAddress().String()); err == nil {
         if w.account.Balance_Result.Data != previous /*|| (len(w.account.EntriesNative[scid]) >= 1 && strings.ToLower(w.account.Balance_Result.Data) != strings.ToLower(w.account.EntriesNative[scid][len(w.account.EntriesNative[scid])-1].EWData)) */ {
             w.DecodeEncryptedBalance() // try to decode balance
@ -240,7 +240,7 @@ func (w *Wallet_Memory) DecodeEncryptedBalanceNow(el *crypto.ElGamal) uint64 {
 // TODO in order to stop privacy leaks we must guess this information somehow on client side itself
 // maybe the server can broadcast a bloomfilter or something else from the mempool keyimages
 //
-func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (bits int, e *crypto.ElGamal, err error) {
+func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (bits int, e *crypto.ElGamal, height,rtopoheight uint64, merkleroot crypto.Hash, err error) {
     defer func() {
         if r := recover(); r != nil {
@ -300,7 +300,7 @@ func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topohe
         }
     }
     // fmt.Printf("status '%s' err '%s' %+v %+v \n", result.Status , w.Error , result.Status == errormsg.ErrAccountUnregistered.Error() , accountaddr == w.account.GetAddress().String())
     if scid.IsZero() && result.Status == errormsg.ErrAccountUnregistered.Error() {
         err = fmt.Errorf("%s", result.Status)
@ -311,7 +311,7 @@ func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topohe
     w.Daemon_TopoHeight = result.DTopoheight
     w.Merkle_Balance_TreeHash = result.DMerkle_Balance_TreeHash
-    if scid.IsZero() && accountaddr == w.GetAddress().String() {
+    if topoheight == -1 && scid.IsZero() && accountaddr == w.GetAddress().String() {
         w.account.Balance_Result = result
         w.account.TopoHeight = result.Topoheight
     }
@ -330,10 +330,17 @@ func (w *Wallet_Memory) GetEncryptedBalanceAtTopoHeight(scid crypto.Hash, topohe
         w.Error = nil
     }
+    //fmt.Printf("decoding elgamal\n")
     el := new(crypto.ElGamal).Deserialize(hexdecoded)
-    return result.Bits, el, nil
+
+    hexdecoded, err = hex.DecodeString(result.Merkle_Balance_TreeHash)
+    if err != nil {
+        return
+    }
+    var mhash crypto.Hash
+    copy(mhash[:],hexdecoded[:])
+
+    return result.Bits, el, uint64(result.Height), uint64(result.Topoheight), mhash, nil
 }
 func (w *Wallet_Memory) DecodeEncryptedBalance_Memory(el *crypto.ElGamal, hint uint64) (balance uint64) {
@ -346,7 +353,7 @@ func (w *Wallet_Memory) DecodeEncryptedBalance_Memory(el *crypto.ElGamal, hint u
 }
 func (w *Wallet_Memory) GetDecryptedBalanceAtTopoHeight(scid crypto.Hash, topoheight int64, accountaddr string) (balance uint64, err error) {
-    _, encrypted_balance, err := w.GetEncryptedBalanceAtTopoHeight(scid, topoheight, accountaddr)
+    _, encrypted_balance,_,_,_, err := w.GetEncryptedBalanceAtTopoHeight(scid, topoheight, accountaddr)
     if err != nil {
         return 0, err
     }
@ -466,13 +473,13 @@ func (w *Wallet_Memory) synchistory_internal(scid crypto.Hash, start_topo, end_t
     if start_topo == w.account.Balance_Result.Registration {
         start_balance_e = crypto.ConstructElGamal(w.account.Keys.Public.G1(), crypto.ElGamal_BASE_G)
     } else {
-        _, start_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, start_topo, w.GetAddress().String())
+        _, start_balance_e,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(scid, start_topo, w.GetAddress().String())
         if err != nil {
             return err
         }
     }
-    _, end_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, end_topo, w.GetAddress().String())
+    _, end_balance_e,_,_,_, err := w.GetEncryptedBalanceAtTopoHeight(scid, end_topo, w.GetAddress().String())
     if err != nil {
         return err
     }
@ -512,7 +519,7 @@ func (w *Wallet_Memory) synchistory_internal_binary_search(scid crypto.Hash, sta
         return w.synchistory_block(scid, end_topo)
     }
-    _, median_balance_e, err := w.GetEncryptedBalanceAtTopoHeight(scid, median, w.GetAddress().String())
+    _, median_balance_e,_,_,_, err := w.GetEncryptedBalanceAtTopoHeight(scid, median, w.GetAddress().String())
     if err != nil {
         return err
     }
@ -599,13 +606,13 @@ func (w *Wallet_Memory) synchistory_block(scid crypto.Hash, topo int64) (err err
     if topo <= 0 || w.account.Balance_Result.Registration == topo {
         previous_balance_e = crypto.ConstructElGamal(w.account.Keys.Public.G1(), crypto.ElGamal_BASE_G)
     } else {
-        _, previous_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo-1, w.GetAddress().String())
+        _, previous_balance_e,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo-1, w.GetAddress().String())
         if err != nil {
             return err
         }
     }
-    _, current_balance_e, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo, w.GetAddress().String())
+    _, current_balance_e,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(scid, topo, w.GetAddress().String())
     if err != nil {
         return err
     }
@ -627,6 +634,10 @@ func (w *Wallet_Memory) synchistory_block(scid crypto.Hash, topo int64) (err err
         return fmt.Errorf("getblock rpc failed")
     }
+
+    if bresult.Block_Header.SideBlock { // skip side blocks
+        return nil
+    }
     block_bin, _ := hex.DecodeString(bresult.Blob)
     bl.Deserialize(block_bin)
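
GetEncryptedBalanceAtTopoHeight now returns six values instead of three: besides the bit length and the ElGamal ciphertext, the caller also learns the chain height, the topoheight the daemon actually answered for, and the merkle root of the balance tree. A minimal caller sketch (illustrative only, assuming the walletapi package context from this diff; the variable names and the print are ours):

```go
// Hypothetical caller inside package walletapi, shown only to illustrate
// the widened return signature introduced above.
var scid crypto.Hash // zero SCID selects the native DERO balance
bits, ebal, height, topoheight, merkleroot, err := w.GetEncryptedBalanceAtTopoHeight(scid, -1, w.GetAddress().String())
if err != nil {
	return err
}
// bits is the decoding hint, ebal the encrypted balance; height/topoheight
// pin the chain state the answer refers to, merkleroot the balance tree root.
fmt.Printf("balance ciphertext %p (%d bits) at height %d topo %d root %x\n",
	ebal, bits, height, topoheight, merkleroot[:])
```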

View File

@ -2,16 +2,16 @@ package walletapi
 import "fmt"
 import "math/big"
-//import "encoding/binary"
 import mathrand "math/rand"
-import "github.com/deroproject/derohe/globals"
 import "github.com/deroproject/derohe/rpc"
+import "github.com/deroproject/derohe/globals"
+import "github.com/deroproject/derohe/config"
 import "github.com/deroproject/derohe/transaction"
 import "github.com/deroproject/derohe/cryptography/crypto"
 import "github.com/deroproject/derohe/cryptography/bn256"
 // generate proof etc
+// we use a previous point in history and cryptographically prove that we have not used the funds till now
 func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap map[string]map[string][]byte, rings [][]*bn256.G1, height uint64, scdata rpc.Arguments, roothash []byte, max_bits_array []int) *transaction.Transaction {
     var tx transaction.Transaction
@ -22,6 +22,11 @@ func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap map[stri
     tx.Version = 1
     tx.Height = height
     tx.TransactionType = transaction.NORMAL
+
+    if height % config.BLOCK_BATCH_SIZE != 0 {
+        panic(fmt.Sprintf("Height must be a multiple of %d (config.BLOCK_BATCH_SIZE)", config.BLOCK_BATCH_SIZE))
+    }
+
     /*
         if burn_value >= 1 {
             tx.TransactionType = transaction.BURN_TX
@ -217,9 +222,12 @@ func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap map[stri
     }
-    u := new(bn256.G1).ScalarMult(crypto.HashToPoint(crypto.HashtoNumber(append([]byte(crypto.PROTOCOL_CONSTANT), tx.Payloads[0].Statement.Roothash[:]...))), sender_secret) // this should be moved to generate proof
+    u := new(bn256.G1).ScalarMult(crypto.HeightToPoint(height), sender_secret) // this should be moved to generate proof
+    u1 := new(bn256.G1).ScalarMult(crypto.HeightToPoint(height + config.BLOCK_BATCH_SIZE), sender_secret) // this should be moved to generate proof
     for t := range transfers {
-        tx.Payloads[t].Proof = crypto.GenerateProof(&tx.Payloads[t].Statement, &witness_list[t], u, tx.GetHash(), tx.Payloads[t].BurnValue)
+        tx.Payloads[t].Proof = crypto.GenerateProof(&tx.Payloads[t].Statement, &witness_list[t], u,u1, height, tx.GetHash(), tx.Payloads[t].BurnValue)
     }
     // after the tx is serialized, it loses information which is then fed by blockchain
@ -227,8 +235,8 @@ func (w *Wallet_Memory) BuildTransaction(transfers []rpc.Transfer, emap map[stri
     //fmt.Printf("txhash before %s\n", tx.GetHash())
     for t := range tx.Payloads {
-        if tx.Payloads[t].Proof.Verify(&tx.Payloads[t].Statement, tx.GetHash(), tx.Payloads[t].BurnValue) {
-            //fmt.Printf("TX verified with proof successfuly %s burn_value %d\n", tx.GetHash(), tx.Payloads[t].BurnValue)
+        if tx.Payloads[t].Proof.Verify(&tx.Payloads[t].Statement, tx.GetHash(), height, tx.Payloads[t].BurnValue) {
+            fmt.Printf("TX verified with proof successfuly %s burn_value %d\n", tx.GetHash(), tx.Payloads[t].BurnValue)
             //fmt.Printf("Statement %+v\n", tx.Payloads[t].Statement)
             //fmt.Printf("Proof %+v\n", tx.Payloads[t].Proof)

View File

@ -29,7 +29,7 @@ import "github.com/romana/rlog"
 //import "github.com/vmihailenco/msgpack"
-//import "github.com/deroproject/derohe/config"
+import "github.com/deroproject/derohe/config"
 import "github.com/deroproject/derohe/cryptography/crypto"
 //import "github.com/deroproject/derohe/crypto/ringct"
@ -121,6 +121,7 @@ func (w *Wallet_Memory) PoolTransfer(transfers []rpc.Transfer, scdata rpc.Argume
     entry.Transfer_Everything = transfer_all
     entry.Trigger_Height = int64(w.Daemon_Height)
+    defer w.processPool(false)
     w.account.Lock()
     defer w.account.Unlock()
     w.account.Pool = append(w.account.Pool, entry)
@ -219,9 +220,9 @@ func (w *Wallet_Memory) processPool(checkonly bool) error {
         return fmt.Errorf("gettransa rpc failed err %s", err)
     }
-    if tx_result.Txs_as_hex[0] == "" {
+    if tx_result.Txs_as_hex[0] == "" { // we need to work this code better
         try.Status = "Lost (not in mempool/chain), Waiting for more blocks to retry"
-        if try.Height < (info.StableHeight + 1) { // we have attempted, lets wait some blocks, this needs to be optimized, for instant transfer
+        if try.Height + 3*config.BLOCK_BATCH_SIZE < (info.StableHeight + 1) { // we have attempted, lets wait some blocks, this needs to be optimized, for instant transfer
            // we need to send this tx again now
        }else{
            continue // try other txs
@ -229,18 +230,27 @@ func (w *Wallet_Memory) processPool(checkonly bool) error {
     } else if tx_result.Txs[0].In_pool {
         try.Status = "TX in Mempool"
         continue
-    } else if tx_result.Txs[0].ValidBlock != "" { // if the result is valid in one of the blocks
-        try.Status = fmt.Sprintf("Mined in %s (%d confirmations)", tx_result.Txs[0].ValidBlock, info.TopoHeight-tx_result.Txs[0].Block_Height)
-        if try.Height < (info.StableHeight + 1) { // successful confirmation
+    } else if len(tx_result.Txs[0].MinedBlock) >= 1 { // if the result tx has been mined in one of the blocks
+        var bl_result rpc.GetBlockHeaderByHeight_Result
+        // Issue a call with a response.
+        if err := rpc_client.Call("DERO.GetBlockHeaderByHash", rpc.GetBlockHeaderByHash_Params{Hash: tx_result.Txs[0].MinedBlock[0]}, &bl_result); err != nil {
+            rlog.Errorf("GetBlockHeaderByTopoHeight Call failed: %v", err)
+            return err
+        }
+        try.Status = fmt.Sprintf("Mined in %s (%d confirmations)", tx_result.Txs[0].MinedBlock[0], info.TopoHeight-bl_result.Block_Header.TopoHeight)
+        if try.Height + 2*config.BLOCK_BATCH_SIZE < (info.StableHeight + 1) { // successful confirmation
             w.account.PoolHistory = append(w.account.PoolHistory, w.account.Pool[i])
             rlog.Infof("tx %s confirmed successfully at stableheight %d height %d trigger_height %d\n", try.TXID.String(), info.StableHeight, try.Height, w.account.Pool[i].Trigger_Height)
             w.account.Pool = append(w.account.Pool[:i], w.account.Pool[i+1:]...)
             i-- // so another element at same place gets used
         }
         continue
-    } else {
-        try.Status = fmt.Sprintf("Mined in sideblock (%d confirmations, waiting for more blocks)", info.Height-try.Height)
-        if try.Height < (info.StableHeight + 1) { // we have attempted, lets wait some blocks, this needs to be optimized, for instant transfer
+    } else { // tx may not be in pool and has not been mined, so we wait for couple of blocks
+        try.Status = fmt.Sprintf("unknown state (waiting for some more blocks)")
+        if try.Height + 2*config.BLOCK_BATCH_SIZE < (info.StableHeight + 1) { // we have attempted, lets wait some blocks, this needs to be optimized, for instant transfer
            // we need to send this tx again now
        }else{
            continue // try other txs
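
To make the new confirmation windows concrete: if config.BLOCK_BATCH_SIZE were 16 (an illustrative value only), a transaction attempted at height 100 is archived to PoolHistory once 100 + 2*16 < StableHeight + 1 holds, i.e. once the stable height reaches 132, while a transaction missing from both mempool and chain gets the longer 3-batch window (stable height 148) before a resend is considered.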

View File

@ -23,7 +23,7 @@ import "fmt"
 //import cryptorand "crypto/rand"
 //import "encoding/binary"
-import "encoding/hex"
+//import "encoding/hex"
 //import "encoding/json"
@ -153,7 +153,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
     var rings [][]*bn256.G1
     var max_bits_array []int
-    _, self_e, _ := w.GetEncryptedBalanceAtTopoHeight(transfers[0].SCID, -1, w.GetAddress().String())
+    _, self_e,height,topoheight,treehash, err := w.GetEncryptedBalanceAtTopoHeight(transfers[0].SCID, rpc.RECENT_BATCH_BLOCK, w.GetAddress().String())
     if err != nil {
         fmt.Printf("self unregistered err %s\n", err)
         return
@ -162,17 +162,9 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
     // WaitNewHeightBlock() // wait till a new block at new height is found
     // due to this we weill dispatch a new tx immediate after a block is found for better propagation
-    height := w.Daemon_Height
-    treehash := w.Merkle_Balance_TreeHash
-    treehash_raw, err := hex.DecodeString(treehash)
-    if err != nil {
-        return
-    }
-    if len(treehash_raw) != 32 {
-        err = fmt.Errorf("roothash is not of 32 bytes, probably daemon corruption '%s'", treehash)
-        return
-    }
+    // height := w.Daemon_Height
+    // treehash := w.Merkle_Balance_TreeHash
     for t := range transfers {
@ -187,7 +179,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
     bits_needed := make([]int, ringsize, ringsize)
-    bits_needed[0], self_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, -1, w.GetAddress().String())
+    bits_needed[0], self_e,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, int64(topoheight), w.GetAddress().String())
     if err != nil {
         fmt.Printf("self unregistered err %s\n", err)
         return
@ -201,7 +193,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
         return
     }
     var dest_e *crypto.ElGamal
-    bits_needed[1], dest_e, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, -1, addr.String())
+    bits_needed[1], dest_e,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, int64(topoheight), addr.String())
     if err != nil {
         fmt.Printf(" t %d unregistered1 '%s' %s\n", t, addr, err)
         return
@ -224,7 +216,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
     // fmt.Printf("%s receiver %s sender %s\n", k, receiver_without_payment_id.String(), w.GetAddress().String())
     var ebal *crypto.ElGamal
     var addr *rpc.Address
-    bits_needed[len(ring_members_keys)], ebal, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, -1, k)
+    bits_needed[len(ring_members_keys)], ebal,_,_,_, err = w.GetEncryptedBalanceAtTopoHeight(transfers[t].SCID, int64(topoheight), k)
     if err != nil {
         fmt.Printf(" unregistered %s\n", k)
         return
@ -262,7 +254,7 @@ func (w *Wallet_Memory) TransferPayload0(transfers []rpc.Transfer, transfer_all
     if !dry_run {
         rlog.Debugf("we should build a TX now")
-        tx = w.BuildTransaction(transfers, emap, rings, height, scdata, treehash_raw, max_bits_array)
+        tx = w.BuildTransaction(transfers, emap, rings, height, scdata, treehash[:], max_bits_array)
     }
     return
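
The net effect of these TransferPayload0 changes: the sender, the destination, and every decoy ring member are all sampled at the single topoheight returned by the initial rpc.RECENT_BATCH_BLOCK query, and the merkle root handed to BuildTransaction comes from that same reply, so all ciphertexts and the roothash describe one consistent chain state. Previously each lookup asked for -1 (latest), which could straddle different blocks, and the roothash had to be recovered from w.Merkle_Balance_TreeHash via a hex round-trip.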