Partial merge mining template support for upcoming p2pool merge-mining changes
Some checks failed
continuous-integration/drone/push Build is failing

This commit is contained in:
DataHoarder 2024-04-09 18:30:37 +02:00
parent a79fa9f9f2
commit d85464b4e2
Signed by: DataHoarder
SSH key fingerprint: SHA256:OLTRf6Fl87G52SiR7sWLGNzlJt4WOX+tfI2yxo0z7xk
23 changed files with 873 additions and 598 deletions

252
merge_mining/client.go Normal file
View file

@ -0,0 +1,252 @@
package merge_mining
import (
"bytes"
"errors"
"fmt"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/crypto"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"io"
"net/http"
"net/url"
)
// Client is implemented by clients of a merge mined chain and exposes the
// three calls of the merge mining JSON-RPC protocol.
type Client interface {
// GetChainId returns the unique 32-byte id of the merge mined chain.
GetChainId() (id types.Hash, err error)
// GetJob fetches the current auxiliary job. same is true when the chain's
// current job still has auxiliaryHash (i.e. nothing changed since the last poll).
GetJob(chainAddress string, auxiliaryHash types.Hash, height uint64, prevId types.Hash) (job AuxiliaryJob, same bool, err error)
// SubmitSolution submits a mined Monero block blob that satisfies the
// auxiliary job difficulty, together with the Merkle proof of inclusion.
SubmitSolution(job AuxiliaryJob, blob []byte, proof crypto.MerkleProof) (status string, err error)
}
// GenericClient is a Client implementation speaking JSON-RPC over HTTP.
type GenericClient struct {
// Address URL of the merge mined chain's JSON-RPC endpoint.
Address *url.URL
// Client HTTP client used to issue the requests.
Client *http.Client
}
// NewGenericClient creates a GenericClient for the merge mining JSON-RPC
// endpoint at address. When client is nil, http.DefaultClient is used.
// Returns an error only when address cannot be parsed as a URL.
func NewGenericClient(address string, client *http.Client) (*GenericClient, error) {
	parsedURL, parseErr := url.Parse(address)
	if parseErr != nil {
		return nil, parseErr
	}
	httpClient := client
	if httpClient == nil {
		httpClient = http.DefaultClient
	}
	return &GenericClient{
		Address: parsedURL,
		Client:  httpClient,
	}, nil
}
// RPCJSON is the envelope of a JSON-RPC 2.0 request sent to the merge mined chain.
type RPCJSON struct {
// JSONRPC protocol version, always "2.0" here.
JSONRPC string `json:"jsonrpc"`
// Id request identifier echoed back by the server.
Id string `json:"id"`
// Method name of the RPC method to invoke.
Method string `json:"method"`
// Params optional method parameters; omitted from the payload when nil.
Params any `json:"params,omitempty"`
}
// MergeMiningGetChainIdResult is the JSON-RPC response of merge_mining_get_chain_id.
type MergeMiningGetChainIdResult struct {
Result struct {
// ChainID unique 32-byte id of the merge mined chain.
ChainID types.Hash `json:"chain_id"`
} `json:"result"`
// Error non-empty when the call failed.
Error string `json:"error"`
}
// MergeMiningGetJobJSON are the parameters of the merge_mining_get_job method.
type MergeMiningGetJobJSON struct {
// Address A wallet address on the merge mined chain
Address string `json:"address"`
// AuxiliaryHash Merge mining job that is currently being used
AuxiliaryHash types.Hash `json:"aux_hash"`
// Height Monero height
Height uint64 `json:"height"`
// PreviousId Hash of the previous Monero block
PreviousId types.Hash `json:"prev_id"`
}
// MergeMiningGetJobResult is the JSON-RPC response of merge_mining_get_job.
type MergeMiningGetJobResult struct {
// Result the auxiliary job; may be empty/unchanged, see GenericClient.GetJob.
Result AuxiliaryJob `json:"result"`
// Error non-empty when the call failed.
Error string `json:"error"`
}
// MergeMiningSubmitSolutionJSON are the parameters of the merge_mining_submit_solution method.
type MergeMiningSubmitSolutionJSON struct {
// AuxiliaryBlob blob of data returned by merge_mining_get_job.
AuxiliaryBlob types.Bytes `json:"aux_blob"`
// AuxiliaryHash A 32-byte hex-encoded hash of the aux_blob - the same value that was returned by merge_mining_get_job.
AuxiliaryHash types.Hash `json:"aux_hash"`
// Blob Monero block template that has enough PoW to satisfy difficulty returned by merge_mining_get_job.
// It also must have a merge mining tag in tx_extra of the coinbase transaction.
Blob types.Bytes `json:"blob"`
// MerkleProof A proof that aux_hash was included when calculating Merkle root hash from the merge mining tag
MerkleProof crypto.MerkleProof `json:"merkle_proof"`
}
// MergeMiningSubmitSolutionResult is the JSON-RPC response of merge_mining_submit_solution.
type MergeMiningSubmitSolutionResult struct {
Result struct {
// Status server-reported acceptance status of the submitted solution.
Status string `json:"status"`
} `json:"result"`
// Error non-empty when the call failed.
Error string `json:"error"`
}
// GetChainId calls merge_mining_get_chain_id and returns the unique id of
// the merge mined chain, or types.ZeroHash together with a non-nil error.
func (c *GenericClient) GetChainId() (id types.Hash, err error) {
	data, err := utils.MarshalJSON(RPCJSON{
		JSONRPC: "2.0",
		Id:      "0",
		Method:  "merge_mining_get_chain_id",
	})
	if err != nil {
		return types.ZeroHash, err
	}
	response, err := c.Client.Do(&http.Request{
		Method: "POST",
		URL:    c.Address,
		Header: http.Header{
			"Content-Type": []string{"application/json-rpc"},
		},
		Body:          io.NopCloser(bytes.NewBuffer(data)),
		ContentLength: int64(len(data)),
	})
	if err != nil {
		return types.ZeroHash, err
	}
	// Drain any unread remainder before closing so the transport can reuse
	// the connection. The previous code deferred io.ReadAll before Close;
	// defers run LIFO, so the body was closed before the drain ever ran.
	defer func() {
		_, _ = io.Copy(io.Discard, response.Body)
		_ = response.Body.Close()
	}()
	if response.StatusCode != http.StatusOK {
		return types.ZeroHash, fmt.Errorf("unexpected status code: %d", response.StatusCode)
	}
	resultJSON, err := io.ReadAll(response.Body)
	if err != nil {
		return types.ZeroHash, err
	}
	var result MergeMiningGetChainIdResult
	if err := utils.UnmarshalJSON(resultJSON, &result); err != nil {
		return types.ZeroHash, err
	}
	if result.Error != "" {
		return types.ZeroHash, errors.New(result.Error)
	}
	return result.Result.ChainID, nil
}
// GetJob calls merge_mining_get_job to fetch the current auxiliary job.
// same is true when the returned job is unchanged relative to auxiliaryHash
// (the server may return the same aux_hash or an empty response to signal
// this); in that case job is the zero value and must not be used.
func (c *GenericClient) GetJob(chainAddress string, auxiliaryHash types.Hash, height uint64, prevId types.Hash) (job AuxiliaryJob, same bool, err error) {
	data, err := utils.MarshalJSON(RPCJSON{
		JSONRPC: "2.0",
		Id:      "0",
		Method:  "merge_mining_get_job",
		Params: MergeMiningGetJobJSON{
			Address:       chainAddress,
			AuxiliaryHash: auxiliaryHash,
			Height:        height,
			PreviousId:    prevId,
		},
	})
	if err != nil {
		return AuxiliaryJob{}, false, err
	}
	response, err := c.Client.Do(&http.Request{
		Method: "POST",
		URL:    c.Address,
		Header: http.Header{
			"Content-Type": []string{"application/json-rpc"},
		},
		Body:          io.NopCloser(bytes.NewBuffer(data)),
		ContentLength: int64(len(data)),
	})
	if err != nil {
		return AuxiliaryJob{}, false, err
	}
	// Drain any unread remainder before closing so the transport can reuse
	// the connection. The previous code deferred io.ReadAll before Close;
	// defers run LIFO, so the body was closed before the drain ever ran.
	defer func() {
		_, _ = io.Copy(io.Discard, response.Body)
		_ = response.Body.Close()
	}()
	if response.StatusCode != http.StatusOK {
		return AuxiliaryJob{}, false, fmt.Errorf("unexpected status code: %d", response.StatusCode)
	}
	resultJSON, err := io.ReadAll(response.Body)
	if err != nil {
		return AuxiliaryJob{}, false, err
	}
	var result MergeMiningGetJobResult
	if err := utils.UnmarshalJSON(resultJSON, &result); err != nil {
		return AuxiliaryJob{}, false, err
	}
	if result.Error != "" {
		return AuxiliaryJob{}, false, errors.New(result.Error)
	}
	// If aux_hash is the same as in the request, all other fields will be ignored by P2Pool, so they don't have to be included in the response.
	if result.Result.Hash == auxiliaryHash {
		return AuxiliaryJob{}, true, nil
	}
	// Moreover, empty response will be interpreted as a response having the same aux_hash as in the request. This enables an efficient polling.
	// TODO: properly check for emptiness
	if result.Result.Hash == types.ZeroHash {
		return AuxiliaryJob{}, true, nil
	}
	return result.Result, false, nil
}
// SubmitSolution calls merge_mining_submit_solution with the mined Monero
// block blob, the auxiliary job it satisfies, and the Merkle proof that the
// job's hash was committed to in the merge mining tag. Returns the
// server-reported status string.
func (c *GenericClient) SubmitSolution(job AuxiliaryJob, blob []byte, proof crypto.MerkleProof) (status string, err error) {
	data, err := utils.MarshalJSON(RPCJSON{
		JSONRPC: "2.0",
		Id:      "0",
		Method:  "merge_mining_submit_solution",
		Params: MergeMiningSubmitSolutionJSON{
			AuxiliaryBlob: job.Blob,
			AuxiliaryHash: job.Hash,
			Blob:          blob,
			MerkleProof:   proof,
		},
	})
	if err != nil {
		return "", err
	}
	response, err := c.Client.Do(&http.Request{
		Method: "POST",
		URL:    c.Address,
		Header: http.Header{
			"Content-Type": []string{"application/json-rpc"},
		},
		Body:          io.NopCloser(bytes.NewBuffer(data)),
		ContentLength: int64(len(data)),
	})
	if err != nil {
		return "", err
	}
	// Drain any unread remainder before closing so the transport can reuse
	// the connection. The previous code deferred io.ReadAll before Close;
	// defers run LIFO, so the body was closed before the drain ever ran.
	defer func() {
		_, _ = io.Copy(io.Discard, response.Body)
		_ = response.Body.Close()
	}()
	if response.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status code: %d", response.StatusCode)
	}
	resultJSON, err := io.ReadAll(response.Body)
	if err != nil {
		return "", err
	}
	var result MergeMiningSubmitSolutionResult
	if err := utils.UnmarshalJSON(resultJSON, &result); err != nil {
		return "", err
	}
	if result.Error != "" {
		return "", errors.New(result.Error)
	}
	return result.Result.Status, nil
}

9
merge_mining/job.go Normal file
View file

@ -0,0 +1,9 @@
package merge_mining
import "git.gammaspectra.live/P2Pool/consensus/v3/types"
// AuxiliaryJob is a mining job for an auxiliary (merge mined) chain.
type AuxiliaryJob struct {
// Hash hash of Blob; this is the value committed to in the merge mining tag.
Hash types.Hash `json:"aux_hash"`
// Blob opaque auxiliary chain data returned by merge_mining_get_job.
Blob types.Bytes `json:"aux_blob"`
// Difficulty PoW difficulty that must be met for the auxiliary chain.
Difficulty types.Difficulty `json:"aux_diff"`
}

99
merge_mining/tag.go Normal file
View file

@ -0,0 +1,99 @@
package merge_mining
import (
"crypto/sha256"
"encoding/binary"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"io"
)
// Tag is the decoded merge mining tag placed in tx_extra of the coinbase
// transaction. See Tag.FromReader for the wire format reference.
type Tag struct {
// NumberAuxiliaryChains number of auxiliary chains being merge mined (>= 1).
NumberAuxiliaryChains uint32
// Nonce value chosen so every auxiliary chain id maps to a distinct slot.
Nonce uint32
// RootHash Merkle root hash over the auxiliary chain hashes.
RootHash types.Hash
}
// FromReader Decodes the merge mining tag located in coinbase transaction
// Format according to https://github.com/SChernykh/p2pool/blob/e6b8292d5b59692921af23613456674ccab4958b/docs/MERGE_MINING.MD
func (t *Tag) FromReader(reader utils.ReaderAndByteReader) error {
	// The tag starts with a varint packing three fields:
	// bits 0..2: (bit width of chain count) - 1
	// next width bits: (number of auxiliary chains) - 1
	// remaining bits: the nonce
	treeData, err := binary.ReadUvarint(reader)
	if err != nil {
		return err
	}
	low := uint32(treeData)
	chainBits := 1 + (low & 7)
	t.NumberAuxiliaryChains = 1 + ((low >> 3) & ((1 << chainBits) - 1))
	t.Nonce = uint32(treeData >> (3 + chainBits))
	// The 32-byte Merkle root hash follows the varint.
	_, err = io.ReadFull(reader, t.RootHash[:])
	return err
}
// MarshalBinary encodes the merge mining tag: a varint packing the chain
// count bit width, the chain count and the nonce, followed by RootHash.
// The encoding is the inverse of Tag.FromReader.
func (t *Tag) MarshalBinary() (buf []byte, err error) {
	// nBits is the minimal number of bits (1..8) needed to represent
	// NumberAuxiliaryChains-1. Grow it while capacity is still insufficient.
	// BUG FIX: the previous condition was inverted ((1<<nBits) >= count),
	// growing nBits while it was already big enough; for most chain counts
	// that produced a tag FromReader could not decode back.
	nBits := uint32(1)
	for (1<<nBits) < t.NumberAuxiliaryChains && nBits < 8 {
		nBits++
	}
	merkleTreeData := (uint64(nBits) - 1) | (uint64(t.NumberAuxiliaryChains-1) << 3) | (uint64(t.Nonce) << (3 + nBits))
	buf = make([]byte, utils.UVarInt64Size(merkleTreeData)+types.HashSize)
	n := binary.PutUvarint(buf, merkleTreeData)
	copy(buf[n:], t.RootHash[:])
	return buf, nil
}
// GetAuxiliarySlot Gets the slot for an auxiliary chain, derived from
// SHA-256 over (id || nonce || 'm') reduced modulo the chain count.
// With a single chain (or none) the slot is always 0.
func GetAuxiliarySlot(id types.Hash, nonce, numberAuxiliaryChains uint32) (auxiliarySlot uint32) {
	if numberAuxiliaryChains <= 1 {
		return 0
	}
	const HashKeyMergeMineSlot = 'm'
	// input layout: 32-byte id | 4-byte little-endian nonce | key byte
	var input [types.HashSize + 4 + 1]byte
	n := copy(input[:], id[:])
	binary.LittleEndian.PutUint32(input[n:], nonce)
	input[n+4] = HashKeyMergeMineSlot
	//todo: optimize sha256
	digest := sha256.Sum256(input[:])
	return binary.LittleEndian.Uint32(digest[:]) % numberAuxiliaryChains
}
// FindAuxiliaryNonce searches for the smallest nonce (trying 0 through
// maxNonce, inclusive) for which every id in auxId maps to a distinct
// auxiliary slot. ok is false when no such nonce exists in the range.
func FindAuxiliaryNonce(auxId []types.Hash, maxNonce uint32) (nonce uint32, ok bool) {
	chainCount := uint32(len(auxId))
	if chainCount <= 1 {
		// Zero or one chain always fits in slot 0.
		return 0, true
	}
	occupied := make([]bool, chainCount)
	for candidate := uint32(0); ; candidate++ {
		clear(occupied)
		// Assign chains one by one; stop at the first slot collision.
		assigned := uint32(0)
		for ; assigned < chainCount; assigned++ {
			slot := GetAuxiliarySlot(auxId[assigned], candidate, chainCount)
			if occupied[slot] {
				break
			}
			occupied[slot] = true
		}
		if assigned >= chainCount {
			// Every chain landed in a unique slot.
			return candidate, true
		}
		if candidate == maxNonce {
			return 0, false
		}
	}
}

View file

@ -47,7 +47,7 @@ type Header struct {
}
func (b *Block) MarshalBinary() (buf []byte, err error) {
return b.MarshalBinaryFlags(false, false)
return b.MarshalBinaryFlags(false, false, false)
}
func (b *Block) BufferLength() int {
@ -59,11 +59,11 @@ func (b *Block) BufferLength() int {
utils.UVarInt64Size(len(b.Transactions)) + types.HashSize*len(b.Transactions)
}
func (b *Block) MarshalBinaryFlags(pruned, compact bool) (buf []byte, err error) {
return b.AppendBinaryFlags(make([]byte, 0, b.BufferLength()), pruned, compact)
func (b *Block) MarshalBinaryFlags(compact, pruned, containsAuxiliaryTemplateId bool) (buf []byte, err error) {
return b.AppendBinaryFlags(make([]byte, 0, b.BufferLength()), pruned, compact, containsAuxiliaryTemplateId)
}
func (b *Block) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bool) (buf []byte, err error) {
func (b *Block) AppendBinaryFlags(preAllocatedBuf []byte, compact, pruned, containsAuxiliaryTemplateId bool) (buf []byte, err error) {
buf = preAllocatedBuf
buf = append(buf, b.MajorVersion)
if b.MajorVersion > monero.HardForkSupportedVersion {
@ -77,7 +77,7 @@ func (b *Block) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bool)
buf = append(buf, b.PreviousId[:]...)
buf = binary.LittleEndian.AppendUint32(buf, b.Nonce)
if buf, err = b.Coinbase.AppendBinaryFlags(buf, pruned); err != nil {
if buf, err = b.Coinbase.AppendBinaryFlags(buf, pruned, containsAuxiliaryTemplateId); err != nil {
return nil, err
}
@ -100,20 +100,22 @@ func (b *Block) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bool)
return buf, nil
}
func (b *Block) FromReader(reader utils.ReaderAndByteReader) (err error) {
return b.FromReaderFlags(reader, false)
type PrunedFlagsFunc func() (containsAuxiliaryTemplateId bool)
func (b *Block) FromReader(reader utils.ReaderAndByteReader, canBePruned bool, f PrunedFlagsFunc) (err error) {
return b.FromReaderFlags(reader, false, canBePruned, f)
}
func (b *Block) FromCompactReader(reader utils.ReaderAndByteReader) (err error) {
return b.FromReaderFlags(reader, true)
func (b *Block) FromCompactReader(reader utils.ReaderAndByteReader, canBePruned bool, f PrunedFlagsFunc) (err error) {
return b.FromReaderFlags(reader, true, canBePruned, f)
}
func (b *Block) UnmarshalBinary(data []byte) error {
func (b *Block) UnmarshalBinary(data []byte, canBePruned bool, f PrunedFlagsFunc) error {
reader := bytes.NewReader(data)
return b.FromReader(reader)
return b.FromReader(reader, canBePruned, f)
}
func (b *Block) FromReaderFlags(reader utils.ReaderAndByteReader, compact bool) (err error) {
func (b *Block) FromReaderFlags(reader utils.ReaderAndByteReader, compact, canBePruned bool, f PrunedFlagsFunc) (err error) {
var (
txCount uint64
transactionHash types.Hash
@ -138,9 +140,15 @@ func (b *Block) FromReaderFlags(reader utils.ReaderAndByteReader, compact bool)
return err
}
var containsAuxiliaryTemplateId bool
if canBePruned && f != nil {
containsAuxiliaryTemplateId = f()
}
// Coinbase Tx Decoding
{
if err = b.Coinbase.FromReader(reader); err != nil {
if err = b.Coinbase.FromReader(reader, canBePruned, containsAuxiliaryTemplateId); err != nil {
return err
}
}
@ -200,7 +208,7 @@ func (b *Block) Header() *Header {
PreviousId: b.PreviousId,
Height: b.Coinbase.GenHeight,
Nonce: b.Nonce,
Reward: b.Coinbase.TotalReward,
Reward: b.Coinbase.AuxiliaryData.TotalReward,
Id: b.Id(),
Difficulty: types.ZeroDifficulty,
}

View file

@ -136,7 +136,7 @@ func (c *Client) GetCoinbaseTransaction(txId types.Hash) (*transaction.CoinbaseT
}
tx := &transaction.CoinbaseTransaction{}
if err = tx.UnmarshalBinary(result.Txs[0].PrunedAsHex); err != nil {
if err = tx.UnmarshalBinary(result.Txs[0].PrunedAsHex, false, false); err != nil {
return nil, err
}

View file

@ -8,17 +8,17 @@ import (
type BinaryTreeHash []types.Hash
func (t BinaryTreeHash) leafHash(hasher *sha3.HasherState) (rootHash types.Hash) {
switch len(t) {
func leafHash(data []types.Hash, hasher *sha3.HasherState) (rootHash types.Hash) {
switch len(data) {
case 0:
panic("unsupported length")
case 1:
return t[0]
return data[0]
default:
//only hash the next two items
hasher.Reset()
_, _ = hasher.Write(t[0][:])
_, _ = hasher.Write(t[1][:])
_, _ = hasher.Write(data[0][:])
_, _ = hasher.Write(data[1][:])
HashFastSum(hasher, rootHash[:])
return rootHash
}
@ -26,12 +26,12 @@ func (t BinaryTreeHash) leafHash(hasher *sha3.HasherState) (rootHash types.Hash)
// RootHash Calculates the Merkle root hash of the tree
func (t BinaryTreeHash) RootHash() (rootHash types.Hash) {
hasher := GetKeccak256Hasher()
defer PutKeccak256Hasher(hasher)
count := len(t)
if count <= 2 {
return t.leafHash(hasher)
return leafHash(t, hasher)
}
pow2cnt := utils.PreviousPowerOfTwo(uint64(count))
@ -45,29 +45,29 @@ func (t BinaryTreeHash) RootHash() (rootHash types.Hash) {
offsetTree := temporaryTree[offset:]
for i := range offsetTree {
offsetTree[i] = t[offset+i*2:].leafHash(hasher)
offsetTree[i] = leafHash(t[offset+i*2:], hasher)
}
for pow2cnt >>= 1; pow2cnt > 1; pow2cnt >>= 1 {
for i := range temporaryTree[:pow2cnt] {
temporaryTree[i] = temporaryTree[i*2:].leafHash(hasher)
temporaryTree[i] = leafHash(temporaryTree[i*2:], hasher)
}
}
rootHash = temporaryTree.leafHash(hasher)
rootHash = leafHash(temporaryTree, hasher)
return
}
func (t BinaryTreeHash) MainBranch() (mainBranch []types.Hash) {
hasher := GetKeccak256Hasher()
defer PutKeccak256Hasher(hasher)
count := len(t)
if count <= 2 {
return nil
}
hasher := GetKeccak256Hasher()
defer PutKeccak256Hasher(hasher)
pow2cnt := utils.PreviousPowerOfTwo(uint64(count))
offset := pow2cnt*2 - count
@ -80,7 +80,7 @@ func (t BinaryTreeHash) MainBranch() (mainBranch []types.Hash) {
if (offset + i*2) == 0 {
mainBranch = append(mainBranch, t[1])
}
offsetTree[i] = t[offset+i*2:].leafHash(hasher)
offsetTree[i] = leafHash(t[offset+i*2:], hasher)
}
for pow2cnt >>= 1; pow2cnt > 1; pow2cnt >>= 1 {
@ -89,7 +89,7 @@ func (t BinaryTreeHash) MainBranch() (mainBranch []types.Hash) {
mainBranch = append(mainBranch, temporaryTree[1])
}
temporaryTree[i] = temporaryTree[i*2:].leafHash(hasher)
temporaryTree[i] = leafHash(temporaryTree[i*2:], hasher)
}
}
@ -97,3 +97,74 @@ func (t BinaryTreeHash) MainBranch() (mainBranch []types.Hash) {
return
}
type MerkleProof []types.Hash
func (proof MerkleProof) Verify(h types.Hash, index, count int, rootHash types.Hash) bool {
return proof.GetRoot(h, index, count) == rootHash
}
func pairHash(index int, h, p types.Hash, hasher *sha3.HasherState) (out types.Hash) {
hasher.Reset()
if index&1 > 0 {
_, _ = hasher.Write(p[:])
_, _ = hasher.Write(h[:])
} else {
_, _ = hasher.Write(h[:])
_, _ = hasher.Write(p[:])
}
HashFastSum(hasher, out[:])
return out
}
func (proof MerkleProof) GetRoot(h types.Hash, index, count int) types.Hash {
if count == 1 {
return h
}
if index >= count {
return types.ZeroHash
}
hasher := GetKeccak256Hasher()
defer PutKeccak256Hasher(hasher)
if count == 2 {
if len(proof) == 0 {
return types.ZeroHash
}
h = pairHash(index, h, proof[0], hasher)
} else {
pow2cnt := utils.PreviousPowerOfTwo(uint64(count))
k := pow2cnt*2 - count
var proofIndex int
if index >= k {
index -= k
if len(proof) == 0 {
return types.ZeroHash
}
h = pairHash(index, h, proof[0], hasher)
index = (index >> 1) + k
proofIndex = 1
}
for ; pow2cnt >= 2; proofIndex, index, pow2cnt = proofIndex+1, index>>1, pow2cnt>>1 {
if proofIndex >= len(proof) {
return types.ZeroHash
}
h = pairHash(index, h, proof[proofIndex], hasher)
}
}
return h
}

View file

@ -9,6 +9,7 @@ import (
"git.gammaspectra.live/P2Pool/consensus/v3/monero/crypto"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"io"
)
type CoinbaseTransaction struct {
@ -21,28 +22,37 @@ type CoinbaseTransaction struct {
GenHeight uint64 `json:"gen_height"`
Outputs Outputs `json:"outputs"`
Extra ExtraTags `json:"extra"`
ExtraBaseRCT uint8 `json:"extra_base_rct"`
// AuxiliaryData Used by p2pool serialized pruned blocks
AuxiliaryData CoinbaseTransactionAuxiliaryData `json:"auxiliary_data"`
}
type CoinbaseTransactionAuxiliaryData struct {
// WasPruned TODO: use this in encoding instead of flags?
WasPruned bool `json:"-"`
// OutputsBlobSize length of serialized Outputs. Used by p2pool serialized pruned blocks, filled regardless
OutputsBlobSize uint64 `json:"outputs_blob_size"`
// TotalReward amount of reward existing Outputs. Used by p2pool serialized pruned blocks, filled regardless
TotalReward uint64 `json:"total_reward"`
Extra ExtraTags `json:"extra"`
ExtraBaseRCT uint8 `json:"extra_base_rct"`
// TemplateId Required by sidechain.GetOutputs to speed up repeated broadcasts from different peers
TemplateId types.Hash `json:"template_id,omitempty"`
}
func (c *CoinbaseTransaction) UnmarshalBinary(data []byte) error {
func (c *CoinbaseTransaction) UnmarshalBinary(data []byte, canBePruned, containsAuxiliaryTemplateId bool) error {
reader := bytes.NewReader(data)
return c.FromReader(reader)
return c.FromReader(reader, canBePruned, containsAuxiliaryTemplateId)
}
func (c *CoinbaseTransaction) FromReader(reader utils.ReaderAndByteReader) (err error) {
func (c *CoinbaseTransaction) FromReader(reader utils.ReaderAndByteReader, canBePruned, containsAuxiliaryTemplateId bool) (err error) {
var (
txExtraSize uint64
)
c.TotalReward = 0
c.OutputsBlobSize = 0
c.AuxiliaryData.TotalReward = 0
c.AuxiliaryData.OutputsBlobSize = 0
if c.Version, err = reader.ReadByte(); err != nil {
return err
@ -86,26 +96,39 @@ func (c *CoinbaseTransaction) FromReader(reader utils.ReaderAndByteReader) (err
for _, o := range c.Outputs {
switch o.Type {
case TxOutToTaggedKey:
c.OutputsBlobSize += 1 + types.HashSize + 1
c.AuxiliaryData.OutputsBlobSize += 1 + types.HashSize + 1
case TxOutToKey:
c.OutputsBlobSize += 1 + types.HashSize
c.AuxiliaryData.OutputsBlobSize += 1 + types.HashSize
default:
return fmt.Errorf("unknown %d TXOUT key", o.Type)
}
c.TotalReward += o.Reward
c.AuxiliaryData.TotalReward += o.Reward
}
} else {
if !canBePruned {
return errors.New("pruned outputs not supported")
}
c.AuxiliaryData.WasPruned = true
// Outputs are not in the buffer and must be calculated from sidechain data
// We only have total reward and outputs blob size here
//special case, pruned block. outputs have to be generated from chain
if c.TotalReward, err = binary.ReadUvarint(reader); err != nil {
if c.AuxiliaryData.TotalReward, err = binary.ReadUvarint(reader); err != nil {
return err
}
if c.OutputsBlobSize, err = binary.ReadUvarint(reader); err != nil {
if c.AuxiliaryData.OutputsBlobSize, err = binary.ReadUvarint(reader); err != nil {
return err
}
if containsAuxiliaryTemplateId {
// Required by sidechain.get_outputs_blob() to speed up repeated broadcasts from different peers
if _, err = io.ReadFull(reader, c.AuxiliaryData.TemplateId[:]); err != nil {
return err
}
}
}
if txExtraSize, err = binary.ReadUvarint(reader); err != nil {
@ -134,7 +157,7 @@ func (c *CoinbaseTransaction) FromReader(reader utils.ReaderAndByteReader) (err
}
func (c *CoinbaseTransaction) MarshalBinary() ([]byte, error) {
return c.MarshalBinaryFlags(false)
return c.MarshalBinaryFlags(false, false)
}
func (c *CoinbaseTransaction) BufferLength() int {
@ -146,11 +169,11 @@ func (c *CoinbaseTransaction) BufferLength() int {
utils.UVarInt64Size(c.Extra.BufferLength()) + c.Extra.BufferLength() + 1
}
func (c *CoinbaseTransaction) MarshalBinaryFlags(pruned bool) ([]byte, error) {
return c.AppendBinaryFlags(make([]byte, 0, c.BufferLength()), pruned)
func (c *CoinbaseTransaction) MarshalBinaryFlags(pruned, containsAuxiliaryTemplateId bool) ([]byte, error) {
return c.AppendBinaryFlags(make([]byte, 0, c.BufferLength()), pruned, containsAuxiliaryTemplateId)
}
func (c *CoinbaseTransaction) AppendBinaryFlags(preAllocatedBuf []byte, pruned bool) ([]byte, error) {
func (c *CoinbaseTransaction) AppendBinaryFlags(preAllocatedBuf []byte, pruned, containsAuxiliaryTemplateId bool) ([]byte, error) {
buf := preAllocatedBuf
buf = append(buf, c.Version)
@ -162,10 +185,14 @@ func (c *CoinbaseTransaction) AppendBinaryFlags(preAllocatedBuf []byte, pruned b
if pruned {
//pruned output
buf = binary.AppendUvarint(buf, 0)
buf = binary.AppendUvarint(buf, c.TotalReward)
buf = binary.AppendUvarint(buf, c.AuxiliaryData.TotalReward)
outputs := make([]byte, 0, c.Outputs.BufferLength())
outputs, _ = c.Outputs.AppendBinary(outputs)
buf = binary.AppendUvarint(buf, uint64(len(outputs)))
if containsAuxiliaryTemplateId {
buf = append(buf, c.AuxiliaryData.TemplateId[:]...)
}
} else {
buf, _ = c.Outputs.AppendBinary(buf)
}
@ -201,7 +228,7 @@ func (c *CoinbaseTransaction) SideChainHashingBlob(preAllocatedBuf []byte, zeroT
func (c *CoinbaseTransaction) CalculateId() (hash types.Hash) {
txBytes, _ := c.AppendBinaryFlags(make([]byte, 0, c.BufferLength()), false)
txBytes, _ := c.AppendBinaryFlags(make([]byte, 0, c.BufferLength()), false, false)
return crypto.PooledKeccak256(
// remove base RCT

View file

@ -21,6 +21,7 @@ const TxExtraTagMysteriousMinergate = 0xde
const TxExtraPaddingMaxCount = 255
const TxExtraNonceMaxCount = 255
const TxExtraAdditionalPubKeysMaxCount = 4096
const TxExtraTagMergeMiningMaxCount = types.HashSize + 9
const TxExtraTemplateNonceSize = 4
@ -138,7 +139,14 @@ func (t *ExtraTag) SideChainHashingBlob(preAllocatedBuf []byte, zeroTemplateId b
buf = binary.AppendUvarint(buf, t.VarInt)
}
if zeroTemplateId && t.Tag == TxExtraTagMergeMining {
buf = append(buf, make([]byte, len(t.Data))...)
// TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
// v3 has some extra data included before hash
// serialize everything but the last hash size bytes
dataLen := max(0, len(t.Data)-types.HashSize)
buf = append(buf, t.Data[:dataLen]...)
// serialize zero hash or remaining data only
buf = append(buf, make([]byte, len(t.Data)-dataLen)...)
} else if t.Tag == TxExtraTagNonce {
b := make([]byte, len(t.Data))
//Replace only the first four bytes
@ -207,7 +215,10 @@ func (t *ExtraTag) FromReader(reader utils.ReaderAndByteReader) (err error) {
if t.VarInt, err = binary.ReadUvarint(reader); err != nil {
return err
} else {
t.Data = make([]byte, types.HashSize)
if t.VarInt > TxExtraTagMergeMiningMaxCount {
return errors.New("merge mining is too big")
}
t.Data = make([]byte, t.VarInt)
if _, err = io.ReadFull(reader, t.Data); err != nil {
return err
}

View file

@ -1,8 +1,10 @@
package mainchain
import (
"bytes"
"context"
"fmt"
"git.gammaspectra.live/P2Pool/consensus/v3/merge_mining"
mainblock "git.gammaspectra.live/P2Pool/consensus/v3/monero/block"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/client"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/client/zmq"
@ -124,16 +126,18 @@ func (c *MainChain) Listen() error {
PreviousId: fullChainMain.PrevID,
Nonce: uint32(fullChainMain.Nonce),
Coinbase: transaction.CoinbaseTransaction{
Version: uint8(fullChainMain.MinerTx.Version),
UnlockTime: uint64(fullChainMain.MinerTx.UnlockTime),
InputCount: uint8(len(fullChainMain.MinerTx.Inputs)),
InputType: transaction.TxInGen,
GenHeight: fullChainMain.MinerTx.Inputs[0].Gen.Height,
Outputs: outputs,
OutputsBlobSize: 0,
TotalReward: totalReward,
Extra: extraTags,
ExtraBaseRCT: 0,
Version: uint8(fullChainMain.MinerTx.Version),
UnlockTime: uint64(fullChainMain.MinerTx.UnlockTime),
InputCount: uint8(len(fullChainMain.MinerTx.Inputs)),
InputType: transaction.TxInGen,
GenHeight: fullChainMain.MinerTx.Inputs[0].Gen.Height,
Outputs: outputs,
Extra: extraTags,
ExtraBaseRCT: 0,
AuxiliaryData: transaction.CoinbaseTransactionAuxiliaryData{
OutputsBlobSize: 0,
TotalReward: totalReward,
},
},
Transactions: fullChainMain.TxHashes,
TransactionParentIndices: nil,
@ -223,7 +227,7 @@ func (c *MainChain) HandleMainBlock(b *mainblock.Block) {
Difficulty: types.ZeroDifficulty,
Height: b.Coinbase.GenHeight,
Timestamp: b.Timestamp,
Reward: b.Coinbase.TotalReward,
Reward: b.Coinbase.AuxiliaryData.TotalReward,
Id: b.Id(),
}
@ -248,24 +252,49 @@ func (c *MainChain) HandleMainBlock(b *mainblock.Block) {
c.updateMedianTimestamp()
}()
extraMergeMiningTag := b.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining)
if extraMergeMiningTag == nil {
return
}
sidechainHashData := extraMergeMiningTag.Data
if len(sidechainHashData) != types.HashSize {
defer c.updateTip()
mergeMineTag := b.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining)
if mergeMineTag == nil {
return
}
sidechainId := types.HashFromBytes(sidechainHashData)
shareVersion := sidechain.P2PoolShareVersion(c.sidechain.Consensus(), mainData.Timestamp)
if block := c.sidechain.GetPoolBlockByTemplateId(sidechainId); block != nil {
c.p2pool.UpdateBlockFound(mainData, block)
if shareVersion < sidechain.ShareVersion_V3 {
//TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
if mergeMineTag.VarInt != types.HashSize {
return
}
if len(mergeMineTag.Data) != types.HashSize {
return
}
sidechainId := types.HashFromBytes(mergeMineTag.Data)
if block := c.sidechain.GetPoolBlockByTemplateId(sidechainId); block != nil {
c.p2pool.UpdateBlockFound(mainData, block)
} else {
c.sidechain.WatchMainChainBlock(mainData, sidechainId)
}
} else {
c.sidechain.WatchMainChainBlock(mainData, sidechainId)
}
//properly decode merge mining tag
mergeMineReader := bytes.NewReader(mergeMineTag.Data)
var mergeMiningTag merge_mining.Tag
if err := mergeMiningTag.FromReader(mergeMineReader); err != nil {
return
}
if mergeMineReader.Len() != 0 {
return
}
c.updateTip()
if block := c.sidechain.GetPoolBlockByTemplateId(mergeMiningTag.RootHash); block != nil {
c.p2pool.UpdateBlockFound(mainData, block)
} else {
c.sidechain.WatchMainChainBlock(mainData, mergeMiningTag.RootHash)
}
}
}
func (c *MainChain) GetChainMainByHeight(height uint64) *sidechain.ChainMain {

View file

@ -513,7 +513,8 @@ func (c *Client) OnConnection() {
c.Ban(DefaultBanTime, err)
return
} else {
tipHash := types.HashFromBytes(block.CoinbaseExtra(sidechain.SideTemplateId))
tipHash := block.FastSideTemplateId(c.Owner.Consensus())
if isChainTipBlockRequest {
if lastTip := c.LastKnownTip.Load(); lastTip == nil || lastTip.Side.Height <= block.Side.Height {
if _, err = c.Owner.SideChain().PreprocessBlock(block); err == nil {
@ -545,7 +546,7 @@ func (c *Client) OnConnection() {
}
}
if c.Owner.SideChain().BlockSeen(block) {
//utils.Logf("P2PClient", "Peer %s block id = %s, height = %d (nonce %d, extra_nonce %d) was received before, skipping it", c.AddressPort.String(), types.HashFromBytes(block.CoinbaseExtra(sidechain.SideTemplateId)), block.Side.Height, block.Main.Nonce, block.ExtraNonce())
//utils.Logf("P2PClient", "Peer %s block id = %s, height = %d (nonce %d, extra_nonce %d) was received before, skipping it", c.AddressPort.String(), types.HashFromBytes(block.CoinbaseExtra(sidechain.SideIdentifierHash)), block.Side.Height, block.Main.Nonce, block.ExtraNonce())
break
}
if missingBlocks, err, ban := c.Owner.SideChain().AddPoolBlockExternal(block); err != nil {
@ -605,12 +606,6 @@ func (c *Client) OnConnection() {
}
}
tipHash := types.HashFromBytes(block.CoinbaseExtra(sidechain.SideTemplateId))
c.BroadcastedHashes.Push(tipHash)
c.LastBroadcastTimestamp.Store(uint64(time.Now().Unix()))
//utils.Logf("P2PClient", "Peer %s broadcast tip is at id = %s, height = %d, main height = %d", c.AddressPort.String(), tipHash, block.Side.Height, block.Main.Coinbase.GenHeight)
if missingBlocks, err := c.Owner.SideChain().PreprocessBlock(block); err != nil {
@ -620,6 +615,12 @@ func (c *Client) OnConnection() {
//TODO: ban here, but sort blocks properly, maybe a queue to re-try?
break
} else {
tipHash := block.FastSideTemplateId(c.Owner.Consensus())
c.BroadcastedHashes.Push(tipHash)
c.LastBroadcastTimestamp.Store(uint64(time.Now().Unix()))
if lastTip := c.LastKnownTip.Load(); lastTip == nil || lastTip.Side.Height <= block.Side.Height {
c.LastKnownTip.Store(block)
}
@ -652,7 +653,7 @@ func (c *Client) OnConnection() {
}
if c.Owner.SideChain().BlockSeen(block) {
//utils.Logf("P2PClient", "Peer %s block id = %s, height = %d (nonce %d, extra_nonce %d) was received before, skipping it", c.AddressPort.String(), types.HashFromBytes(block.CoinbaseExtra(sidechain.SideTemplateId)), block.Side.Height, block.Main.Nonce, block.ExtraNonce())
//utils.Logf("P2PClient", "Peer %s block id = %s, height = %d (nonce %d, extra_nonce %d) was received before, skipping it", c.AddressPort.String(), types.HashFromBytes(block.CoinbaseExtra(sidechain.SideIdentifierHash)), block.Side.Height, block.Main.Nonce, block.ExtraNonce())
break
}

View file

@ -1,284 +0,0 @@
package sidechain
import (
"bytes"
"encoding/binary"
"git.gammaspectra.live/P2Pool/consensus/v3/monero"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/address"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"slices"
)
// BlockSaveEpochSize could be up to 256?
const BlockSaveEpochSize = 32
const (
BlockSaveOptionTemplate = 1 << 0
BlockSaveOptionDeterministicPrivateKeySeed = 1 << 1
BlockSaveOptionDeterministicBlobs = 1 << 2
BlockSaveOptionUncles = 1 << 3
BlockSaveFieldSizeInBits = 8
BlockSaveOffsetAddress = BlockSaveFieldSizeInBits
BlockSaveOffsetMainFields = BlockSaveFieldSizeInBits * 2
)
func (c *SideChain) uncompressedBlockId(block *PoolBlock) []byte {
templateId := block.SideTemplateId(c.Consensus())
buf := make([]byte, 0, 4+len(templateId))
return append([]byte("RAW\x00"), buf...)
}
func (c *SideChain) compressedBlockId(block *PoolBlock) []byte {
templateId := block.SideTemplateId(c.Consensus())
buf := make([]byte, 0, 4+len(templateId))
return append([]byte("PAK\x00"), buf...)
}
// saveBlock persists a block asynchronously through the archive server.
//
// Only c.server.Store(block) is currently active: the early return below
// deliberately disables the legacy custom compressed/delta encoder, which is
// kept (unreachable) for reference until it becomes a proper queued worker.
func (c *SideChain) saveBlock(block *PoolBlock) {
	go func() {
		c.server.Store(block)
		return

		//TODO: make this a worker with a queue?

		// ---- disabled legacy path below; unreachable ----

		// Unverified or invalid blocks cannot be delta-encoded against a
		// verified chain: store them raw.
		if !block.Verified.Load() || block.Invalid.Load() {
			blob, _ := block.MarshalBinary()
			if err := c.server.SetBlob(c.uncompressedBlockId(block), blob); err != nil {
				utils.Errorf("", "error saving %s: %s", block.SideTemplateId(c.Consensus()).String(), err.Error())
			}
			return
		}

		// Blocks past the PPLNS window are stored raw as well.
		if block.Depth.Load() >= c.Consensus().ChainWindowSize {
			//TODO: check for compressed blob existence before saving uncompressed
			blob, _ := block.MarshalBinary()
			if err := c.server.SetBlob(c.uncompressedBlockId(block), blob); err != nil {
				utils.Errorf("", "error saving %s: %s", block.SideTemplateId(c.Consensus()).String(), err.Error())
			}
			return
		}
		c.sidechainLock.RLock()
		defer c.sidechainLock.RUnlock()
		calculatedOutputs, _ := c.calculateOutputs(block)
		calcBlob, _ := calculatedOutputs.MarshalBinary()
		blockBlob, _ := block.Main.Coinbase.Outputs.MarshalBinary()
		// Only store the output blob when it differs from what we can
		// recalculate deterministically.
		// (was `bytes.Compare(...) != 0`; bytes.Equal is the idiomatic and
		// cheaper equality test — staticcheck S1004)
		storeBlob := !bytes.Equal(calcBlob, blockBlob)

		// Height of the nearest template (full snapshot) at or below this block.
		fullBlockTemplateHeight := block.Side.Height - (block.Side.Height % BlockSaveEpochSize)
		minerAddressOffset := uint64(0)
		mainFieldsOffset := uint64(0)

		parent := c.getParent(block)
		//only store keys when not deterministic
		isDeterministicPrivateKeySeed := parent != nil && c.isPoolBlockTransactionKeyIsDeterministic(block)
		if isDeterministicPrivateKeySeed && block.ShareVersion() > ShareVersion_V1 {
			expectedSeed := parent.Side.CoinbasePrivateKeySeed
			if parent.Main.PreviousId != block.Main.PreviousId {
				expectedSeed = parent.CalculateTransactionPrivateKeySeed()
			}
			if block.Side.CoinbasePrivateKeySeed != expectedSeed {
				isDeterministicPrivateKeySeed = false
			}
		}

		blob := make([]byte, 0, 4096*2)

		var blockFlags uint64

		if isDeterministicPrivateKeySeed {
			blockFlags |= BlockSaveOptionDeterministicPrivateKeySeed
		}

		if !storeBlob {
			blockFlags |= BlockSaveOptionDeterministicBlobs
		}

		// transactionOffsets[i] == 0 means tx i was not found in any scanned
		// ancestor and must be stored verbatim; otherwise it is a 1-based
		// index into the accumulated parentTransactions list.
		transactionOffsets := make([]uint64, len(block.Main.Transactions))
		transactionOffsetsStored := 0

		parentTransactions := make([]types.Hash, 0, 512)

		// Walk ancestors (up to one epoch) to find back-references for the
		// miner address, the main header fields and the transaction list.
		if block.Side.Height != fullBlockTemplateHeight {
			tmp := parent
			for offset := uint64(1); tmp != nil && offset < BlockSaveEpochSize; offset++ {
				if !tmp.Verified.Load() || tmp.Invalid.Load() {
					break
				}
				if minerAddressOffset == 0 && tmp.Side.PublicKey == block.Side.PublicKey {
					minerAddressOffset = block.Side.Height - tmp.Side.Height
				}
				if mainFieldsOffset == 0 && tmp.Main.Coinbase.Version == block.Main.Coinbase.Version && tmp.Main.Coinbase.UnlockTime == block.Main.Coinbase.UnlockTime && tmp.Main.Coinbase.GenHeight == block.Main.Coinbase.GenHeight && tmp.Main.PreviousId == block.Main.PreviousId && tmp.Main.MajorVersion == block.Main.MajorVersion && tmp.Main.MinorVersion == block.Main.MinorVersion {
					mainFieldsOffset = block.Side.Height - tmp.Side.Height
				}

				if transactionOffsetsStored != len(transactionOffsets) {
					//store last offset to not spend time looking on already checked sections
					prevLen := len(parentTransactions)
					for _, txHash := range tmp.Main.Transactions {
						//if it doesn't exist yet
						if slices.Index(parentTransactions, txHash) == -1 {
							parentTransactions = append(parentTransactions, txHash)
						}
					}

					for tIndex, tOffset := range transactionOffsets {
						if tOffset == 0 {
							if foundIndex := slices.Index(parentTransactions[prevLen:], block.Main.Transactions[tIndex]); foundIndex != -1 {
								transactionOffsets[tIndex] = uint64(prevLen) + uint64(foundIndex) + 1
								transactionOffsetsStored++
							}
						}
					}
				}

				// early exit
				if tmp.Side.Height == fullBlockTemplateHeight || (transactionOffsetsStored == len(transactionOffsets) && mainFieldsOffset != 0 && minerAddressOffset != 0) {
					break
				}
				tmp = c.getParent(tmp)
			}
		}

		if parent == nil || block.Side.Height == fullBlockTemplateHeight { //store full blocks every once in a while, or when there is no parent block
			blockFlags |= BlockSaveOptionTemplate
		} else {
			if minerAddressOffset > 0 {
				blockFlags |= minerAddressOffset << BlockSaveOffsetAddress
			}
			if mainFieldsOffset > 0 {
				blockFlags |= mainFieldsOffset << BlockSaveOffsetMainFields
			}
		}

		if len(block.Side.Uncles) > 0 {
			blockFlags |= BlockSaveOptionUncles
		}

		blob = binary.AppendUvarint(blob, blockFlags)

		// side data
		// miner address
		// NOTE(review): BlockSaveOffsetAddress (8) used as a mask tests bit 3,
		// which is BlockSaveOptionUncles — likely intended to test the address
		// offset field (blockFlags>>BlockSaveOffsetAddress). Confirm against
		// the matching reader before re-enabling this path.
		if (blockFlags&BlockSaveOffsetAddress) == 0 || (blockFlags&BlockSaveOptionTemplate) != 0 {
			blob = append(blob, block.Side.PublicKey[address.PackedAddressSpend][:]...)
			blob = append(blob, block.Side.PublicKey[address.PackedAddressView][:]...)
		} else {
			blob = binary.AppendUvarint(blob, minerAddressOffset)
		}

		// private key seed, if needed
		if (blockFlags&BlockSaveOptionDeterministicPrivateKeySeed) == 0 || (block.ShareVersion() > ShareVersion_V1 && (blockFlags&BlockSaveOptionTemplate) != 0) {
			blob = append(blob, block.Side.CoinbasePrivateKeySeed[:]...)
			//public may be needed on invalid - TODO check
			//blob = append(blob, block.CoinbaseExtra(SideCoinbasePublicKey)...)
		}

		// parent
		blob = append(blob, block.Side.Parent[:]...)

		// uncles
		if (blockFlags & BlockSaveOptionUncles) > 0 {
			blob = binary.AppendUvarint(blob, uint64(len(block.Side.Uncles)))
			for _, uncleId := range block.Side.Uncles {
				blob = append(blob, uncleId[:]...)
			}
		}

		//no height saved except on templates
		if (blockFlags & BlockSaveOptionTemplate) != 0 {
			blob = binary.AppendUvarint(blob, block.Side.Height)
		}

		//difficulty
		if (blockFlags & BlockSaveOptionTemplate) != 0 {
			blob = binary.AppendUvarint(blob, block.Side.Difficulty.Lo)
			blob = binary.AppendUvarint(blob, block.Side.CumulativeDifficulty.Lo)
			blob = binary.AppendUvarint(blob, block.Side.CumulativeDifficulty.Hi)
		} else {
			//store signed difference
			blob = binary.AppendVarint(blob, int64(block.Side.Difficulty.Lo)-int64(parent.Side.Difficulty.Lo))
		}

		// main data
		// header
		// NOTE(review): BlockSaveOffsetMainFields (16) used as a mask tests a
		// bit inside the address-offset field — same concern as above.
		if (blockFlags&BlockSaveOffsetMainFields) == 0 || (blockFlags&BlockSaveOptionTemplate) != 0 {
			blob = append(blob, block.Main.MajorVersion)
			blob = append(blob, block.Main.MinorVersion)
			//timestamp is used as difference only
			blob = binary.AppendUvarint(blob, block.Main.Timestamp)
			blob = append(blob, block.Main.PreviousId[:]...)
			blob = binary.LittleEndian.AppendUint32(blob, block.Main.Nonce)
		} else {
			blob = binary.AppendUvarint(blob, mainFieldsOffset)
			//store signed difference
			blob = binary.AppendVarint(blob, int64(block.Main.Timestamp)-int64(parent.Main.Timestamp))
			blob = binary.LittleEndian.AppendUint32(blob, block.Main.Nonce)
		}

		// coinbase
		if (blockFlags&BlockSaveOffsetMainFields) == 0 || (blockFlags&BlockSaveOptionTemplate) != 0 {
			blob = append(blob, block.Main.Coinbase.Version)
			blob = binary.AppendUvarint(blob, block.Main.Coinbase.UnlockTime)
			blob = binary.AppendUvarint(blob, block.Main.Coinbase.GenHeight)
			blob = binary.AppendUvarint(blob, block.Main.Coinbase.TotalReward-monero.TailEmissionReward)
			blob = binary.AppendUvarint(blob, uint64(len(block.CoinbaseExtra(SideExtraNonce))))
			blob = append(blob, block.CoinbaseExtra(SideExtraNonce)...)
		} else {
			// NOTE(review): mainFieldsOffset, timestamp difference and nonce
			// are written again here although the header branch above already
			// emitted them — looks like a copy/paste duplication; verify
			// against the matching reader before re-enabling this path.
			blob = binary.AppendUvarint(blob, mainFieldsOffset)
			//store signed difference with parent, not template
			blob = binary.AppendVarint(blob, int64(block.Main.Timestamp)-int64(parent.Main.Timestamp))
			blob = binary.LittleEndian.AppendUint32(blob, block.Main.Nonce)
			blob = binary.AppendVarint(blob, int64(block.Main.Coinbase.TotalReward)-int64(parent.Main.Coinbase.TotalReward))
			blob = binary.AppendUvarint(blob, uint64(len(block.CoinbaseExtra(SideExtraNonce))))
			blob = append(blob, block.CoinbaseExtra(SideExtraNonce)...)
		}

		// coinbase blob, if needed
		if (blockFlags & BlockSaveOptionDeterministicBlobs) == 0 {
			blob = append(blob, blockBlob...)
		}

		//transactions
		if (blockFlags & BlockSaveOptionTemplate) != 0 {
			blob = binary.AppendUvarint(blob, uint64(len(block.Main.Transactions)))
			for _, txId := range block.Main.Transactions {
				blob = append(blob, txId[:]...)
			}
		} else {
			blob = binary.AppendUvarint(blob, uint64(len(block.Main.Transactions)))
			for i, v := range transactionOffsets {
				blob = binary.AppendUvarint(blob, v)
				if v == 0 {
					blob = append(blob, block.Main.Transactions[i][:]...)
				}
			}
		}

		fullBlob, _ := block.MarshalBinary()
		prunedBlob, _ := block.MarshalBinaryFlags(true, false)
		compactBlob, _ := block.MarshalBinaryFlags(true, true)

		if (blockFlags & BlockSaveOptionTemplate) != 0 {
			utils.Logf("", "compress block (template) %s in compressed %d bytes, full %d bytes, pruned %d bytes, compact %d bytes", block.SideTemplateId(c.Consensus()).String(), len(blob), len(fullBlob), len(prunedBlob), len(compactBlob))
		} else {
			utils.Logf("", "compress block %s in compressed %d bytes, full %d bytes, pruned %d bytes, compact %d bytes", block.SideTemplateId(c.Consensus()).String(), len(blob), len(fullBlob), len(prunedBlob), len(compactBlob))
		}

		// BUG FIX: this call was missing the prefix argument every sibling
		// call passes (utils.Errorf("", ...) / utils.Logf("", ...)), which
		// would have consumed the template id as the log prefix; also use
		// Errorf for consistency with the other save-error paths.
		if err := c.server.SetBlob(c.compressedBlockId(block), blob); err != nil {
			utils.Errorf("", "error saving %s: %s", block.SideTemplateId(c.Consensus()).String(), err.Error())
		}
	}()
}

View file

@ -188,7 +188,7 @@ func (c *Consensus) verify() bool {
}
func (c *Consensus) CalculateSideTemplateId(share *PoolBlock) (result types.Hash) {
return c.CalculateSideTemplateIdPreAllocated(share, make([]byte, 0, max(share.Main.BufferLength(), share.Side.BufferLength())))
return c.CalculateSideTemplateIdPreAllocated(share, make([]byte, 0, max(share.Main.BufferLength(), share.Side.BufferLength(share.ShareVersion()))))
}
func (c *Consensus) CalculateSideTemplateIdPreAllocated(share *PoolBlock, buf []byte) (result types.Hash) {

View file

@ -148,6 +148,10 @@ func NetworkMajorVersion(consensus *Consensus, height uint64) uint8 {
return result
}
// P2PoolShareVersion
// P2Pool forks to v2 at 2023-03-18 21:00 UTC
// Different miners can have different timestamps,
// so a temporary mix of v1 and v2 blocks is allowed
func P2PoolShareVersion(consensus *Consensus, timestamp uint64) ShareVersion {
hardForks := consensus.HardForks

View file

@ -5,6 +5,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"git.gammaspectra.live/P2Pool/consensus/v3/merge_mining"
"git.gammaspectra.live/P2Pool/consensus/v3/monero"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/address"
mainblock "git.gammaspectra.live/P2Pool/consensus/v3/monero/block"
@ -15,7 +16,6 @@ import (
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
fasthex "github.com/tmthrgd/go-hex"
"io"
"slices"
"sync/atomic"
"unsafe"
@ -29,7 +29,9 @@ const SideExtraNonceMaxSize = SideExtraNonceSize + 10
const (
SideCoinbasePublicKey = transaction.TxExtraTagPubKey
SideExtraNonce = transaction.TxExtraTagNonce
SideTemplateId = transaction.TxExtraTagMergeMining
// SideIdentifierHash Depending on version, this can be a PoolBlock TemplateId or Merkle Root Hash
SideIdentifierHash = transaction.TxExtraTagMergeMining
)
// PoolBlockMaxTemplateSize Max P2P message size (128 KB) minus BLOCK_RESPONSE header (5 bytes)
@ -50,13 +52,15 @@ const (
ShareVersion_None ShareVersion = 0
ShareVersion_V1 ShareVersion = 1
ShareVersion_V2 ShareVersion = 2
// ShareVersion_V3 Tentative future version with merge mining support
ShareVersion_V3 ShareVersion = 3
)
type UniquePoolBlockSlice []*PoolBlock
func (s UniquePoolBlockSlice) Get(id types.Hash) *PoolBlock {
func (s UniquePoolBlockSlice) Get(consensus *Consensus, id types.Hash) *PoolBlock {
if i := slices.IndexFunc(s, func(p *PoolBlock) bool {
return bytes.Compare(p.CoinbaseExtra(SideTemplateId), id[:]) == 0
return p.FastSideTemplateId(consensus) == id
}); i != -1 {
return s[i]
}
@ -85,6 +89,8 @@ type PoolBlock struct {
Side SideData `json:"side"`
//Temporary data structures
mergeMiningTag merge_mining.Tag
cache poolBlockCache
Depth atomic.Uint64 `json:"-"`
Verified atomic.Bool `json:"-"`
@ -127,143 +133,13 @@ func (b *PoolBlock) iteratorUncles(getByTemplateId GetByTemplateIdFunc, uncleFun
return nil
}
// NewShareFromExportedBytes
// Deprecated
func NewShareFromExportedBytes(buf []byte, consensus *Consensus, cacheInterface DerivationCacheInterface) (*PoolBlock, error) {
b := &PoolBlock{}
if len(buf) < 32 {
return nil, errors.New("invalid block data")
}
reader := bytes.NewReader(buf)
var (
err error
version uint64
mainDataSize uint64
mainData []byte
sideDataSize uint64
sideData []byte
)
if err = binary.Read(reader, binary.BigEndian, &version); err != nil {
return nil, err
}
switch version {
case 1:
var mainId types.Hash
if _, err = io.ReadFull(reader, mainId[:]); err != nil {
return nil, err
}
var h types.Hash
// Read PoW hash
if _, err = io.ReadFull(reader, h[:]); err != nil {
return nil, err
}
var mainDifficulty types.Difficulty
if err = binary.Read(reader, binary.BigEndian, &mainDifficulty.Hi); err != nil {
return nil, err
}
if err = binary.Read(reader, binary.BigEndian, &mainDifficulty.Lo); err != nil {
return nil, err
}
mainDifficulty.ReverseBytes()
_ = mainDifficulty
if err = binary.Read(reader, binary.BigEndian, &mainDataSize); err != nil {
return nil, err
}
mainData = make([]byte, mainDataSize)
if _, err = io.ReadFull(reader, mainData); err != nil {
return nil, err
}
if err = binary.Read(reader, binary.BigEndian, &sideDataSize); err != nil {
return nil, err
}
sideData = make([]byte, sideDataSize)
if _, err = io.ReadFull(reader, sideData); err != nil {
return nil, err
}
/*
//Ignore error when unable to read peer
_ = func() error {
var peerSize uint64
if err = binary.Read(reader, binary.BigEndian, &peerSize); err != nil {
return err
}
b.Extra.Peer = make([]byte, peerSize)
if _, err = io.ReadFull(reader, b.Extra.Peer); err != nil {
return err
}
return nil
}()
*/
case 0:
if err = binary.Read(reader, binary.BigEndian, &mainDataSize); err != nil {
return nil, err
}
mainData = make([]byte, mainDataSize)
if _, err = io.ReadFull(reader, mainData); err != nil {
return nil, err
}
if sideData, err = io.ReadAll(reader); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown block version %d", version)
}
if err = b.Main.UnmarshalBinary(mainData); err != nil {
return nil, err
}
if expectedMajorVersion := NetworkMajorVersion(consensus, b.Main.Coinbase.GenHeight); expectedMajorVersion != b.Main.MajorVersion {
return nil, fmt.Errorf("expected major version %d at height %d, got %d", expectedMajorVersion, b.Main.Coinbase.GenHeight, b.Main.MajorVersion)
}
b.CachedShareVersion = b.CalculateShareVersion(consensus)
//TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
if t := b.Main.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining); t == nil || t.VarInt != 32 {
return nil, errors.New("wrong merge mining tag depth")
}
if err = b.Side.UnmarshalBinary(sideData, b.ShareVersion()); err != nil {
return nil, err
}
b.FillPrivateKeys(cacheInterface)
//zero cache as it can be wrong
b.cache.templateId.Store(nil)
b.cache.powHash.Store(nil)
return b, nil
}
func (b *PoolBlock) NeedsCompactTransactionFilling() bool {
return len(b.Main.TransactionParentIndices) > 0 && len(b.Main.TransactionParentIndices) == len(b.Main.Transactions) && slices.Index(b.Main.Transactions, types.ZeroHash) != -1
}
func (b *PoolBlock) FillTransactionsFromTransactionParentIndices(parent *PoolBlock) error {
func (b *PoolBlock) FillTransactionsFromTransactionParentIndices(consensus *Consensus, parent *PoolBlock) error {
if b.NeedsCompactTransactionFilling() {
if parent != nil && types.HashFromBytes(parent.CoinbaseExtra(SideTemplateId)) == b.Side.Parent {
if parent != nil && parent.FastSideTemplateId(consensus) == b.Side.Parent {
for i, parentIndex := range b.Main.TransactionParentIndices {
if parentIndex != 0 {
// p2pool stores coinbase transaction hash as well, decrease
@ -282,9 +158,9 @@ func (b *PoolBlock) FillTransactionsFromTransactionParentIndices(parent *PoolBlo
return nil
}
func (b *PoolBlock) FillTransactionParentIndices(parent *PoolBlock) bool {
func (b *PoolBlock) FillTransactionParentIndices(consensus *Consensus, parent *PoolBlock) bool {
if len(b.Main.Transactions) != len(b.Main.TransactionParentIndices) {
if parent != nil && types.HashFromBytes(parent.CoinbaseExtra(SideTemplateId)) == b.Side.Parent {
if parent != nil && parent.FastSideTemplateId(consensus) == b.Side.Parent {
b.Main.TransactionParentIndices = make([]uint64, len(b.Main.Transactions))
//do not fail if not found
for i, txHash := range b.Main.Transactions {
@ -303,9 +179,6 @@ func (b *PoolBlock) FillTransactionParentIndices(parent *PoolBlock) bool {
}
func (b *PoolBlock) CalculateShareVersion(consensus *Consensus) ShareVersion {
// P2Pool forks to v2 at 2023-03-18 21:00 UTC
// Different miners can have different timestamps,
// so a temporary mix of v1 and v2 blocks is allowed
return P2PoolShareVersion(consensus, b.Main.Timestamp)
}
@ -328,6 +201,20 @@ func (b *PoolBlock) ExtraNonce() uint32 {
return binary.LittleEndian.Uint32(extraNonce)
}
// FastSideTemplateId Returns SideTemplateId from either coinbase extra tags or pruned data, or main block if not pruned
func (b *PoolBlock) FastSideTemplateId(consensus *Consensus) types.Hash {
if b.ShareVersion() > ShareVersion_V2 {
if b.Main.Coinbase.AuxiliaryData.WasPruned {
return b.Main.Coinbase.AuxiliaryData.TemplateId
} else {
//fallback to full calculation
return b.SideTemplateId(consensus)
}
} else {
return types.HashFromBytes(b.CoinbaseExtra(SideIdentifierHash))
}
}
func (b *PoolBlock) CoinbaseExtra(tag CoinbaseExtraTag) []byte {
switch tag {
case SideExtraNonce:
@ -337,12 +224,22 @@ func (b *PoolBlock) CoinbaseExtra(tag CoinbaseExtraTag) []byte {
}
return t.Data
}
case SideTemplateId:
case SideIdentifierHash:
if t := b.Main.Coinbase.Extra.GetTag(uint8(tag)); t != nil {
if len(t.Data) != types.HashSize {
return nil
if b.ShareVersion() > ShareVersion_V2 {
mergeMineReader := bytes.NewReader(t.Data)
var mergeMiningTag merge_mining.Tag
if err := mergeMiningTag.FromReader(mergeMineReader); err != nil || mergeMineReader.Len() != 0 {
return nil
}
return mergeMiningTag.RootHash[:]
} else {
if t.VarInt != types.HashSize || len(t.Data) != types.HashSize {
return nil
}
return t.Data
}
return t.Data
}
case SideCoinbasePublicKey:
if t := b.Main.Coinbase.Extra.GetTag(uint8(tag)); t != nil {
@ -360,9 +257,10 @@ func (b *PoolBlock) MainId() types.Hash {
return b.Main.Id()
}
func (b *PoolBlock) FullId() FullId {
func (b *PoolBlock) FullId(consensus *Consensus) FullId {
var buf FullId
sidechainId := b.CoinbaseExtra(SideTemplateId)
sidechainId := b.FastSideTemplateId(consensus)
copy(buf[:], sidechainId[:])
binary.LittleEndian.PutUint32(buf[types.HashSize:], b.Main.Nonce)
copy(buf[types.HashSize+unsafe.Sizeof(b.Main.Nonce):], b.CoinbaseExtra(SideExtraNonce)[:SideExtraNonceSize])
@ -443,6 +341,10 @@ func (b *PoolBlock) CoinbaseId() types.Hash {
}
}
func (b *PoolBlock) MergeMiningTag() merge_mining.Tag {
return b.mergeMiningTag
}
func (b *PoolBlock) PowHash(hasher randomx.Hasher, f mainblock.GetSeedByHeightFunc) types.Hash {
h, _ := b.PowHashWithError(hasher, f)
return h
@ -471,7 +373,7 @@ func (b *PoolBlock) UnmarshalBinary(consensus *Consensus, derivationCache Deriva
}
func (b *PoolBlock) BufferLength() int {
return b.Main.BufferLength() + b.Side.BufferLength()
return b.Main.BufferLength() + b.Side.BufferLength(b.ShareVersion())
}
func (b *PoolBlock) MarshalBinary() ([]byte, error) {
@ -485,7 +387,7 @@ func (b *PoolBlock) MarshalBinaryFlags(pruned, compact bool) ([]byte, error) {
func (b *PoolBlock) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bool) (buf []byte, err error) {
buf = preAllocatedBuf
if buf, err = b.Main.AppendBinaryFlags(buf, pruned, compact); err != nil {
if buf, err = b.Main.AppendBinaryFlags(buf, compact, pruned, b.ShareVersion() > ShareVersion_V2); err != nil {
return nil, err
} else if buf, err = b.Side.AppendBinary(buf, b.ShareVersion()); err != nil {
return nil, err
@ -498,46 +400,56 @@ func (b *PoolBlock) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bo
}
func (b *PoolBlock) FromReader(consensus *Consensus, derivationCache DerivationCacheInterface, reader utils.ReaderAndByteReader) (err error) {
if err = b.Main.FromReader(reader); err != nil {
if err = b.Main.FromReader(reader, true, func() (containsAuxiliaryTemplateId bool) {
return b.CalculateShareVersion(consensus) > ShareVersion_V2
}); err != nil {
return err
}
if expectedMajorVersion := NetworkMajorVersion(consensus, b.Main.Coinbase.GenHeight); expectedMajorVersion != b.Main.MajorVersion {
return fmt.Errorf("expected major version %d at height %d, got %d", expectedMajorVersion, b.Main.Coinbase.GenHeight, b.Main.MajorVersion)
}
//TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
if t := b.Main.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining); t == nil || t.VarInt != 32 {
return errors.New("wrong merge mining tag depth")
}
b.CachedShareVersion = b.CalculateShareVersion(consensus)
if err = b.Side.FromReader(reader, b.ShareVersion()); err != nil {
return err
}
b.FillPrivateKeys(derivationCache)
return nil
return b.consensusDecode(consensus, derivationCache, reader)
}
// FromCompactReader used in Protocol 1.1 and above
func (b *PoolBlock) FromCompactReader(consensus *Consensus, derivationCache DerivationCacheInterface, reader utils.ReaderAndByteReader) (err error) {
if err = b.Main.FromCompactReader(reader); err != nil {
if err = b.Main.FromCompactReader(reader, true, func() (containsAuxiliaryTemplateId bool) {
return b.CalculateShareVersion(consensus) > ShareVersion_V2
}); err != nil {
return err
}
return b.consensusDecode(consensus, derivationCache, reader)
}
func (b *PoolBlock) consensusDecode(consensus *Consensus, derivationCache DerivationCacheInterface, reader utils.ReaderAndByteReader) (err error) {
if expectedMajorVersion := NetworkMajorVersion(consensus, b.Main.Coinbase.GenHeight); expectedMajorVersion != b.Main.MajorVersion {
return fmt.Errorf("expected major version %d at height %d, got %d", expectedMajorVersion, b.Main.Coinbase.GenHeight, b.Main.MajorVersion)
}
//TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
if t := b.Main.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining); t == nil || t.VarInt != 32 {
return errors.New("wrong merge mining tag depth")
if b.CachedShareVersion == ShareVersion_None {
b.CachedShareVersion = b.CalculateShareVersion(consensus)
}
b.CachedShareVersion = b.CalculateShareVersion(consensus)
mergeMineTag := b.Main.Coinbase.Extra.GetTag(transaction.TxExtraTagMergeMining)
if mergeMineTag == nil {
return errors.New("missing merge mining tag")
}
if b.ShareVersion() < ShareVersion_V3 {
//TODO: this is to comply with non-standard p2pool serialization, see https://github.com/SChernykh/p2pool/issues/249
if mergeMineTag.VarInt != types.HashSize {
return errors.New("wrong merge mining tag depth")
}
} else {
//properly decode merge mining tag
mergeMineReader := bytes.NewReader(mergeMineTag.Data)
if err = b.mergeMiningTag.FromReader(mergeMineReader); err != nil {
return err
}
if mergeMineReader.Len() != 0 {
return errors.New("wrong merge mining tag len")
}
}
if err = b.Side.FromReader(reader, b.ShareVersion()); err != nil {
return err
@ -550,13 +462,13 @@ func (b *PoolBlock) FromCompactReader(consensus *Consensus, derivationCache Deri
// PreProcessBlock processes and fills the block data from either pruned or compact modes
func (b *PoolBlock) PreProcessBlock(consensus *Consensus, derivationCache DerivationCacheInterface, preAllocatedShares Shares, difficultyByHeight mainblock.GetDifficultyByHeightFunc, getTemplateById GetByTemplateIdFunc) (missingBlocks []types.Hash, err error) {
return b.PreProcessBlockWithOutputs(getTemplateById, func() (outputs transaction.Outputs, bottomHeight uint64) {
return b.PreProcessBlockWithOutputs(consensus, getTemplateById, func() (outputs transaction.Outputs, bottomHeight uint64) {
return CalculateOutputs(b, consensus, difficultyByHeight, getTemplateById, derivationCache, preAllocatedShares, nil)
})
}
// PreProcessBlockWithOutputs processes and fills the block data from either pruned or compact modes
func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFunc, calculateOutputs func() (outputs transaction.Outputs, bottomHeight uint64)) (missingBlocks []types.Hash, err error) {
func (b *PoolBlock) PreProcessBlockWithOutputs(consensus *Consensus, getTemplateById GetByTemplateIdFunc, calculateOutputs func() (outputs transaction.Outputs, bottomHeight uint64)) (missingBlocks []types.Hash, err error) {
getTemplateByIdFillingTx := func(h types.Hash) *PoolBlock {
chain := make(UniquePoolBlockSlice, 0, 1)
@ -568,7 +480,7 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFu
break
}
if len(chain) > 1 {
if chain[len(chain)-2].FillTransactionsFromTransactionParentIndices(chain[len(chain)-1]) == nil {
if chain[len(chain)-2].FillTransactionsFromTransactionParentIndices(consensus, chain[len(chain)-1]) == nil {
if !chain[len(chain)-2].NeedsCompactTransactionFilling() {
//early abort if it can all be filled
chain = chain[:len(chain)-1]
@ -582,7 +494,7 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFu
}
//skips last entry
for i := len(chain) - 2; i >= 0; i-- {
if err := chain[i].FillTransactionsFromTransactionParentIndices(chain[i+1]); err != nil {
if err := chain[i].FillTransactionsFromTransactionParentIndices(consensus, chain[i+1]); err != nil {
return nil
}
}
@ -596,7 +508,7 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFu
missingBlocks = append(missingBlocks, b.Side.Parent)
return missingBlocks, errors.New("parent does not exist in compact block")
}
if err := b.FillTransactionsFromTransactionParentIndices(parent); err != nil {
if err := b.FillTransactionsFromTransactionParentIndices(consensus, parent); err != nil {
return nil, fmt.Errorf("error filling transactions for block: %w", err)
}
}
@ -605,7 +517,7 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFu
if parent == nil {
parent = getTemplateByIdFillingTx(b.Side.Parent)
}
b.FillTransactionParentIndices(parent)
b.FillTransactionParentIndices(consensus, parent)
}
if len(b.Main.Coinbase.Outputs) == 0 {
@ -617,8 +529,8 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(getTemplateById GetByTemplateIdFu
if outputBlob, err := b.Main.Coinbase.Outputs.AppendBinary(make([]byte, 0, b.Main.Coinbase.Outputs.BufferLength())); err != nil {
return nil, fmt.Errorf("error filling outputs for block: %s", err)
} else if uint64(len(outputBlob)) != b.Main.Coinbase.OutputsBlobSize {
return nil, fmt.Errorf("error filling outputs for block: invalid output blob size, got %d, expected %d", b.Main.Coinbase.OutputsBlobSize, len(outputBlob))
} else if uint64(len(outputBlob)) != b.Main.Coinbase.AuxiliaryData.OutputsBlobSize {
return nil, fmt.Errorf("error filling outputs for block: invalid output blob size, got %d, expected %d", b.Main.Coinbase.AuxiliaryData.OutputsBlobSize, len(outputBlob))
}
}
@ -685,7 +597,7 @@ func (b *PoolBlock) GetPrivateKeySeed() types.Hash {
func (b *PoolBlock) CalculateTransactionPrivateKeySeed() types.Hash {
if b.ShareVersion() > ShareVersion_V1 {
preAllocatedMainData := make([]byte, 0, b.Main.BufferLength())
preAllocatedSideData := make([]byte, 0, b.Side.BufferLength())
preAllocatedSideData := make([]byte, 0, b.Side.BufferLength(b.ShareVersion()))
mainData, _ := b.Main.SideChainHashingBlob(preAllocatedMainData, false)
sideData, _ := b.Side.AppendBinary(preAllocatedSideData, b.ShareVersion())
return p2poolcrypto.CalculateTransactionPrivateKeySeed(

View file

@ -40,8 +40,8 @@ func testPoolBlock(b *PoolBlock, t *testing.T, expectedBufferLength int, majorVe
t.Fatal()
}
if b.FullId().TemplateId() != templateId {
t.Logf("%s != %s", b.FullId().TemplateId(), templateId)
if b.FullId(ConsensusDefault).TemplateId() != templateId {
t.Logf("%s != %s", b.FullId(ConsensusDefault).TemplateId(), templateId)
t.Fatal()
}

View file

@ -6,6 +6,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"git.gammaspectra.live/P2Pool/consensus/v3/merge_mining"
"git.gammaspectra.live/P2Pool/consensus/v3/monero"
mainblock "git.gammaspectra.live/P2Pool/consensus/v3/monero/block"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/client"
@ -68,10 +69,11 @@ type SideChain struct {
sidechainLock sync.RWMutex
watchBlock *ChainMain
watchBlockSidechainId types.Hash
watchBlock *ChainMain
watchBlockPossibleId types.Hash
blocksByTemplateId *swiss.Map[types.Hash, *PoolBlock]
blocksByMerkleRoot *swiss.Map[types.Hash, *PoolBlock]
blocksByHeight *swiss.Map[uint64, []*PoolBlock]
blocksByHeightKeysSorted bool
blocksByHeightKeys []uint64
@ -131,8 +133,8 @@ func (c *SideChain) PreCalcFinished() bool {
func (c *SideChain) PreprocessBlock(block *PoolBlock) (missingBlocks []types.Hash, err error) {
var preAllocatedShares Shares
if len(block.Main.Coinbase.Outputs) == 0 {
//cannot use SideTemplateId() as it might not be proper to calculate yet. fetch from coinbase only here
if b := c.GetPoolBlockByTemplateId(types.HashFromBytes(block.CoinbaseExtra(SideTemplateId))); b != nil {
//cannot use SideTemplateId() as it might not be proper to calculate yet. fetch appropriate identifier from coinbase only here
if b := c.GetPoolBlockByTemplateId(block.FastSideTemplateId(c.Consensus())); b != nil {
block.Main.Coinbase.Outputs = b.Main.Coinbase.Outputs
} else {
preAllocatedShares = c.preAllocatedSharesPool.Get()
@ -143,8 +145,16 @@ func (c *SideChain) PreprocessBlock(block *PoolBlock) (missingBlocks []types.Has
return block.PreProcessBlock(c.Consensus(), c.derivationCache, preAllocatedShares, c.server.GetDifficultyByHeight, c.GetPoolBlockByTemplateId)
}
func (c *SideChain) isWatched(block *PoolBlock) bool {
if block.ShareVersion() > ShareVersion_V2 {
return c.watchBlockPossibleId == block.MergeMiningTag().RootHash
} else {
return c.watchBlockPossibleId == block.FastSideTemplateId(c.Consensus())
}
}
func (c *SideChain) fillPoolBlockTransactionParentIndices(block *PoolBlock) {
block.FillTransactionParentIndices(c.getParent(block))
block.FillTransactionParentIndices(c.Consensus(), c.getParent(block))
}
func (c *SideChain) isPoolBlockTransactionKeyIsDeterministic(block *PoolBlock) bool {
@ -230,7 +240,12 @@ func (c *SideChain) BlockSeen(block *PoolBlock) bool {
return true
}
fullId := block.FullId()
if block.ShareVersion() > ShareVersion_V2 {
// need to pre-fill to have working SideTemplateId
_, _ = c.PreprocessBlock(block)
}
fullId := block.FullId(c.Consensus())
c.seenBlocksLock.Lock()
defer c.seenBlocksLock.Unlock()
@ -243,7 +258,7 @@ func (c *SideChain) BlockSeen(block *PoolBlock) bool {
}
func (c *SideChain) BlockUnsee(block *PoolBlock) {
fullId := block.FullId()
fullId := block.FullId(c.Consensus())
c.seenBlocksLock.Lock()
defer c.seenBlocksLock.Unlock()
@ -267,7 +282,7 @@ func (c *SideChain) AddPoolBlockExternal(block *PoolBlock) (missingBlocks []type
// Technically some p2pool node could keep stuffing block with transactions until reward is less than 0.6 XMR
// But default transaction picking algorithm never does that. It's better to just ban such nodes
if block.Main.Coinbase.TotalReward < monero.TailEmissionReward {
if block.Main.Coinbase.AuxiliaryData.TotalReward < monero.TailEmissionReward {
return nil, errors.New("block reward too low"), true
}
@ -291,9 +306,24 @@ func (c *SideChain) AddPoolBlockExternal(block *PoolBlock) (missingBlocks []type
}
}
templateId := types.HashFromBytes(block.CoinbaseExtra(SideTemplateId))
if templateId != block.SideTemplateId(c.Consensus()) {
return nil, fmt.Errorf("invalid template id %s, expected %s", templateId.String(), block.SideTemplateId(c.Consensus()).String()), true
templateId := block.SideTemplateId(c.Consensus())
if block.ShareVersion() > ShareVersion_V2 {
if templateId != block.FastSideTemplateId(c.Consensus()) {
return nil, fmt.Errorf("invalid template id %s, expected %s", block.FastSideTemplateId(c.Consensus()), templateId), true
}
//verify template id against merkle proof
mmTag := block.MergeMiningTag()
auxiliarySlot := merge_mining.GetAuxiliarySlot(c.Consensus().Id, mmTag.Nonce, mmTag.NumberAuxiliaryChains)
if !block.Side.MerkleProof.Verify(templateId, int(auxiliarySlot), int(mmTag.NumberAuxiliaryChains), mmTag.RootHash) {
return nil, fmt.Errorf("could not verify template id %s merkle proof against merkle tree root hash %s (number of chains = %d, nonce = %d, auxiliary slot = %d)", templateId, mmTag.RootHash, mmTag.NumberAuxiliaryChains, mmTag.Nonce, auxiliarySlot), true
}
} else {
if templateId != types.HashFromBytes(block.CoinbaseExtra(SideIdentifierHash)) {
return nil, fmt.Errorf("invalid template id %s, expected %s", block.SideTemplateId(c.Consensus()), templateId), true
}
}
if block.Side.Difficulty.Cmp64(c.Consensus().MinimumDifficulty) < 0 {
@ -336,9 +366,9 @@ func (c *SideChain) AddPoolBlockExternal(block *PoolBlock) (missingBlocks []type
block.Verified.Store(true)
block.Invalid.Store(false)
if block.SideTemplateId(c.Consensus()) == c.watchBlockSidechainId {
if c.isWatched(block) {
c.server.UpdateBlockFound(c.watchBlock, block)
c.watchBlockSidechainId = types.ZeroHash
c.watchBlockPossibleId = types.ZeroHash
}
block.Depth.Store(otherBlock.Depth.Load())
@ -433,9 +463,9 @@ func (c *SideChain) AddPoolBlock(block *PoolBlock) (err error) {
utils.Logf("SideChain", "add_block: height = %d, id = %s, mainchain height = %d, verified = %t, total = %d", block.Side.Height, block.SideTemplateId(c.Consensus()), block.Main.Coinbase.GenHeight, block.Verified.Load(), c.blocksByTemplateId.Count())
if block.SideTemplateId(c.Consensus()) == c.watchBlockSidechainId {
if c.isWatched(block) {
c.server.UpdateBlockFound(c.watchBlock, block)
c.watchBlockSidechainId = types.ZeroHash
c.watchBlockPossibleId = types.ZeroHash
}
if l, ok := c.blocksByHeight.Get(block.Side.Height); ok {
@ -448,6 +478,10 @@ func (c *SideChain) AddPoolBlock(block *PoolBlock) (err error) {
c.blocksByHeightKeys = append(c.blocksByHeightKeys, block.Side.Height)
}
if block.ShareVersion() > ShareVersion_V2 {
c.blocksByMerkleRoot.Put(block.MergeMiningTag().RootHash, block)
}
c.updateDepths(block)
defer func() {
@ -490,7 +524,7 @@ func (c *SideChain) verifyLoop(blockToVerify *PoolBlock) (err error) {
err = invalid
}
} else if verification != nil {
//utils.Logf("SideChain", "can't verify block at height = %d, id = %s, mainchain height = %d, mined by %s: %s", block.Side.Height, block.SideTemplateId(c.Consensus()), block.Main.Coinbase.GenHeight, block.GetAddress().ToBase58(), verification.Error())
//utils.Logf("SideChain", "can't verify block at height = %d, id = %s, mainchain height = %d, mined by %s: %s", block.Side.Height, block.SideIdentifierHash(c.Consensus()), block.Main.Coinbase.GenHeight, block.GetAddress().ToBase58(), verification.Error())
block.Verified.Store(false)
block.Invalid.Store(false)
} else {
@ -545,7 +579,10 @@ func (c *SideChain) verifyLoop(blockToVerify *PoolBlock) (err error) {
}
//store for faster startup
c.saveBlock(block)
go func() {
c.server.Store(block)
return
}()
// Try to verify blocks on top of this one
for i := uint64(1); i <= UncleBlockDepth; i++ {
@ -736,8 +773,8 @@ func (c *SideChain) verifyBlock(block *PoolBlock) (verification error, invalid e
result += o.Reward
}
return
}(); totalReward != block.Main.Coinbase.TotalReward {
return nil, fmt.Errorf("invalid total reward, got %d, expected %d", block.Main.Coinbase.TotalReward, totalReward)
}(); totalReward != block.Main.Coinbase.AuxiliaryData.TotalReward {
return nil, fmt.Errorf("invalid total reward, got %d, expected %d", block.Main.Coinbase.AuxiliaryData.TotalReward, totalReward)
} else if rewards := SplitReward(c.preAllocatedRewards, totalReward, shares); len(rewards) != len(block.Main.Coinbase.Outputs) {
return nil, fmt.Errorf("invalid number of outputs, got %d, expected %d", len(block.Main.Coinbase.Outputs), len(rewards))
} else {
@ -978,8 +1015,18 @@ func (c *SideChain) pruneOldBlocks() {
c.blocksByTemplateId.Delete(templateId)
numBlocksPruned++
} else {
utils.Logf("SideChain", "blocksByHeight and blocksByTemplateId are inconsistent at height = %d, id = %s", height, block.SideTemplateId(c.Consensus()))
utils.Logf("SideChain", "blocksByHeight and blocksByTemplateId are inconsistent at height = %d, id = %s", height, templateId)
}
if block.ShareVersion() > ShareVersion_V2 {
rootHash := block.MergeMiningTag().RootHash
if c.blocksByMerkleRoot.Has(rootHash) {
c.blocksByMerkleRoot.Delete(rootHash)
} else {
utils.Logf("SideChain", "blocksByHeight and m_blocksByMerkleRoot are inconsistent at height = %d, id = %s", height, rootHash)
}
}
v = slices.Delete(v, i, i+1)
// Empty cache here
@ -1104,6 +1151,28 @@ func (c *SideChain) getPoolBlockByTemplateId(id types.Hash) *PoolBlock {
return b
}
// GetPoolBlockByMerkleRoot returns the pool block whose merge mining
// merkle root equals id, or nil if no such block is indexed.
// It takes a read lock on the sidechain for the duration of the lookup.
func (c *SideChain) GetPoolBlockByMerkleRoot(id types.Hash) *PoolBlock {
	c.sidechainLock.RLock()
	block := c.getPoolBlockByMerkleRoot(id)
	c.sidechainLock.RUnlock()
	return block
}
// getPoolBlockByMerkleRoot looks id up in the merkle-root index and returns
// the matching block, or nil when absent (zero value of *PoolBlock).
// Callers must hold sidechainLock (read or write).
func (c *SideChain) getPoolBlockByMerkleRoot(id types.Hash) *PoolBlock {
	block, _ := c.blocksByMerkleRoot.Get(id)
	return block
}
// GetPoolBlockByCoinbaseExtraIdentifier resolves the identifier carried in a
// block's coinbase extra field to a pool block, dispatching on share version:
// shares newer than ShareVersion_V2 embed the merge mining merkle root there,
// older shares embed the sidechain template id directly.
// Returns nil for the zero hash or when no matching block is known.
func (c *SideChain) GetPoolBlockByCoinbaseExtraIdentifier(version ShareVersion, id types.Hash) *PoolBlock {
	if id == types.ZeroHash {
		return nil
	}
	// Avoid else after a terminating return (Go code review convention).
	if version > ShareVersion_V2 {
		return c.GetPoolBlockByMerkleRoot(id)
	}
	return c.GetPoolBlockByTemplateId(id)
}
func (c *SideChain) GetPoolBlocksByHeight(height uint64) []*PoolBlock {
c.sidechainLock.RLock()
defer c.sidechainLock.RUnlock()
@ -1168,7 +1237,7 @@ func (c *SideChain) WatchMainChainBlock(mainData *ChainMain, possibleId types.Ha
defer c.sidechainLock.Unlock()
c.watchBlock = mainData
c.watchBlockSidechainId = possibleId
c.watchBlockPossibleId = possibleId
}
func (c *SideChain) GetHighestKnownTip() *PoolBlock {

View file

@ -160,12 +160,13 @@ func TestSideChainMiniPreFork(t *testing.T) {
testSideChain(s, t, f, 2424349, 2696040, block2420028, block2420027)
}
func benchmarkResetState(tip, parent *PoolBlock, templateId types.Hash, fullId FullId, difficulty types.Difficulty, blocksByHeightKeys []uint64, s *SideChain) {
func benchmarkResetState(tip, parent *PoolBlock, templateId, merkleRoot types.Hash, fullId FullId, difficulty types.Difficulty, blocksByHeightKeys []uint64, s *SideChain) {
//Remove states in maps
s.blocksByHeight.Delete(tip.Side.Height)
s.blocksByHeightKeys = blocksByHeightKeys
s.blocksByHeightKeysSorted = true
s.blocksByTemplateId.Delete(templateId)
s.blocksByMerkleRoot.Delete(merkleRoot)
s.seenBlocks.Delete(fullId)
// Update tip and depths
@ -196,7 +197,8 @@ func benchSideChain(b *testing.B, s *SideChain, tipHash types.Hash) {
return u == tip.Side.Height
})
s.blocksByTemplateId.Delete(tip.SideTemplateId(s.Consensus()))
s.seenBlocks.Delete(tip.FullId())
s.blocksByMerkleRoot.Delete(tip.MergeMiningTag().RootHash)
s.seenBlocks.Delete(tip.FullId(s.Consensus()))
tip = s.GetParent(tip)
if tip == nil {
@ -205,7 +207,8 @@ func benchSideChain(b *testing.B, s *SideChain, tipHash types.Hash) {
}
}
templateId := tip.SideTemplateId(s.Consensus())
fullId := tip.FullId()
merkleRoot := tip.MergeMiningTag().RootHash
fullId := tip.FullId(s.Consensus())
parent := s.GetParent(tip)
@ -216,14 +219,14 @@ func benchSideChain(b *testing.B, s *SideChain, tipHash types.Hash) {
return u == tip.Side.Height
})
benchmarkResetState(tip, parent, templateId, fullId, difficulty, slices.Clone(blocksByHeightKeys), s)
benchmarkResetState(tip, parent, templateId, merkleRoot, fullId, difficulty, slices.Clone(blocksByHeightKeys), s)
var err error
b.StartTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
benchmarkResetState(tip, parent, templateId, fullId, difficulty, slices.Clone(blocksByHeightKeys), s)
benchmarkResetState(tip, parent, templateId, merkleRoot, fullId, difficulty, slices.Clone(blocksByHeightKeys), s)
b.StartTimer()
_, err, _ = s.AddPoolBlockExternal(tip)
if err != nil {
@ -333,7 +336,7 @@ func BenchmarkSideChainDefault_SplitReward(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
rewards := SplitReward(preAllocatedRewards, tip.Main.Coinbase.TotalReward, shares)
rewards := SplitReward(preAllocatedRewards, tip.Main.Coinbase.AuxiliaryData.TotalReward, shares)
if rewards == nil {
b.Error("nil rewards")
return

View file

@ -12,6 +12,8 @@ import (
"io"
)
const MaxMerkleProofSize = 7
type SideData struct {
PublicKey address.PackedAddress `json:"public_key"`
CoinbasePrivateKeySeed types.Hash `json:"coinbase_private_key_seed,omitempty"`
@ -23,7 +25,10 @@ type SideData struct {
Difficulty types.Difficulty `json:"difficulty"`
CumulativeDifficulty types.Difficulty `json:"cumulative_difficulty"`
// ExtraBuffer available in ShareVersion ShareVersion_V2 and above
// MerkleProof Merkle proof for merge mining, available in ShareVersion ShareVersion_V3 and above
MerkleProof crypto.MerkleProof `json:"merkle_proof,omitempty"`
// ExtraBuffer Arbitrary extra data, available in ShareVersion ShareVersion_V2 and above
ExtraBuffer SideDataExtraBuffer `json:"extra_buffer,omitempty"`
}
@ -34,20 +39,30 @@ type SideDataExtraBuffer struct {
SideChainExtraNonce uint32 `json:"side_chain_extra_nonce"`
}
func (b *SideData) BufferLength() int {
return crypto.PublicKeySize +
func (b *SideData) BufferLength(version ShareVersion) (size int) {
size = crypto.PublicKeySize +
crypto.PublicKeySize +
types.HashSize +
crypto.PrivateKeySize +
utils.UVarInt64Size(len(b.Uncles)) + len(b.Uncles)*types.HashSize +
utils.UVarInt64Size(b.Height) +
utils.UVarInt64Size(b.Difficulty.Lo) + utils.UVarInt64Size(b.Difficulty.Hi) +
utils.UVarInt64Size(b.CumulativeDifficulty.Lo) + utils.UVarInt64Size(b.CumulativeDifficulty.Hi) +
4*4
utils.UVarInt64Size(b.CumulativeDifficulty.Lo) + utils.UVarInt64Size(b.CumulativeDifficulty.Hi)
if version > ShareVersion_V1 {
// ExtraBuffer
size += 4 * 4
}
if version > ShareVersion_V2 {
// MerkleProof
size += utils.UVarInt64Size(len(b.MerkleProof)) + len(b.MerkleProof)*types.HashSize
}
return size
}
func (b *SideData) MarshalBinary(version ShareVersion) (buf []byte, err error) {
return b.AppendBinary(make([]byte, 0, b.BufferLength()), version)
return b.AppendBinary(make([]byte, 0, b.BufferLength(version)), version)
}
func (b *SideData) AppendBinary(preAllocatedBuf []byte, version ShareVersion) (buf []byte, err error) {
@ -69,6 +84,17 @@ func (b *SideData) AppendBinary(preAllocatedBuf []byte, version ShareVersion) (b
buf = binary.AppendUvarint(buf, b.Difficulty.Hi)
buf = binary.AppendUvarint(buf, b.CumulativeDifficulty.Lo)
buf = binary.AppendUvarint(buf, b.CumulativeDifficulty.Hi)
if version > ShareVersion_V2 {
if len(b.MerkleProof) > MaxMerkleProofSize {
return nil, fmt.Errorf("merkle proof too large: %d > %d", len(b.MerkleProof), MaxMerkleProofSize)
}
buf = append(buf, uint8(len(b.MerkleProof)))
for _, h := range b.MerkleProof {
buf = append(buf, h[:]...)
}
}
if version > ShareVersion_V1 {
buf = binary.LittleEndian.AppendUint32(buf, uint32(b.ExtraBuffer.SoftwareId))
buf = binary.LittleEndian.AppendUint32(buf, uint32(b.ExtraBuffer.SoftwareVersion))
@ -83,6 +109,9 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
var (
uncleCount uint64
uncleHash types.Hash
merkleProofSize uint8
merkleProofHash types.Hash
)
if _, err = io.ReadFull(reader, b.PublicKey[address.PackedAddressSpend][:]); err != nil {
return err
@ -139,6 +168,24 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
return err
}
}
if version > ShareVersion_V2 {
if merkleProofSize, err = reader.ReadByte(); err != nil {
return err
}
if merkleProofSize > MaxMerkleProofSize {
return fmt.Errorf("merkle proof too large: %d > %d", len(b.MerkleProof), MaxMerkleProofSize)
}
b.MerkleProof = make(crypto.MerkleProof, 0, merkleProofSize)
for i := 0; i < int(merkleProofSize); i++ {
if _, err = io.ReadFull(reader, merkleProofHash[:]); err != nil {
return err
}
b.MerkleProof = append(b.MerkleProof, merkleProofHash)
}
}
if version > ShareVersion_V1 {
if err = binary.Read(reader, binary.LittleEndian, &b.ExtraBuffer.SoftwareId); err != nil {
return fmt.Errorf("within extra buffer: %w", err)

View file

@ -28,7 +28,7 @@ func CalculateOutputs(block *PoolBlock, consensus *Consensus, difficultyByHeight
if preAllocatedRewards == nil {
preAllocatedRewards = make([]uint64, 0, len(tmpShares))
}
tmpRewards := SplitReward(preAllocatedRewards, block.Main.Coinbase.TotalReward, tmpShares)
tmpRewards := SplitReward(preAllocatedRewards, block.Main.Coinbase.AuxiliaryData.TotalReward, tmpShares)
if tmpShares == nil || tmpRewards == nil || len(tmpRewards) != len(tmpShares) {
return nil, 0

View file

@ -598,18 +598,24 @@ func (s *Server) BuildTemplate(addr address.PackedAddress, forceNewTemplate bool
}
func (s *Server) createCoinbaseTransaction(txType uint8, shares sidechain.Shares, rewards []uint64, maxRewardsAmountsWeight uint64, final bool) (tx transaction.CoinbaseTransaction, err error) {
//TODO: v3
mergeMineTag := slices.Clone(types.ZeroHash[:])
tx = transaction.CoinbaseTransaction{
Version: 2,
UnlockTime: s.minerData.Height + monero.MinerRewardUnlockTime,
InputCount: 1,
InputType: transaction.TxInGen,
GenHeight: s.minerData.Height,
TotalReward: func() (v uint64) {
for i := range rewards {
v += rewards[i]
}
return
}(),
AuxiliaryData: transaction.CoinbaseTransactionAuxiliaryData{
TotalReward: func() (v uint64) {
for i := range rewards {
v += rewards[i]
}
return
}(),
},
Extra: transaction.ExtraTags{
transaction.ExtraTag{
Tag: transaction.TxExtraTagPubKey,
@ -623,10 +629,11 @@ func (s *Server) createCoinbaseTransaction(txType uint8, shares sidechain.Shares
Data: make(types.Bytes, sidechain.SideExtraNonceSize),
},
transaction.ExtraTag{
//TODO: fix this for V3
Tag: transaction.TxExtraTagMergeMining,
VarInt: 32,
VarInt: uint64(len(mergeMineTag)),
HasVarInt: true,
Data: slices.Clone(types.ZeroHash[:]),
Data: mergeMineTag,
},
},
ExtraBaseRCT: 0,

View file

@ -211,7 +211,7 @@ func (tpl *Template) HashingBlob(hasher *sha3.HasherState, preAllocatedBuffer []
}
func TemplateFromPoolBlock(b *sidechain.PoolBlock) (tpl *Template, err error) {
if b.ShareVersion() < sidechain.ShareVersion_V1 {
if b.ShareVersion() != sidechain.ShareVersion_V2 {
return nil, errors.New("unsupported share version")
}
totalLen := b.BufferLength()

View file

@ -20,7 +20,7 @@ func TestTemplate(t *testing.T) {
preAllocatedBuffer := make([]byte, 0, len(tpl.Buffer))
blockTemplateId := types.HashFromBytes(b.CoinbaseExtra(sidechain.SideTemplateId))
blockTemplateId := b.FastSideTemplateId(sidechain.ConsensusDefault)
if tplBuf := tpl.Blob(preAllocatedBuffer, b.Main.Nonce, b.ExtraNonce(), b.Side.ExtraBuffer.RandomNumber, b.Side.ExtraBuffer.SideChainExtraNonce, blockTemplateId); bytes.Compare(tplBuf, buf) != 0 {
if len(tplBuf) == len(buf) {

View file

@ -15,6 +15,16 @@ type MinerData struct {
MedianWeight uint64 `json:"median_weight"`
AlreadyGeneratedCoins uint64 `json:"already_generated_coins"`
MedianTimestamp uint64 `json:"median_timestamp"`
TimeReceived time.Time `json:"time_received"`
TxBacklog mempool.Mempool `json:"tx_backlog"`
TimeReceived time.Time `json:"time_received"`
AuxiliaryChains []AuxiliaryChainData `json:"aux_chains,omitempty"`
AuxiliaryNonce uint32 `json:"aux_nonce,omitempty"`
}
// AuxiliaryChainData describes one merge-mined auxiliary chain entry as
// reported alongside Monero miner data (see MinerData.AuxiliaryChains).
// NOTE(review): fields carry no json tags, unlike MinerData, so they marshal
// under their Go names (UniqueId, Data, Difficulty) — confirm this matches the
// expected wire format before adding tags.
type AuxiliaryChainData struct {
	// UniqueId identifies the auxiliary chain.
	UniqueId types.Hash
	// Data is the auxiliary chain's current job data hash.
	Data types.Hash
	// Difficulty is the auxiliary chain's target difficulty.
	Difficulty types.Difficulty
}