Compare commits

...

5 commits

15 changed files with 519 additions and 233 deletions

View file

@ -12,8 +12,11 @@ import (
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"io"
"math"
)
const MaxTransactionCount = uint64(math.MaxUint64) / types.HashSize
type Block struct {
MajorVersion uint8 `json:"major_version"`
MinorVersion uint8 `json:"minor_version"`
@ -51,7 +54,8 @@ func (b *Block) MarshalBinary() (buf []byte, err error) {
}
func (b *Block) BufferLength() int {
return 1 + 1 +
return utils.UVarInt64Size(b.MajorVersion) +
utils.UVarInt64Size(b.MinorVersion) +
utils.UVarInt64Size(b.Timestamp) +
types.HashSize +
4 +
@ -65,14 +69,18 @@ func (b *Block) MarshalBinaryFlags(compact, pruned, containsAuxiliaryTemplateId
func (b *Block) AppendBinaryFlags(preAllocatedBuf []byte, compact, pruned, containsAuxiliaryTemplateId bool) (buf []byte, err error) {
buf = preAllocatedBuf
buf = append(buf, b.MajorVersion)
if b.MajorVersion > monero.HardForkSupportedVersion {
return nil, fmt.Errorf("unsupported version %d", b.MajorVersion)
}
buf = append(buf, b.MinorVersion)
if b.MinorVersion < b.MajorVersion {
return nil, fmt.Errorf("minor version %d smaller than major %d", b.MinorVersion, b.MajorVersion)
}
buf = binary.AppendUvarint(buf, uint64(b.MajorVersion))
buf = binary.AppendUvarint(buf, uint64(b.MinorVersion))
buf = binary.AppendUvarint(buf, b.Timestamp)
buf = append(buf, b.PreviousId[:]...)
buf = binary.LittleEndian.AppendUint32(buf, b.Nonce)
@ -117,17 +125,38 @@ func (b *Block) UnmarshalBinary(data []byte, canBePruned bool, f PrunedFlagsFunc
func (b *Block) FromReaderFlags(reader utils.ReaderAndByteReader, compact, canBePruned bool, f PrunedFlagsFunc) (err error) {
var (
txCount uint64
transactionHash types.Hash
txCount uint64
majorVersion, minorVersion uint64
transactionHash types.Hash
)
if b.MajorVersion, err = reader.ReadByte(); err != nil {
if majorVersion, err = binary.ReadUvarint(reader); err != nil {
return err
}
if b.MinorVersion, err = reader.ReadByte(); err != nil {
if majorVersion > monero.HardForkSupportedVersion {
return fmt.Errorf("unsupported version %d", majorVersion)
}
if minorVersion, err = binary.ReadUvarint(reader); err != nil {
return err
}
if minorVersion < majorVersion {
return fmt.Errorf("minor version %d smaller than major version %d", minorVersion, majorVersion)
}
if majorVersion > math.MaxUint8 {
return fmt.Errorf("unsupported major version %d", majorVersion)
}
if minorVersion > math.MaxUint8 {
return fmt.Errorf("unsupported minor version %d", minorVersion)
}
b.MajorVersion = uint8(majorVersion)
b.MinorVersion = uint8(minorVersion)
if b.Timestamp, err = binary.ReadUvarint(reader); err != nil {
return err
}
@ -157,43 +186,43 @@ func (b *Block) FromReaderFlags(reader utils.ReaderAndByteReader, compact, canBe
if txCount, err = binary.ReadUvarint(reader); err != nil {
return err
}
} else if txCount > MaxTransactionCount {
return fmt.Errorf("transaction count count too large: %d > %d", txCount, MaxTransactionCount)
} else if txCount > 0 {
if compact {
// preallocate with soft cap
b.Transactions = make([]types.Hash, 0, min(8192, txCount))
b.TransactionParentIndices = make([]uint64, 0, min(8192, txCount))
if compact {
if txCount < 8192 {
b.Transactions = make([]types.Hash, 0, txCount)
b.TransactionParentIndices = make([]uint64, 0, txCount)
}
var parentIndex uint64
for i := 0; i < int(txCount); i++ {
if parentIndex, err = binary.ReadUvarint(reader); err != nil {
return err
}
if parentIndex == 0 {
//not in lookup
if _, err = io.ReadFull(reader, transactionHash[:]); err != nil {
var parentIndex uint64
for i := 0; i < int(txCount); i++ {
if parentIndex, err = binary.ReadUvarint(reader); err != nil {
return err
}
if parentIndex == 0 {
//not in lookup
if _, err = io.ReadFull(reader, transactionHash[:]); err != nil {
return err
}
b.Transactions = append(b.Transactions, transactionHash)
} else {
b.Transactions = append(b.Transactions, types.ZeroHash)
}
b.TransactionParentIndices = append(b.TransactionParentIndices, parentIndex)
}
} else {
// preallocate with soft cap
b.Transactions = make([]types.Hash, 0, min(8192, txCount))
for i := 0; i < int(txCount); i++ {
if _, err = io.ReadFull(reader, transactionHash[:]); err != nil {
return err
}
b.Transactions = append(b.Transactions, transactionHash)
} else {
b.Transactions = append(b.Transactions, types.ZeroHash)
}
b.TransactionParentIndices = append(b.TransactionParentIndices, parentIndex)
}
} else {
if txCount < 8192 {
b.Transactions = make([]types.Hash, 0, txCount)
}
for i := 0; i < int(txCount); i++ {
if _, err = io.ReadFull(reader, transactionHash[:]); err != nil {
return err
}
b.Transactions = append(b.Transactions, transactionHash)
}
}
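The new MaxTransactionCount bound (math.MaxUint64 / types.HashSize) rejects counts whose hash data could never fit in memory before anything is allocated, and the soft cap on preallocation keeps a hostile count from forcing a large up-front allocation. A minimal self-contained sketch of the same guard pattern, with a stand-in HashSize rather than the repository's types:

package sketch

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"
	"math"
)

const HashSize = 32 // stand-in for types.HashSize

// MaxCount caps a declared count so count*HashSize cannot overflow uint64.
const MaxCount = uint64(math.MaxUint64) / HashSize

func readHashes(r *bufio.Reader) ([][HashSize]byte, error) {
	count, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	if count > MaxCount {
		return nil, fmt.Errorf("count too large: %d > %d", count, MaxCount)
	}
	// preallocate with a soft cap, growing later only if the stream really is that long
	hashes := make([][HashSize]byte, 0, min(8192, count))
	var h [HashSize]byte
	for i := uint64(0); i < count; i++ {
		if _, err = io.ReadFull(r, h[:]); err != nil {
			return nil, err
		}
		hashes = append(hashes, h)
	}
	return hashes, nil
}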

View file

@ -38,6 +38,7 @@ type CoinbaseTransactionAuxiliaryData struct {
// TotalReward amount of reward in existing Outputs. Used by p2pool serialized pruned blocks, filled regardless
TotalReward uint64 `json:"total_reward"`
// TemplateId Required by sidechain.GetOutputs to speed up repeated broadcasts from different peers
// This must be filled when preprocessing
TemplateId types.Hash `json:"template_id,omitempty"`
}

View file

@ -40,6 +40,12 @@ const (
// PoolBlockMaxTemplateSize Max P2P message size (128 KB) minus BLOCK_RESPONSE header (5 bytes)
const PoolBlockMaxTemplateSize = 128*1024 - (1 + 4)
// PoolBlockMaxSideChainHeight 1000 years at 1 block/second. It should be enough for any normal use.
const PoolBlockMaxSideChainHeight = 31556952000
// PoolBlockMaxCumulativeDifficulty 1000 years at 1 TH/s. It should be enough for any normal use.
var PoolBlockMaxCumulativeDifficulty = types.NewDifficulty(13019633956666736640, 1710)
type UniquePoolBlockSlice []*PoolBlock
func (s UniquePoolBlockSlice) Get(consensus *Consensus, id types.Hash) *PoolBlock {
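Both limits are straightforward to verify: a Gregorian year is 365.2425 days, so 1000 years at one block per second is 31,556,952,000 blocks, and the same span at 10^12 hashes per second gives a cumulative difficulty whose two 64-bit words are exactly the numbers in the literal above. A small check using only the standard library (only the two words are checked; the argument order of types.NewDifficulty is not assumed here):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Gregorian year: 365.2425 days = 31_556_952 seconds (exact in integer math)
	const secondsPerYear = 365*86400 + 2425*86400/10000
	const maxSideChainHeight = 1000 * secondsPerYear
	fmt.Println(uint64(maxSideChainHeight)) // 31556952000

	// 1000 years at 1 TH/s, split into the high and low 64-bit words of a 128-bit value
	hi, lo := bits.Mul64(maxSideChainHeight, 1_000_000_000_000)
	fmt.Println(hi, lo) // 1710 13019633956666736640
}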
@ -256,6 +262,7 @@ func (b *PoolBlock) ShareVersion() ShareVersion {
}
func (b *PoolBlock) ShareVersionSignaling() ShareVersion {
// Signaling before V2 hardfork
if b.ShareVersion() == ShareVersion_V1 && (b.ExtraNonce()&0xFF000000 == 0xFF000000) {
return ShareVersion_V2
}
@ -272,7 +279,7 @@ func (b *PoolBlock) ExtraNonce() uint32 {
// FastSideTemplateId Returns SideTemplateId from either coinbase extra tags or pruned data, or main block if not pruned
func (b *PoolBlock) FastSideTemplateId(consensus *Consensus) types.Hash {
if b.ShareVersion() > ShareVersion_V2 {
if b.ShareVersion() >= ShareVersion_V3 {
if b.Main.Coinbase.AuxiliaryData.WasPruned {
return b.Main.Coinbase.AuxiliaryData.TemplateId
} else {
@ -295,7 +302,8 @@ func (b *PoolBlock) CoinbaseExtra(tag CoinbaseExtraTag) []byte {
}
case SideIdentifierHash:
if t := b.Main.Coinbase.Extra.GetTag(uint8(tag)); t != nil {
if b.ShareVersion() > ShareVersion_V2 {
if b.ShareVersion() >= ShareVersion_V3 {
// new merge mining tag
mergeMineReader := bytes.NewReader(t.Data)
var mergeMiningTag merge_mining.Tag
if err := mergeMiningTag.FromReader(mergeMineReader); err != nil || mergeMineReader.Len() != 0 {
@ -456,7 +464,7 @@ func (b *PoolBlock) MarshalBinaryFlags(pruned, compact bool) ([]byte, error) {
func (b *PoolBlock) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bool) (buf []byte, err error) {
buf = preAllocatedBuf
if buf, err = b.Main.AppendBinaryFlags(buf, compact, pruned, b.ShareVersion() > ShareVersion_V2); err != nil {
if buf, err = b.Main.AppendBinaryFlags(buf, compact, pruned, b.ShareVersion() >= ShareVersion_V3); err != nil {
return nil, err
} else if buf, err = b.Side.AppendBinary(buf, b.ShareVersion()); err != nil {
return nil, err
@ -470,7 +478,7 @@ func (b *PoolBlock) AppendBinaryFlags(preAllocatedBuf []byte, pruned, compact bo
func (b *PoolBlock) FromReader(consensus *Consensus, derivationCache DerivationCacheInterface, reader utils.ReaderAndByteReader) (err error) {
if err = b.Main.FromReader(reader, true, func() (containsAuxiliaryTemplateId bool) {
return b.CalculateShareVersion(consensus) > ShareVersion_V2
return b.CalculateShareVersion(consensus) >= ShareVersion_V3
}); err != nil {
return err
}
@ -481,7 +489,7 @@ func (b *PoolBlock) FromReader(consensus *Consensus, derivationCache DerivationC
// FromCompactReader used in Protocol 1.1 and above
func (b *PoolBlock) FromCompactReader(consensus *Consensus, derivationCache DerivationCacheInterface, reader utils.ReaderAndByteReader) (err error) {
if err = b.Main.FromCompactReader(reader, true, func() (containsAuxiliaryTemplateId bool) {
return b.CalculateShareVersion(consensus) > ShareVersion_V2
return b.CalculateShareVersion(consensus) >= ShareVersion_V3
}); err != nil {
return err
}
@ -603,6 +611,12 @@ func (b *PoolBlock) PreProcessBlockWithOutputs(consensus *Consensus, getTemplate
}
}
if b.ShareVersion() >= ShareVersion_V3 && b.Main.Coinbase.AuxiliaryData.WasPruned && b.Main.Coinbase.AuxiliaryData.TemplateId == types.ZeroHash {
// Fill template id for pruned broadcasts
templateId := b.SideTemplateId(consensus)
b.Main.Coinbase.AuxiliaryData.TemplateId = templateId
}
return nil, nil
}
@ -611,7 +625,7 @@ func (b *PoolBlock) NeedsPreProcess() bool {
}
func (b *PoolBlock) FillPrivateKeys(derivationCache DerivationCacheInterface) {
if b.ShareVersion() > ShareVersion_V1 {
if b.ShareVersion() >= ShareVersion_V2 {
if b.Side.CoinbasePrivateKey == crypto.ZeroPrivateKeyBytes {
//Fill Private Key
kP := derivationCache.GetDeterministicTransactionKey(b.GetPrivateKeySeed(), b.Main.PreviousId)
@ -651,7 +665,7 @@ func (b *PoolBlock) IsProofHigherThanDifficultyWithError(hasher randomx.Hasher,
}
func (b *PoolBlock) GetPrivateKeySeed() types.Hash {
if b.ShareVersion() > ShareVersion_V1 {
if b.ShareVersion() >= ShareVersion_V2 {
return b.Side.CoinbasePrivateKeySeed
}
@ -664,7 +678,7 @@ func (b *PoolBlock) GetPrivateKeySeed() types.Hash {
}
func (b *PoolBlock) CalculateTransactionPrivateKeySeed() types.Hash {
if b.ShareVersion() > ShareVersion_V1 {
if b.ShareVersion() >= ShareVersion_V2 {
preAllocatedMainData := make([]byte, 0, b.Main.BufferLength())
preAllocatedSideData := make([]byte, 0, b.Side.BufferLength(b.ShareVersion()))
mainData, _ := b.Main.SideChainHashingBlob(preAllocatedMainData, false)
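Most edits in this file swap open-ended version checks (greater than ShareVersion_V2 or ShareVersion_V1) for the explicit lower bound of the feature (at least ShareVersion_V3 or ShareVersion_V2); the two forms are equivalent whenever the versions are consecutive integers, and the new form names the version that introduced the behaviour. A hedged sketch of that equivalence; the concrete constant values are an assumption, not taken from the repository:

package main

import "fmt"

type ShareVersion uint8

// Assumed iota-style layout with consecutive values.
const (
	ShareVersion_None ShareVersion = iota
	ShareVersion_V1
	ShareVersion_V2
	ShareVersion_V3
)

func main() {
	for v := ShareVersion_None; v <= ShareVersion_V3; v++ {
		// With consecutive values the two predicates agree for every input.
		fmt.Println(v, v > ShareVersion_V2, v >= ShareVersion_V3)
	}
}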

View file

@ -147,7 +147,7 @@ func (c *SideChain) PreprocessBlock(block *PoolBlock) (missingBlocks []types.Has
}
func (c *SideChain) isWatched(block *PoolBlock) bool {
if block.ShareVersion() > ShareVersion_V2 {
if block.ShareVersion() >= ShareVersion_V3 {
return c.watchBlockPossibleId == block.MergeMiningTag().RootHash
} else {
return c.watchBlockPossibleId == block.FastSideTemplateId(c.Consensus())
@ -304,7 +304,7 @@ func (c *SideChain) PoolBlockExternalVerify(block *PoolBlock) (missingBlocks []t
templateId := block.SideTemplateId(c.Consensus())
if block.ShareVersion() > ShareVersion_V2 {
if block.ShareVersion() >= ShareVersion_V3 {
if templateId != block.FastSideTemplateId(c.Consensus()) {
return nil, fmt.Errorf("invalid template id %s, expected %s", block.FastSideTemplateId(c.Consensus()), templateId), true
}
@ -499,7 +499,7 @@ func (c *SideChain) AddPoolBlock(block *PoolBlock) (err error) {
c.blocksByHeightKeys = append(c.blocksByHeightKeys, block.Side.Height)
}
if block.ShareVersion() > ShareVersion_V2 {
if block.ShareVersion() >= ShareVersion_V3 {
c.blocksByMerkleRoot.Put(block.MergeMiningTag().RootHash, block)
}
@ -552,7 +552,7 @@ func (c *SideChain) verifyLoop(blockToVerify *PoolBlock) (err error) {
block.Verified.Store(true)
block.Invalid.Store(false)
if block.ShareVersion() > ShareVersion_V1 {
if block.ShareVersion() >= ShareVersion_V2 {
utils.Logf("SideChain", "verified block at height = %d, depth = %d, id = %s, mainchain height = %d, mined by %s via %s %s", block.Side.Height, block.Depth.Load(), block.SideTemplateId(c.Consensus()), block.Main.Coinbase.GenHeight, block.GetAddress().ToBase58(c.Consensus().NetworkType.AddressNetwork()), block.Side.ExtraBuffer.SoftwareId, block.Side.ExtraBuffer.SoftwareVersion)
} else {
if signalingVersion := block.ShareVersionSignaling(); signalingVersion > ShareVersion_None {
@ -627,7 +627,7 @@ func (c *SideChain) verifyBlock(block *PoolBlock) (verification error, invalid e
len(block.Side.Uncles) != 0 ||
block.Side.Difficulty.Cmp64(c.Consensus().MinimumDifficulty) != 0 ||
block.Side.CumulativeDifficulty.Cmp64(c.Consensus().MinimumDifficulty) != 0 ||
(block.ShareVersion() > ShareVersion_V1 && block.Side.CoinbasePrivateKeySeed != c.Consensus().Id) {
(block.ShareVersion() >= ShareVersion_V2 && block.Side.CoinbasePrivateKeySeed != c.Consensus().Id) {
return nil, errors.New("genesis block has invalid parameters")
}
//this does not verify coinbase outputs, but that's fine
@ -660,7 +660,7 @@ func (c *SideChain) verifyBlock(block *PoolBlock) (verification error, invalid e
return nil, errors.New("parent is invalid")
}
if block.ShareVersion() > ShareVersion_V1 {
if block.ShareVersion() >= ShareVersion_V2 {
expectedSeed := parent.Side.CoinbasePrivateKeySeed
if parent.Main.PreviousId != block.Main.PreviousId {
expectedSeed = parent.CalculateTransactionPrivateKeySeed()
@ -1037,7 +1037,7 @@ func (c *SideChain) pruneOldBlocks() {
utils.Logf("SideChain", "blocksByHeight and blocksByTemplateId are inconsistent at height = %d, id = %s", height, templateId)
}
if block.ShareVersion() > ShareVersion_V2 {
if block.ShareVersion() >= ShareVersion_V3 {
rootHash := block.MergeMiningTag().RootHash
if c.blocksByMerkleRoot.Has(rootHash) {
c.blocksByMerkleRoot.Delete(rootHash)

View file

@ -250,13 +250,19 @@ func TestMain(m *testing.M) {
if isBenchmark {
benchLoadedSideChain = NewSideChain(GetFakeTestServer(ConsensusDefault))
f, err := os.Open("testdata/sidechain_dump.dat")
f, err := os.Open("testdata/sidechain_dump.dat.gz")
if err != nil {
panic(err)
}
defer f.Close()
testSideChain(benchLoadedSideChain, nil, f, 4957203, 2870010)
r, err := gzip.NewReader(f)
if err != nil {
panic(err)
}
defer r.Close()
testSideChain(benchLoadedSideChain, nil, r, 4957203, 2870010)
tip := benchLoadedSideChain.GetChainTip()
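The benchmark fixture is now stored gzip-compressed and decompressed on the fly with compress/gzip. A minimal sketch of how such a fixture could be produced from an uncompressed dump; the file names are illustrative and the dump format itself is untouched:

package main

import (
	"compress/gzip"
	"io"
	"os"
)

func main() {
	in, err := os.Open("testdata/sidechain_dump.dat")
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("testdata/sidechain_dump.dat.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	w := gzip.NewWriter(out)
	if _, err = io.Copy(w, in); err != nil {
		panic(err)
	}
	if err = w.Close(); err != nil { // flush the gzip trailer
		panic(err)
	}
}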

View file

@ -10,20 +10,26 @@ import (
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"io"
"math"
)
// MaxMerkleProofSize Maximum number of proof hashes in the field
// TODO: generate this from merkle proof parameters and slots?
const MaxMerkleProofSize = 7
const MaxUncleCount = uint64(math.MaxUint64) / types.HashSize
type SideData struct {
PublicKey address.PackedAddress `json:"public_key"`
CoinbasePrivateKeySeed types.Hash `json:"coinbase_private_key_seed,omitempty"`
// CoinbasePrivateKey filled or calculated on decoding
CoinbasePrivateKey crypto.PrivateKeyBytes `json:"coinbase_private_key"`
Parent types.Hash `json:"parent"`
Uncles []types.Hash `json:"uncles,omitempty"`
Height uint64 `json:"height"`
Difficulty types.Difficulty `json:"difficulty"`
CumulativeDifficulty types.Difficulty `json:"cumulative_difficulty"`
CoinbasePrivateKey crypto.PrivateKeyBytes `json:"coinbase_private_key"`
// Parent Template Id of the parent of this share, or zero if genesis
Parent types.Hash `json:"parent"`
// Uncles List of Template Ids of the uncles this share contains
Uncles []types.Hash `json:"uncles,omitempty"`
Height uint64 `json:"height"`
Difficulty types.Difficulty `json:"difficulty"`
CumulativeDifficulty types.Difficulty `json:"cumulative_difficulty"`
// MerkleProof Merkle proof for merge mining, available in ShareVersion ShareVersion_V3 and above
MerkleProof crypto.MerkleProof `json:"merkle_proof,omitempty"`
@ -40,8 +46,7 @@ type SideDataExtraBuffer struct {
}
func (b *SideData) BufferLength(version ShareVersion) (size int) {
size = crypto.PublicKeySize +
crypto.PublicKeySize +
size = crypto.PublicKeySize*2 +
types.HashSize +
crypto.PrivateKeySize +
utils.UVarInt64Size(len(b.Uncles)) + len(b.Uncles)*types.HashSize +
@ -49,11 +54,11 @@ func (b *SideData) BufferLength(version ShareVersion) (size int) {
utils.UVarInt64Size(b.Difficulty.Lo) + utils.UVarInt64Size(b.Difficulty.Hi) +
utils.UVarInt64Size(b.CumulativeDifficulty.Lo) + utils.UVarInt64Size(b.CumulativeDifficulty.Hi)
if version > ShareVersion_V1 {
if version >= ShareVersion_V2 {
// ExtraBuffer
size += 4 * 4
}
if version > ShareVersion_V2 {
if version >= ShareVersion_V3 {
// MerkleProof
size += utils.UVarInt64Size(len(b.MerkleProof)) + len(b.MerkleProof)*types.HashSize
}
@ -69,7 +74,7 @@ func (b *SideData) AppendBinary(preAllocatedBuf []byte, version ShareVersion) (b
buf = preAllocatedBuf
buf = append(buf, b.PublicKey[address.PackedAddressSpend][:]...)
buf = append(buf, b.PublicKey[address.PackedAddressView][:]...)
if version > ShareVersion_V1 {
if version >= ShareVersion_V2 {
buf = append(buf, b.CoinbasePrivateKeySeed[:]...)
} else {
buf = append(buf, b.CoinbasePrivateKey[:]...)
@ -85,7 +90,7 @@ func (b *SideData) AppendBinary(preAllocatedBuf []byte, version ShareVersion) (b
buf = binary.AppendUvarint(buf, b.CumulativeDifficulty.Lo)
buf = binary.AppendUvarint(buf, b.CumulativeDifficulty.Hi)
if version > ShareVersion_V2 {
if version >= ShareVersion_V3 {
if len(b.MerkleProof) > MaxMerkleProofSize {
return nil, fmt.Errorf("merkle proof too large: %d > %d", len(b.MerkleProof), MaxMerkleProofSize)
}
@ -95,7 +100,7 @@ func (b *SideData) AppendBinary(preAllocatedBuf []byte, version ShareVersion) (b
}
}
if version > ShareVersion_V1 {
if version >= ShareVersion_V2 {
buf = binary.LittleEndian.AppendUint32(buf, uint32(b.ExtraBuffer.SoftwareId))
buf = binary.LittleEndian.AppendUint32(buf, uint32(b.ExtraBuffer.SoftwareVersion))
buf = binary.LittleEndian.AppendUint32(buf, b.ExtraBuffer.RandomNumber)
@ -111,8 +116,8 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
uncleHash types.Hash
merkleProofSize uint8
merkleProofHash types.Hash
)
if _, err = io.ReadFull(reader, b.PublicKey[address.PackedAddressSpend][:]); err != nil {
return err
}
@ -120,8 +125,9 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
return err
}
if version > ShareVersion_V1 {
//needs preprocessing
if version >= ShareVersion_V2 {
// Read private key seed instead of private key. Only on ShareVersion_V2 and above
// needs preprocessing
if _, err = io.ReadFull(reader, b.CoinbasePrivateKeySeed[:]); err != nil {
return err
}
@ -130,25 +136,35 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
return err
}
}
if _, err = io.ReadFull(reader, b.Parent[:]); err != nil {
return err
}
if uncleCount, err = binary.ReadUvarint(reader); err != nil {
return err
}
} else if uncleCount > MaxUncleCount {
return fmt.Errorf("uncle count too large: %d > %d", uncleCount, MaxUncleCount)
} else if uncleCount > 0 {
// preallocate for append, with 64 as soft limit
b.Uncles = make([]types.Hash, 0, min(64, uncleCount))
for i := 0; i < int(uncleCount); i++ {
if _, err = io.ReadFull(reader, uncleHash[:]); err != nil {
return err
for i := 0; i < int(uncleCount); i++ {
if _, err = io.ReadFull(reader, uncleHash[:]); err != nil {
return err
}
b.Uncles = append(b.Uncles, uncleHash)
}
//TODO: check if copy is needed
b.Uncles = append(b.Uncles, uncleHash)
}
if b.Height, err = binary.ReadUvarint(reader); err != nil {
return err
}
if b.Height > PoolBlockMaxSideChainHeight {
return fmt.Errorf("side block height too high (%d > %d)", b.Height, PoolBlockMaxSideChainHeight)
}
{
if b.Difficulty.Lo, err = binary.ReadUvarint(reader); err != nil {
return err
@ -169,24 +185,30 @@ func (b *SideData) FromReader(reader utils.ReaderAndByteReader, version ShareVer
}
}
if version > ShareVersion_V2 {
if b.CumulativeDifficulty.Cmp(PoolBlockMaxCumulativeDifficulty) > 0 {
return fmt.Errorf("side block cumulative difficulty too large (%s > %s)", b.CumulativeDifficulty.StringNumeric(), PoolBlockMaxCumulativeDifficulty.StringNumeric())
}
// Read merkle proof list of hashes. Only on ShareVersion_V3 and above
if version >= ShareVersion_V3 {
if merkleProofSize, err = reader.ReadByte(); err != nil {
return err
}
if merkleProofSize > MaxMerkleProofSize {
return fmt.Errorf("merkle proof too large: %d > %d", len(b.MerkleProof), MaxMerkleProofSize)
}
b.MerkleProof = make(crypto.MerkleProof, 0, merkleProofSize)
} else if merkleProofSize > MaxMerkleProofSize {
return fmt.Errorf("merkle proof too large: %d > %d", merkleProofSize, MaxMerkleProofSize)
} else if merkleProofSize > 0 {
// preallocate
b.MerkleProof = make(crypto.MerkleProof, merkleProofSize)
for i := 0; i < int(merkleProofSize); i++ {
if _, err = io.ReadFull(reader, merkleProofHash[:]); err != nil {
return err
for i := 0; i < int(merkleProofSize); i++ {
if _, err = io.ReadFull(reader, b.MerkleProof[i][:]); err != nil {
return err
}
}
b.MerkleProof = append(b.MerkleProof, merkleProofHash)
}
}
if version > ShareVersion_V1 {
// Read share extra buffer. Only on ShareVersion_V2 and above
if version >= ShareVersion_V2 {
if err = binary.Read(reader, binary.LittleEndian, &b.ExtraBuffer.SoftwareId); err != nil {
return fmt.Errorf("within extra buffer: %w", err)
}
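Two preallocation styles appear in this reader: the uncle list keeps a zero-length make plus append under a soft capacity cap, while the merkle proof is now sized to its exact, already-validated length and each hash is read directly into its slot, dropping the temporary hash variable. A minimal sketch of the read-in-place pattern with a stand-in hash type:

package sketch

import "io"

type hash [4]byte // stand-in for a 32-byte hash

// readInPlace sizes the result to its final length and reads each element directly
// into its slot, avoiding both an intermediate hash copy and append growth.
func readInPlace(r io.Reader, n int) ([]hash, error) {
	out := make([]hash, n)
	for i := 0; i < n; i++ {
		if _, err := io.ReadFull(r, out[i][:]); err != nil {
			return nil, err
		}
	}
	return out, nil
}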

View file

@ -10,7 +10,6 @@ import (
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"git.gammaspectra.live/P2Pool/consensus/v3/utils"
"git.gammaspectra.live/P2Pool/sha3"
"math"
"math/bits"
"slices"
)
@ -104,7 +103,7 @@ func IterateBlocksInPPLNSWindow(tip *PoolBlock, consensus *Consensus, difficulty
maxPplnsWeight := types.MaxDifficulty
if sidechainVersion > ShareVersion_V1 {
if sidechainVersion >= ShareVersion_V2 {
maxPplnsWeight = mainchainDiff.Mul64(2)
}
@ -201,7 +200,7 @@ func BlocksInPPLNSWindow(tip *PoolBlock, consensus *Consensus, difficultyByHeigh
maxPplnsWeight := types.MaxDifficulty
if sidechainVersion > ShareVersion_V1 {
if sidechainVersion >= ShareVersion_V2 {
maxPplnsWeight = mainchainDiff.Mul64(2)
}
@ -318,7 +317,7 @@ func ShuffleShares[T any](shares []T, shareVersion ShareVersion, privateKeySeed
// ShuffleSequence Iterates through a swap sequence according to consensus parameters.
func ShuffleSequence(shareVersion ShareVersion, privateKeySeed types.Hash, items int, swap func(i, j int)) {
n := uint64(items)
if shareVersion > ShareVersion_V1 && n > 1 {
if shareVersion >= ShareVersion_V2 && n > 1 {
seed := crypto.PooledKeccak256(privateKeySeed[:]).Uint64()
if seed == 0 {
@ -428,17 +427,18 @@ func NextDifficulty(consensus *Consensus, timestamps []uint64, difficultyData []
deltaTimestamp = timestampUpperBound - timestampLowerBound
}
var minDifficulty = types.Difficulty{Hi: math.MaxUint64, Lo: math.MaxUint64}
var maxDifficulty types.Difficulty
minDifficulty := types.MaxDifficulty
maxDifficulty := types.ZeroDifficulty
for i := range difficultyData {
dd := &difficultyData[i]
// Pick only the cumulative difficulty from specifically the entries that are within the timestamp upper and low bounds
if timestampLowerBound <= difficultyData[i].timestamp && difficultyData[i].timestamp <= timestampUpperBound {
if minDifficulty.Cmp(difficultyData[i].cumulativeDifficulty) > 0 {
minDifficulty = difficultyData[i].cumulativeDifficulty
if timestampLowerBound <= dd.timestamp && dd.timestamp <= timestampUpperBound {
if minDifficulty.Cmp(dd.cumulativeDifficulty) > 0 {
minDifficulty = dd.cumulativeDifficulty
}
if maxDifficulty.Cmp(difficultyData[i].cumulativeDifficulty) < 0 {
maxDifficulty = difficultyData[i].cumulativeDifficulty
if maxDifficulty.Cmp(dd.cumulativeDifficulty) < 0 {
maxDifficulty = dd.cumulativeDifficulty
}
}
}
@ -472,6 +472,7 @@ func SplitRewardAllocate(reward uint64, shares Shares) (rewards []uint64) {
func SplitReward(preAllocatedRewards []uint64, reward uint64, shares Shares) (rewards []uint64) {
var totalWeight types.Difficulty
for i := range shares {
totalWeight = totalWeight.Add(shares[i].Weight)
}
@ -481,16 +482,29 @@ func SplitReward(preAllocatedRewards []uint64, reward uint64, shares Shares) (re
return nil
}
rewards = preAllocatedRewards[:0]
var w types.Difficulty
var rewardGiven uint64
for _, share := range shares {
w = w.Add(share.Weight)
nextValue := w.Mul64(reward).Div(totalWeight)
rewards = append(rewards, nextValue.Lo-rewardGiven)
rewardGiven = nextValue.Lo
rewards = slices.Grow(preAllocatedRewards, len(shares))[:len(shares)]
if totalWeight.Hi == 0 {
//fast path for 64-bit ops
var w, hi, lo uint64
for i, share := range shares {
w += share.Weight.Lo
hi, lo = bits.Mul64(w, reward)
//nextValue
_, lo = utils.Div128(hi, lo, totalWeight.Lo)
rewards[i] = lo - rewardGiven
rewardGiven = lo
}
} else {
var w types.Difficulty
for i, share := range shares {
w = w.Add(share.Weight)
nextValue := w.Mul64(reward).Div(totalWeight)
rewards[i] = nextValue.Lo - rewardGiven
rewardGiven = nextValue.Lo
}
}
// Double check that we gave out the exact amount
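The new fast path applies when the total weight fits in 64 bits: each miner's cumulative payout is floor(w * reward / totalWeight), computed exactly through a 128-bit intermediate with bits.Mul64 and the new utils.Div128, and each individual reward is the difference between consecutive cumulative payouts, so rounding never loses or creates coins. A self-contained sketch of that computation, with plain uint64 weights standing in for the Shares type:

package main

import (
	"fmt"
	"math/bits"
)

// div128 mirrors the new utils.Div128: it divides the 128-bit value (hi, lo) by y
// and keeps only the quotient.
func div128(hi, lo, y uint64) (hiQuo, loQuo uint64) {
	if hi < y {
		loQuo, _ = bits.Div64(hi, lo, y)
		return 0, loQuo
	}
	hiQuo = hi / y
	loQuo, _ = bits.Div64(hi%y, lo, y)
	return hiQuo, loQuo
}

// splitReward64 splits reward proportionally to weights. Using cumulative values
// guarantees the parts sum to exactly reward despite integer rounding.
func splitReward64(reward uint64, weights []uint64) []uint64 {
	var total uint64
	for _, w := range weights {
		total += w
	}
	out := make([]uint64, len(weights))
	var cum, given uint64
	for i, w := range weights {
		cum += w
		hi, lo := bits.Mul64(cum, reward) // 128-bit product cum*reward
		_, q := div128(hi, lo, total)     // floor(cum*reward/total), fits in 64 bits here
		out[i] = q - given
		given = q
	}
	return out
}

func main() {
	fmt.Println(splitReward64(1_000_000, []uint64{1, 2, 3, 4})) // [100000 200000 300000 400000]
}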

p2pool/stratum/mapping.go Normal file (100 lines)
View file

@ -0,0 +1,100 @@
package stratum
import (
"git.gammaspectra.live/P2Pool/consensus/v3/p2pool/sidechain"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
"slices"
)
const ShuffleMappingZeroKeyIndex = 0
type ShuffleMapping struct {
// Including is the index mapping used when a new miner is included in the list
Including []int
// Excluding is the index mapping used when no new miner is included.
// len(Excluding) == len(Including) - 1 (unless len(Including) == 1, where it is also 1)
Excluding []int
}
// BuildShuffleMapping Creates a mapping of source to destination miner output post-shuffle
// This uses two mappings: one where a new miner is added to the list, and one where the count stays the same
// Usual usage places the Zero key at index 0
func BuildShuffleMapping(n int, shareVersion sidechain.ShareVersion, transactionPrivateKeySeed types.Hash, oldMappings ShuffleMapping) (mappings ShuffleMapping) {
if n <= 1 {
return ShuffleMapping{
Including: []int{0},
Excluding: []int{0},
}
}
shuffleSequence1 := make([]int, n)
for i := range shuffleSequence1 {
shuffleSequence1[i] = i
}
shuffleSequence2 := make([]int, n-1)
for i := range shuffleSequence2 {
shuffleSequence2[i] = i
}
sidechain.ShuffleSequence(shareVersion, transactionPrivateKeySeed, n, func(i, j int) {
shuffleSequence1[i], shuffleSequence1[j] = shuffleSequence1[j], shuffleSequence1[i]
})
sidechain.ShuffleSequence(shareVersion, transactionPrivateKeySeed, n-1, func(i, j int) {
shuffleSequence2[i], shuffleSequence2[j] = shuffleSequence2[j], shuffleSequence2[i]
})
mappings.Including = slices.Grow(oldMappings.Including, n)[:n]
mappings.Excluding = slices.Grow(oldMappings.Excluding, n-1)[:n-1]
for i := range shuffleSequence1 {
mappings.Including[shuffleSequence1[i]] = i
}
//Flip
for i := range shuffleSequence2 {
mappings.Excluding[shuffleSequence2[i]] = i
}
return mappings
}
// ApplyShuffleMapping Applies a shuffle mapping depending on source length
// Returns nil in case no source length matches shuffle mapping
func ApplyShuffleMapping[T any](v []T, mappings ShuffleMapping) []T {
n := len(v)
result := make([]T, n)
if n == len(mappings.Including) {
for i := range v {
result[mappings.Including[i]] = v[i]
}
} else if n == len(mappings.Excluding) {
for i := range v {
result[mappings.Excluding[i]] = v[i]
}
} else {
return nil
}
return result
}
type ShuffleMappingIndices [][3]int
func (m ShuffleMapping) RangePossibleIndices(f func(i, ix0, ix1, ix2 int)) {
n := len(m.Including)
var ix0, ix1, ix2 int
for i := 0; i < n; i++ {
// Count with all + miner
ix0 = m.Including[i]
if i > ShuffleMappingZeroKeyIndex {
// Count with all + miner shifted to a slot before
ix1 = m.Including[i-1]
// Count with all miners minus one
ix2 = m.Excluding[i-1]
} else {
ix1 = -1
ix2 = -1
}
f(i, ix0, ix1, ix2)
}
}
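BuildShuffleMapping is computed once per template and then applied to any slice whose length matches one of the two mappings: Including covers the window with a new miner added, Excluding covers the unchanged window, and RangePossibleIndices enumerates, per source position, the up to three destination slots those two cases can produce. A short hedged usage sketch; the stratum import path is inferred from the file location, and the zero seed plus ShareVersion_V2 mirror the test below:

package main

import (
	"fmt"

	"git.gammaspectra.live/P2Pool/consensus/v3/p2pool/sidechain"
	"git.gammaspectra.live/P2Pool/consensus/v3/p2pool/stratum"
	"git.gammaspectra.live/P2Pool/consensus/v3/types"
)

func main() {
	m := stratum.BuildShuffleMapping(4, sidechain.ShareVersion_V2, types.Hash{}, stratum.ShuffleMapping{})

	// len == len(m.Including): the window plus a new miner (zero key at index 0)
	fmt.Println(stratum.ApplyShuffleMapping([]string{"reserved", "a", "b", "c"}, m))
	// len == len(m.Excluding): the window without the new miner
	fmt.Println(stratum.ApplyShuffleMapping([]string{"a", "b", "c"}, m))
	// any other length yields nil
	fmt.Println(stratum.ApplyShuffleMapping([]string{"a", "b"}, m))

	m.RangePossibleIndices(func(i, ix0, ix1, ix2 int) {
		// ix0: slot with all shares plus the new miner
		// ix1: same, shifted one slot earlier (-1 at the zero-key index)
		// ix2: slot with one miner fewer (-1 at the zero-key index)
		fmt.Println(i, ix0, ix1, ix2)
	})
}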

View file

@ -0,0 +1,40 @@
package stratum
import (
"git.gammaspectra.live/P2Pool/consensus/v3/p2pool/sidechain"
"slices"
"testing"
)
func TestShuffleMapping(t *testing.T) {
const n = 16
// Shuffle only exists on ShareVersion_V2 and above
// TODO: whenever different consensus shuffle is added, add test for it
const shareVersion = sidechain.ShareVersion_V2
var seed = zeroExtraBaseRCTHash
mappings := BuildShuffleMapping(n, shareVersion, seed, ShuffleMapping{})
seq := make([]int, n)
for i := range seq {
seq[i] = i
}
seq1 := slices.Clone(seq)
//test that a regular shuffle corresponds to applying the shuffle mapping
sidechain.ShuffleShares(seq1, shareVersion, seed)
seq2 := ApplyShuffleMapping(seq, mappings)
if slices.Compare(seq1, seq2) != 0 {
for i := range seq1 {
if seq1[i] != seq2[i] {
t.Logf("%d %d *** @ %d", seq1[i], seq2[i], i)
} else {
t.Logf("%d %d @ %d", seq1[i], seq2[i], i)
}
}
t.Fatal()
}
}

View file

@ -56,7 +56,7 @@ type NewTemplateData struct {
Window struct {
ReservedShareIndex int
Shares sidechain.Shares
ShuffleMapping [2][]int
ShuffleMapping ShuffleMapping
EphemeralPubKeyCache map[ephemeralPubKeyCacheKey]*ephemeralPubKeyCacheEntry
}
}
@ -324,7 +324,7 @@ func (s *Server) fillNewTemplateData(currentDifficulty types.Difficulty) error {
}
s.newTemplateData.TotalReward = finalReward
s.newTemplateData.Window.ShuffleMapping = BuildShuffleMapping(len(s.newTemplateData.Window.Shares), s.newTemplateData.ShareVersion, s.newTemplateData.TransactionPrivateKeySeed)
s.newTemplateData.Window.ShuffleMapping = BuildShuffleMapping(len(s.newTemplateData.Window.Shares), s.newTemplateData.ShareVersion, s.newTemplateData.TransactionPrivateKeySeed, s.newTemplateData.Window.ShuffleMapping)
s.newTemplateData.Window.EphemeralPubKeyCache = make(map[ephemeralPubKeyCacheKey]*ephemeralPubKeyCacheEntry)
@ -334,29 +334,34 @@ func (s *Server) fillNewTemplateData(currentDifficulty types.Difficulty) error {
//TODO: parallelize this
hasher := crypto.GetKeccak256Hasher()
defer crypto.PutKeccak256Hasher(hasher)
for i, m := range PossibleIndicesForShuffleMapping(s.newTemplateData.Window.ShuffleMapping) {
if i == 0 {
// Skip zero key
continue
}
share := s.newTemplateData.Window.Shares[i]
var k ephemeralPubKeyCacheKey
copy(k[:], share.Address.Bytes())
// generate ephemeral pubkeys based on indices
for _, index := range m {
if index == -1 {
continue
}
binary.LittleEndian.PutUint64(k[crypto.PublicKeySize*2:], uint64(index))
if e, ok := oldPubKeyCache[k]; ok {
s.newTemplateData.Window.EphemeralPubKeyCache[k] = e
} else {
var e ephemeralPubKeyCacheEntry
e.PublicKey, e.ViewTag = s.sidechain.DerivationCache().GetEphemeralPublicKey(&share.Address, txPrivateKeySlice, txPrivateKeyScalar, uint64(index), hasher)
s.newTemplateData.Window.EphemeralPubKeyCache[k] = &e
}
var tempPubKey ephemeralPubKeyCacheKey
generateEphPubKeyForIndex := func(index int, addr *address.PackedAddress) {
if index == -1 {
return
}
copy(tempPubKey[:], addr.Bytes())
binary.LittleEndian.PutUint64(tempPubKey[crypto.PublicKeySize*2:], uint64(index))
if e, ok := oldPubKeyCache[tempPubKey]; ok {
s.newTemplateData.Window.EphemeralPubKeyCache[tempPubKey] = e
} else {
var e ephemeralPubKeyCacheEntry
e.PublicKey, e.ViewTag = s.sidechain.DerivationCache().GetEphemeralPublicKey(addr, txPrivateKeySlice, txPrivateKeyScalar, uint64(index), hasher)
s.newTemplateData.Window.EphemeralPubKeyCache[tempPubKey] = &e
}
}
s.newTemplateData.Window.ShuffleMapping.RangePossibleIndices(func(i int, ix0, ix1, ix2 int) {
if i == ShuffleMappingZeroKeyIndex {
// Skip zero key
return
}
share := s.newTemplateData.Window.Shares[i]
generateEphPubKeyForIndex(ix0, &share.Address)
generateEphPubKeyForIndex(ix1, &share.Address)
generateEphPubKeyForIndex(ix2, &share.Address)
})
s.newTemplateData.Ready = true
@ -364,78 +369,6 @@ func (s *Server) fillNewTemplateData(currentDifficulty types.Difficulty) error {
}
func BuildShuffleMapping(n int, shareVersion sidechain.ShareVersion, transactionPrivateKeySeed types.Hash) (mappings [2][]int) {
if n <= 1 {
return [2][]int{{0}, {0}}
}
shuffleSequence1 := make([]int, n)
for i := range shuffleSequence1 {
shuffleSequence1[i] = i
}
shuffleSequence2 := make([]int, n-1)
for i := range shuffleSequence2 {
shuffleSequence2[i] = i
}
sidechain.ShuffleSequence(shareVersion, transactionPrivateKeySeed, n, func(i, j int) {
shuffleSequence1[i], shuffleSequence1[j] = shuffleSequence1[j], shuffleSequence1[i]
})
sidechain.ShuffleSequence(shareVersion, transactionPrivateKeySeed, n-1, func(i, j int) {
shuffleSequence2[i], shuffleSequence2[j] = shuffleSequence2[j], shuffleSequence2[i]
})
mappings[0] = make([]int, n)
mappings[1] = make([]int, n-1)
//Flip
for i := range shuffleSequence1 {
mappings[0][shuffleSequence1[i]] = i
}
for i := range shuffleSequence2 {
mappings[1][shuffleSequence2[i]] = i
}
return mappings
}
func ApplyShuffleMapping[T any](v []T, mappings [2][]int) []T {
n := len(v)
result := make([]T, n)
if n == len(mappings[0]) {
for i := range v {
result[mappings[0][i]] = v[i]
}
} else if n == len(mappings[1]) {
for i := range v {
result[mappings[1][i]] = v[i]
}
}
return result
}
func PossibleIndicesForShuffleMapping(mappings [2][]int) [][3]int {
n := len(mappings[0])
result := make([][3]int, n)
for i := 0; i < n; i++ {
// Count with all + miner
result[i][0] = mappings[0][i]
if i > 0 {
// Count with all + miner shifted to a slot before
result[i][1] = mappings[0][i-1]
// Count with all miners minus one
result[i][2] = mappings[1][i-1]
} else {
result[i][1] = -1
result[i][2] = -1
}
}
return result
}
func (s *Server) BuildTemplate(addr address.PackedAddress, forceNewTemplate bool) (tpl *Template, jobCounter uint64, difficultyTarget types.Difficulty, seedHash types.Hash, err error) {
var zeroAddress address.PackedAddress
@ -970,6 +903,8 @@ func (s *Server) Listen(listen string) error {
RpcId: rpcId,
Conn: conn,
decoder: utils.NewJSONDecoder(conn),
// Default to donation address if not specified
Address: address.FromBase58(types.DonationAddress).ToPackedAddress(),
}
// Use deadline
@ -1025,6 +960,7 @@ func (s *Server) Listen(listen string) error {
client.RigId = str
}
if str, ok := m["login"].(string); ok {
//TODO: support merge mining addresses
a := address.FromBase58(str)
if a != nil && a.Network == addressNetwork {
client.Address = a.ToPackedAddress()

View file

@ -3,6 +3,7 @@ package stratum
import (
"compress/gzip"
"fmt"
"git.gammaspectra.live/P2Pool/consensus/v3/monero"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/address"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/client"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/crypto"
@ -14,7 +15,6 @@ import (
"os"
"path"
"runtime"
"slices"
"testing"
"time"
_ "unsafe"
@ -24,6 +24,14 @@ var preLoadedMiniSideChain *sidechain.SideChain
var preLoadedPoolBlock *sidechain.PoolBlock
var submitBlockFunc = func(block *sidechain.PoolBlock) (err error) {
if blob, err := block.MarshalBinary(); err == nil {
_, err = client.GetDefaultClient().SubmitBlock(blob)
return err
}
return err
}
func init() {
utils.GlobalLogLevel = 0
@ -60,6 +68,8 @@ func getMinerData() *p2pooltypes.MinerData {
}
func TestMain(m *testing.M) {
client.SetDefaultClientSettings(os.Getenv("MONEROD_RPC_URL"))
if buf, err := os.ReadFile("testdata/block.dat"); err != nil {
panic(err)
} else {
@ -70,7 +80,6 @@ func TestMain(m *testing.M) {
}
_ = sidechain.ConsensusMini.InitHasher(2)
client.SetDefaultClientSettings(os.Getenv("MONEROD_RPC_URL"))
preLoadedMiniSideChain = sidechain.NewSideChain(sidechain.GetFakeTestServer(sidechain.ConsensusMini))
@ -96,9 +105,7 @@ func TestMain(m *testing.M) {
}
func TestStratumServer(t *testing.T) {
stratumServer := NewServer(preLoadedMiniSideChain, func(block *sidechain.PoolBlock) error {
return nil
})
stratumServer := NewServer(preLoadedMiniSideChain, submitBlockFunc)
minerData := getMinerData()
tip := preLoadedMiniSideChain.GetChainTip()
stratumServer.HandleMinerData(minerData)
@ -144,40 +151,85 @@ func TestStratumServer(t *testing.T) {
}
}
func TestShuffleMapping(t *testing.T) {
const n = 16
const shareVersion = sidechain.ShareVersion_V2
var seed = zeroExtraBaseRCTHash
mappings := BuildShuffleMapping(n, shareVersion, seed)
seq := make([]int, n)
for i := range seq {
seq[i] = i
func TestStratumServer_GenesisV2(t *testing.T) {
consensus := sidechain.NewConsensus(sidechain.NetworkMainnet, "test", "", "", 10, 1000, 100, 20)
consensus.HardForks = []monero.HardFork{
{uint8(sidechain.ShareVersion_V2), 0, 0, 0},
}
seq1 := slices.Clone(seq)
err := consensus.InitHasher(1)
if err != nil {
t.Fatal(err)
}
defer consensus.GetHasher().Close()
sidechain.ShuffleShares(seq1, shareVersion, seed)
seq2 := ApplyShuffleMapping(seq, mappings)
sideChain := sidechain.NewSideChain(sidechain.GetFakeTestServer(consensus))
if slices.Compare(seq1, seq2) != 0 {
for i := range seq1 {
if seq1[i] != seq2[i] {
t.Logf("%d %d *** @ %d", seq1[i], seq2[i], i)
} else {
t.Logf("%d %d @ %d", seq1[i], seq2[i], i)
stratumServer := NewServer(sideChain, submitBlockFunc)
minerData := getMinerData()
stratumServer.HandleMinerData(minerData)
func() {
//Process all incoming changes first
for {
select {
case f := <-stratumServer.incomingChanges:
if f() {
stratumServer.Update()
}
default:
return
}
}
}()
tpl, _, _, seedHash, err := stratumServer.BuildTemplate(address.FromBase58(types.DonationAddress).ToPackedAddress(), false)
if err != nil {
t.Fatal(err)
}
if seedHash != minerData.SeedHash {
t.Fatal()
}
if tpl.MainHeight != minerData.Height {
t.Fatal()
}
if tpl.MainParent != minerData.PrevId {
t.Fatal()
}
// verify genesis parameters
if tpl.SideHeight != 0 {
t.Fatal()
}
if tpl.SideParent != types.ZeroHash {
t.Fatal()
}
sideData, err := tpl.SideData(consensus)
if err != nil {
t.Fatal(err)
}
if sideData.CoinbasePrivateKeySeed != consensus.Id {
t.Fatal()
}
if sideData.CumulativeDifficulty.Cmp64(consensus.MinimumDifficulty) != 0 {
t.Fatal()
}
if sideData.Difficulty.Cmp64(consensus.MinimumDifficulty) != 0 {
t.Fatal()
}
}
func BenchmarkServer_FillTemplate(b *testing.B) {
stratumServer := NewServer(preLoadedMiniSideChain, func(block *sidechain.PoolBlock) error {
return nil
})
stratumServer := NewServer(preLoadedMiniSideChain, submitBlockFunc)
minerData := getMinerData()
tip := preLoadedMiniSideChain.GetChainTip()
stratumServer.minerData = minerData
@ -206,9 +258,7 @@ func BenchmarkServer_FillTemplate(b *testing.B) {
}
func BenchmarkServer_BuildTemplate(b *testing.B) {
stratumServer := NewServer(preLoadedMiniSideChain, func(block *sidechain.PoolBlock) error {
return nil
})
stratumServer := NewServer(preLoadedMiniSideChain, submitBlockFunc)
minerData := getMinerData()
tip := preLoadedMiniSideChain.GetChainTip()
stratumServer.minerData = minerData

View file

@ -3,6 +3,7 @@ package stratum
import (
"encoding/binary"
"errors"
mainblock "git.gammaspectra.live/P2Pool/consensus/v3/monero/block"
"git.gammaspectra.live/P2Pool/consensus/v3/monero/crypto"
"git.gammaspectra.live/P2Pool/consensus/v3/p2pool/sidechain"
"git.gammaspectra.live/P2Pool/consensus/v3/types"
@ -28,6 +29,9 @@ type Template struct {
// TransactionsOffset Start of transactions section
TransactionsOffset int
// TemplateSideDataOffset Start of side data section
TemplateSideDataOffset int
// TemplateExtraBufferOffset offset of 4*uint32
TemplateExtraBufferOffset int
@ -44,6 +48,7 @@ type Template struct {
func (tpl *Template) Write(writer io.Writer, nonce, extraNonce, sideRandomNumber, sideExtraNonce uint32, templateId types.Hash) error {
var uint32Buf [4]byte
// write main data just before nonce
if _, err := writer.Write(tpl.Buffer[:tpl.NonceOffset]); err != nil {
return err
}
@ -53,6 +58,7 @@ func (tpl *Template) Write(writer io.Writer, nonce, extraNonce, sideRandomNumber
return err
}
// write main data just before extra nonce in coinbase
if _, err := writer.Write(tpl.Buffer[tpl.NonceOffset+4 : tpl.ExtraNonceOffset]); err != nil {
return err
}
@ -62,12 +68,17 @@ func (tpl *Template) Write(writer io.Writer, nonce, extraNonce, sideRandomNumber
return err
}
// write remaining main data, then write side data just before merge mining tag in coinbase
if _, err := writer.Write(tpl.Buffer[tpl.ExtraNonceOffset+4 : tpl.TemplateIdOffset]); err != nil {
return err
}
//todo: support merge mining merkle root hash
if _, err := writer.Write(templateId[:]); err != nil {
return err
}
// write main data and side data up to the end of side data extra
if _, err := writer.Write(tpl.Buffer[tpl.TemplateIdOffset+types.HashSize : tpl.TemplateExtraBufferOffset+4*2]); err != nil {
return err
}
@ -112,6 +123,22 @@ func (tpl *Template) TemplateId(hasher *sha3.HasherState, preAllocatedBuffer []b
hasher.Reset()
}
func (tpl *Template) MainBlock() (b mainblock.Block, err error) {
err = b.UnmarshalBinary(tpl.Buffer, false, nil)
if err != nil {
return b, err
}
return b, nil
}
func (tpl *Template) SideData(consensus *sidechain.Consensus) (d sidechain.SideData, err error) {
err = d.UnmarshalBinary(tpl.Buffer[tpl.TemplateSideDataOffset:], tpl.ShareVersion(consensus))
if err != nil {
return d, err
}
return d, nil
}
func (tpl *Template) Timestamp() uint64 {
t, _ := binary.Uvarint(tpl.Buffer[2:])
return t
@ -240,6 +267,7 @@ func TemplateFromPoolBlock(b *sidechain.PoolBlock) (tpl *Template, err error) {
tpl.ExtraNonceOffset = tpl.NonceOffset + 4 + (coinbaseLength - (b.Main.Coinbase.Extra[1].BufferLength() + b.Main.Coinbase.Extra[2].BufferLength() + 1)) + 1 + utils.UVarInt64Size(b.Main.Coinbase.Extra[1].VarInt)
tpl.TemplateIdOffset = tpl.NonceOffset + 4 + (coinbaseLength - (b.Main.Coinbase.Extra[2].BufferLength() + 1)) + 1 + utils.UVarInt64Size(b.Main.Coinbase.Extra[2].VarInt)
tpl.TemplateSideDataOffset = mainBufferLength
tpl.TemplateExtraBufferOffset = totalLen - 4*4
// Set places to zeroes where necessary

View file

@ -53,6 +53,39 @@ func TestDifficulty_CheckPoW(t *testing.T) {
}
}
func TestDifficulty_CheckPoW_Native(t *testing.T) {
if !moneroDifficulty.CheckPoW_Native(powHash) {
t.Errorf("%s does not pass PoW %s", powHash, moneroDifficulty)
}
if !sidechainDifficulty.CheckPoW_Native(powHash) {
t.Errorf("%s does not pass PoW %s", powHash, sidechainDifficulty)
}
if !powDifficulty.CheckPoW_Native(powHash) {
t.Errorf("%s does not pass PoW %s", powHash, powDifficulty)
}
powHash2 := powHash
powHash2[len(powHash2)-1]++
if moneroDifficulty.CheckPoW_Native(powHash2) {
t.Errorf("%s does pass PoW %s incorrectly", powHash2, moneroDifficulty)
}
if sidechainDifficulty.CheckPoW_Native(powHash2) {
t.Errorf("%s does pass PoW %s incorrectly", powHash2, sidechainDifficulty)
}
powHash3 := powHash
powHash3[len(powHash3)-9]++
if powDifficulty.CheckPoW_Native(powHash3) {
t.Errorf("%s does pass PoW %s incorrectly", powHash3, powDifficulty)
}
}
func BenchmarkDifficulty_CheckPoW(b *testing.B) {
b.ReportAllocs()

View file

@ -11,11 +11,11 @@ func PreviousPowerOfTwo(x uint64) int {
if x == 0 {
return 0
}
return 1 << (64 - bits.LeadingZeros64(x) - 1)
return 1 << (bits.Len64(x) - 1)
}
const (
VarIntLen1 uint64 = 1 << ((iota + 1) * 7)
VarIntLen1 = uint64(1 << ((iota + 1) * 7))
VarIntLen2
VarIntLen3
VarIntLen4
@ -57,7 +57,7 @@ func UVarInt64SliceSize[T uint64 | int](v []T) (n int) {
return
}
func UVarInt64Size[T uint64 | int](v T) (n int) {
func UVarInt64Size[T uint64 | int | uint8](v T) (n int) {
x := uint64(v)
if x < VarIntLen1 {

utils/uint128.go Normal file (13 lines)
View file

@ -0,0 +1,13 @@
package utils
import "math/bits"
func Div128(hi, lo, y uint64) (hiQuo, loQuo uint64) {
if hi < y {
loQuo, _ = bits.Div64(hi, lo, y)
} else {
hiQuo, loQuo = bits.Div64(0, hi, y)
loQuo, _ = bits.Div64(loQuo, lo, y)
}
return
}