Support large pages, implement aligned / paged / large paged allocators
Some checks failed
continuous-integration/drone/push Build is failing

DataHoarder 2024-05-02 16:11:08 +02:00
parent 9aa3631f37
commit faf152cb5e
Signed by: DataHoarder
SSH key fingerprint: SHA256:OLTRf6Fl87G52SiR7sWLGNzlJt4WOX+tfI2yxo0z7xk
21 changed files with 449 additions and 180 deletions


@ -1,7 +0,0 @@
package randomx
func assertAlignedTo16(ptr uintptr) {
if ptr&0b1111 != 0 {
panic("not aligned to 16")
}
}


@ -1,15 +1,16 @@
package randomx
import (
"errors"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/argon2"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/blake2"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/keys"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
"runtime"
"slices"
"unsafe"
)
type MemoryBlock [128]uint64
type MemoryBlock [argon2.BlockSize / 8]uint64
func (m *MemoryBlock) GetLine(addr uint64) *RegisterLine {
addr >>= 3
@ -17,7 +18,7 @@ func (m *MemoryBlock) GetLine(addr uint64) *RegisterLine {
}
type Cache struct {
blocks []MemoryBlock
blocks *[RANDOMX_ARGON_MEMORY]MemoryBlock
programs [RANDOMX_PROGRAM_COUNT]SuperScalarProgram
@ -44,10 +45,30 @@ type Cache struct {
// * (2) the RANDOMX_FLAG_JIT is set and JIT compilation is not supported on the current platform
// * (3) an invalid or unsupported RANDOMX_FLAG_ARGON2 value is set
// */
func NewCache(flags Flags) *Cache {
return &Cache{
flags: flags,
func NewCache(flags Flags) (c *Cache, err error) {
var blocks *[RANDOMX_ARGON_MEMORY]MemoryBlock
if flags.Has(RANDOMX_FLAG_LARGE_PAGES) {
if largePageAllocator == nil {
return nil, errors.New("huge pages not supported")
}
blocks, err = memory.Allocate[[RANDOMX_ARGON_MEMORY]MemoryBlock](largePageAllocator)
if err != nil {
return nil, err
}
} else {
blocks, err = memory.Allocate[[RANDOMX_ARGON_MEMORY]MemoryBlock](cacheLineAlignedAllocator)
if err != nil {
return nil, err
}
}
return &Cache{
flags: flags,
blocks: blocks,
}, nil
}
func (c *Cache) hasInitializedJIT() bool {
@ -64,24 +85,26 @@ func (c *Cache) Close() error {
}
}
}
return nil
if c.flags.Has(RANDOMX_FLAG_LARGE_PAGES) {
return memory.Free(largePageAllocator, c.blocks)
} else {
return memory.Free(cacheLineAlignedAllocator, c.blocks)
}
}
// Init Initializes the cache memory and SuperscalarHash using the provided key value.
// Does nothing if called again with the same key value.
func (c *Cache) Init(key []byte) {
//TODO: cache key and do not regenerate
kkey := slices.Clone(key)
argonBlocks := unsafe.Slice((*argon2.Block)(unsafe.Pointer(c.blocks)), len(c.blocks))
argonBlocks := argon2.BuildBlocks(kkey, []byte(RANDOMX_ARGON_SALT), RANDOMX_ARGON_ITERATIONS, RANDOMX_ARGON_MEMORY, RANDOMX_ARGON_LANES)
memoryBlocks := unsafe.Slice((*MemoryBlock)(unsafe.Pointer(unsafe.SliceData(argonBlocks))), int(unsafe.Sizeof(argon2.Block{}))/int(unsafe.Sizeof(MemoryBlock{}))*len(argonBlocks))
c.blocks = memoryBlocks
argon2.BuildBlocks(argonBlocks, key, []byte(RANDOMX_ARGON_SALT), RANDOMX_ARGON_ITERATIONS, RANDOMX_ARGON_MEMORY, RANDOMX_ARGON_LANES)
const nonce uint32 = 0
gen := blake2.New(kkey, nonce)
gen := blake2.New(key, nonce)
for i := range c.programs {
// build a superscalar program
prog := BuildSuperScalarProgram(gen)
@ -91,6 +114,8 @@ func (c *Cache) Init(key []byte) {
// fallback if can't compile program
if c.jitPrograms[i] == nil {
c.programs[i] = prog
} else if err := memory.PageReadExecute(c.jitPrograms[i]); err != nil {
c.programs[i] = prog
} else {
c.programs[i] = SuperScalarProgram{prog[0]}
}
@ -112,7 +137,7 @@ func (c *Cache) getMixBlock(addr uint64) *RegisterLine {
return c.blocks[block].GetLine(addr % 1024)
}
func (c *Cache) GetMemory() []MemoryBlock {
func (c *Cache) GetMemory() *[RANDOMX_ARGON_MEMORY]MemoryBlock {
return c.blocks
}
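
NewCache now reports allocation failures instead of returning a bare pointer: callers that want huge pages can try RANDOMX_FLAG_LARGE_PAGES first and retry without it, which is the pattern TestMain uses further down. A minimal sketch of that fallback as a hypothetical helper inside this package (newCacheWithFallback is illustrative, not part of the diff):

func newCacheWithFallback(flags Flags, key []byte) (*Cache, error) {
	c, err := NewCache(flags | RANDOMX_FLAG_LARGE_PAGES)
	if err != nil {
		// huge pages unsupported or not reserved: retry with the default
		// cache-line-aligned allocation
		if c, err = NewCache(flags); err != nil {
			return nil, err
		}
	}
	c.Init(key)
	return c, nil
}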


@ -5,7 +5,11 @@ import "testing"
func Test_Cache_Init(t *testing.T) {
t.Parallel()
cache := NewCache(GetFlags())
cache, err := NewCache(GetFlags())
if err != nil {
t.Fatal(err)
}
defer cache.Close()
cache.Init(Tests[1].key)
memory := cache.GetMemory()
@ -47,7 +51,11 @@ func Test_Cache_InitDataset(t *testing.T) {
flags := GetFlags()
flags &^= RANDOMX_FLAG_JIT
cache := NewCache(flags)
cache, err := NewCache(flags)
if err != nil {
t.Fatal(err)
}
defer cache.Close()
cache.Init(Tests[1].key)
var datasetItem RegisterLine
@ -70,7 +78,11 @@ func Test_Cache_InitDataset(t *testing.T) {
t.Skip("not supported on this platform")
}
cache := NewCache(flags)
cache, err := NewCache(flags)
if err != nil {
t.Fatal(err)
}
defer cache.Close()
cache.Init(Tests[1].key)
if !cache.hasInitializedJIT() {
t.Skip("not supported on this platform")


@ -8,7 +8,10 @@ import (
func Test_CalculateCommitment(t *testing.T) {
t.Parallel()
cache := NewCache(GetFlags())
cache, err := NewCache(GetFlags())
if err != nil {
t.Fatal(err)
}
defer cache.Close()
test := Tests[1]


@ -29,7 +29,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package randomx
import "git.gammaspectra.live/P2Pool/go-randomx/v3/internal/argon2"
import (
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/argon2"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
)
// see reference configuration.h
// Cache size in KiB. Must be a power of 2.
@ -111,3 +114,7 @@ const STOREL3CONDITION = 14
func isZeroOrPowerOf2(x uint32) bool {
return (x & (x - 1)) == 0
}
var largePageAllocator = memory.NewLargePageAllocator()
var pageAllocator = memory.NewPageAllocator()
var cacheLineAlignedAllocator = memory.NewAlignedAllocator(CacheLineSize)


@ -2,8 +2,8 @@ package randomx
import (
"errors"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
"sync"
"unsafe"
)
const DatasetSize = RANDOMX_DATASET_BASE_SIZE + RANDOMX_DATASET_EXTRA_SIZE
@ -12,6 +12,7 @@ const DatasetItemCount = DatasetSize / CacheLineSize
type Dataset struct {
memory []RegisterLine
flags Flags
}
// NewDataset Creates a randomx_dataset structure and allocates memory for RandomX Dataset.
@ -30,14 +31,27 @@ func NewDataset(flags Flags) (result *Dataset, err error) {
}
}()
//todo: implement large pages, align allocation
alignedMemory := make([]RegisterLine, DatasetItemCount)
assertAlignedTo16(uintptr(unsafe.Pointer(unsafe.SliceData(alignedMemory))))
var alignedMemory []RegisterLine
//todo: err on not large pages
if flags.Has(RANDOMX_FLAG_LARGE_PAGES) {
if largePageAllocator == nil {
return nil, errors.New("huge pages not supported")
}
alignedMemory, err = memory.AllocateSlice[RegisterLine](largePageAllocator, DatasetItemCount)
if err != nil {
return nil, err
}
} else {
alignedMemory, err = memory.AllocateSlice[RegisterLine](cacheLineAlignedAllocator, DatasetItemCount)
if err != nil {
return nil, err
}
}
return &Dataset{
memory: alignedMemory,
flags: flags,
}, nil
}
@ -70,7 +84,11 @@ func (d *Dataset) InitDataset(cache *Cache, startItem, itemCount uint64) {
}
func (d *Dataset) Close() error {
return nil
if d.flags.Has(RANDOMX_FLAG_LARGE_PAGES) {
return memory.FreeSlice(largePageAllocator, d.memory)
} else {
return memory.FreeSlice(cacheLineAlignedAllocator, d.memory)
}
}
func (d *Dataset) InitDatasetParallel(cache *Cache, n int) {

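A dataset is built the same way: NewDataset now records the flags so Close knows which allocator the memory came from, and callers can attempt huge pages first just as the benchmark setup does. A hypothetical helper mirroring TestMain (newDatasetWithFallback is illustrative only):

func newDatasetWithFallback(flags Flags, cache *Cache, workers int) (*Dataset, error) {
	d, err := NewDataset(flags | RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_LARGE_PAGES)
	if err != nil {
		// no huge pages available: retry with the cache-line-aligned allocator
		if d, err = NewDataset(flags | RANDOMX_FLAG_FULL_MEM); err != nil {
			return nil, err
		}
	}
	d.InitDatasetParallel(cache, workers)
	return d, nil
}
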
exec.go

@ -1,5 +1,15 @@
package randomx
import "git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
type SuperScalarProgramFunc []byte
type VMProgramFunc []byte
func (f SuperScalarProgramFunc) Close() error {
return memory.FreeSlice(pageAllocator, f)
}
func (f VMProgramFunc) Close() error {
return memory.FreeSlice(pageAllocator, f)
}


@ -1,28 +0,0 @@
//go:build !unix || disable_jit || purego
package randomx
func (f SuperScalarProgramFunc) Close() error {
return nil
}
func (f VMProgramFunc) Close() error {
return nil
}
func mapProgram(program []byte, size int) []byte {
return nil
}
func mapProgramRW(execFunc []byte) {
}
func mapProgramRX(execFunc []byte) {
}
// mapProgramRWX insecure!
func mapProgramRWX(execFunc []byte) {
}


@ -1,85 +0,0 @@
//go:build unix && !disable_jit && !purego
package randomx
import (
"golang.org/x/sys/unix"
)
func (f SuperScalarProgramFunc) Close() error {
return unix.Munmap(f)
}
func (f VMProgramFunc) Close() error {
return unix.Munmap(f)
}
func mapProgramRW(execFunc []byte) {
err := unix.Mprotect(execFunc, unix.PROT_READ|unix.PROT_WRITE)
if err != nil {
defer func() {
// unmap if we err
err := unix.Munmap(execFunc)
if err != nil {
panic(err)
}
}()
panic(err)
}
}
func mapProgramRX(execFunc []byte) {
err := unix.Mprotect(execFunc, unix.PROT_READ|unix.PROT_EXEC)
if err != nil {
defer func() {
// unmap if we err
err := unix.Munmap(execFunc)
if err != nil {
panic(err)
}
}()
panic(err)
}
}
// mapProgramRWX insecure!
func mapProgramRWX(execFunc []byte) {
err := unix.Mprotect(execFunc, unix.PROT_READ|unix.PROT_WRITE|unix.PROT_EXEC)
if err != nil {
defer func() {
// unmap if we err
err := unix.Munmap(execFunc)
if err != nil {
panic(err)
}
}()
panic(err)
}
}
func mapProgram(program []byte, size int) []byte {
// Read and Write only
execFunc, err := unix.Mmap(-1, 0, max(size, len(program)), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
if err != nil {
panic(err)
}
// Introduce machine code into the memory region
copy(execFunc, program)
// uphold W^X
// Read and Exec only
err = unix.Mprotect(execFunc, unix.PROT_READ|unix.PROT_EXEC)
if err != nil {
defer func() {
// unmap if we err
err := unix.Munmap(execFunc)
if err != nil {
panic(err)
}
}()
panic(err)
}
return execFunc
}


@ -19,7 +19,7 @@ func (f Flags) HasJIT() bool {
const RANDOMX_FLAG_DEFAULT Flags = 0
const (
// RANDOMX_FLAG_LARGE_PAGES not implemented
// RANDOMX_FLAG_LARGE_PAGES Select large page allocation for dataset
RANDOMX_FLAG_LARGE_PAGES = Flags(1 << iota)
// RANDOMX_FLAG_HARD_AES Selects between hardware or software AES
RANDOMX_FLAG_HARD_AES


@ -1,6 +1,9 @@
package argon2
import "golang.org/x/crypto/blake2b"
import (
"encoding/binary"
"golang.org/x/crypto/blake2b"
)
import (
_ "golang.org/x/crypto/argon2"
@ -16,20 +19,49 @@ const syncPoints = 4
//go:linkname initHash golang.org/x/crypto/argon2.initHash
func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte
//go:linkname initBlocks golang.org/x/crypto/argon2.initBlocks
func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []Block
//go:linkname processBlocks golang.org/x/crypto/argon2.processBlocks
func processBlocks(B []Block, time, memory, threads uint32, mode int)
// BuildBlocks From golang.org/x/crypto/argon2.deriveKey without last deriveKey call
func BuildBlocks(password, salt []byte, time, memory uint32, threads uint8) []Block {
//go:linkname blake2bHash golang.org/x/crypto/argon2.blake2bHash
func blake2bHash(out []byte, in []byte)
// initBlocks From golang.org/x/crypto/argon2.initBlocks with external memory allocation
func initBlocks(B []Block, h0 *[blake2b.Size + 8]byte, memory, threads uint32) {
var block0 [1024]byte
clear(B)
for lane := uint32(0); lane < threads; lane++ {
j := lane * (memory / threads)
binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
blake2bHash(block0[:], h0[:])
for i := range B[j+0] {
B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
}
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
blake2bHash(block0[:], h0[:])
for i := range B[j+1] {
B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
}
}
}
// BuildBlocks From golang.org/x/crypto/argon2.deriveKey without last deriveKey call and external memory allocation
func BuildBlocks(B []Block, password, salt []byte, time, memory uint32, threads uint8) {
if time < 1 {
panic("argon2: number of rounds too small")
}
if threads < 1 {
panic("argon2: parallelism degree too low")
}
if len(B) != int(memory) {
panic("argon2: invalid block size")
}
const mode = 0 /* argon2d */
const keyLen = 0
h0 := initHash(password, salt, nil, nil, time, memory, uint32(threads), keyLen, mode)
@ -38,8 +70,7 @@ func BuildBlocks(password, salt []byte, time, memory uint32, threads uint8) []Bl
if memory < 2*syncPoints*uint32(threads) {
memory = 2 * syncPoints * uint32(threads)
}
B := initBlocks(&h0, memory, uint32(threads))
processBlocks(B, time, memory, uint32(threads), mode)
return B
initBlocks(B, &h0, memory, uint32(threads))
processBlocks(B, time, memory, uint32(threads), mode)
}
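
BuildBlocks now fills a caller-supplied []Block (it panics unless len(B) equals the memory parameter) instead of allocating its own slice; cache.Init passes the cache's block array reinterpreted as argon2 blocks via unsafe. A plain-slice sketch of the call, using the same RandomX parameters as cache.go above (key is assumed to be in scope):

blocks := make([]argon2.Block, RANDOMX_ARGON_MEMORY)
argon2.BuildBlocks(blocks, key, []byte(RANDOMX_ARGON_SALT),
	RANDOMX_ARGON_ITERATIONS, RANDOMX_ARGON_MEMORY, RANDOMX_ARGON_LANES)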


@ -0,0 +1,32 @@
package memory
import "unsafe"
type AlignedAllocator uint64
func NewAlignedAllocator(alignment uint64) Allocator {
if !isZeroOrPowerOf2(alignment) {
panic("alignment must be a power of 2")
}
return AlignedAllocator(alignment)
}
func (a AlignedAllocator) AllocMemory(size uint64) ([]byte, error) {
if a <= 4 {
//slice allocations are 16-byte aligned, fast path
return make([]byte, size, max(size, uint64(a))), nil
}
memory := make([]byte, size+uint64(a))
ptr := uintptr(unsafe.Pointer(unsafe.SliceData(memory)))
align := uint64(a) - (uint64(ptr) & (uint64(a) - 1))
if align == uint64(a) {
return memory[:size:size], nil
}
return memory[align : align+size : align+size], nil
}
func (a AlignedAllocator) FreeMemory(memory []byte) error {
//let gc free
return nil
}
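
The aligned allocator over-allocates by the alignment and re-slices to the first aligned offset, so the returned slice's data pointer satisfies the requested alignment while the backing array stays ordinary GC-managed memory (FreeMemory is deliberately a no-op). A small illustrative check, assuming the fmt, unsafe and internal memory imports:

func alignedExample() error {
	a := memory.NewAlignedAllocator(64) // e.g. one cache line
	buf, err := a.AllocMemory(1 << 20)
	if err != nil {
		return err
	}
	addr := uintptr(unsafe.Pointer(unsafe.SliceData(buf)))
	fmt.Println(addr%64 == 0) // true: the data pointer is 64-byte aligned
	return a.FreeMemory(buf)  // no-op; the garbage collector reclaims the backing array
}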

internal/memory/alloc.go Normal file

@ -0,0 +1,45 @@
package memory
import (
"unsafe"
)
type Allocator interface {
AllocMemory(size uint64) ([]byte, error)
FreeMemory(memory []byte) error
}
func Allocate[T any](a Allocator) (*T, error) {
var zeroType T
mem, err := a.AllocMemory(uint64(unsafe.Sizeof(zeroType)))
if err != nil {
return nil, err
}
return (*T)(unsafe.Pointer(unsafe.SliceData(mem))), nil
}
func Free[T any](a Allocator, v *T) error {
var zeroType T
return a.FreeMemory(unsafe.Slice((*byte)(unsafe.Pointer(v)), uint64(unsafe.Sizeof(zeroType))))
}
func AllocateSlice[T any, T2 ~int | ~uint64 | ~uint32](a Allocator, size T2) ([]T, error) {
var zeroType T
mem, err := a.AllocMemory(uint64(unsafe.Sizeof(zeroType)) * uint64(size))
if err != nil {
return nil, err
}
return unsafe.Slice((*T)(unsafe.Pointer(unsafe.SliceData(mem))), size), nil
}
func FreeSlice[T any](a Allocator, v []T) error {
var zeroType T
return a.FreeMemory(unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(v))), uint64(unsafe.Sizeof(zeroType))*uint64(len(v))))
}
func isZeroOrPowerOf2(x uint64) bool {
return (x & (x - 1)) == 0
}
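
Allocate, AllocateSlice, Free and FreeSlice are thin generic wrappers that view an allocator's raw bytes as a typed value or slice and hand the same byte view back on free, so one Allocator can back a struct, a fixed-size array or a slice. A hypothetical example from outside the package (the item type is illustrative only):

type item struct{ a, b uint64 }

func allocExample(a memory.Allocator) error {
	one, err := memory.Allocate[item](a) // *item backed by a.AllocMemory(16)
	if err != nil {
		return err
	}
	defer memory.Free(a, one)

	many, err := memory.AllocateSlice[item](a, 1024) // []item with len and cap 1024
	if err != nil {
		return err
	}
	defer memory.FreeSlice(a, many)

	one.a, many[0].b = 1, 2
	return nil
}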


@ -0,0 +1,45 @@
//go:build freebsd && !purego
package memory
import (
"golang.org/x/sys/unix"
)
type LargePageAllocator struct {
}
func NewLargePageAllocator() Allocator {
return LargePageAllocator{}
}
/*
* Request specific alignment (n == log2 of the desired alignment).
*
* MAP_ALIGNED_SUPER requests optimal superpage alignment, but does
* not enforce a specific alignment.
*/
//#define MAP_ALIGNED(n) ((n) << MAP_ALIGNMENT_SHIFT)
//#define MAP_ALIGNMENT_SHIFT 24
//#define MAP_ALIGNMENT_MASK MAP_ALIGNED(0xff)
//#define MAP_ALIGNED_SUPER MAP_ALIGNED(1) /* align on a superpage */
const MAP_ALIGNED_SUPER = 1 << 24
func (a LargePageAllocator) AllocMemory(size uint64) ([]byte, error) {
memory, err := unix.Mmap(-1, 0, int(size), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS|MAP_ALIGNED_SUPER)
if err != nil {
return nil, err
}
return memory, nil
}
func (a LargePageAllocator) FreeMemory(memory []byte) error {
if memory == nil {
return nil
}
return unix.Munmap(memory)
}


@ -0,0 +1,10 @@
//go:build openbsd || netbsd || dragonfly || darwin || ios || !unix || purego
package memory
var LargePageNoMemoryErr error
// NewLargePageAllocator Not supported in platform
func NewLargePageAllocator() Allocator {
return nil
}


@ -0,0 +1,31 @@
//go:build unix && !(freebsd || openbsd || netbsd || dragonfly || darwin || ios) && !purego
package memory
import (
"golang.org/x/sys/unix"
)
type LargePageAllocator struct {
}
func NewLargePageAllocator() Allocator {
return LargePageAllocator{}
}
func (a LargePageAllocator) AllocMemory(size uint64) ([]byte, error) {
memory, err := unix.Mmap(-1, 0, int(size), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS|unix.MAP_HUGETLB|unix.MAP_POPULATE)
if err != nil {
return nil, err
}
return memory, nil
}
func (a LargePageAllocator) FreeMemory(memory []byte) error {
if memory == nil {
return nil
}
return unix.Munmap(memory)
}
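
On Linux the mapping uses MAP_HUGETLB (with MAP_POPULATE to fault the pages in up front), so AllocMemory fails with ENOMEM unless huge pages have been reserved on the system; the tests treat that case as skippable via PageNoMemoryErr rather than as a hard failure. A sketch of that check, assuming a platform where the allocator exists and the errors import:

func tryHugePages(size uint64) ([]byte, error) {
	a := memory.NewLargePageAllocator()
	mem, err := a.AllocMemory(size)
	if errors.Is(err, memory.PageNoMemoryErr) {
		return nil, nil // no huge pages reserved (e.g. vm.nr_hugepages is 0): caller falls back
	}
	return mem, err
}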


@ -0,0 +1,9 @@
//go:build !unix || purego
package memory
var PageNoMemoryErr error
func NewPageAllocator() Allocator {
return nil
}


@ -0,0 +1,46 @@
//go:build unix && !purego
package memory
import (
"golang.org/x/sys/unix"
)
var PageNoMemoryErr = unix.ENOMEM
type PageAllocator struct {
}
func NewPageAllocator() Allocator {
return PageAllocator{}
}
func (a PageAllocator) AllocMemory(size uint64) ([]byte, error) {
memory, err := unix.Mmap(-1, 0, int(size), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
if err != nil {
return nil, err
}
return memory, nil
}
func (a PageAllocator) FreeMemory(memory []byte) error {
if memory == nil {
return nil
}
return unix.Munmap(memory)
}
func PageReadWrite(memory []byte) error {
return unix.Mprotect(memory, unix.PROT_READ|unix.PROT_WRITE)
}
func PageReadExecute(memory []byte) error {
return unix.Mprotect(memory, unix.PROT_READ|unix.PROT_EXEC)
}
// PageReadWriteExecute Insecure!
func PageReadWriteExecute(memory []byte) error {
return unix.Mprotect(memory, unix.PROT_READ|unix.PROT_WRITE|unix.PROT_EXEC)
}
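
These Mprotect wrappers replace the old mapProgramRW/RX/RWX helpers: a JIT buffer is mapped read+write through the page allocator, filled with machine code, then flipped to read+execute before it runs, and back to read+write whenever it has to be regenerated (the RANDOMX_FLAG_SECURE path in vm.go below). A sketch of that W^X round trip on a unix build, with code standing in for generated machine code:

func loadJIT(code []byte) ([]byte, error) {
	buf, err := memory.AllocateSlice[byte](memory.NewPageAllocator(), len(code))
	if err != nil {
		return nil, err
	}
	copy(buf, code)
	// uphold W^X: drop the write permission before the buffer becomes executable
	if err := memory.PageReadExecute(buf); err != nil {
		return nil, err
	}
	return buf, nil
}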


@ -31,7 +31,9 @@ package randomx
import (
"encoding/hex"
"errors"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/aes"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
"os"
"runtime"
"slices"
@ -66,6 +68,7 @@ var Tests = []testdata{
func testFlags(name string, flags Flags) (f Flags, skip bool) {
flags |= GetFlags()
flags &^= RANDOMX_FLAG_LARGE_PAGES
nn := strings.Split(name, "/")
switch nn[len(nn)-1] {
@ -84,6 +87,11 @@ func testFlags(name string, flags Flags) (f Flags, skip bool) {
if aes.NewHardAES() == nil {
return flags, true
}
case "largepages":
flags |= RANDOMX_FLAG_LARGE_PAGES
if largePageAllocator == nil {
return flags, true
}
}
return flags, false
@ -91,7 +99,7 @@ func testFlags(name string, flags Flags) (f Flags, skip bool) {
func Test_RandomXLight(t *testing.T) {
t.Parallel()
for _, n := range []string{"interpreter", "compiler", "softaes", "hardaes"} {
for _, n := range []string{"interpreter", "compiler", "softaes", "hardaes", "largepages"} {
t.Run(n, func(t *testing.T) {
t.Parallel()
tFlags, skip := testFlags(t.Name(), 0)
@ -99,9 +107,12 @@ func Test_RandomXLight(t *testing.T) {
t.Skip("not supported on this platform")
}
c := NewCache(tFlags)
if c == nil {
t.Fatal("nil cache")
c, err := NewCache(tFlags)
if err != nil {
if tFlags.Has(RANDOMX_FLAG_LARGE_PAGES) && errors.Is(err, memory.PageNoMemoryErr) {
t.Skip("cannot allocate memory")
}
t.Fatal(err)
}
defer func() {
err := c.Close()
@ -153,9 +164,12 @@ func Test_RandomXBatch(t *testing.T) {
t.Skip("not supported on this platform")
}
c := NewCache(tFlags)
if c == nil {
t.Fatal("nil cache")
c, err := NewCache(tFlags)
if tFlags.Has(RANDOMX_FLAG_LARGE_PAGES) && errors.Is(err, memory.PageNoMemoryErr) {
t.Skip("cannot allocate memory")
}
if err != nil {
t.Fatal(err)
}
defer func() {
err := c.Close()
@ -206,7 +220,7 @@ func Test_RandomXFull(t *testing.T) {
t.Skip("Skipping full mode in CI environment")
}
for _, n := range []string{"interpreter", "compiler", "softaes", "hardaes"} {
for _, n := range []string{"interpreter", "compiler", "softaes", "hardaes", "largepages"} {
t.Run(n, func(t *testing.T) {
tFlags, skip := testFlags(t.Name(), RANDOMX_FLAG_FULL_MEM)
@ -214,9 +228,12 @@ func Test_RandomXFull(t *testing.T) {
t.Skip("not supported on this platform")
}
c := NewCache(tFlags)
if c == nil {
t.Fatal("nil cache")
c, err := NewCache(tFlags)
if tFlags.Has(RANDOMX_FLAG_LARGE_PAGES) && errors.Is(err, memory.PageNoMemoryErr) {
t.Skip("cannot allocate memory")
}
if err != nil {
t.Fatal(err)
}
defer func() {
err := c.Close()
@ -288,13 +305,22 @@ func TestMain(m *testing.M) {
flags |= RANDOMX_FLAG_FULL_MEM
var err error
//init light and full dataset
BenchmarkCache = NewCache(flags)
BenchmarkCache, err = NewCache(flags | RANDOMX_FLAG_LARGE_PAGES)
if err != nil {
BenchmarkCache, err = NewCache(flags)
if err != nil {
panic(err)
}
}
defer BenchmarkCache.Close()
BenchmarkCache.Init(BenchmarkTest.key)
BenchmarkDataset, err = NewDataset(flags | RANDOMX_FLAG_FULL_MEM)
BenchmarkDataset, err = NewDataset(flags | RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_LARGE_PAGES)
if err != nil {
panic(err)
BenchmarkDataset, err = NewDataset(flags | RANDOMX_FLAG_FULL_MEM)
if err != nil {
panic(err)
}
}
defer BenchmarkDataset.Close()
BenchmarkDataset.InitDatasetParallel(BenchmarkCache, runtime.NumCPU())


@ -4,6 +4,7 @@ package randomx
import (
"encoding/binary"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
"unsafe"
)
@ -90,5 +91,11 @@ func generateSuperscalarCode(scalarProgram SuperScalarProgram) SuperScalarProgra
program = append(program, RET)
return mapProgram(program, len(program))
pagedMemory, err := memory.AllocateSlice[byte](pageAllocator, len(program))
if err != nil {
return nil
}
copy(pagedMemory, program)
return pagedMemory
}

vm.go

@ -32,6 +32,7 @@ package randomx
import (
"errors"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/aes"
"git.gammaspectra.live/P2Pool/go-randomx/v3/internal/memory"
"math"
"runtime"
"unsafe"
@ -91,6 +92,17 @@ func NewVM(flags Flags, cache *Cache, dataset *Dataset) (*VM, error) {
return nil, errors.New("nil dataset in full mode")
}
pad, err := memory.Allocate[ScratchPad](cacheLineAlignedAllocator)
if err != nil {
return nil, err
}
registerFile, err := memory.Allocate[RegisterFile](cacheLineAlignedAllocator)
if err != nil {
return nil, err
}
_ = pad
_ = registerFile
vm := &VM{
Cache: cache,
Dataset: dataset,
@ -99,9 +111,6 @@ func NewVM(flags Flags, cache *Cache, dataset *Dataset) (*VM, error) {
registerFile: new(RegisterFile),
}
assertAlignedTo16(uintptr(unsafe.Pointer(vm.pad)))
assertAlignedTo16(uintptr(unsafe.Pointer(vm.registerFile)))
if flags.Has(RANDOMX_FLAG_HARD_AES) {
vm.AES = aes.NewHardAES()
}
@ -111,9 +120,17 @@ func NewVM(flags Flags, cache *Cache, dataset *Dataset) (*VM, error) {
}
if flags.HasJIT() {
vm.jitProgram = mapProgram(nil, int(RandomXCodeSize))
vm.jitProgram, err = memory.AllocateSlice[byte](pageAllocator, int(RandomXCodeSize))
if err != nil {
return nil, err
}
if !flags.Has(RANDOMX_FLAG_SECURE) {
mapProgramRWX(vm.jitProgram)
err = memory.PageReadWriteExecute(vm.jitProgram)
if err != nil {
vm.jitProgram.Close()
return nil, err
}
}
}
@ -167,18 +184,30 @@ func (vm *VM) run() {
if vm.jitProgram != nil {
if vm.Dataset == nil { //light mode
if vm.flags.Has(RANDOMX_FLAG_SECURE) {
mapProgramRW(vm.jitProgram)
err := memory.PageReadWrite(vm.jitProgram)
if err != nil {
panic(err)
}
jitProgram = vm.program.generateCode(vm.jitProgram, nil)
mapProgramRX(vm.jitProgram)
err = memory.PageReadExecute(vm.jitProgram)
if err != nil {
panic(err)
}
} else {
jitProgram = vm.program.generateCode(vm.jitProgram, nil)
}
} else {
// full mode and we have JIT
if vm.flags.Has(RANDOMX_FLAG_SECURE) {
mapProgramRW(vm.jitProgram)
err := memory.PageReadWrite(vm.jitProgram)
if err != nil {
panic(err)
}
jitProgram = vm.program.generateCode(vm.jitProgram, &readReg)
mapProgramRX(vm.jitProgram)
err = memory.PageReadExecute(vm.jitProgram)
if err != nil {
panic(err)
}
} else {
jitProgram = vm.program.generateCode(vm.jitProgram, &readReg)
}
@ -374,6 +403,9 @@ func (vm *VM) CalculateHashLast(output *[RANDOMX_HASH_SIZE]byte) {
// Close Releases all memory occupied by the structure.
func (vm *VM) Close() error {
memory.Free(cacheLineAlignedAllocator, vm.pad)
memory.Free(cacheLineAlignedAllocator, vm.registerFile)
if vm.jitProgram != nil {
return vm.jitProgram.Close()
}