184 lines
4.4 KiB
Go
184 lines
4.4 KiB
Go
package assembler
|
|
|
|
import (
|
|
"git.gammaspectra.live/WeebDataHoarder/compute-go/assembler/obj"
|
|
memory "git.gammaspectra.live/WeebDataHoarder/compute-go/malloc"
|
|
"git.gammaspectra.live/WeebDataHoarder/compute-go/types"
|
|
"runtime"
|
|
"slices"
|
|
"unsafe"
|
|
)
|
|
|
|
// DataAllocator hands out aligned, GC-pinned memory holding constant data,
// returning absolute addresses suitable for use as TYPE_CONST operands.
type DataAllocator struct {
	// allocator provides the aligned backing memory for each VAlloc call.
	allocator memory.Allocator

	// pinner pins every allocation so the GC cannot move it while raw
	// pointers to it are embedded in emitted code; released by FreeAll.
	pinner runtime.Pinner
}
|
|
|
|
func NewDataAllocator(abi *types.ABIDefinition) *DataAllocator {
|
|
return &DataAllocator{
|
|
// allow allocating 512-bit data
|
|
allocator: memory.NewAlignedAllocator(max(512/8, uint64(abi.Arch.Alignment))),
|
|
}
|
|
}
|
|
|
|
func (a *DataAllocator) VAlloc128(val []byte) types.MustAddressable128 {
|
|
const n = 128 / 8
|
|
_ = val[n]
|
|
|
|
return types.VecMem128[types.Vec8x16[uint8]](a.VAlloc(val[:n+1]))
|
|
}
|
|
|
|
func (a *DataAllocator) VAlloc256(val []byte) types.MustAddressable256 {
|
|
const n = 256 / 8
|
|
_ = val[n]
|
|
|
|
return types.VecMem256[types.Vec8x32[uint8]](a.VAlloc(val[:n+1]))
|
|
}
|
|
|
|
func (a *DataAllocator) VAlloc512(val []byte) types.MustAddressable512 {
|
|
const n = 512 / 8
|
|
_ = val[n]
|
|
|
|
return types.VecMem512[types.Vec8x64[uint8]](a.VAlloc(val[:n+1]))
|
|
}
|
|
|
|
func (a *DataAllocator) VAlloc(val []byte) types.Addressable {
|
|
data, _ := a.allocator.AllocMemory(uint64(len(val)))
|
|
a.pinner.Pin(unsafe.SliceData(data))
|
|
copy(data, val)
|
|
|
|
return types.Address{
|
|
Type: obj.TYPE_CONST,
|
|
Offset: int64(uintptr(unsafe.Pointer(unsafe.SliceData(data)))),
|
|
}
|
|
}
|
|
|
|
// FreeAll unpins every allocation made through VAlloc, allowing the GC to
// move or reclaim the backing memory again. After this call, addresses
// previously returned by VAlloc must no longer be used.
//
// NOTE(review): only the pins are released here; the allocator's memory is
// presumably reclaimed by memory.Allocator itself — confirm against its API.
func (a *DataAllocator) FreeAll() {
	a.pinner.Unpin()
}
|
|
|
|
// RegisterAllocator tracks which of the ABI's allocatable vector and
// general-purpose registers are currently in use.
type RegisterAllocator struct {
	// vIndex lists the allocatable vector registers (cloned from the ABI);
	// vAlloc[i] is true while vIndex[i] is handed out.
	vIndex []types.MustAddressable128
	vAlloc []bool

	// gIndex lists the allocatable general-purpose register numbers;
	// gAlloc[i] is true while gIndex[i] is handed out.
	gIndex []int16
	gAlloc []bool

	// abi describes the target's register file, used to map abstract ABI
	// parameter register indices to physical registers.
	abi *types.ABIDefinition
}
|
|
|
|
func NewRegisterAllocator(abi *types.ABIDefinition) *RegisterAllocator {
|
|
alloc := &RegisterAllocator{
|
|
vIndex: slices.Clone(abi.AllocatorVectorRegisters),
|
|
vAlloc: make([]bool, len(abi.AllocatorVectorRegisters)),
|
|
gIndex: slices.Clone(abi.AllocatorGeneralRegisters),
|
|
gAlloc: make([]bool, len(abi.AllocatorGeneralRegisters)),
|
|
}
|
|
return alloc
|
|
}
|
|
|
|
func (a *RegisterAllocator) VAlloc128() types.MustAddressable128 {
|
|
if i := slices.Index(a.vAlloc, false); i == -1 {
|
|
panic("out of registers")
|
|
} else {
|
|
a.vAlloc[i] = true
|
|
return a.vIndex[i]
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) VAlloc256() types.MustAddressable256 {
|
|
return a.VAlloc128().As256()
|
|
}
|
|
|
|
func (a *RegisterAllocator) VAlloc512() types.MustAddressable512 {
|
|
return a.VAlloc128().As512()
|
|
}
|
|
|
|
func (a *RegisterAllocator) VFree(regs ...types.Addressable) {
|
|
for _, r := range regs {
|
|
addr := r.Addr()
|
|
if addr.Type != obj.TYPE_REG {
|
|
continue
|
|
}
|
|
if i := slices.IndexFunc(a.vIndex, func(a types.MustAddressable128) bool {
|
|
return a.Addr().Reg == addr.Reg
|
|
}); i == -1 {
|
|
panic("invalid")
|
|
} else /*if !a.vAlloc[i] {
|
|
panic("double free")
|
|
} else*/{
|
|
a.vAlloc[i] = false
|
|
}
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) ReserveFuncParams(params *ABIParamResultInfo) {
|
|
for _, param := range params.InParams() {
|
|
for _, reg := range param.Registers {
|
|
if int(reg) >= len(a.abi.ABIIntegerRegisters) {
|
|
a.Reserve(types.Register(a.abi.ABIFloatRegisters[int(reg)-len(a.abi.ABIIntegerRegisters)]))
|
|
} else {
|
|
a.Reserve(types.Register(a.abi.ABIIntegerRegisters[reg]))
|
|
}
|
|
}
|
|
}
|
|
|
|
for _, param := range params.OutParams() {
|
|
for _, reg := range param.Registers {
|
|
if int(reg) >= len(a.abi.ABIIntegerRegisters) {
|
|
a.Reserve(types.Register(a.abi.ABIFloatRegisters[int(reg)-len(a.abi.ABIIntegerRegisters)]))
|
|
} else {
|
|
a.Reserve(types.Register(a.abi.ABIIntegerRegisters[reg]))
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) Reserve(r types.Registerable) bool {
|
|
if i := slices.Index(a.gIndex, r.Reg()); i == -1 {
|
|
return true
|
|
} else if a.gAlloc[i] {
|
|
return false
|
|
} else {
|
|
a.gAlloc[i] = true
|
|
return true
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) Alloc() types.Register {
|
|
if i := slices.Index(a.gAlloc, false); i == -1 {
|
|
panic("out of registers")
|
|
} else {
|
|
a.gAlloc[i] = true
|
|
return types.Register(a.gIndex[i])
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) AllocArgOutput(arg AddressableList) types.Register {
|
|
if len(arg) == 1 {
|
|
addr := arg[0].Addr()
|
|
if addr.Type == obj.TYPE_REG && a.Reserve(types.Register(addr.Reg)) {
|
|
// map 1:1 to existing register!
|
|
return types.Register(addr.Reg)
|
|
}
|
|
}
|
|
|
|
if i := slices.Index(a.gAlloc, false); i == -1 {
|
|
panic("out of registers")
|
|
} else {
|
|
a.gAlloc[i] = true
|
|
return types.Register(a.gIndex[i])
|
|
}
|
|
}
|
|
|
|
func (a *RegisterAllocator) Free(regs ...types.Registerable) {
|
|
for _, r := range regs {
|
|
if i := slices.Index(a.gIndex, r.Reg()); i == -1 {
|
|
//panic("invalid")
|
|
} else /*if !a.gAlloc[i] {
|
|
panic("double free")
|
|
} else*/{
|
|
a.gAlloc[i] = false
|
|
}
|
|
}
|
|
}
|