// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// mkpreempt generates the asyncPreempt functions for each
// architecture.
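//
// With no command line arguments it rewrites preempt_GOARCH.s for every
// architecture in the arches map; with architecture names as arguments it
// writes those versions to standard output instead. Illustrative
// invocations, assuming the usual `go run` workflow:
//
//	go run mkpreempt.go
//	go run mkpreempt.go amd64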
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

// Copied from cmd/compile/internal/ssa/gen/*Ops.go
var regNames386 = []string{
	"AX",
	"CX",
	"DX",
	"BX",
	"SP",
	"BP",
	"SI",
	"DI",
	"X0",
	"X1",
	"X2",
	"X3",
	"X4",
	"X5",
	"X6",
	"X7",
}

var regNamesAMD64 = []string{
	"AX",
	"CX",
	"DX",
	"BX",
	"SP",
	"BP",
	"SI",
	"DI",
	"R8",
	"R9",
	"R10",
	"R11",
	"R12",
	"R13",
	"R14",
	"R15",
	"X0",
	"X1",
	"X2",
	"X3",
	"X4",
	"X5",
	"X6",
	"X7",
	"X8",
	"X9",
	"X10",
	"X11",
	"X12",
	"X13",
	"X14",
	"X15",
}

var out io.Writer

var arches = map[string]func(){
	"386":     gen386,
	"amd64":   genAMD64,
	"arm":     genARM,
	"arm64":   notImplemented,
	"mips64x": notImplemented,
	"mipsx":   notImplemented,
	"ppc64x":  notImplemented,
	"s390x":   notImplemented,
	"wasm":    genWasm,
}

// beLe lists architectures that come in big- and little-endian variants;
// header gives their generated files a build tag covering both
// (e.g. mips64 and mips64le).
var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true}

func main() {
	flag.Parse()
	if flag.NArg() > 0 {
		out = os.Stdout
		for _, arch := range flag.Args() {
			gen, ok := arches[arch]
			if !ok {
				log.Fatalf("unknown arch %s", arch)
			}
			header(arch)
			gen()
		}
		return
	}

	for arch, gen := range arches {
		f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch))
		if err != nil {
			log.Fatal(err)
		}
		out = f
		header(arch)
		gen()
		if err := f.Close(); err != nil {
			log.Fatal(err)
		}
	}
}

// header emits the generated-code notice, any endian build tags, the
// assembly includes, and the TEXT directive for ·asyncPreempt.
func header(arch string) {
	fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
	if beLe[arch] {
		base := arch[:len(arch)-1]
		fmt.Fprintf(out, "// +build %s %sle\n\n", base, base)
	}
	fmt.Fprintf(out, "#include \"go_asm.h\"\n")
	fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
	fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
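
// As an illustrative example, header("mips64x") writes exactly what the
// Fprintf calls above spell out:
//
//	// Code generated by mkpreempt.go; DO NOT EDIT.
//
//	// +build mips64 mips64le
//
//	#include "go_asm.h"
//	#include "textflag.h"
//
//	TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
//
// The body of the TEXT symbol is then produced by the per-arch gen function.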

// p prints a formatted assembly line to out, indenting it (and every
// embedded newline) by one tab so it nests under the TEXT directive.
func p(f string, args ...interface{}) {
	fmted := fmt.Sprintf(f, args...)
	fmt.Fprintf(out, "\t%s\n", strings.Replace(fmted, "\n", "\n\t", -1))
}
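
// For example (an illustrative call, not one from the generators below),
// p("PUSHQ AX\nPOPQ AX") produces two tab-indented lines:
//
//	PUSHQ AX
//	POPQ AX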

// label prints an assembly label at column 0.
func label(l string) {
	fmt.Fprintf(out, "%s\n", l)
}

// A layout describes a block of registers saved on the stack: each
// register's offset from the stack pointer register sp, and the total
// size of the block.
type layout struct {
	stack int
	regs  []regPos
	sp    string // stack pointer register
}

// A regPos describes how to save and restore a single register: the op
// and register name used to move it (or special save/restore sequences)
// and its offset from the stack pointer.
type regPos struct {
	pos int

	op  string
	reg string

	// If this register requires special save and restore, these
	// give those operations with a %d placeholder for the stack
	// offset.
	save, restore string
}

// add appends a register that is saved and restored with a single op,
// assigning it the next free stack offset.
func (l *layout) add(op, reg string, size int) {
	l.regs = append(l.regs, regPos{op: op, reg: reg, pos: l.stack})
	l.stack += size
}

// addSpecial appends a register with custom save and restore sequences;
// each sequence must contain a %d placeholder for the stack offset.
func (l *layout) addSpecial(save, restore string, size int) {
	l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack})
	l.stack += size
}

// save emits the instructions that save every register in l to its
// stack slot.
func (l *layout) save() {
	for _, reg := range l.regs {
		if reg.save != "" {
			p(reg.save, reg.pos)
		} else {
			p("%s %s, %d(%s)", reg.op, reg.reg, reg.pos, l.sp)
		}
	}
}

// restore emits the instructions that restore every register in l from
// its stack slot, in reverse order of save.
func (l *layout) restore() {
	for i := len(l.regs) - 1; i >= 0; i-- {
		reg := l.regs[i]
		if reg.restore != "" {
			p(reg.restore, reg.pos)
		} else {
			p("%s %d(%s), %s", reg.op, reg.pos, l.sp, reg.reg)
		}
	}
}
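
// A sketch of how the pieces above fit together (illustrative values):
//
//	l := layout{sp: "SP"}
//	l.add("MOVQ", "AX", 8)
//	l.add("MOVQ", "BX", 8)
//
// leaves l.stack at 16, l.save() emits
//
//	MOVQ AX, 0(SP)
//	MOVQ BX, 8(SP)
//
// and l.restore() emits the same pair in reverse order with the operands
// flipped, loading the values back into the registers.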

func gen386() {
	p("PUSHFL")

	// Save general purpose registers.
	var l = layout{sp: "SP"}
	for _, reg := range regNames386 {
		if reg == "SP" || strings.HasPrefix(reg, "X") {
			continue
		}
		l.add("MOVL", reg, 4)
	}

	// Save the 387 state.
	l.addSpecial(
		"FSAVE %d(SP)\nFLDCW runtime·controlWord64(SB)",
		"FRSTOR %d(SP)",
		108)

	// Save SSE state only if supported.
	lSSE := layout{stack: l.stack, sp: "SP"}
	for i := 0; i < 8; i++ {
		lSSE.add("MOVUPS", fmt.Sprintf("X%d", i), 16)
	}

	p("ADJSP $%d", lSSE.stack)
	p("NOP SP")
	l.save()
	p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse")
	lSSE.save()
	label("nosse:")
	p("CALL ·asyncPreempt2(SB)")
	p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse2")
	lSSE.restore()
	label("nosse2:")
	l.restore()
	p("ADJSP $%d", -lSSE.stack)
	p("POPFL")
	p("RET")
}
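
// The resulting 386 frame is 264 bytes (a tally of the sizes above):
// 7 general purpose registers at 4 bytes each, 108 bytes of 387 state,
// and 8 SSE registers at 16 bytes each, with the SSE area only touched
// when internal/cpu reports SSE2 support.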

func genAMD64() {
	// Assign stack offsets.
	var l = layout{sp: "SP"}
	for _, reg := range regNamesAMD64 {
		if reg == "SP" || reg == "BP" {
			continue
		}
		if strings.HasPrefix(reg, "X") {
			l.add("MOVUPS", reg, 16)
		} else {
			l.add("MOVQ", reg, 8)
		}
	}

	// TODO: MXCSR register?

	p("PUSHQ BP")
	p("MOVQ SP, BP")
	p("// Save flags before clobbering them")
	p("PUSHFQ")
	p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
	p("ADJSP $%d", l.stack)
	p("// But vet doesn't know ADJSP, so suppress vet stack checking")
	p("NOP SP")
	l.save()
	p("CALL ·asyncPreempt2(SB)")
	l.restore()
	p("ADJSP $%d", -l.stack)
	p("POPFQ")
	p("POPQ BP")
	p("RET")
}
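
// For reference, the start of the generated amd64 routine looks like this
// (illustrative excerpt; the frame size follows from the list above:
// 14 GP registers at 8 bytes plus 16 X registers at 16 bytes = 368):
//
//	TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
//		PUSHQ BP
//		MOVQ SP, BP
//		// Save flags before clobbering them
//		PUSHFQ
//		// obj doesn't understand ADD/SUB on SP, but does understand ADJSP
//		ADJSP $368
//		// But vet doesn't know ADJSP, so suppress vet stack checking
//		NOP SP
//		MOVQ AX, 0(SP)
//		MOVQ CX, 8(SP)
//		...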

func genARM() {
	// Add integer registers R0-R12.
	// R13 (SP), R14 (LR), R15 (PC) are special and not saved here.
	var l = layout{sp: "R13", stack: 4} // add LR slot
	for i := 0; i <= 12; i++ {
		reg := fmt.Sprintf("R%d", i)
		if i == 10 {
			continue // R10 is g register, no need to save/restore
		}
		l.add("MOVW", reg, 4)
	}
	// Add flag register.
	l.addSpecial(
		"MOVW CPSR, R0\nMOVW R0, %d(R13)",
		"MOVW %d(R13), R0\nMOVW R0, CPSR",
		4)

	// Add floating point registers F0-F15 and flag register.
	var lfp = layout{stack: l.stack, sp: "R13"}
	lfp.addSpecial(
		"MOVW FPCR, R0\nMOVW R0, %d(R13)",
		"MOVW %d(R13), R0\nMOVW R0, FPCR",
		4)
	for i := 0; i <= 15; i++ {
		reg := fmt.Sprintf("F%d", i)
		lfp.add("MOVD", reg, 8)
	}

	p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR
	l.save()
	p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp") // test goarm, and skip FP registers if goarm=5.
	lfp.save()
	label("nofp:")
	p("CALL ·asyncPreempt2(SB)")
	p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp2") // test goarm, and skip FP registers if goarm=5.
	lfp.restore()
	label("nofp2:")
	l.restore()
	p("MOVW %d(R13), R14", lfp.stack)     // sigctxt.pushCall pushes LR on stack, restore it
	p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall)
	p("UNDEF")                            // shouldn't get here
}
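
// The resulting arm frame is 188 bytes (a tally of the sizes above):
// LR at 0(R13), R0-R12 except R10 at offsets 4 through 48, CPSR at 52,
// FPCR at 56, and F0-F15 from 60 up, with the floating point section
// skipped entirely when goarm is less than 6 (i.e. goarm=5).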

func genWasm() {
	p("// No async preemption on wasm")
	p("UNDEF")
}

func notImplemented() {
	p("// Not implemented yet")
	p("JMP ·abort(SB)")
}