// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//
// System calls and other sys.stuff for AMD64, FreeBSD
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"
#include "cgo/abi_amd64.h"
TEXT runtime·sys_umtx_op(SB),NOSPLIT,$0
MOVQ addr+0(FP), DI
MOVL mode+8(FP), SI
MOVL val+12(FP), DX
MOVQ uaddr1+16(FP), R10
MOVQ ut+24(FP), R8
MOVL $454, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, ret+32(FP)
RET
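// thr_new (syscall 455) creates a new kernel thread; the runtime points it at
// thr_start below. Returns a negative errno on failure.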
TEXT runtime·thr_new(SB),NOSPLIT,$0
MOVQ param+0(FP), DI
MOVL size+8(FP), SI
MOVL $455, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, ret+16(FP)
RET
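// thr_start is the entry point for threads created by thr_new.
// DI carries the new thread's m.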
TEXT runtime·thr_start(SB),NOSPLIT,$0
MOVQ DI, R13 // m
// set up FS to point at m->tls
LEAQ m_tls(R13), DI
CALL runtime·settls(SB) // smashes DI
// set up m, g
get_tls(CX)
MOVQ m_g0(R13), DI
MOVQ R13, g_m(DI)
MOVQ DI, g(CX)
CALL runtime·stackcheck(SB)
CALL runtime·mstart(SB)
MOVQ 0, AX // crash (not reached)
// Exit the entire program (like C exit)
TEXT runtime·exit(SB),NOSPLIT,$-8
MOVL code+0(FP), DI // arg 1 exit status
MOVL $1, AX
SYSCALL
MOVL $0xf1, 0xf1 // crash
RET
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $0, DI // arg 1 long *state
MOVL $431, AX // thr_exit
SYSCALL
MOVL $0xf1, 0xf1 // crash
JMP 0(PC)
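// open(2), syscall 5; returns the new fd, or -1 on error.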
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
MOVL mode+8(FP), SI // arg 2 flags
MOVL perm+12(FP), DX // arg 3 mode
MOVL $5, AX
SYSCALL
JCC 2(PC)
MOVL $-1, AX
MOVL AX, ret+16(FP)
RET
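// closefd closes fd via close(2), syscall 6; returns -1 on error.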
TEXT runtime·closefd(SB),NOSPLIT,$-8
MOVL fd+0(FP), DI // arg 1 fd
MOVL $6, AX
SYSCALL
JCC 2(PC)
MOVL $-1, AX
MOVL AX, ret+8(FP)
RET
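// read(2), syscall 3; returns the number of bytes read, or a negative errno.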
TEXT runtime·read(SB),NOSPLIT,$-8
MOVL fd+0(FP), DI // arg 1 fd
MOVQ p+8(FP), SI // arg 2 buf
MOVL n+16(FP), DX // arg 3 count
MOVL $3, AX
SYSCALL
JCC 2(PC)
NEGQ AX // caller expects negative errno
MOVL AX, ret+24(FP)
RET
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
MOVL flags+0(FP), SI
MOVL $542, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, errno+16(FP)
RET
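// write1 wraps write(2), syscall 4; returns the number of bytes written,
// or a negative errno.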
TEXT runtime·write1(SB),NOSPLIT,$-8
MOVQ fd+0(FP), DI // arg 1 fd
MOVQ p+8(FP), SI // arg 2 buf
MOVL n+16(FP), DX // arg 3 count
MOVL $4, AX
SYSCALL
JCC 2(PC)
NEGQ AX // caller expects negative errno
MOVL AX, ret+24(FP)
RET
TEXT runtime·thr_self(SB),NOSPLIT,$0-8
// thr_self(&0(FP))
LEAQ ret+0(FP), DI // arg 1
MOVL $432, AX
SYSCALL
RET
TEXT runtime·thr_kill(SB),NOSPLIT,$0-16
// thr_kill(tid, sig)
MOVQ tid+0(FP), DI // arg 1 id
MOVQ sig+8(FP), SI // arg 2 sig
MOVL $433, AX
SYSCALL
RET
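// raiseproc sends sig to the whole process: kill(getpid(), sig).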
TEXT runtime·raiseproc(SB),NOSPLIT,$0
// getpid
MOVL $20, AX
SYSCALL
// kill(self, sig)
MOVQ AX, DI // arg 1 pid
MOVL sig+0(FP), SI // arg 2 sig
MOVL $37, AX
SYSCALL
RET
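// setitimer(2), syscall 83; arms or clears an interval timer for the process.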
TEXT runtime·setitimer(SB), NOSPLIT, $-8
MOVL mode+0(FP), DI
MOVQ new+8(FP), SI
MOVQ old+16(FP), DX
MOVL $83, AX
SYSCALL
RET
// func fallback_walltime() (sec int64, nsec int32)
TEXT runtime·fallback_walltime(SB), NOSPLIT, $32-12
MOVL $232, AX // clock_gettime
MOVQ $0, DI // CLOCK_REALTIME
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
MOVQ 16(SP), DX // nsec
// sec is in AX, nsec in DX
MOVQ AX, sec+0(FP)
MOVL DX, nsec+8(FP)
RET
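// func fallback_nanotime() int64
// Monotonic nanoseconds via clock_gettime(CLOCK_MONOTONIC).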
TEXT runtime·fallback_nanotime(SB), NOSPLIT, $32-8
MOVL $232, AX
MOVQ $4, DI // CLOCK_MONOTONIC
LEAQ 8(SP), SI
SYSCALL
MOVQ 8(SP), AX // sec
MOVQ 16(SP), DX // nsec
// sec is in AX, nsec in DX
// return nsec in AX
IMULQ $1000000000, AX
ADDQ DX, AX
MOVQ AX, ret+0(FP)
RET
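// asmSigaction issues sigaction(2), syscall 416, directly; returns -1 on failure.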
TEXT runtime·asmSigaction(SB),NOSPLIT,$0
MOVQ sig+0(FP), DI // arg 1 sig
MOVQ new+8(FP), SI // arg 2 act
MOVQ old+16(FP), DX // arg 3 oact
MOVL $416, AX
SYSCALL
JCC 2(PC)
MOVL $-1, AX
MOVL AX, ret+24(FP)
RET
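// callCgoSigaction installs a signal handler through the C function pointed to
// by _cgo_sigaction, realigning the stack to the C ABI around the call.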
TEXT runtime·callCgoSigaction(SB),NOSPLIT,$16
MOVQ sig+0(FP), DI // arg 1 sig
MOVQ new+8(FP), SI // arg 2 act
MOVQ old+16(FP), DX // arg 3 oact
MOVQ _cgo_sigaction(SB), AX
MOVQ SP, BX // callee-saved
ANDQ $~15, SP // alignment as per amd64 psABI
CALL AX
MOVQ BX, SP
MOVL AX, ret+24(FP)
RET
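// sigfwd forwards a signal to a C handler: fn(sig, info, ctx), called with the
// stack aligned for the C ABI.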
TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
MOVQ fn+0(FP), AX
MOVL sig+8(FP), DI
MOVQ info+16(FP), SI
MOVQ ctx+24(FP), DX
PUSHQ BP
MOVQ SP, BP
ANDQ $~15, SP // alignment for x86_64 ABI
CALL AX
MOVQ BP, SP
POPQ BP
RET
// Called using C ABI.
TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
// Call into the Go signal handler
NOP SP // disable vet stack checking
ADJSP $24
MOVQ DI, 0(SP) // sig
MOVQ SI, 8(SP) // info
MOVQ DX, 16(SP) // ctx
CALL ·sigtrampgo(SB)
ADJSP $-24
POP_REGS_HOST_TO_ABI0()
RET
// Called using C ABI.
TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
// Call into the Go signal handler
NOP SP // disable vet stack checking
ADJSP $24
MOVL DI, 0(SP) // sig
MOVQ SI, 8(SP) // info
MOVQ DX, 16(SP) // ctx
CALL ·sigprofNonGo(SB)
ADJSP $-24
POP_REGS_HOST_TO_ABI0()
RET
// Used instead of sigtramp in programs that use cgo.
// Arguments from kernel are in DI, SI, DX.
TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
// If no traceback function, do usual sigtramp.
MOVQ runtime·cgoTraceback(SB), AX
TESTQ AX, AX
JZ sigtramp
// If no traceback support function, which means that
// runtime/cgo was not linked in, do usual sigtramp.
MOVQ _cgo_callers(SB), AX
TESTQ AX, AX
JZ sigtramp
// Figure out if we are currently in a cgo call.
// If not, just do usual sigtramp.
get_tls(CX)
MOVQ g(CX),AX
TESTQ AX, AX
JZ sigtrampnog // g == nil
MOVQ g_m(AX), AX
TESTQ AX, AX
JZ sigtramp // g.m == nil
MOVL m_ncgo(AX), CX
TESTL CX, CX
JZ sigtramp // g.m.ncgo == 0
MOVQ m_curg(AX), CX
TESTQ CX, CX
JZ sigtramp // g.m.curg == nil
MOVQ g_syscallsp(CX), CX
TESTQ CX, CX
JZ sigtramp // g.m.curg.syscallsp == 0
MOVQ m_cgoCallers(AX), R8
TESTQ R8, R8
JZ sigtramp // g.m.cgoCallers == nil
MOVL m_cgoCallersUse(AX), CX
TESTL CX, CX
JNZ sigtramp // g.m.cgoCallersUse != 0
// Jump to a function in runtime/cgo.
// That function, written in C, will call the user's traceback
// function with proper unwind info, and will then call back here.
// The first three arguments, and the fifth, are already in registers.
// Set the two remaining arguments now.
MOVQ runtime·cgoTraceback(SB), CX
MOVQ $runtime·sigtramp(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
sigtramp:
JMP runtime·sigtramp(SB)
sigtrampnog:
// Signal arrived on a non-Go thread. If this is SIGPROF, get a
// stack trace.
CMPL DI, $27 // 27 == SIGPROF
JNZ sigtramp
// Lock sigprofCallersUse.
MOVL $0, AX
MOVL $1, CX
MOVQ $runtime·sigprofCallersUse(SB), R11
LOCK
CMPXCHGL CX, 0(R11)
JNZ sigtramp // Skip stack trace if already locked.
// Jump to the traceback function in runtime/cgo.
// It will call back to sigprofNonGo, via sigprofNonGoWrapper, to convert
// the arguments to the Go calling convention.
// First three arguments to traceback function are in registers already.
MOVQ runtime·cgoTraceback(SB), CX
MOVQ $runtime·sigprofCallers(SB), R8
MOVQ $runtime·sigprofNonGoWrapper<>(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
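// mmap(2), syscall 477. On success p holds the mapped address and err is 0;
// on failure p is 0 and err holds the errno.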
TEXT runtime·mmap(SB),NOSPLIT,$0
MOVQ addr+0(FP), DI // arg 1 addr
MOVQ n+8(FP), SI // arg 2 len
MOVL prot+16(FP), DX // arg 3 prot
MOVL flags+20(FP), R10 // arg 4 flags
MOVL fd+24(FP), R8 // arg 5 fd
MOVL off+28(FP), R9 // arg 6 offset
MOVL $477, AX
SYSCALL
JCC ok
MOVQ $0, p+32(FP)
MOVQ AX, err+40(FP)
RET
ok:
MOVQ AX, p+32(FP)
MOVQ $0, err+40(FP)
RET
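// munmap(2), syscall 73; crashes if the kernel reports an error.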
TEXT runtime·munmap(SB),NOSPLIT,$0
MOVQ addr+0(FP), DI // arg 1 addr
MOVQ n+8(FP), SI // arg 2 len
MOVL $73, AX
SYSCALL
JCC 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
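// madvise(2), syscall 75; returns -1 on failure.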
TEXT runtime·madvise(SB),NOSPLIT,$0
MOVQ addr+0(FP), DI
MOVQ n+8(FP), SI
MOVL flags+16(FP), DX
MOVQ $75, AX // madvise
SYSCALL
JCC 2(PC)
MOVL $-1, AX
MOVL AX, ret+24(FP)
RET
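// sigaltstack(2), syscall 53; crashes on failure.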
TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
MOVQ new+0(FP), DI
MOVQ old+8(FP), SI
MOVQ $53, AX
SYSCALL
JCC 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
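// usleep sleeps for usec microseconds: the value is split into a timespec
// (seconds plus nanoseconds) and passed to nanosleep(2).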
TEXT runtime·usleep(SB),NOSPLIT,$16
MOVL $0, DX
MOVL usec+0(FP), AX
MOVL $1000000, CX
DIVL CX
MOVQ AX, 0(SP) // tv_sec
MOVL $1000, AX
MULL DX
MOVQ AX, 8(SP) // tv_nsec
MOVQ SP, DI // arg 1 - rqtp
MOVQ $0, SI // arg 2 - rmtp
MOVL $240, AX // sys_nanosleep
SYSCALL
RET
// set tls base to DI
TEXT runtime·settls(SB),NOSPLIT,$8
ADDQ $8, DI // adjust for ELF: wants to use -8(FS) for g and m
MOVQ DI, 0(SP)
MOVQ SP, SI
MOVQ $129, DI // AMD64_SET_FSBASE
MOVQ $165, AX // sysarch
SYSCALL
JCC 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
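// sysctl wraps __sysctl(2), syscall 202; returns 0 on success or a negative errno.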
TEXT runtime·sysctl(SB),NOSPLIT,$0
MOVQ mib+0(FP), DI // arg 1 - name
MOVL miblen+8(FP), SI // arg 2 - namelen
MOVQ out+16(FP), DX // arg 3 - oldp
MOVQ size+24(FP), R10 // arg 4 - oldlenp
MOVQ dst+32(FP), R8 // arg 5 - newp
MOVQ ndst+40(FP), R9 // arg 6 - newlen
MOVQ $202, AX // sys___sysctl
SYSCALL
JCC 4(PC)
NEGQ AX
MOVL AX, ret+48(FP)
RET
MOVL $0, AX
MOVL AX, ret+48(FP)
RET
TEXT runtime·osyield(SB),NOSPLIT,$-4
MOVL $331, AX // sys_sched_yield
SYSCALL
RET
TEXT runtime·sigprocmask(SB),NOSPLIT,$0
MOVL how+0(FP), DI // arg 1 - how
MOVQ new+8(FP), SI // arg 2 - set
MOVQ old+16(FP), DX // arg 3 - oset
MOVL $340, AX // sys_sigprocmask
SYSCALL
JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
// int32 runtime·kqueue(void);
TEXT runtime·kqueue(SB),NOSPLIT,$0
MOVQ $0, DI
MOVQ $0, SI
MOVQ $0, DX
MOVL $362, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, ret+0(FP)
RET
// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
TEXT runtime·kevent(SB),NOSPLIT,$0
MOVL kq+0(FP), DI
MOVQ ch+8(FP), SI
MOVL nch+16(FP), DX
MOVQ ev+24(FP), R10
MOVL nev+32(FP), R8
MOVQ ts+40(FP), R9
MOVL $363, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, ret+48(FP)
RET
// void runtime·closeonexec(int32 fd);
TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVL fd+0(FP), DI // fd
MOVQ $2, SI // F_SETFD
MOVQ $1, DX // FD_CLOEXEC
MOVL $92, AX // fcntl
SYSCALL
RET
// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
TEXT runtime·cpuset_getaffinity(SB), NOSPLIT, $0-44
MOVQ level+0(FP), DI
MOVQ which+8(FP), SI
MOVQ id+16(FP), DX
MOVQ size+24(FP), R10
MOVQ mask+32(FP), R8
MOVL $487, AX
SYSCALL
JCC 2(PC)
NEGQ AX
MOVL AX, ret+40(FP)
RET