compiler, reflect, runtime: Use static chain for closures.

Change from using __go_set_closure to passing the closure
value in the static chain field.  Uses the new backend support
for setting the static chain of a call, the new
__builtin_call_with_static_chain built-in for doing the same
from C, and the new libffi support for Go closures.

The old architecture-specific support for reflect.MakeFunc is
removed, replaced by the libffi support.

All work done by Richard Henderson.
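
As a brief illustration (not part of this change), the C side relies on the
GCC built-in named above: __builtin_call_with_static_chain performs a call
with an arbitrary pointer loaded into the static chain register, which is
how a closure value can reach a Go function from C without __go_set_closure.
A minimal, hypothetical sketch (the struct and names below are made up):

    /* Call a Go function value from C with its closure in the static chain. */
    struct go_func_val { void (*fn) (int); /* closure data follows */ };

    void
    call_go_func (struct go_func_val *closure, int arg)
    {
      /* First operand is the call itself, second is the static chain value. */
      __builtin_call_with_static_chain (closure->fn (arg), closure);
    }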

	* go-gcc.cc (Gcc_backend::call_expression): Add chain_expr argument.
	(Gcc_backend::static_chain_variable): New method.

From-SVN: r219776
Richard Henderson 2015-01-16 14:58:53 -08:00 committed by Ian Lance Taylor
parent 21cb351825
commit 38bf819a5f
28 changed files with 174 additions and 2380 deletions


@ -1,3 +1,8 @@
2015-01-16  Richard Henderson  <rth@redhat.com>

	* go-gcc.cc (Gcc_backend::call_expression): Add chain_expr argument.
	(Gcc_backend::static_chain_variable): New method.

2015-01-09  Ian Lance Taylor  <iant@google.com>

	* config-lang.in (lang_dirs): Define.


@ -322,7 +322,7 @@ class Gcc_backend : public Backend
Bexpression*
call_expression(Bexpression* fn, const std::vector<Bexpression*>& args,
Location);
Bexpression* static_chain, Location);
// Statements.
@ -402,6 +402,9 @@ class Gcc_backend : public Backend
parameter_variable(Bfunction*, const std::string&, Btype*, bool,
Location);
Bvariable*
static_chain_variable(Bfunction*, const std::string&, Btype*, Location);
Bvariable*
temporary_variable(Bfunction*, Bblock*, Btype*, Bexpression*, bool,
Location, Bstatement**);
@ -1808,7 +1811,7 @@ Gcc_backend::array_index_expression(Bexpression* array, Bexpression* index,
Bexpression*
Gcc_backend::call_expression(Bexpression* fn_expr,
const std::vector<Bexpression*>& fn_args,
Location location)
Bexpression* chain_expr, Location location)
{
tree fn = fn_expr->get_tree();
if (fn == error_mark_node || TREE_TYPE(fn) == error_mark_node)
@ -1868,6 +1871,9 @@ Gcc_backend::call_expression(Bexpression* fn_expr,
excess_type != NULL_TREE ? excess_type : rettype,
fn, nargs, args);
if (chain_expr)
CALL_EXPR_STATIC_CHAIN (ret) = chain_expr->get_tree();
if (excess_type != NULL_TREE)
{
// Calling convert here can undo our excess precision change.
@ -2489,6 +2495,40 @@ Gcc_backend::parameter_variable(Bfunction* function, const std::string& name,
return new Bvariable(decl);
}
// Make a static chain variable.
Bvariable*
Gcc_backend::static_chain_variable(Bfunction* function, const std::string& name,
Btype* btype, Location location)
{
tree type_tree = btype->get_tree();
if (type_tree == error_mark_node)
return this->error_variable();
tree decl = build_decl(location.gcc_location(), PARM_DECL,
get_identifier_from_string(name), type_tree);
tree fndecl = function->get_tree();
DECL_CONTEXT(decl) = fndecl;
DECL_ARG_TYPE(decl) = type_tree;
TREE_USED(decl) = 1;
DECL_ARTIFICIAL(decl) = 1;
DECL_IGNORED_P(decl) = 1;
TREE_READONLY(decl) = 1;
struct function *f = DECL_STRUCT_FUNCTION(fndecl);
if (f == NULL)
{
push_struct_function(fndecl);
pop_cfun();
f = DECL_STRUCT_FUNCTION(fndecl);
}
gcc_assert(f->static_chain_decl == NULL);
f->static_chain_decl = decl;
DECL_STATIC_CHAIN(fndecl) = 1;
go_preserve_from_gc(decl);
return new Bvariable(decl);
}
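A conceptual aside, not part of the patch: the PARM_DECL installed as
static_chain_decl above makes the closure reach the function body through the
same chain GCC already uses for the GNU C nested-function extension, e.g.:

    /* GNU C sketch: 'x' reaches add() via the static chain register,
       just as a Go closure now reaches its function.  */
    int
    outer (int x)
    {
      int add (int y) { return x + y; }  /* nested function, GNU extension */
      return add (1);
    }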
// Make a temporary variable.
Bvariable*


@ -375,7 +375,7 @@ class Backend
// Create an expression for a call to FN with ARGS.
virtual Bexpression*
call_expression(Bexpression* fn, const std::vector<Bexpression*>& args,
Location) = 0;
Bexpression* static_chain, Location) = 0;
// Statements.
@ -529,6 +529,11 @@ class Backend
Btype* type, bool is_address_taken,
Location location) = 0;
// Create a static chain parameter. This is the closure parameter.
virtual Bvariable*
static_chain_variable(Bfunction* function, const std::string& name,
Btype* type, Location location) = 0;
// Create a temporary variable. A temporary variable has no name,
// just a type. We pass in FUNCTION and BLOCK in case they are
// needed. If INIT is not NULL, the variable should be initialized


@ -6321,6 +6321,7 @@ Bound_method_expression::create_thunk(Gogo* gogo, const Method* method,
Variable* cvar = new Variable(closure_type, NULL, false, false, false, loc);
cvar->set_is_used();
cvar->set_is_closure();
Named_object* cp = Named_object::make_variable("$closure", NULL, cvar);
new_no->func_value()->set_closure_var(cp);
@ -9328,19 +9329,11 @@ Call_expression::do_get_backend(Translate_context* context)
fn_args[0] = first_arg->get_backend(context);
}
if (!has_closure_arg)
go_assert(closure == NULL);
Bexpression* bclosure = NULL;
if (has_closure_arg)
bclosure = closure->get_backend(context);
else
{
// Pass the closure argument by calling the runtime function
// __go_set_closure. In the order_evaluations pass we have
// ensured that if any parameters contain call expressions, they
// will have been moved out to temporary variables.
go_assert(closure != NULL);
Expression* set_closure =
Runtime::make_call(Runtime::SET_CLOSURE, location, 1, closure);
fn = Expression::make_compound(set_closure, fn, location);
}
go_assert(closure == NULL);
Bexpression* bfn = fn->get_backend(context);
@ -9356,7 +9349,8 @@ Call_expression::do_get_backend(Translate_context* context)
bfn = gogo->backend()->convert_expression(bft, bfn, location);
}
Bexpression* call = gogo->backend()->call_expression(bfn, fn_args, location);
Bexpression* call = gogo->backend()->call_expression(bfn, fn_args,
bclosure, location);
if (this->results_ != NULL)
{
@ -11132,6 +11126,7 @@ Interface_field_reference_expression::create_thunk(Gogo* gogo,
Variable* cvar = new Variable(closure_type, NULL, false, false, false, loc);
cvar->set_is_used();
cvar->set_is_closure();
Named_object* cp = Named_object::make_variable("$closure", NULL, cvar);
new_no->func_value()->set_closure_var(cp);


@ -698,7 +698,8 @@ Gogo::init_imports(std::vector<Bstatement*>& init_stmts)
Bexpression* pfunc_code =
this->backend()->function_code_expression(pfunc, unknown_loc);
Bexpression* pfunc_call =
this->backend()->call_expression(pfunc_code, empty_args, unknown_loc);
this->backend()->call_expression(pfunc_code, empty_args,
NULL, unknown_loc);
init_stmts.push_back(this->backend()->expression_statement(pfunc_call));
}
}
@ -1413,7 +1414,7 @@ Gogo::write_globals()
this->backend()->function_code_expression(initfn, func_loc);
Bexpression* call = this->backend()->call_expression(func_code,
empty_args,
func_loc);
NULL, func_loc);
init_stmts.push_back(this->backend()->expression_statement(call));
}
@ -3915,6 +3916,7 @@ Build_recover_thunks::function(Named_object* orig_no)
Variable* orig_closure_var = orig_closure_no->var_value();
Variable* new_var = new Variable(orig_closure_var->type(), NULL, false,
false, false, location);
new_var->set_is_closure();
snprintf(buf, sizeof buf, "closure.%u", count);
++count;
Named_object* new_closure_no = Named_object::make_variable(buf, NULL,
@ -4518,6 +4520,7 @@ Function::closure_var()
Variable* var = new Variable(Type::make_pointer_type(struct_type),
NULL, false, false, false, loc);
var->set_is_used();
var->set_is_closure();
this->closure_var_ = Named_object::make_variable("$closure", NULL, var);
// Note that the new variable is not in any binding contour.
}
@ -5188,18 +5191,12 @@ Function::build(Gogo* gogo, Named_object* named_function)
return;
}
// If we need a closure variable, fetch it by calling a runtime
// function. The caller will have called __go_set_closure before
// the function call.
// If we need a closure variable, make sure to create it.
// It gets installed in the function as a side effect of creation.
if (this->closure_var_ != NULL)
{
Bvariable* closure_bvar =
this->closure_var_->get_backend_variable(gogo, named_function);
vars.push_back(closure_bvar);
Expression* closure =
Runtime::make_call(Runtime::GET_CLOSURE, this->location_, 0);
var_inits.push_back(closure->get_backend(&context));
go_assert(this->closure_var_->var_value()->is_closure());
this->closure_var_->get_backend_variable(gogo, named_function);
}
if (this->block_ != NULL)
@ -5733,7 +5730,8 @@ Variable::Variable(Type* type, Expression* init, bool is_global,
Location location)
: type_(type), init_(init), preinit_(NULL), location_(location),
backend_(NULL), is_global_(is_global), is_parameter_(is_parameter),
is_receiver_(is_receiver), is_varargs_parameter_(false), is_used_(false),
is_closure_(false), is_receiver_(is_receiver),
is_varargs_parameter_(false), is_used_(false),
is_address_taken_(false), is_non_escaping_address_taken_(false),
seen_(false), init_is_lowered_(false), init_is_flattened_(false),
type_from_init_tuple_(false), type_from_range_index_(false),
@ -6287,7 +6285,10 @@ Variable::get_backend_variable(Gogo* gogo, Named_object* function,
Bfunction* bfunction = function->func_value()->get_decl();
bool is_address_taken = (this->is_non_escaping_address_taken_
&& !this->is_in_heap());
if (is_parameter)
if (this->is_closure())
bvar = backend->static_chain_variable(bfunction, n, btype,
this->location_);
else if (is_parameter)
bvar = backend->parameter_variable(bfunction, n, btype,
is_address_taken,
this->location_);


@ -1364,6 +1364,18 @@ class Variable
is_parameter() const
{ return this->is_parameter_; }
// Return whether this is a closure (static chain) parameter.
bool
is_closure() const
{ return this->is_closure_; }
// Change this parameter to be a closure.
void
set_is_closure()
{
this->is_closure_ = true;
}
// Return whether this is the receiver parameter of a method.
bool
is_receiver() const
@ -1585,6 +1597,8 @@ class Variable
bool is_global_ : 1;
// Whether this is a function parameter.
bool is_parameter_ : 1;
// Whether this is a closure parameter.
bool is_closure_ : 1;
// Whether this is the receiver parameter of a method.
bool is_receiver_ : 1;
// Whether this is the varargs parameter of a function.


@ -230,12 +230,6 @@ DEF_GO_RUNTIME(NEW_NOPOINTERS, "__go_new_nopointers", P2(TYPE, UINTPTR), R1(POIN
// Start a new goroutine.
DEF_GO_RUNTIME(GO, "__go_go", P2(FUNC_PTR, POINTER), R0())
// Get the function closure.
DEF_GO_RUNTIME(GET_CLOSURE, "__go_get_closure", P0(), R1(POINTER))
// Set the function closure.
DEF_GO_RUNTIME(SET_CLOSURE, "__go_set_closure", P1(POINTER), R0())
// Defer a function.
DEF_GO_RUNTIME(DEFER, "__go_defer", P3(BOOLPTR, FUNC_PTR, POINTER), R0())


@ -938,44 +938,10 @@ go_path_files = \
go/path/match.go \
go/path/path.go
if LIBGO_IS_X86_64
go_reflect_makefunc_file = \
go/reflect/makefuncgo_amd64.go
go_reflect_makefunc_s_file = \
go/reflect/makefunc_amd64.S
else
if LIBGO_IS_386
go_reflect_makefunc_file = \
go/reflect/makefuncgo_386.go
go_reflect_makefunc_s_file = \
go/reflect/makefunc_386.S
else
if LIBGO_IS_S390
go_reflect_makefunc_file = \
go/reflect/makefuncgo_s390.go
go_reflect_makefunc_s_file = \
go/reflect/makefunc_s390.c
else
if LIBGO_IS_S390X
go_reflect_makefunc_file = \
go/reflect/makefuncgo_s390x.go \
go/reflect/makefuncgo_s390.go
go_reflect_makefunc_s_file = \
go/reflect/makefunc_s390.c
else
go_reflect_makefunc_file =
go_reflect_makefunc_s_file = \
go/reflect/makefunc_dummy.c
endif
endif
endif
endif
go_reflect_files = \
go/reflect/deepequal.go \
go/reflect/makefunc.go \
go/reflect/makefunc_ffi.go \
$(go_reflect_makefunc_file) \
go/reflect/type.go \
go/reflect/value.go
go_reflect_makefunc_c_file = \
@ -1897,7 +1863,6 @@ libgo_go_objs = \
os.lo \
path.lo \
reflect-go.lo \
reflect/makefunc.lo \
reflect/makefunc_ffi_c.lo \
regexp.lo \
runtime-go.lo \
@ -2316,9 +2281,6 @@ reflect-go.lo: $(go_reflect_files)
$(BUILDPACKAGE)
reflect/check: $(CHECK_DEPS)
@$(CHECK)
reflect/makefunc.lo: $(go_reflect_makefunc_s_file)
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<
reflect/makefunc_ffi_c.lo: $(go_reflect_makefunc_c_file)
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<


@ -141,11 +141,11 @@ am__DEPENDENCIES_1 =
am__DEPENDENCIES_2 = bufio.lo bytes.lo bytes/index.lo crypto.lo \
encoding.lo errors.lo expvar.lo flag.lo fmt.lo hash.lo html.lo \
image.lo io.lo log.lo math.lo mime.lo net.lo os.lo path.lo \
reflect-go.lo reflect/makefunc.lo reflect/makefunc_ffi_c.lo \
regexp.lo runtime-go.lo sort.lo strconv.lo strings.lo \
strings/index.lo sync.lo syscall.lo syscall/errno.lo \
syscall/signame.lo syscall/wait.lo testing.lo time-go.lo \
unicode.lo archive/tar.lo archive/zip.lo compress/bzip2.lo \
reflect-go.lo reflect/makefunc_ffi_c.lo regexp.lo \
runtime-go.lo sort.lo strconv.lo strings.lo strings/index.lo \
sync.lo syscall.lo syscall/errno.lo syscall/signame.lo \
syscall/wait.lo testing.lo time-go.lo unicode.lo \
archive/tar.lo archive/zip.lo compress/bzip2.lo \
compress/flate.lo compress/gzip.lo compress/lzw.lo \
compress/zlib.lo container/heap.lo container/list.lo \
container/ring.lo crypto/aes.lo crypto/cipher.lo crypto/des.lo \
@ -1125,40 +1125,10 @@ go_path_files = \
go/path/match.go \
go/path/path.go
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_FALSE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_file =
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_file = \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefuncgo_s390x.go \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefuncgo_s390.go
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390_TRUE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_file = \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390_TRUE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefuncgo_s390.go
@LIBGO_IS_386_TRUE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_file = \
@LIBGO_IS_386_TRUE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefuncgo_386.go
@LIBGO_IS_X86_64_TRUE@go_reflect_makefunc_file = \
@LIBGO_IS_X86_64_TRUE@ go/reflect/makefuncgo_amd64.go
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_FALSE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_s_file = \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_FALSE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefunc_dummy.c
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_s_file = \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390X_TRUE@@LIBGO_IS_S390_FALSE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefunc_s390.c
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390_TRUE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_s_file = \
@LIBGO_IS_386_FALSE@@LIBGO_IS_S390_TRUE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefunc_s390.c
@LIBGO_IS_386_TRUE@@LIBGO_IS_X86_64_FALSE@go_reflect_makefunc_s_file = \
@LIBGO_IS_386_TRUE@@LIBGO_IS_X86_64_FALSE@ go/reflect/makefunc_386.S
@LIBGO_IS_X86_64_TRUE@go_reflect_makefunc_s_file = \
@LIBGO_IS_X86_64_TRUE@ go/reflect/makefunc_amd64.S
go_reflect_files = \
go/reflect/deepequal.go \
go/reflect/makefunc.go \
go/reflect/makefunc_ffi.go \
$(go_reflect_makefunc_file) \
go/reflect/type.go \
go/reflect/value.go
@ -1963,7 +1933,6 @@ libgo_go_objs = \
os.lo \
path.lo \
reflect-go.lo \
reflect/makefunc.lo \
reflect/makefunc_ffi_c.lo \
regexp.lo \
runtime-go.lo \
@ -4657,9 +4626,6 @@ reflect-go.lo: $(go_reflect_files)
$(BUILDPACKAGE)
reflect/check: $(CHECK_DEPS)
@$(CHECK)
reflect/makefunc.lo: $(go_reflect_makefunc_s_file)
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<
reflect/makefunc_ffi_c.lo: $(go_reflect_makefunc_c_file)
@$(MKDIR_P) reflect
$(LTCOMPILE) -c -o $@ $<


@ -7,25 +7,24 @@
package reflect
import (
"runtime"
"unsafe"
)
// makeFuncImpl is the closure value implementing the function
// returned by MakeFunc.
type makeFuncImpl struct {
code uintptr
typ *funcType
fn func([]Value) []Value
// These first three words are laid out like ffi_go_closure.
code uintptr
ffi_cif unsafe.Pointer
ffi_fun func(unsafe.Pointer, unsafe.Pointer)
typ *funcType
fn func([]Value) []Value
// For gccgo we use the same entry point for functions and for
// method values.
method int
rcvr Value
// When using FFI, hold onto the FFI closure for the garbage
// collector.
ffi *ffiData
}
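For reference (not part of this change): with FFI_GO_CLOSURES, libffi declares
ffi_go_closure roughly as below, which is why the first three words of
makeFuncImpl must line up with it; the field names are quoted from memory and
may differ slightly.

    /* Approximate layout of libffi's ffi_go_closure (see ffi.h). */
    typedef struct {
      void *tramp;                                       /* code pointer callers jump to */
      ffi_cif *cif;                                      /* call interface descriptor */
      void (*fun) (ffi_cif *, void *, void **, void *);  /* reflect's ffi_callback */
    } ffi_go_closure;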
// MakeFunc returns a new function of the given Type
@ -58,37 +57,17 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
t := typ.common()
ftyp := (*funcType)(unsafe.Pointer(t))
var code uintptr
var ffi *ffiData
switch runtime.GOARCH {
case "amd64", "386", "s390", "s390x":
// Indirect Go func value (dummy) to obtain actual
// code address. (A Go func value is a pointer to a C
// function pointer. http://golang.org/s/go11func.)
dummy := makeFuncStub
code = **(**uintptr)(unsafe.Pointer(&dummy))
default:
code, ffi = makeFuncFFI(ftyp, fn)
}
impl := &makeFuncImpl{
code: code,
typ: ftyp,
fn: fn,
method: -1,
ffi: ffi,
}
makeFuncFFI(ftyp, impl)
return Value{t, unsafe.Pointer(&impl), flag(Func) | flagIndir}
}
// makeFuncStub is an assembly function that is the code half of
// the function returned from MakeFunc. It expects a *callReflectFunc
// as its context register, and its job is to invoke callReflect(ctxt, frame)
// where ctxt is the context register and frame is a pointer to the first
// word in the passed-in argument frame.
func makeFuncStub()
// makeMethodValue converts v from the rcvr+method index representation
// of a method value to an actual method func value, which is
// basically the receiver value with a special bit set, into a true
@ -123,16 +102,7 @@ func makeMethodValue(op string, v Value) Value {
rcvr: rcvr,
}
switch runtime.GOARCH {
case "amd64", "386":
// Indirect Go func value (dummy) to obtain actual
// code address. (A Go func value is a pointer to a C
// function pointer. http://golang.org/s/go11func.)
dummy := makeFuncStub
fv.code = **(**uintptr)(unsafe.Pointer(&dummy))
default:
fv.code, fv.ffi = makeFuncFFI(ftyp, fv.call)
}
makeFuncFFI(ftyp, fv)
return Value{ft, unsafe.Pointer(&fv), v.flag&flagRO | flag(Func) | flagIndir}
}
@ -158,16 +128,7 @@ func makeValueMethod(v Value) Value {
rcvr: v,
}
switch runtime.GOARCH {
case "amd64", "386", "s390", "s390x":
// Indirect Go func value (dummy) to obtain actual
// code address. (A Go func value is a pointer to a C
// function pointer. http://golang.org/s/go11func.)
dummy := makeFuncStub
impl.code = **(**uintptr)(unsafe.Pointer(&dummy))
default:
impl.code, impl.ffi = makeFuncFFI(ftyp, impl.call)
}
makeFuncFFI(ftyp, impl)
return Value{t, unsafe.Pointer(&impl), v.flag&flagRO | flag(Func) | flagIndir}
}


@ -1,230 +0,0 @@
/* Copyright 2013 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
MakeFunc 386 assembly code. */
#include "config.h"
.globl reflect.makeFuncStub
#ifdef __ELF__
.type reflect.makeFuncStub,@function
#endif
reflect.makeFuncStub:
.LFB1:
/* Go does not provide any equivalent to the regparm function
attribute, so on Go we do not need to worry about passing
parameters in registers. We just pass a pointer to the
arguments on the stack.
We do need to pick up the return values, though, so we pass
a pointer to a struct that looks like this.
struct {
esp uint32 // 0x0
eax uint32 // 0x4
st0 float64 // 0x8
sr bool // 0x10
sf bool // 0x11
}
The sr field is set by the function to a non-zero value if
the function takes a struct hidden pointer that must be
popped off the stack. */
pushl %ebp
.LCFI0:
movl %esp, %ebp
.LCFI1:
pushl %ebx /* In case this is PIC. */
subl $36, %esp /* Enough for args and to align stack. */
.LCFI2:
#ifdef __PIC__
call __x86.get_pc_thunk.bx
addl $_GLOBAL_OFFSET_TABLE_, %ebx
#endif
leal 8(%ebp), %eax /* Set esp field in struct. */
movl %eax, -24(%ebp)
/* For MakeFunc functions that call recover. */
movl 4(%ebp), %eax
movl %eax, (%esp)
#ifdef __PIC__
call __go_makefunc_can_recover@PLT
#else
call __go_makefunc_can_recover
#endif
#ifdef __PIC__
call __go_get_closure@PLT
#else
call __go_get_closure
#endif
movl %eax, 4(%esp)
leal -24(%ebp), %eax
movl %eax, (%esp)
#ifdef __PIC__
call reflect.MakeFuncStubGo@PLT
#else
call reflect.MakeFuncStubGo
#endif
/* MakeFunc functions can no longer call recover. */
#ifdef __PIC__
call __go_makefunc_returning@PLT
#else
call __go_makefunc_returning
#endif
/* Set return registers. */
movl -20(%ebp), %eax
cmpb $0, -7(%ebp)
je 2f
fldl -16(%ebp)
#ifdef __SSE2__
/* In case we are compiling with -msseregparm. This won't work
correctly if only SSE1 is supported, but that seems unlikely. */
movsd -16(%ebp), %xmm0
#endif
2:
movb -8(%ebp), %dl
addl $36, %esp
popl %ebx
.LCFI3:
popl %ebp
.LCFI4:
testb %dl,%dl
jne 1f
ret
1:
ret $4
.LFE1:
#ifdef __ELF__
.size reflect.makeFuncStub, . - reflect.makeFuncStub
#endif
#ifdef __PIC__
#ifdef HAVE_AS_COMDAT_GAS
.section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
#else
/* Sun as needs a different syntax. */
.section .text.__x86.get_pc_thunk.bx%__x86.get_pc_thunk.bx,"ax",@progbits
.group __x86.get_pc_thunk.bx,.text.__x86.get_pc_thunk.bx%__x86.get_pc_thunk.bx,#comdat
#endif
.globl __x86.get_pc_thunk.bx
.hidden __x86.get_pc_thunk.bx
#ifdef __ELF__
.type __x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
.LFB2:
movl (%esp), %ebx
ret
.LFE2:
#ifdef __ELF__
.size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif
#ifdef __ELF__
#if defined __PIC__
# if defined __sun__ && defined __svr4__
/* 32-bit Solaris 2/x86 uses datarel encoding for PIC. GNU ld before 2.22
doesn't correctly sort .eh_frame_hdr with mixed encodings, so match this. */
# define FDE_ENCODING 0x30 /* datarel */
# define FDE_ENCODE(X) X@GOTOFF
# else
# define FDE_ENCODING 0x1b /* pcrel sdata4 */
# if defined HAVE_AS_X86_PCREL
# define FDE_ENCODE(X) X-.
# else
# define FDE_ENCODE(X) X@rel
# endif
# endif
#else
# define FDE_ENCODING 0 /* absolute */
# define FDE_ENCODE(X) X
#endif
.section .eh_frame,EH_FRAME_FLAGS,@progbits
.Lframe1:
.long .LECIE1-.LSCIE1 /* Length of Common Information Entry */
.LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 0x1 /* .uleb128 0x1; CIE Code Alignment Factor */
.byte 0x7c /* .sleb128 -4; CIE Data Alignment Factor */
.byte 0x8 /* CIE RA Column */
.byte 0x1 /* .uleb128 0x1; Augmentation size */
.byte FDE_ENCODING
.byte 0xc /* DW_CFA_def_cfa */
.byte 0x4 /* .uleb128 0x4 */
.byte 0x4 /* .uleb128 0x4 */
.byte 0x88 /* DW_CFA_offset, column 0x8 */
.byte 0x1 /* .uleb128 0x1 */
.align 4
.LECIE1:
.LSFDE1:
.long .LEFDE1-.LASFDE1 /* FDE Length */
.LASFDE1:
.long .LASFDE1-.Lframe1 /* FDE CIE offset */
.long FDE_ENCODE(.LFB1) /* FDE initial location */
.long .LFE1-.LFB1 /* FDE address range */
.byte 0x0 /* .uleb128 0x0; Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI0-.LFB1
.byte 0xe /* DW_CFA_def_cfa_offset */
.byte 0x8 /* .uleb128 0x8 */
.byte 0x85 /* DW_CFA_offset, column 0x5 */
.byte 0x2 /* .uleb128 0x2 */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI1-.LCFI0
.byte 0xd /* DW_CFA_def_cfa_register */
.byte 0x5 /* .uleb128 0x5 */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI2-.LCFI1
.byte 0x83 /* .DW_CFA_offset, column 0x3 */
.byte 0x3 /* .uleb128 0x3 */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI3-.LCFI2
.byte 0xc3 /* DW_CFA_restore, column 0x3 */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI4-.LCFI3
.byte 0xc5 /* DW_CFA_restore, column 0x5 */
.byte 0xc /* DW_CFA_def_cfa */
.byte 0x4 /* .uleb128 0x4 */
.byte 0x4 /* .uleb128 0x4 */
.align 4
.LEFDE1:
#ifdef __PIC__
.LSFDE2:
.long .LEFDE2-.LASFDE2 /* FDE Length */
.LASFDE2:
.long .LASFDE2-.Lframe1 /* FDE CIE offset */
.long FDE_ENCODE(.LFB2) /* FDE initial location */
.long .LFE2-.LFB2 /* FDE address range */
.byte 0x0 /* .uleb128 0x0; Augmentation size */
.align 4
.LEFDE2:
#endif /* __PIC__ */
#endif /* __ELF__ */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif


@ -1,177 +0,0 @@
# Copyright 2013 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# MakeFunc amd64 assembly code.
#include "config.h"
.global reflect.makeFuncStub
#ifdef __ELF__
.type reflect.makeFuncStub,@function
#endif
reflect.makeFuncStub:
.LFB1:
# Store all the parameter registers in a struct that looks
# like:
# struct {
# rax uint64 // 0x0
# rdi uint64 // 0x8
# rsi uint64 // 0x10
# rdx uint64 // 0x18
# rcx uint64 // 0x20
# r8 uint64 // 0x28
# r9 uint64 // 0x30
# rsp uint64 // 0x38 Pointer to arguments on stack.
# xmm0 [2]uint64 // 0x40
# xmm1 [2]uint64 // 0x50
# xmm2 [2]uint64 // 0x60
# xmm3 [2]uint64 // 0x70
# xmm4 [2]uint64 // 0x80
# xmm5 [2]uint64 // 0x90
# xmm6 [2]uint64 // 0xa0
# xmm7 [2]uint64 // 0xb0
# };
pushq %rbp
.LCFI0:
movq %rsp, %rbp
.LCFI1:
subq $0xc0, %rsp # Space for struct on stack.
movq %rax, 0x0(%rsp)
movq %rdi, 0x8(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdx, 0x18(%rsp)
movq %rcx, 0x20(%rsp)
movq %r8, 0x28(%rsp)
movq %r9, 0x30(%rsp)
leaq 16(%rbp), %rax
movq %rax, 0x38(%rsp)
movdqa %xmm0, 0x40(%rsp)
movdqa %xmm1, 0x50(%rsp)
movdqa %xmm2, 0x60(%rsp)
movdqa %xmm3, 0x70(%rsp)
movdqa %xmm4, 0x80(%rsp)
movdqa %xmm5, 0x90(%rsp)
movdqa %xmm6, 0xa0(%rsp)
movdqa %xmm7, 0xb0(%rsp)
/* For MakeFunc functions that call recover. */
movq 8(%rbp), %rdi
#ifdef __PIC__
call __go_makefunc_can_recover@PLT
#else
call __go_makefunc_can_recover
#endif
# Get function type.
#ifdef __PIC__
call __go_get_closure@PLT
#else
call __go_get_closure
#endif
movq %rax, %rsi
movq %rsp, %rdi
#ifdef __PIC__
call reflect.MakeFuncStubGo@PLT
#else
call reflect.MakeFuncStubGo
#endif
/* MakeFunc functions can no longer call recover. */
#ifdef __PIC__
call __go_makefunc_returning@PLT
#else
call __go_makefunc_returning
#endif
# The structure will be updated with any return values. Load
# all possible return registers before returning to the caller.
movq 0x0(%rsp), %rax
movq 0x18(%rsp), %rdx
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movdqa 0x40(%rsp), %xmm0
movdqa 0x50(%rsp), %xmm1
# long double values are returned on the floating point stack,
# but we don't worry about that since Go doesn't have a long
# double type.
leave
.LCFI2:
ret
.LFE1:
#ifdef __ELF__
.size reflect.makeFuncStub, . - reflect.makeFuncStub
#endif
#ifdef __ELF__
#ifdef HAVE_AS_X86_64_UNWIND_SECTION_TYPE
.section .eh_frame,"a",@unwind
#else
.section .eh_frame,"a",@progbits
#endif
.Lframe1:
.long .LECIE1-.LSCIE1 /* Length of Common Information Entry */
.LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.uleb128 1 /* CIE Code Alignment Factor */
.sleb128 -8 /* CIE Data Alignment Factor */
.byte 0x10 /* CIE RA Column */
.uleb128 1 /* Augmentation size */
.byte 0x1b /* FDE Encoding (pcrel sdata4) */
.byte 0xc /* DW_CFA_def_cfa, %rsp offset 8 */
.uleb128 7
.uleb128 8
.byte 0x80+16 /* DW_CFA_offset, %rip offset 1*-8 */
.uleb128 1
.align 8
.LECIE1:
.LSFDE1:
.long .LEFDE1-.LASFDE1 /* FDE Length */
.LASFDE1:
.long .LASFDE1-.Lframe1 /* FDE CIE offset */
#if HAVE_AS_X86_PCREL
.long .LFB1-. /* FDE initial location */
#else
.long .LFB1@rel
#endif
.long .LFE1-.LFB1 /* FDE address range */
.uleb128 0x0 /* Augmentation size */
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI0-.LFB1
.byte 0xe /* DW_CFA_def_cfa_offset */
.uleb128 16
.byte 0x86 /* DW_CFA_offset, column 0x6 */
.uleb128 2
.byte 0x4 /* DW_CFA_advance_loc4 */
.long .LCFI1-.LCFI0
.byte 0xd /* DW_CFA_def_cfa_register */
.uleb128 6
.byte 0x2 /* DW_CFA_advance_loc1 */
.byte .LCFI2-.LCFI1
.byte 0xc /* DW_CFA_def_cfa */
.uleb128 7
.uleb128 8
.align 8
.LEFDE1:
#endif /* __ELF__ */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif


@ -1,15 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
/* Dummy function for processors that implement MakeFunc using FFI
rather than having builtin support. */
void makeFuncStub (void) __asm__ ("reflect.makeFuncStub");
void makeFuncStub (void)
{
runtime_throw ("impossible call to makeFuncStub");
}


@ -5,52 +5,27 @@
package reflect
import (
"runtime"
"unsafe"
)
// The ffi function, written in C, allocates an FFI closure. It
// returns the code and data pointers. When the code pointer is
// called, it will call callback. CIF is an FFI data structure
// allocated as part of the closure, and is returned to ensure that
// the GC retains it.
func ffi(ftyp *funcType, callback func(unsafe.Pointer, unsafe.Pointer)) (code uintptr, data uintptr, cif unsafe.Pointer)
// The makeFuncFFI function, written in C, fills in an FFI closure.
// It arranges for FFICallbackGo to be invoked directly from FFI.
func makeFuncFFI(ftyp *funcType, impl *makeFuncImpl)
// The ffiFree function, written in C, releases the FFI closure.
func ffiFree(uintptr)
// FFICallbackGo implements the Go side of the libffi callback.
// It is exported so that C code can call it.
//
// The call chain arriving here looks like
// some_go_caller
// ->some_ffi_internals
// ->ffi_callback (in C)
// ->FFICallbackGo
//
// The ffi_callback handles __go_makefunc_can_recover, and
// then passes off the data as received from ffi here.
// An ffiData holds the information needed to preserve an FFI closure
// for the garbage collector.
type ffiData struct {
code uintptr
data uintptr
cif unsafe.Pointer
callback func(unsafe.Pointer, unsafe.Pointer)
}
// The makeFuncFFI function uses libffi closures to implement
// reflect.MakeFunc. This is used for processors for which we don't
// have more efficient support.
func makeFuncFFI(ftyp *funcType, fn func(args []Value) (results []Value)) (uintptr, *ffiData) {
callback := func(params, results unsafe.Pointer) {
ffiCall(ftyp, fn, params, results)
}
code, data, cif := ffi(ftyp, callback)
c := &ffiData{code: code, data: data, cif: cif, callback: callback}
runtime.SetFinalizer(c,
func(p *ffiData) {
ffiFree(p.data)
})
return code, c
}
// ffiCall takes pointers to the parameters, calls the function, and
// stores the results back into memory.
func ffiCall(ftyp *funcType, fn func([]Value) []Value, params unsafe.Pointer, results unsafe.Pointer) {
func FFICallbackGo(results unsafe.Pointer, params unsafe.Pointer, impl *makeFuncImpl) {
ftyp := impl.typ
in := make([]Value, 0, len(ftyp.in))
ap := params
for _, rt := range ftyp.in {
@ -61,18 +36,18 @@ func ffiCall(ftyp *funcType, fn func([]Value) []Value, params unsafe.Pointer, re
ap = (unsafe.Pointer)(uintptr(ap) + ptrSize)
}
out := fn(in)
out := impl.call(in)
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
if v.typ != typ {
panic("reflect: function created by MakeFunc using " + funcName(fn) +
panic("reflect: function created by MakeFunc using " + funcName(impl.fn) +
" returned wrong type: have " +
out[i].typ.String() + " for " + typ.String())
}
if v.flag&flagRO != 0 {
panic("reflect: function created by MakeFunc using " + funcName(fn) +
panic("reflect: function created by MakeFunc using " + funcName(impl.fn) +
" returned value obtained from unexported field")
}
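A hedged sketch of the libffi side this relies on (FFI_GO_CLOSURES must be
available): ffi_prep_go_closure registers a callback, and when the closure is
later invoked with its own address in the static chain, libffi passes that
address to the callback as the final argument. That is how FFICallbackGo
receives the makeFuncImpl with no __go_set_closure/__go_get_closure handshake;
the reverse direction is served by libffi's ffi_call_go. The helper below is
hypothetical:

    #include <ffi.h>

    /* Invoked by libffi; 'closure' is the static-chain value at the call,
       i.e. the ffi_go_closure itself.  libgo forwards it to FFICallbackGo. */
    static void
    callback (ffi_cif *cif, void *results, void **args, void *closure)
    {
      (void) cif; (void) results; (void) args; (void) closure;
    }

    /* Fill in a Go-style closure; no separate user_data pointer is needed. */
    static void
    prep_go_closure (ffi_go_closure *impl, ffi_cif *cif)
    {
      ffi_prep_go_closure (impl, cif, callback);
    }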


@ -10,7 +10,7 @@
#include "go-ffi.h"
#if FFI_CLOSURES
#if FFI_GO_CLOSURES
#define USE_LIBFFI_CLOSURES
#endif
@ -18,36 +18,28 @@
/* Declare C functions with the names used to call from Go. */
struct ffi_ret {
void *code;
void *data;
void *cif;
};
struct ffi_ret ffi(const struct __go_func_type *ftyp, FuncVal *callback)
__asm__ (GOSYM_PREFIX "reflect.ffi");
void ffiFree(void *data)
__asm__ (GOSYM_PREFIX "reflect.ffiFree");
void makeFuncFFI(const struct __go_func_type *ftyp, ffi_go_closure *impl)
__asm__ (GOSYM_PREFIX "reflect.makeFuncFFI");
#ifdef USE_LIBFFI_CLOSURES
/* The function that we pass to ffi_prep_closure_loc. This calls the
Go callback function (passed in user_data) with the pointer to the
arguments and the results area. */
/* The function that we pass to ffi_prep_go_closure.  This calls the Go
function FFICallbackGo with the pointer to the arguments, the results area,
and the closure structure.  */
void FFICallbackGo(void *result, void **args, ffi_go_closure *closure)
__asm__ (GOSYM_PREFIX "reflect.FFICallbackGo");
static void ffi_callback (ffi_cif *, void *, void **, void *)
__asm__ ("reflect.ffi_callback");
static void
ffi_callback (ffi_cif* cif __attribute__ ((unused)), void *results,
void **args, void *user_data)
void **args, void *closure)
{
Location locs[8];
int n;
int i;
FuncVal *fv;
void (*f) (void *, void *);
/* This function is called from some series of FFI closure functions
called by a Go function. We want to see whether the caller of
@ -69,10 +61,7 @@ ffi_callback (ffi_cif* cif __attribute__ ((unused)), void *results,
if (i < n)
__go_makefunc_ffi_can_recover (locs + i, n - i);
fv = (FuncVal *) user_data;
__go_set_closure (fv);
f = (void *) fv->fn;
f (args, results);
FFICallbackGo(results, args, closure);
if (i < n)
__go_makefunc_returning ();
@ -80,46 +69,21 @@ ffi_callback (ffi_cif* cif __attribute__ ((unused)), void *results,
/* Allocate an FFI closure and arrange to call ffi_callback. */
struct ffi_ret
ffi (const struct __go_func_type *ftyp, FuncVal *callback)
void
makeFuncFFI(const struct __go_func_type *ftyp, ffi_go_closure *impl)
{
ffi_cif *cif;
void *code;
void *data;
struct ffi_ret ret;
cif = (ffi_cif *) __go_alloc (sizeof (ffi_cif));
__go_func_to_cif (ftyp, 0, 0, cif);
data = ffi_closure_alloc (sizeof (ffi_closure), &code);
if (data == NULL)
runtime_panicstring ("ffi_closure_alloc failed");
if (ffi_prep_closure_loc (data, cif, ffi_callback, callback, code)
!= FFI_OK)
runtime_panicstring ("ffi_prep_closure_loc failed");
ret.code = code;
ret.data = data;
ret.cif = cif;
return ret;
}
/* Free the FFI closure. */
void
ffiFree (void *data)
{
ffi_closure_free (data);
ffi_prep_go_closure(impl, cif, ffi_callback);
}
#else /* !defined(USE_LIBFFI_CLOSURES) */
struct ffi_ret
ffi(const struct __go_func_type *ftyp, FuncVal *callback)
{
runtime_panicstring ("libgo built without FFI does not support "
"reflect.MakeFunc");
}
void ffiFree(void *data)
void
makeFuncFFI(const struct __go_func_type *ftyp, ffi_go_closure *impl)
{
runtime_panicstring ("libgo built without FFI does not support "
"reflect.MakeFunc");


@ -1,86 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "go-panic.h"
#ifdef __s390x__
# define S390_GO_USE_64_BIT_ABI 1
# define S390_GO_S390X_ARGS , double f4, double f6
# define S390_GO_S390X_FIELDS double f4; double f6;
extern void S390xMakeFuncStubGo(void *, void *)
asm ("reflect.S390xMakeFuncStubGo");
# define S390_GO_MakeFuncStubGo(r, c) S390xMakeFuncStubGo((r), (c))
#else
# define S390_GO_USE_64_BIT_ABI 0
# define S390_GO_S390X_ARGS
# define S390_GO_S390X_FIELDS
extern void S390MakeFuncStubGo(void *, void *)
asm ("reflect.S390MakeFuncStubGo");
# define S390_GO_MakeFuncStubGo(r, c) S390MakeFuncStubGo((r), (c))
/* Needed to make the unused 64 bit abi conditional code compile. */
# define f4 f0
# define f6 f2
#endif
/* Structure to store all registers used for parameter passing. */
typedef struct
{
long r2;
long r3;
long r4;
long r5;
long r6;
/* Pointer to non-register arguments on the stack. */
long stack_args;
double f0;
double f2;
S390_GO_S390X_FIELDS
} s390Regs;
void
makeFuncStub(long r2, long r3, long r4, long r5, long r6,
unsigned long stack_args, double f0, double f2
S390_GO_S390X_ARGS)
asm ("reflect.makeFuncStub");
void
makeFuncStub(long r2, long r3, long r4, long r5, long r6,
unsigned long stack_args, double f0, double f2
S390_GO_S390X_ARGS)
{
s390Regs regs;
void *closure;
/* Store the registers in a structure that is passed on to the Go stub
function. */
regs.r2 = r2;
regs.r3 = r3;
regs.r4 = r4;
regs.r5 = r5;
regs.r6 = r6;
regs.stack_args = (long)&stack_args;
regs.f0 = f0;
regs.f2 = f2;
if (S390_GO_USE_64_BIT_ABI) {
regs.f4 = f4;
regs.f6 = f6;
}
/* For MakeFunc functions that call recover. */
__go_makefunc_can_recover(__builtin_return_address(0));
/* Call the Go stub function. */
closure = __go_get_closure();
S390_GO_MakeFuncStubGo(&regs, closure);
/* MakeFunc functions can no longer call recover. */
__go_makefunc_returning();
/* Restore all possible return registers. */
if (S390_GO_USE_64_BIT_ABI) {
asm volatile ("lg\t%%r2,0(%0)" : : "a" (&regs.r2) : "r2" );
asm volatile ("ld\t%%f0,0(%0)" : : "a" (&regs.f0) : "f0" );
} else {
asm volatile ("l\t%%r2,0(%0)" : : "a" (&regs.r2) : "r2" );
asm volatile ("l\t%%r3,0(%0)" : : "a" (&regs.r3) : "r3" );
asm volatile ("ld\t%%f0,0(%0)" : : "a" (&regs.f0) : "f0" );
}
}


@ -1,139 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MakeFunc 386 implementation.
package reflect
import "unsafe"
// The assembler stub will pass a pointer to this structure. We
// assume that no parameters are passed in registers--that is, we do
// not support the -mregparm option. On return we will set the
// registers that might hold result values.
type i386Regs struct {
esp uint32
eax uint32 // Value to return in %eax.
st0 float64 // Value to return in %st(0).
sr bool // Set to true if hidden struct pointer.
sf bool // Set to true if returning float
}
// MakeFuncStubGo implements the 386 calling convention for MakeFunc.
// This should not be called. It is exported so that assembly code
// can call it.
func MakeFuncStubGo(regs *i386Regs, c *makeFuncImpl) {
ftyp := c.typ
// See if the result requires a struct. If it does, the first
// parameter is a pointer to the struct.
retStruct := false
retEmpty := false
switch len(ftyp.out) {
case 0:
retEmpty = true
case 1:
if ftyp.out[0].size == 0 {
retEmpty = true
} else {
switch ftyp.out[0].Kind() {
case Complex64, Complex128, Array, Interface, Slice, String, Struct:
retStruct = true
}
}
default:
size := uintptr(0)
for _, typ := range ftyp.out {
size += typ.size
}
if size == 0 {
retEmpty = true
} else {
retStruct = true
}
}
in := make([]Value, 0, len(ftyp.in))
ap := uintptr(regs.esp)
regs.sr = false
regs.sf = false
var retPtr unsafe.Pointer
if retStruct {
retPtr = *(*unsafe.Pointer)(unsafe.Pointer(ap))
ap += ptrSize
regs.sr = true
}
for _, rt := range ftyp.in {
ap = align(ap, ptrSize)
// We have to copy the argument onto the heap in case
// the function hangs on the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
}
// Call the real function.
out := c.call(in)
if len(out) != len(ftyp.out) {
panic("reflect: wrong return count from function created by MakeFunc")
}
for i, typ := range ftyp.out {
v := out[i]
if v.typ != typ {
panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
" returned wrong type: have " +
out[i].typ.String() + " for " + typ.String())
}
if v.flag&flagRO != 0 {
panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
" returned value obtained from unexported field")
}
}
if retEmpty {
return
}
if retStruct {
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
off = align(off, uintptr(typ.fieldAlign))
addr := unsafe.Pointer(uintptr(retPtr) + off)
if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
*(*unsafe.Pointer)(addr) = v.ptr
} else {
memmove(addr, v.ptr, typ.size)
}
off += typ.size
}
regs.eax = uint32(uintptr(retPtr))
return
}
if len(ftyp.out) != 1 {
panic("inconsistency")
}
v := out[0]
switch v.Kind() {
case Ptr, UnsafePointer, Chan, Func, Map:
regs.eax = uint32(uintptr(v.pointer()))
case Float32, Float64:
regs.st0 = v.Float()
regs.sf = true
default:
memmove(unsafe.Pointer(&regs.eax), v.ptr, v.typ.size)
}
}

View File

@ -1,496 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MakeFunc amd64 implementation.
package reflect
import "unsafe"
// The assembler stub will pass a pointer to this structure.
// This will come in holding all the registers that might hold
// function parameters. On return we will set the registers that
// might hold result values.
type amd64Regs struct {
rax uint64
rdi uint64
rsi uint64
rdx uint64
rcx uint64
r8 uint64
r9 uint64
rsp uint64
xmm0 [2]uint64
xmm1 [2]uint64
xmm2 [2]uint64
xmm3 [2]uint64
xmm4 [2]uint64
xmm5 [2]uint64
xmm6 [2]uint64
xmm7 [2]uint64
}
// Argument classifications. The amd64 ELF ABI uses several more, but
// these are the only ones that arise for Go types.
type amd64Class int
const (
amd64Integer amd64Class = iota
amd64SSE
amd64NoClass
amd64Memory
)
// amd64Classify returns the one or two register classes needed to
// pass the value of type. Go types never need more than two
// registers. amd64Memory means the value is stored in memory.
// amd64NoClass means the register is not used.
func amd64Classify(typ *rtype) (amd64Class, amd64Class) {
switch typ.Kind() {
default:
panic("internal error--unknown kind in amd64Classify")
case Bool, Int, Int8, Int16, Int32, Int64,
Uint, Uint8, Uint16, Uint32, Uint64,
Uintptr, Chan, Func, Map, Ptr, UnsafePointer:
return amd64Integer, amd64NoClass
case Float32, Float64, Complex64:
return amd64SSE, amd64NoClass
case Complex128:
return amd64SSE, amd64SSE
case Array:
if typ.size == 0 {
return amd64NoClass, amd64NoClass
} else if typ.size > 16 {
return amd64Memory, amd64NoClass
}
atyp := (*arrayType)(unsafe.Pointer(typ))
eclass1, eclass2 := amd64Classify(atyp.elem)
if eclass1 == amd64Memory {
return amd64Memory, amd64NoClass
}
if eclass2 == amd64NoClass && typ.size > 8 {
eclass2 = eclass1
}
return eclass1, eclass2
case Interface:
return amd64Integer, amd64Integer
case Slice:
return amd64Memory, amd64NoClass
case String:
return amd64Integer, amd64Integer
case Struct:
if typ.size == 0 {
return amd64NoClass, amd64NoClass
} else if typ.size > 16 {
return amd64Memory, amd64NoClass
}
var first, second amd64Class
f := amd64NoClass
onFirst := true
styp := (*structType)(unsafe.Pointer(typ))
for _, field := range styp.fields {
if onFirst && field.offset >= 8 {
first = f
f = amd64NoClass
onFirst = false
}
fclass1, fclass2 := amd64Classify(field.typ)
f = amd64MergeClasses(f, fclass1)
if fclass2 != amd64NoClass {
if !onFirst {
panic("amd64Classify inconsistent")
}
first = f
f = fclass2
onFirst = false
}
}
if onFirst {
first = f
second = amd64NoClass
} else {
second = f
}
if first == amd64Memory || second == amd64Memory {
return amd64Memory, amd64NoClass
}
return first, second
}
}
// amd64MergeClasses merges two register classes as described in the
// amd64 ELF ABI.
func amd64MergeClasses(c1, c2 amd64Class) amd64Class {
switch {
case c1 == c2:
return c1
case c1 == amd64NoClass:
return c2
case c2 == amd64NoClass:
return c1
case c1 == amd64Memory || c2 == amd64Memory:
return amd64Memory
case c1 == amd64Integer || c2 == amd64Integer:
return amd64Integer
default:
return amd64SSE
}
}
// MakeFuncStubGo implements the amd64 calling convention for
// MakeFunc. This should not be called. It is exported so that
// assembly code can call it.
func MakeFuncStubGo(regs *amd64Regs, c *makeFuncImpl) {
ftyp := c.typ
// See if the result requires a struct. If it does, the first
// parameter is a pointer to the struct.
var ret1, ret2 amd64Class
switch len(ftyp.out) {
case 0:
ret1, ret2 = amd64NoClass, amd64NoClass
case 1:
ret1, ret2 = amd64Classify(ftyp.out[0])
default:
off := uintptr(0)
f := amd64NoClass
onFirst := true
for _, rt := range ftyp.out {
off = align(off, uintptr(rt.fieldAlign))
if onFirst && off >= 8 {
ret1 = f
f = amd64NoClass
onFirst = false
}
off += rt.size
if off > 16 {
break
}
fclass1, fclass2 := amd64Classify(rt)
f = amd64MergeClasses(f, fclass1)
if fclass2 != amd64NoClass {
if !onFirst {
panic("amd64Classify inconsistent")
}
ret1 = f
f = fclass2
onFirst = false
}
}
if off > 16 {
ret1, ret2 = amd64Memory, amd64NoClass
} else {
if onFirst {
ret1, ret2 = f, amd64NoClass
} else {
ret2 = f
}
}
if ret1 == amd64Memory || ret2 == amd64Memory {
ret1, ret2 = amd64Memory, amd64NoClass
}
}
in := make([]Value, 0, len(ftyp.in))
intreg := 0
ssereg := 0
ap := uintptr(regs.rsp)
maxIntregs := 6 // When we support Windows, this would be 4.
maxSSEregs := 8
if ret1 == amd64Memory {
// We are returning a value in memory, which means
// that the first argument is a hidden parameter
// pointing to that return area.
intreg++
}
argloop:
for _, rt := range ftyp.in {
c1, c2 := amd64Classify(rt)
fl := flag(rt.Kind())
if c2 == amd64NoClass {
// Argument is passed in a single register or
// in memory.
switch c1 {
case amd64NoClass:
v := Value{rt, nil, fl | flagIndir}
in = append(in, v)
continue argloop
case amd64Integer:
if intreg < maxIntregs {
reg := amd64IntregVal(regs, intreg)
iw := unsafe.Pointer(reg)
if k := rt.Kind(); k != Ptr && k != UnsafePointer {
iw = unsafe.Pointer(&reg)
fl |= flagIndir
}
v := Value{rt, iw, fl}
in = append(in, v)
intreg++
continue argloop
}
case amd64SSE:
if ssereg < maxSSEregs {
reg := amd64SSEregVal(regs, ssereg)
v := Value{rt, unsafe.Pointer(&reg), fl | flagIndir}
in = append(in, v)
ssereg++
continue argloop
}
}
in, ap = amd64Memarg(in, ap, rt)
continue argloop
}
// Argument is passed in two registers.
nintregs := 0
nsseregs := 0
switch c1 {
case amd64Integer:
nintregs++
case amd64SSE:
nsseregs++
default:
panic("inconsistent")
}
switch c2 {
case amd64Integer:
nintregs++
case amd64SSE:
nsseregs++
default:
panic("inconsistent")
}
// If the whole argument does not fit in registers, it
// is passed in memory.
if intreg+nintregs > maxIntregs || ssereg+nsseregs > maxSSEregs {
in, ap = amd64Memarg(in, ap, rt)
continue argloop
}
var word1, word2 uintptr
switch c1 {
case amd64Integer:
word1 = amd64IntregVal(regs, intreg)
intreg++
case amd64SSE:
word1 = amd64SSEregVal(regs, ssereg)
ssereg++
}
switch c2 {
case amd64Integer:
word2 = amd64IntregVal(regs, intreg)
intreg++
case amd64SSE:
word2 = amd64SSEregVal(regs, ssereg)
ssereg++
}
p := unsafe_New(rt)
*(*uintptr)(p) = word1
*(*uintptr)(unsafe.Pointer(uintptr(p) + ptrSize)) = word2
v := Value{rt, p, fl | flagIndir}
in = append(in, v)
}
// All the real arguments have been found and turned into
// Value's. Call the real function.
out := c.call(in)
if len(out) != len(ftyp.out) {
panic("reflect: wrong return count from function created by MakeFunc")
}
for i, typ := range ftyp.out {
v := out[i]
if v.typ != typ {
panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
" returned wrong type: have " +
out[i].typ.String() + " for " + typ.String())
}
if v.flag&flagRO != 0 {
panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
" returned value obtained from unexported field")
}
}
if ret1 == amd64NoClass {
return
}
if ret1 == amd64Memory {
// The address of the memory area was passed as a
// hidden parameter in %rdi.
ptr := unsafe.Pointer(uintptr(regs.rdi))
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
off = align(off, uintptr(typ.fieldAlign))
addr := unsafe.Pointer(uintptr(ptr) + off)
if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
*(*unsafe.Pointer)(addr) = v.ptr
} else {
memmove(addr, v.ptr, typ.size)
}
off += typ.size
}
return
}
if len(out) == 1 && ret2 == amd64NoClass {
v := out[0]
var w unsafe.Pointer
switch v.Kind() {
case Ptr, UnsafePointer, Chan, Func, Map:
w = v.pointer()
default:
memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
}
switch ret1 {
case amd64Integer:
regs.rax = uint64(uintptr(w))
case amd64SSE:
regs.xmm0[0] = uint64(uintptr(w))
regs.xmm0[1] = 0
default:
panic("inconsistency")
}
return
}
var buf [2]unsafe.Pointer
ptr := unsafe.Pointer(&buf[0])
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
off = align(off, uintptr(typ.fieldAlign))
addr := unsafe.Pointer(uintptr(ptr) + off)
if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
*(*unsafe.Pointer)(addr) = v.ptr
} else {
memmove(addr, v.ptr, typ.size)
}
off += uintptr(typ.size)
}
switch ret1 {
case amd64Integer:
regs.rax = *(*uint64)(unsafe.Pointer(&buf[0]))
case amd64SSE:
regs.xmm0[0] = *(*uint64)(unsafe.Pointer(&buf[0]))
regs.xmm0[1] = 0
default:
panic("inconsistency")
}
switch ret2 {
case amd64Integer:
reg := *(*uint64)(unsafe.Pointer(&buf[1]))
if ret1 == amd64Integer {
regs.rdx = reg
} else {
regs.rax = reg
}
case amd64SSE:
reg := *(*uint64)(unsafe.Pointer(&buf[1]))
if ret1 == amd64Integer {
regs.xmm0[0] = reg
regs.xmm0[1] = 0
} else {
regs.xmm1[0] = reg
regs.xmm1[1] = 0
}
case amd64NoClass:
default:
panic("inconsistency")
}
}
// The amd64Memarg function adds an argument passed in memory.
func amd64Memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
ap = align(ap, ptrSize)
ap = align(ap, uintptr(rt.align))
// We have to copy the argument onto the heap in case the
// function hangs onto the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
return in, ap
}
// The amd64IntregVal function returns the value of integer register i.
func amd64IntregVal(regs *amd64Regs, i int) uintptr {
var r uint64
switch i {
case 0:
r = regs.rdi
case 1:
r = regs.rsi
case 2:
r = regs.rdx
case 3:
r = regs.rcx
case 4:
r = regs.r8
case 5:
r = regs.r9
default:
panic("amd64IntregVal: bad index")
}
return uintptr(r)
}
// The amd64SSEregVal function returns the value of SSE register i.
// Note that although SSE registers can hold two uintptr's, for the
// types we use in Go we only ever use the least significant one. The
// most significant one would only be used for 128 bit types.
func amd64SSEregVal(regs *amd64Regs, i int) uintptr {
var r uint64
switch i {
case 0:
r = regs.xmm0[0]
case 1:
r = regs.xmm1[0]
case 2:
r = regs.xmm2[0]
case 3:
r = regs.xmm3[0]
case 4:
r = regs.xmm4[0]
case 5:
r = regs.xmm5[0]
case 6:
r = regs.xmm6[0]
case 7:
r = regs.xmm7[0]
}
return uintptr(r)
}


@ -1,454 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MakeFunc s390 implementation.
package reflect
import "unsafe"
// Convenience types and constants.
const s390_arch_stack_slot_align uintptr = 4
const s390_num_gr = 5
const s390_num_fr = 2
type s390_arch_gr_t uint32
type s390_arch_fr_t uint64
// The assembler stub will pass a pointer to this structure.
// This will come in holding all the registers that might hold
// function parameters. On return we will set the registers that
// might hold result values.
type s390_regs struct {
r2 s390_arch_gr_t
r3 s390_arch_gr_t
r4 s390_arch_gr_t
r5 s390_arch_gr_t
r6 s390_arch_gr_t
stack_args s390_arch_gr_t
f0 s390_arch_fr_t
f2 s390_arch_fr_t
}
// Argument classifications that arise for Go types.
type s390_arg_t int
const (
s390_general_reg s390_arg_t = iota
s390_general_reg_pair
s390_float_reg
// Argument passed as a pointer to an in-memory value.
s390_mem_ptr
s390_empty
)
// s390ClassifyParameter returns the register class needed to
// pass the value of type TYP. s390_empty means the register is
// not used. The second and third return values are the offset of
// an rtype parameter passed in a register (second) or stack slot
// (third).
func s390ClassifyParameter(typ *rtype) (s390_arg_t, uintptr, uintptr) {
offset := s390_arch_stack_slot_align - typ.Size()
if typ.Size() > s390_arch_stack_slot_align {
offset = 0
}
switch typ.Kind() {
default:
panic("internal error--unknown kind in s390ClassifyParameter")
case Bool, Int, Int8, Int16, Int32, Uint, Uint8, Uint16, Uint32:
return s390_general_reg, offset, offset
case Int64, Uint64:
return s390_general_reg_pair, 0, 0
case Uintptr, Chan, Func, Map, Ptr, UnsafePointer:
return s390_general_reg, 0, 0
case Float32, Float64:
return s390_float_reg, 0, offset
case Complex64, Complex128:
// Complex numbers are passed by reference.
return s390_mem_ptr, 0, 0
case Array, Struct:
var ityp *rtype
var length int
if typ.Size() == 0 {
return s390_empty, 0, 0
}
switch typ.Size() {
default:
// Pointer to memory.
return s390_mem_ptr, 0, 0
case 1, 2:
// Pass in an integer register.
return s390_general_reg, offset, offset
case 4, 8:
// See below.
}
if typ.Kind() == Array {
atyp := (*arrayType)(unsafe.Pointer(typ))
length = atyp.Len()
ityp = atyp.elem
} else {
styp := (*structType)(unsafe.Pointer(typ))
length = len(styp.fields)
ityp = styp.fields[0].typ
}
if length == 1 {
class, off_reg, off_slot := s390ClassifyParameter(ityp)
if class == s390_float_reg {
// The array (stored in a structure) or struct
// is "equivalent to a floating point type" as
// defined in the S390 Abi. Note that this
// can only be the case in the case 4 of the
// switch above.
return s390_float_reg, off_reg, off_slot
}
}
switch typ.Size() {
case 4:
return s390_general_reg, offset, offset
case 8:
return s390_general_reg_pair, 0, 0
default:
return s390_general_reg, 0, 0
}
case Interface, String:
// Structure of size 8.
return s390_general_reg_pair, 0, 0
case Slice:
return s390_mem_ptr, 0, 0
}
}
// s390ClassifyReturn returns the register classes needed to
// return the value of type TYP. s390_empty means the register is
// not used. The second value is the offset of an rtype return
// parameter if stored in a register.
func s390ClassifyReturn(typ *rtype) (s390_arg_t, uintptr) {
offset := s390_arch_stack_slot_align - typ.Size()
if typ.Size() > s390_arch_stack_slot_align {
offset = 0
}
switch typ.Kind() {
default:
panic("internal error--unknown kind in s390ClassifyReturn")
case Bool, Int, Int8, Int16, Int32,
Uint, Uint8, Uint16, Uint32, Uintptr:
return s390_general_reg, offset
case Int64, Uint64:
return s390_general_reg_pair, 0
case Chan, Func, Map, Ptr, UnsafePointer:
return s390_general_reg, 0
case Float32, Float64:
return s390_float_reg, 0
case Complex64, Complex128:
return s390_mem_ptr, 0
case Interface, Slice, String:
return s390_mem_ptr, 0
case Array, Struct:
if typ.size == 0 {
return s390_empty, 0
}
// No optimization is done for returned structures and arrays.
return s390_mem_ptr, 0
}
}
// Given a value of type *rtype left aligned in an unsafe.Pointer,
// reload the value so that it can be stored in a general or
// floating point register. For general registers the value is
// sign extended and right aligned.
func s390ReloadForRegister(typ *rtype, w uintptr, offset uintptr) uintptr {
var do_sign_extend bool = false
var gr s390_arch_gr_t
switch typ.Kind() {
case Int, Int8, Int16, Int32:
do_sign_extend = true
default:
// Handle all other cases in the next switch.
}
switch typ.size {
case 1:
if do_sign_extend == true {
se := int32(*(*int8)(unsafe.Pointer(&w)))
gr = *(*s390_arch_gr_t)(unsafe.Pointer(&se))
} else {
e := int32(*(*uint8)(unsafe.Pointer(&w)))
gr = *(*s390_arch_gr_t)(unsafe.Pointer(&e))
}
case 2:
if do_sign_extend == true {
se := int32(*(*int16)(unsafe.Pointer(&w)))
gr = *(*s390_arch_gr_t)(unsafe.Pointer(&se))
} else {
e := int32(*(*uint16)(unsafe.Pointer(&w)))
gr = *(*s390_arch_gr_t)(unsafe.Pointer(&e))
}
default:
panic("reflect: bad size in s390ReloadForRegister")
}
return *(*uintptr)(unsafe.Pointer(&gr))
}
// MakeFuncStubGo implements the s390 calling convention for
// MakeFunc. This should not be called. It is exported so that
// assembly code can call it.
func S390MakeFuncStubGo(regs *s390_regs, c *makeFuncImpl) {
ftyp := c.typ
gr := 0
fr := 0
ap := uintptr(regs.stack_args)
// See if the result requires a struct. If it does, the first
// parameter is a pointer to the struct.
var ret_class s390_arg_t
var ret_off_reg uintptr
var ret_type *rtype
switch len(ftyp.out) {
case 0:
ret_type = nil
ret_class, ret_off_reg = s390_empty, 0
case 1:
ret_type = ftyp.out[0]
ret_class, ret_off_reg = s390ClassifyReturn(ret_type)
default:
ret_type = nil
ret_class, ret_off_reg = s390_mem_ptr, 0
}
in := make([]Value, 0, len(ftyp.in))
if ret_class == s390_mem_ptr {
// We are returning a value in memory, which means
// that the first argument is a hidden parameter
// pointing to that return area.
gr++
}
argloop:
for _, rt := range ftyp.in {
class, off_reg, off_slot := s390ClassifyParameter(rt)
fl := flag(rt.Kind())
switch class {
case s390_empty:
v := Value{rt, nil, fl | flagIndir}
in = append(in, v)
continue argloop
case s390_general_reg:
// Values stored in a general register are right
// aligned.
if gr < s390_num_gr {
val := s390_general_reg_val(regs, gr)
iw := unsafe.Pointer(&val)
k := rt.Kind()
if k != Ptr && k != UnsafePointer {
ix := uintptr(unsafe.Pointer(&val))
ix += off_reg
iw = unsafe.Pointer(ix)
fl |= flagIndir
}
v := Value{rt, iw, fl}
in = append(in, v)
gr++
} else {
in, ap = s390_add_stackreg(
in, ap, rt, off_slot)
}
continue argloop
case s390_general_reg_pair:
// 64-bit integers and structs are passed in a register
// pair.
if gr+1 < s390_num_gr {
val := uint64(s390_general_reg_val(regs, gr))<<32 + uint64(s390_general_reg_val(regs, gr+1))
iw := unsafe.Pointer(&val)
v := Value{rt, iw, fl | flagIndir}
in = append(in, v)
gr += 2
} else {
in, ap = s390_add_stackreg(in, ap, rt, off_slot)
gr = s390_num_gr
}
continue argloop
case s390_float_reg:
// In a register, floats are left aligned, but in a
// stack slot they are right aligned.
if fr < s390_num_fr {
val := s390_float_reg_val(regs, fr)
ix := uintptr(unsafe.Pointer(&val))
v := Value{
rt, unsafe.Pointer(unsafe.Pointer(ix)),
fl | flagIndir,
}
in = append(in, v)
fr++
} else {
in, ap = s390_add_stackreg(
in, ap, rt, off_slot)
}
continue argloop
case s390_mem_ptr:
if gr < s390_num_gr {
// Register holding a pointer to memory.
val := s390_general_reg_val(regs, gr)
v := Value{
rt, unsafe.Pointer(uintptr(val)),
fl | flagIndir}
in = append(in, v)
gr++
} else {
// Stack slot holding a pointer to memory.
in, ap = s390_add_memarg(in, ap, rt)
}
continue argloop
}
panic("reflect: argtype not handled in MakeFunc:argloop")
}
// All the real arguments have been found and turned into
// Values. Call the real function.
out := c.call(in)
if len(out) != len(ftyp.out) {
panic("reflect: wrong return count from function created by MakeFunc")
}
for i, typ := range ftyp.out {
v := out[i]
if v.typ != typ {
panic(
"reflect: function created by MakeFunc using " +
funcName(c.fn) + " returned wrong type: have " +
out[i].typ.String() + " for " + typ.String())
}
if v.flag&flagRO != 0 {
panic(
"reflect: function created by MakeFunc using " +
funcName(c.fn) + " returned value obtained " +
"from unexported field")
}
}
switch ret_class {
case s390_general_reg, s390_float_reg, s390_general_reg_pair:
// Single return value in a general or floating point register.
v := out[0]
var w uintptr
switch v.Kind() {
case Ptr, UnsafePointer, Chan, Func, Map:
w = uintptr(v.pointer())
default:
memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
if ret_off_reg != 0 {
w = s390ReloadForRegister(
ret_type, w, ret_off_reg)
}
}
if ret_class == s390_float_reg {
regs.f0 = s390_arch_fr_t(uintptr(w))
} else if ret_class == s390_general_reg {
regs.r2 = s390_arch_gr_t(uintptr(w))
} else {
regs.r2 = s390_arch_gr_t(uintptr(w) >> 32)
regs.r3 = s390_arch_gr_t(uintptr(w) & 0xffffffff)
}
case s390_mem_ptr:
// The address of the memory area was passed as a hidden
// parameter in %r2. Multiple return values are always returned
// in an in-memory structure.
ptr := unsafe.Pointer(uintptr(regs.r2))
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
off = align(off, uintptr(typ.fieldAlign))
addr := unsafe.Pointer(uintptr(ptr) + off)
if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
*(*unsafe.Pointer)(addr) = v.ptr
} else {
memmove(addr, v.ptr, typ.size)
}
off += typ.size
}
case s390_empty:
}
return
}
// The s390_add_stackreg function adds an argument passed on the
// stack that could be passed in a register.
func s390_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Value, uintptr) {
// If we're not already at the beginning of a stack slot, round up to
// the beginning of the next one.
ap = align(ap, s390_arch_stack_slot_align)
// If offset is > 0, the data is right aligned on the stack slot.
ap += offset
// We have to copy the argument onto the heap in case the
// function hangs onto the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
ap = align(ap, s390_arch_stack_slot_align)
return in, ap
}
// The s390_add_memarg function adds an argument passed in memory.
func s390_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
// If we're not already at the beginning of a stack slot,
// round up to the beginning of the next one.
ap = align(ap, s390_arch_stack_slot_align)
// We have to copy the argument onto the heap in case the
// function hangs onto the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += s390_arch_stack_slot_align
return in, ap
}
// The s390_general_reg_val function returns the value of integer register GR.
func s390_general_reg_val(regs *s390_regs, gr int) s390_arch_gr_t {
switch gr {
case 0:
return regs.r2
case 1:
return regs.r3
case 2:
return regs.r4
case 3:
return regs.r5
case 4:
return regs.r6
default:
panic("s390_general_reg_val: bad integer register")
}
}
// The s390_float_reg_val function returns the value of float register FR.
func s390_float_reg_val(regs *s390_regs, fr int) uintptr {
var r s390_arch_fr_t
switch fr {
case 0:
r = regs.f0
case 1:
r = regs.f2
default:
panic("s390_float_reg_val: bad floating point register")
}
return uintptr(r)
}

View File

@ -1,436 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// MakeFunc s390x implementation.
package reflect
import "unsafe"
// Convenience types and constants.
const s390x_arch_stack_slot_align uintptr = 8
const s390x_num_gr = 5
const s390x_num_fr = 4
type s390x_arch_gr_t uint64
type s390x_arch_fr_t uint64
// The assembler stub will pass a pointer to this structure.
// This will come in holding all the registers that might hold
// function parameters. On return we will set the registers that
// might hold result values.
type s390x_regs struct {
r2 s390x_arch_gr_t
r3 s390x_arch_gr_t
r4 s390x_arch_gr_t
r5 s390x_arch_gr_t
r6 s390x_arch_gr_t
stack_args s390x_arch_gr_t
f0 s390x_arch_fr_t
f2 s390x_arch_fr_t
f4 s390x_arch_fr_t
f6 s390x_arch_fr_t
}
// Argument classifications that arise for Go types.
type s390x_arg_t int
const (
s390x_general_reg s390x_arg_t = iota
s390x_float_reg
// Argument passed as a pointer to an in-memory value.
s390x_mem_ptr
s390x_empty
)
// s390xClassifyParameter returns the register class needed to
// pass the value of type TYP. s390x_empty means the register is
// not used. The second and third return values are the offset of
// an rtype parameter passed in a register (second) or stack slot
// (third).
func s390xClassifyParameter(typ *rtype) (s390x_arg_t, uintptr, uintptr) {
offset := s390x_arch_stack_slot_align - typ.Size()
switch typ.Kind() {
default:
panic("internal error--unknown kind in s390xClassifyParameter")
case Bool, Int, Int8, Int16, Int32, Uint, Uint8, Uint16, Uint32:
return s390x_general_reg, offset, offset
case Int64, Uint64, Uintptr, Chan, Func, Map, Ptr, UnsafePointer:
return s390x_general_reg, 0, 0
case Float32, Float64:
return s390x_float_reg, 0, offset
case Complex64, Complex128:
// Complex numbers are passed by reference.
return s390x_mem_ptr, 0, 0
case Array, Struct:
var ityp *rtype
var length int
if typ.Size() == 0 {
return s390x_empty, 0, 0
}
switch typ.Size() {
default:
// Pointer to memory.
return s390x_mem_ptr, 0, 0
case 1, 2:
// Pass in an integer register.
return s390x_general_reg, offset, offset
case 4, 8:
// See below.
}
if typ.Kind() == Array {
atyp := (*arrayType)(unsafe.Pointer(typ))
length = atyp.Len()
ityp = atyp.elem
} else {
styp := (*structType)(unsafe.Pointer(typ))
length = len(styp.fields)
ityp = styp.fields[0].typ
}
if length == 1 {
class, off_reg, off_slot := s390xClassifyParameter(ityp)
if class == s390x_float_reg {
// The array (stored in a structure) or struct
// is "equivalent to a floating point type" as
// defined in the s390x ABI. Note that this
// can only be the case in the cases 4 and 8 of
// the switch above.
return s390x_float_reg, off_reg, off_slot
}
}
// Otherwise pass in an integer register.
switch typ.Size() {
case 4, 8:
return s390x_general_reg, offset, offset
default:
return s390x_general_reg, 0, 0
}
case Interface, Slice, String:
return s390x_mem_ptr, 0, 0
}
}
// s390xClassifyReturn returns the register classes needed to
// return the value of type TYP. s390x_empty means the register is
// not used. The second value is the offset of an rtype return
// parameter if stored in a register.
func s390xClassifyReturn(typ *rtype) (s390x_arg_t, uintptr) {
offset := s390x_arch_stack_slot_align - typ.Size()
switch typ.Kind() {
default:
panic("internal error--unknown kind in s390xClassifyReturn")
case Bool, Int, Int8, Int16, Int32, Int64,
Uint, Uint8, Uint16, Uint32, Uint64, Uintptr:
return s390x_general_reg, offset
case Chan, Func, Map, Ptr, UnsafePointer:
return s390x_general_reg, 0
case Float32, Float64:
return s390x_float_reg, 0
case Complex64, Complex128:
return s390x_mem_ptr, 0
case Interface, Slice, String:
return s390x_mem_ptr, 0
case Array, Struct:
if typ.size == 0 {
return s390x_empty, 0
}
// No optimization is done for returned structures and arrays.
return s390x_mem_ptr, 0
}
}
// Given a value of type *rtype left aligned in an unsafe.Pointer,
// reload the value so that it can be stored in a general or
// floating point register. For general registers the value is
// sign-extended and right-aligned.
func s390xReloadForRegister(typ *rtype, w uintptr, offset uintptr) uintptr {
var do_sign_extend bool = false
var gr s390x_arch_gr_t
switch typ.Kind() {
case Int, Int8, Int16, Int32, Int64:
do_sign_extend = true
default:
// Handle all other cases in the next switch.
}
switch typ.size {
case 1:
if do_sign_extend == true {
se := int64(*(*int8)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&se))
} else {
e := int64(*(*uint8)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&e))
}
case 2:
if do_sign_extend == true {
se := int64(*(*int16)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&se))
} else {
e := int64(*(*uint16)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&e))
}
case 4:
if do_sign_extend == true {
se := int64(*(*int32)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&se))
} else {
e := int64(*(*uint32)(unsafe.Pointer(&w)))
gr = *(*s390x_arch_gr_t)(unsafe.Pointer(&e))
}
default:
panic("reflect: bad size in s390xReloadForRegister")
}
return *(*uintptr)(unsafe.Pointer(&gr))
}
// MakeFuncStubGo implements the s390x calling convention for
// MakeFunc. This should not be called. It is exported so that
// assembly code can call it.
func S390xMakeFuncStubGo(regs *s390x_regs, c *makeFuncImpl) {
ftyp := c.typ
gr := 0
fr := 0
ap := uintptr(regs.stack_args)
// See if the result requires a struct. If it does, the first
// parameter is a pointer to the struct.
var ret_class s390x_arg_t
var ret_off_reg uintptr
var ret_type *rtype
switch len(ftyp.out) {
case 0:
ret_type = nil
ret_class, ret_off_reg = s390x_empty, 0
case 1:
ret_type = ftyp.out[0]
ret_class, ret_off_reg = s390xClassifyReturn(ret_type)
default:
ret_type = nil
ret_class, ret_off_reg = s390x_mem_ptr, 0
}
in := make([]Value, 0, len(ftyp.in))
if ret_class == s390x_mem_ptr {
// We are returning a value in memory, which means
// that the first argument is a hidden parameter
// pointing to that return area.
gr++
}
argloop:
for _, rt := range ftyp.in {
class, off_reg, off_slot := s390xClassifyParameter(rt)
fl := flag(rt.Kind())
switch class {
case s390x_empty:
v := Value{rt, nil, fl | flagIndir}
in = append(in, v)
continue argloop
case s390x_general_reg:
// Values stored in a general register are right
// aligned.
if gr < s390x_num_gr {
val := s390x_general_reg_val(regs, gr)
iw := unsafe.Pointer(val)
k := rt.Kind()
if k != Ptr && k != UnsafePointer {
ix := uintptr(unsafe.Pointer(&val))
ix += off_reg
iw = unsafe.Pointer(ix)
fl |= flagIndir
}
v := Value{rt, iw, fl}
in = append(in, v)
gr++
} else {
in, ap = s390x_add_stackreg(
in, ap, rt, off_slot)
}
continue argloop
case s390x_float_reg:
// In a register, floats are left aligned, but in a
// stack slot they are right aligned.
if fr < s390x_num_fr {
val := s390x_float_reg_val(regs, fr)
ix := uintptr(unsafe.Pointer(&val))
v := Value{
rt, unsafe.Pointer(unsafe.Pointer(ix)),
fl | flagIndir,
}
in = append(in, v)
fr++
} else {
in, ap = s390x_add_stackreg(
in, ap, rt, off_slot)
}
continue argloop
case s390x_mem_ptr:
if gr < s390x_num_gr {
// Register holding a pointer to memory.
val := s390x_general_reg_val(regs, gr)
v := Value{
rt, unsafe.Pointer(val), fl | flagIndir}
in = append(in, v)
gr++
} else {
// Stack slot holding a pointer to memory.
in, ap = s390x_add_memarg(in, ap, rt)
}
continue argloop
}
panic("reflect: argtype not handled in MakeFunc:argloop")
}
// All the real arguments have been found and turned into
// Values. Call the real function.
out := c.call(in)
if len(out) != len(ftyp.out) {
panic("reflect: wrong return count from function created by MakeFunc")
}
for i, typ := range ftyp.out {
v := out[i]
if v.typ != typ {
panic(
"reflect: function created by MakeFunc using " +
funcName(c.fn) + " returned wrong type: have " +
out[i].typ.String() + " for " + typ.String())
}
if v.flag&flagRO != 0 {
panic(
"reflect: function created by MakeFunc using " +
funcName(c.fn) + " returned value obtained " +
"from unexported field")
}
}
switch ret_class {
case s390x_general_reg, s390x_float_reg:
// Single return value in a general or floating point register.
v := out[0]
var w uintptr
switch v.Kind() {
case Ptr, UnsafePointer, Chan, Func, Map:
w = uintptr(v.pointer())
default:
memmove(unsafe.Pointer(&w), v.ptr, v.typ.size)
if ret_off_reg != 0 {
w = s390xReloadForRegister(
ret_type, w, ret_off_reg)
}
}
if ret_class == s390x_float_reg {
regs.f0 = s390x_arch_fr_t(w)
} else {
regs.r2 = s390x_arch_gr_t(w)
}
case s390x_mem_ptr:
// The address of the memory area was passed as a hidden
// parameter in %r2. Multiple return values are always returned
// in an in-memory structure.
ptr := unsafe.Pointer(uintptr(regs.r2))
off := uintptr(0)
for i, typ := range ftyp.out {
v := out[i]
off = align(off, uintptr(typ.fieldAlign))
addr := unsafe.Pointer(uintptr(ptr) + off)
if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
*(*unsafe.Pointer)(addr) = v.ptr
} else {
memmove(addr, v.ptr, typ.size)
}
off += typ.size
}
case s390x_empty:
}
return
}
// The s390x_add_stackreg function adds an argument passed on the
// stack that could be passed in a register.
func s390x_add_stackreg(in []Value, ap uintptr, rt *rtype, offset uintptr) ([]Value, uintptr) {
// If we're not already at the beginning of a stack slot, round up to
// the beginning of the next one.
ap = align(ap, s390x_arch_stack_slot_align)
// If offset is > 0, the data is right aligned on the stack slot.
ap += offset
// We have to copy the argument onto the heap in case the
// function hangs onto the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, unsafe.Pointer(ap), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += rt.size
ap = align(ap, s390x_arch_stack_slot_align)
return in, ap
}
// The s390x_add_memarg function adds an argument passed in memory.
func s390x_add_memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
// If we're not already at the beginning of a stack slot,
// round up to the beginning of the next one.
ap = align(ap, s390x_arch_stack_slot_align)
// We have to copy the argument onto the heap in case the
// function hangs onto the reflect.Value we pass it.
p := unsafe_New(rt)
memmove(p, *(*unsafe.Pointer)(unsafe.Pointer(ap)), rt.size)
v := Value{rt, p, flag(rt.Kind()) | flagIndir}
in = append(in, v)
ap += s390x_arch_stack_slot_align
return in, ap
}
// The s390x_general_reg_val function returns the value of integer register GR.
func s390x_general_reg_val(regs *s390x_regs, gr int) uintptr {
var r s390x_arch_gr_t
switch gr {
case 0:
r = regs.r2
case 1:
r = regs.r3
case 2:
r = regs.r4
case 3:
r = regs.r5
case 4:
r = regs.r6
default:
panic("s390x_general_reg_val: bad integer register")
}
return uintptr(r)
}
// The s390x_float_reg_val function returns the value of float register FR.
func s390x_float_reg_val(regs *s390x_regs, fr int) uintptr {
var r s390x_arch_fr_t
switch fr {
case 0:
r = regs.f0
case 1:
r = regs.f2
case 2:
r = regs.f4
case 3:
r = regs.f6
default:
panic("s390x_float_reg_val: bad floating point register")
}
return uintptr(r)
}

View File

@ -308,9 +308,6 @@ func (v Value) CallSlice(in []Value) []Value {
var callGC bool // for testing; see TestCallMethodJump
var makeFuncStubFn = makeFuncStub
var makeFuncStubCode = **(**uintptr)(unsafe.Pointer(&makeFuncStubFn))
func (v Value) call(op string, in []Value) []Value {
// Get function pointer, type.
t := v.typ
@ -388,17 +385,6 @@ func (v Value) call(op string, in []Value) []Value {
}
nout := t.NumOut()
// If target is makeFuncStub, short circuit the unpack onto stack /
// pack back into []Value for the args and return values. Just do the
// call directly.
// We need to do this here because otherwise we have a situation where
// reflect.callXX calls makeFuncStub, neither of which knows the
// layout of the args. That's bad for precise gc & stack copying.
x := (*makeFuncImpl)(fn)
if x.code == makeFuncStubCode {
return x.call(in)
}
if v.flag&flagMethod != 0 {
nin++
}
@ -1120,16 +1106,6 @@ func (v Value) Pointer() uintptr {
case Chan, Map, Ptr, UnsafePointer:
return uintptr(v.pointer())
case Func:
if v.flag&flagMethod != 0 {
// As the doc comment says, the returned pointer is an
// underlying code pointer but not necessarily enough to
// identify a single function uniquely. All method expressions
// created via reflect have the same underlying code pointer,
// so their Pointers are equal. The function used here must
// match the one used in makeMethodValue.
f := makeFuncStub
return **(**uintptr)(unsafe.Pointer(&f))
}
p := v.pointer()
// Non-nil func value points at data block.
// First word of data block is actual code.

View File

@ -30,8 +30,6 @@ static ffi_type *go_struct_to_ffi (const struct __go_struct_type *)
__attribute__ ((no_split_stack));
static ffi_type *go_string_to_ffi (void) __attribute__ ((no_split_stack));
static ffi_type *go_interface_to_ffi (void) __attribute__ ((no_split_stack));
static ffi_type *go_complex_to_ffi (ffi_type *)
__attribute__ ((no_split_stack, unused));
static ffi_type *go_type_to_ffi (const struct __go_type_descriptor *)
__attribute__ ((no_split_stack));
static ffi_type *go_func_return_ffi (const struct __go_func_type *)
@ -155,7 +153,15 @@ go_interface_to_ffi (void)
return ret;
}
/* Return an ffi_type for a Go complex type. */
#ifndef FFI_TARGET_HAS_COMPLEX_TYPE
/* If libffi hasn't been updated for this target to support complex,
pretend complex is a structure. Warning: This does not work for
all ABIs. Eventually libffi should be updated for all targets
and this should go away. */
static ffi_type *go_complex_to_ffi (ffi_type *)
__attribute__ ((no_split_stack));
static ffi_type *
go_complex_to_ffi (ffi_type *float_type)
@ -170,6 +176,7 @@ go_complex_to_ffi (ffi_type *float_type)
ret->elements[2] = NULL;
return ret;
}
#endif
/* Return an ffi_type for a type described by a
__go_type_descriptor. */
@ -194,23 +201,25 @@ go_type_to_ffi (const struct __go_type_descriptor *descriptor)
return &ffi_type_double;
abort ();
case GO_COMPLEX64:
#ifdef __alpha__
runtime_throw("the libffi library does not support Complex64 type with "
"reflect.Call or runtime.SetFinalizer");
#else
if (sizeof (float) == 4)
return go_complex_to_ffi (&ffi_type_float);
abort ();
#endif
case GO_COMPLEX128:
#ifdef __alpha__
runtime_throw("the libffi library does not support Complex128 type with "
"reflect.Call or runtime.SetFinalizer");
{
#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
return &ffi_type_complex_float;
#else
if (sizeof (double) == 8)
return go_complex_to_ffi (&ffi_type_double);
abort ();
return go_complex_to_ffi (&ffi_type_float);
#endif
}
abort ();
case GO_COMPLEX128:
if (sizeof (double) == 8)
{
#ifdef FFI_TARGET_HAS_COMPLEX_TYPE
return &ffi_type_complex_double;
#else
return go_complex_to_ffi (&ffi_type_double);
#endif
}
abort ();
case GO_INT16:
return &ffi_type_sint16;
case GO_INT32:

View File

@ -12,11 +12,10 @@
#include "go-alloc.h"
#include "go-assert.h"
#include "go-type.h"
#ifdef USE_LIBFFI
#include "go-ffi.h"
#if defined(USE_LIBFFI) && FFI_GO_CLOSURES
/* The functions in this file are only called from reflect_call. As
reflect_call calls a libffi function, which will be compiled
without -fsplit-stack, it will always run with a large stack. */
@ -202,11 +201,7 @@ go_set_results (const struct __go_func_type *func, unsigned char *call_result,
If IS_METHOD is true this is a call to a method expression. The
first argument is the receiver. It is described in FUNC_TYPE, but
regardless of FUNC_TYPE, it is passed as a pointer.
If neither IS_INTERFACE nor IS_METHOD is true then we are calling a
function indirectly, and we must pass a closure pointer via
__go_set_closure. The pointer to pass is simply FUNC_VAL. */
regardless of FUNC_TYPE, it is passed as a pointer. */
void
reflect_call (const struct __go_func_type *func_type, FuncVal *func_val,
@ -221,9 +216,7 @@ reflect_call (const struct __go_func_type *func_type, FuncVal *func_val,
call_result = (unsigned char *) malloc (go_results_size (func_type));
if (!is_interface && !is_method)
__go_set_closure (func_val);
ffi_call (&cif, func_val->fn, call_result, params);
ffi_call_go (&cif, func_val->fn, call_result, params, func_val);
/* Some day we may need to free result values if RESULTS is
NULL. */
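For context, here is a minimal sketch (not part of this change) of libffi's Go-closure call entry point as used above. It assumes a libffi built with FFI_GO_CLOSURES and uses a hypothetical descriptor whose first word is the code pointer; ffi_call_go behaves like ffi_call except that the final argument is also handed to the callee as its closure/static-chain value:

#include <ffi.h>

/* Hypothetical stand-in for a Go func value: the first word is the code
   pointer, the closed-over data follows.  */
struct funcval { void (*fn)(void); };

static void
call_funcval_no_args(struct funcval *fv)
{
  ffi_cif cif;
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &ffi_type_void, NULL) != FFI_OK)
    return;
  /* Like ffi_call, but FV is also delivered to the callee as its
     closure value.  */
  ffi_call_go(&cif, fv->fn, NULL, NULL, fv);
}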

View File

@ -84,7 +84,6 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
MLink *v, *next;
byte *tiny;
bool incallback;
void *closure;
if(size == 0) {
// All 0-length allocations use this pointer.
@ -96,10 +95,6 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
m = runtime_m();
g = runtime_g();
// We should not be called in between __go_set_closure and the
// actual function call, but cope with it if we are.
closure = g->closure;
incallback = false;
if(m->mcache == nil && g->ncgo > 0) {
// For gccgo this case can occur when a cgo or SWIG function
@ -180,7 +175,6 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
m->locks--;
if(incallback)
runtime_entersyscall();
g->closure = closure;
return v;
}
}
@ -267,8 +261,6 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
if(incallback)
runtime_entersyscall();
g->closure = closure;
return v;
}

View File

@ -133,8 +133,8 @@ clearpools(void)
// clear sync.Pool's
if(poolcleanup != nil) {
__go_set_closure(poolcleanup);
poolcleanup->fn();
__builtin_call_with_static_chain(poolcleanup->fn(),
poolcleanup);
}
for(pp=runtime_allp; (p=*pp) != nil; pp++) {
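The pattern above is GCC's __builtin_call_with_static_chain: the first operand must be a call expression, and the second operand is a pointer installed as the static chain for that one call. A minimal sketch of the idiom, using a hypothetical FuncVal-like struct rather than the runtime's real types:

struct funcval { void (*fn)(void); };

/* Sketch only: invoke the code pointer in FV while passing FV itself in
   the static chain, which is where a gccgo closure now looks for its
   closure value.  */
static void
call_closure(struct funcval *fv)
{
  __builtin_call_with_static_chain(fv->fn(), fv);
}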

View File

@ -3310,26 +3310,6 @@ runtime_proc_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj
enqueue1(wbufp, (Obj){(byte*)&runtime_sched, sizeof runtime_sched, 0});
}
// When a function calls a closure, it passes the closure value to
// __go_set_closure immediately before the function call. When a
// function uses a closure, it calls __go_get_closure immediately on
// function entry. This is a hack, but it will work on any system.
// It would be better to use the static chain register when there is
// one. It is also worth considering expanding these functions
// directly in the compiler.
void
__go_set_closure(void* v)
{
g->closure = v;
}
void *
__go_get_closure(void)
{
return g->closure;
}
// Return whether we are waiting for a GC. This gc toolchain uses
// preemption instead.
bool

View File

@ -195,7 +195,6 @@ struct Location
struct G
{
void* closure; // Closure value.
Defer* defer;
Panic* panic;
void* exception; // current exception being thrown
@ -833,9 +832,6 @@ int32 getproccount(void);
#define PREFETCH(p) __builtin_prefetch(p)
void __go_set_closure(void*);
void* __go_get_closure(void);
bool runtime_gcwaiting(void);
void runtime_badsignal(int);
Defer* runtime_newdefer(void);

View File

@ -237,8 +237,7 @@ timerproc(void* dummy __attribute__ ((unused)))
arg = t->arg;
seq = t->seq;
runtime_unlock(&timers);
__go_set_closure(fv);
f(arg, seq);
__builtin_call_with_static_chain(f(arg, seq), fv);
// clear f and arg to avoid leak while sleeping for next timer
f = nil;