runtime: Support cgo callbacks from threads started by C.
This adjusts the extram support to work with gccgo.  There are some
corresponding changes to cgo in https://codereview.appspot.com/11406047/.

From-SVN: r201179
commit 7acd2b86bf (parent fb48aadc78)
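The scenario this enables, sketched as a standalone C program: a thread created by C code (not by the Go runtime) invokes a callback into Go. GoCallback is a hypothetical stub standing in for a Go function exported with cgo's //export; in a real program that symbol is generated by cgo and enters the Go runtime through the entry points patched below.

/* Hypothetical sketch: a C-started thread calling back into Go.
   GoCallback stands in for a cgo //export'd Go function and is
   stubbed here so the example compiles on its own.  */
#include <pthread.h>
#include <stdio.h>

static void
GoCallback (int arg)
{
  /* In a real program this would run Go code on a borrowed m.  */
  printf ("callback invoked with %d\n", arg);
}

static void *
c_thread (void *arg)
{
  /* This thread has no Go m or g; servicing the callback from here
     is what the extram support below provides.  */
  GoCallback (42);
  return NULL;
}

int
main (void)
{
  pthread_t tid;

  pthread_create (&tid, NULL, c_thread, NULL);
  pthread_join (tid, NULL);
  return 0;
}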
--- a/libgo/runtime/go-cgo.c
+++ b/libgo/runtime/go-cgo.c
@@ -35,6 +35,9 @@ syscall_cgocall ()
   M* m;
   G* g;
 
+  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
+    runtime_newextram ();
+
   m = runtime_m ();
   ++m->ncgocall;
   g = runtime_g ();
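The runtime_cas guard above is a one-shot latch: the first cgo call to observe runtime_needextram == 1 swaps it to 0 and allocates the extra m; every later caller sees 0 and skips the branch. A minimal standalone sketch of the same pattern using C11 atomics (the names here are illustrative, not libgo's):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint need_setup = 1;

static void
maybe_setup (void)
{
  unsigned expected = 1;

  /* Exactly one caller wins the exchange, even under concurrency.  */
  if (atomic_compare_exchange_strong (&need_setup, &expected, 0))
    printf ("performing one-time setup\n");
}

int
main (void)
{
  maybe_setup ();  /* runs the setup */
  maybe_setup ();  /* no-op */
  return 0;
}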
@@ -71,7 +74,24 @@ syscall_cgocalldone ()
 void
 syscall_cgocallback ()
 {
+  M *mp;
+
+  mp = runtime_m ();
+  if (mp == NULL)
+    {
+      runtime_needm ();
+      mp = runtime_m ();
+      mp->dropextram = true;
+    }
+
   runtime_exitsyscall ();
+
+  mp = runtime_m ();
+  if (mp->needextram)
+    {
+      mp->needextram = 0;
+      runtime_newextram ();
+    }
 }
 
 /* Prepare to return to C/C++ code from a callback to Go code.  */
@@ -79,7 +99,15 @@ syscall_cgocallback ()
 void
 syscall_cgocallbackdone ()
 {
+  M *mp;
+
   runtime_entersyscall ();
+  mp = runtime_m ();
+  if (mp->dropextram && runtime_g ()->ncgo == 0)
+    {
+      mp->dropextram = false;
+      runtime_dropm ();
+    }
 }
 
 /* Allocate memory and save it in a list visible to the Go garbage
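Taken together, syscall_cgocallback and syscall_cgocallbackdone bracket every callback: on entry, a thread with no m borrows one (runtime_needm) and marks it dropextram; on exit, once no cgo frames remain, the borrowed m is returned (runtime_dropm). A toy model of that bracketing, with the runtime replaced by hypothetical stubs so it runs standalone:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the runtime's M; only the field the sketch needs.  */
typedef struct toy_m { bool dropextram; } ToyM;

static __thread ToyM *current_m;   /* stand-in for runtime_m () */

static void
callback_enter (void)
{
  if (current_m == NULL)
    {
      /* Borrow an m, as runtime_needm does from the extra list.  */
      current_m = calloc (1, sizeof (ToyM));
      current_m->dropextram = true;
      printf ("borrowed an m for this C thread\n");
    }
}

static void
callback_done (void)
{
  if (current_m != NULL && current_m->dropextram)
    {
      /* Return the m, as runtime_dropm does.  */
      printf ("returning the borrowed m\n");
      free (current_m);
      current_m = NULL;
    }
}

int
main (void)
{
  callback_enter ();
  /* ... the Go callback would run here ...  */
  callback_done ();
  return 0;
}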
--- a/libgo/runtime/go-defer.c
+++ b/libgo/runtime/go-defer.c
@@ -42,6 +42,7 @@ __go_undefer (_Bool *frame)
 {
   struct __go_defer_stack *d;
   void (*pfn) (void *);
+  M *m;
 
   d = g->defer;
   pfn = d->__pfn;
@@ -51,7 +52,14 @@ __go_undefer (_Bool *frame)
       (*pfn) (d->__arg);
 
       g->defer = d->__next;
-      __go_free (d);
+
+      /* This may be called by a cgo callback routine to defer the
+	 call to syscall.CgocallBackDone, in which case we will not
+	 have a memory context.  Don't try to free anything in that
+	 case--the GC will release it later.  */
+      m = runtime_m ();
+      if (m != NULL && m->mcache != NULL)
+	__go_free (d);
 
       /* Since we are executing a defer function here, we know we are
 	 returning from the calling function.  If the calling
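The new guard is the generic "free only if this thread has an allocator context" pattern: a C-created thread that never set up a memory cache leaves the block for the garbage collector instead. A minimal illustration (mcache here is a hypothetical thread-local, and this toy has no real GC to reclaim the skipped free):

#include <stdlib.h>

static __thread void *mcache;   /* hypothetical per-thread allocator state */

static void
free_if_possible (void *p)
{
  /* With no memory context on this thread, leave p for the collector
     rather than freeing through a cache we do not have.  */
  if (mcache != NULL)
    free (p);
}

int
main (void)
{
  free_if_possible (malloc (16));
  return 0;
}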
--- a/libgo/runtime/go-panic.c
+++ b/libgo/runtime/go-panic.c
@@ -54,6 +54,7 @@ __go_panic (struct __go_empty_interface arg)
 {
   struct __go_defer_stack *d;
   void (*pfn) (void *);
+  M *m;
 
   d = g->defer;
   if (d == NULL)
@@ -95,7 +96,14 @@ __go_panic (struct __go_empty_interface arg)
 	}
 
       g->defer = d->__next;
-      __go_free (d);
+
+      /* This may be called by a cgo callback routine to defer the
+	 call to syscall.CgocallBackDone, in which case we will not
+	 have a memory context.  Don't try to free anything in that
+	 case--the GC will release it later.  */
+      m = runtime_m ();
+      if (m != NULL && m->mcache != NULL)
+	__go_free (d);
     }
 
   /* The panic was not recovered.  */
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -397,7 +397,8 @@ enum { MaxGomaxprocs = 1<<8 };
 Sched	runtime_sched;
 int32	runtime_gomaxprocs;
 bool	runtime_singleproc;
-bool	runtime_iscgo;
+bool	runtime_iscgo = true;
+uint32	runtime_needextram = 1;
 uint32	runtime_gcwaiting;
 M	runtime_m0;
 G	runtime_g0;	// idle goroutine for m0
@@ -901,8 +902,8 @@ runtime_mstart(void* mp)
 
 #ifdef USING_SPLIT_STACK
 	{
-	  int dont_block_signals = 0;
-	  __splitstack_block_signals(&dont_block_signals, nil);
+		int dont_block_signals = 0;
+		__splitstack_block_signals(&dont_block_signals, nil);
 	}
 #endif
 
@@ -944,7 +945,7 @@ struct CgoThreadStart
 // Allocate a new m unassociated with any thread.
 // Can use p for allocation context if needed.
 M*
-runtime_allocm(P *p)
+runtime_allocm(P *p, int32 stacksize, byte** ret_g0_stack, size_t* ret_g0_stacksize)
 {
 	M *mp;
 
@@ -961,7 +962,7 @@ runtime_allocm(P *p)
 
 	mp = runtime_mal(sizeof *mp);
 	mcommoninit(mp);
-	mp->g0 = runtime_malg(-1, nil, nil);
+	mp->g0 = runtime_malg(stacksize, ret_g0_stack, ret_g0_stacksize);
 
 	if(p == m->p)
 		releasep();
@@ -1006,6 +1007,9 @@ static void unlockextra(M*);
 //
 // When the callback is done with the m, it calls dropm to
 // put the m back on the list.
+//
+// Unlike the gc toolchain, we start running on curg, since we are
+// just going to return and let the caller continue.
 void
 runtime_needm(void)
 {
@@ -1027,18 +1031,40 @@ runtime_needm(void)
 	mp->needextram = mp->schedlink == nil;
 	unlockextra(mp->schedlink);
 
-	// Install m and g (= m->g0) and set the stack bounds
-	// to match the current stack. We don't actually know
-	// how big the stack is, like we don't know how big any
-	// scheduling stack is, but we assume there's at least 32 kB,
-	// which is more than enough for us.
-	runtime_setmg(mp, mp->g0);
+	// Install m and g (= m->curg).
+	runtime_setmg(mp, mp->curg);
 
-	// We assume that the split stack support has been initialized
-	// for this new thread.
+	// Initialize g's context as in mstart.
+	initcontext();
+	g->status = Gsyscall;
+	g->entry = nil;
+	g->param = nil;
+#ifdef USING_SPLIT_STACK
+	__splitstack_getcontext(&g->stack_context[0]);
+#else
+	g->gcinitial_sp = &mp;
+	g->gcstack_size = 0;
+	g->gcnext_sp = &mp;
+#endif
+	getcontext(&g->context);
+
+	if(g->entry != nil) {
+		// Got here from mcall.
+		void (*pfn)(G*) = (void (*)(G*))g->entry;
+		G* gp = (G*)g->param;
+		pfn(gp);
+		*(int*)0x22 = 0x22;
+	}
 
 	// Initialize this thread to use the m.
 	runtime_minit();
+
+#ifdef USING_SPLIT_STACK
+	{
+		int dont_block_signals = 0;
+		__splitstack_block_signals(&dont_block_signals, nil);
+	}
+#endif
 }
 
 // newextram allocates an m and puts it on the extra list.
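runtime_needm snapshots g's context with getcontext and relies on its "returns twice" behavior: a later jump to &g->context resumes execution right after the call, and the g->entry check distinguishes that second return (arriving via mcall) from the first. The classic standalone demonstration of the mechanism:

#include <stdio.h>
#include <ucontext.h>

int
main (void)
{
  ucontext_t ctx;
  volatile int resumed = 0;   /* volatile: must survive the second return */

  getcontext (&ctx);
  if (!resumed)
    {
      resumed = 1;
      printf ("first return from getcontext\n");
      setcontext (&ctx);      /* jump back: getcontext "returns" again */
    }
  printf ("second return: resumed via setcontext\n");
  return 0;
}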
@@ -1049,15 +1075,17 @@ runtime_newextram(void)
 {
 	M *mp, *mnext;
 	G *gp;
+	byte *g0_sp, *sp;
+	size_t g0_spsize, spsize;
 
 	// Create extra goroutine locked to extra m.
 	// The goroutine is the context in which the cgo callback will run.
 	// The sched.pc will never be returned to, but setting it to
 	// runtime.goexit makes clear to the traceback routines where
 	// the goroutine stack ends.
-	mp = runtime_allocm(nil);
-	gp = runtime_malg(StackMin, nil, nil);
-	gp->status = Gsyscall;
+	mp = runtime_allocm(nil, StackMin, &g0_sp, &g0_spsize);
+	gp = runtime_malg(StackMin, &sp, &spsize);
+	gp->status = Gdead;
 	mp->curg = gp;
 	mp->locked = LockInternal;
 	mp->lockedg = gp;
@@ -1072,6 +1100,16 @@ runtime_newextram(void)
 	runtime_unlock(&runtime_sched);
 	gp->goid = runtime_xadd64(&runtime_sched.goidgen, 1);
 
+	// The context for gp will be set up in runtime_needm.  But
+	// here we need to set up the context for g0.
+	getcontext(&mp->g0->context);
+	mp->g0->context.uc_stack.ss_sp = g0_sp;
+#ifdef MAKECONTEXT_STACK_TOP
+	mp->g0->context.uc_stack.ss_sp += g0_spsize;
+#endif
+	mp->g0->context.uc_stack.ss_size = g0_spsize;
+	makecontext(&mp->g0->context, kickoff, 0);
+
 	// Add m to the extra list.
 	mnext = lockextra(true);
 	mp->schedlink = mnext;
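g0 gets a real context here so the scheduler can later switch to it: uc_stack must describe the new stack, and makecontext arranges for kickoff to run on it. The MAKECONTEXT_STACK_TOP adjustment covers the minority of platforms whose makecontext expects ss_sp to be the top of the stack rather than its base. A self-contained version of the same setup (this kickoff just prints and returns):

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, co_ctx;

static void
kickoff (void)
{
  printf ("running on the new stack\n");
  /* Returning resumes uc_link, i.e. main_ctx.  */
}

int
main (void)
{
  char *stack = malloc (STACK_SIZE);

  getcontext (&co_ctx);
  co_ctx.uc_stack.ss_sp = stack;       /* base of the new stack */
  co_ctx.uc_stack.ss_size = STACK_SIZE;
  co_ctx.uc_link = &main_ctx;          /* resumed when kickoff returns */
  makecontext (&co_ctx, kickoff, 0);

  swapcontext (&main_ctx, &co_ctx);    /* run kickoff, then come back */
  printf ("back on the original stack\n");
  free (stack);
  return 0;
}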
@@ -1114,6 +1152,8 @@ runtime_dropm(void)
 	mp = m;
 	runtime_setmg(nil, nil);
 
+	mp->curg->status = Gdead;
+
 	mnext = lockextra(true);
 	mp->schedlink = mnext;
 	unlockextra(mp);
@@ -1159,6 +1199,29 @@ unlockextra(M *mp)
 	runtime_atomicstorep(&runtime_extram, mp);
 }
 
+static int32
+countextra()
+{
+	M *mp, *mc;
+	int32 c;
+
+	for(;;) {
+		mp = runtime_atomicloadp(&runtime_extram);
+		if(mp == MLOCKED) {
+			runtime_osyield();
+			continue;
+		}
+		if(!runtime_casp(&runtime_extram, mp, MLOCKED)) {
+			runtime_osyield();
+			continue;
+		}
+		c = 0;
+		for(mc = mp; mc != nil; mc = mc->schedlink)
+			c++;
+		runtime_atomicstorep(&runtime_extram, mp);
+		return c;
+	}
+}
 
 // Create a new m. It will start off with a call to fn, or else the scheduler.
 static void
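countextra reuses the extra-list locking convention: swing the list head to a sentinel (MLOCKED) with a compare-and-swap, walk the now-private list, then publish the real head again. The same idiom with C11 atomics (LOCKED and the node type are illustrative, and the spin omits libgo's runtime_osyield):

#include <stdatomic.h>
#include <stdio.h>

typedef struct node { struct node *next; } Node;

#define LOCKED ((Node *) 1)   /* sentinel, like libgo's MLOCKED */

static _Atomic (Node *) list_head;

static int
count_list (void)
{
  Node *head, *p;
  int c;

  for (;;)
    {
      head = atomic_load (&list_head);
      if (head == LOCKED)
        continue;   /* another thread holds the list; spin */
      /* Claim the whole list by swapping in the sentinel.  */
      if (!atomic_compare_exchange_weak (&list_head, &head, LOCKED))
        continue;
      c = 0;
      for (p = head; p != NULL; p = p->next)
        c++;
      atomic_store (&list_head, head);  /* publish the head again */
      return c;
    }
}

int
main (void)
{
  Node a = { NULL }, b = { &a };

  atomic_store (&list_head, &b);
  printf ("%d nodes\n", count_list ());
  return 0;
}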
@@ -1166,7 +1229,7 @@ newm(void(*fn)(void), P *p)
 {
 	M *mp;
 
-	mp = runtime_allocm(p);
+	mp = runtime_allocm(p, -1, nil, nil);
 	mp->nextp = p;
 	mp->mstartfn = fn;
 
@@ -2348,7 +2411,7 @@ checkdead(void)
 	int32 run, grunning, s;
 
 	// -1 for sysmon
-	run = runtime_sched.mcount - runtime_sched.nmidle - runtime_sched.mlocked - 1;
+	run = runtime_sched.mcount - runtime_sched.nmidle - runtime_sched.mlocked - 1 - countextra();
 	if(run > 0)
 		return;
 	if(run < 0) {
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -273,6 +273,7 @@ struct M
 	GCStats	gcstats;
 	bool	racecall;
 	bool	needextram;
+	bool	dropextram;	// for gccgo: drop after call is done.
 	void*	racepc;
 	void	(*waitunlockf)(Lock*);
 	void*	waitlock;
@@ -450,6 +451,7 @@ extern G* runtime_lastg;
 extern	M*	runtime_allm;
 extern	P**	runtime_allp;
 extern	int32	runtime_gomaxprocs;
+extern	uint32	runtime_needextram;
 extern	bool	runtime_singleproc;
 extern	uint32	runtime_panicking;
 extern	uint32	runtime_gcwaiting;	// gc is waiting to run
@@ -518,6 +520,8 @@ G* runtime_malg(int32, byte**, size_t*);
 void	runtime_mpreinit(M*);
 void	runtime_minit(void);
 void	runtime_unminit(void);
+void	runtime_needm(void);
+void	runtime_dropm(void);
 void	runtime_signalstack(byte*, int32);
 MCache*	runtime_allocmcache(void);
 void	runtime_freemcache(MCache*);