mirror of
https://github.com/qemu/qemu.git
synced 2024-11-24 11:23:43 +08:00
Make bottom halves more robust
Bottom halves are supposed to not complete until the next iteration of the main loop. This is very important to ensure that guests cannot cause stack overflows in the block-driver code. Right now, if you attempt to schedule a bottom half from within a bottom-half callback, you will enter an infinite loop.

This patch uses the same logic that we use for the IOHandler loop to make bottom-half processing robust against list manipulation from within a callback.

This patch also introduces idle scheduling for bottom halves: qemu_bh_poll() returns an indication of whether any bottom halves were successfully executed, and qemu_aio_wait() uses this to return immediately once a bottom half has been executed instead of waiting for a completion notification. qemu_bh_schedule_idle() works around this by not reporting that its callback has run in the qemu_bh_poll() loop.

qemu_aio_wait() probably needs some refactoring, but that would require a larger code audit; idle scheduling seems like a good compromise.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5572 c046a42c-6fe2-441c-8c8c-71466251a162
This commit is contained in:
parent
474ad834d2
commit
1b435b1032
@ -70,6 +70,7 @@ typedef void QEMUBHFunc(void *opaque);
|
||||
|
||||
/* Bottom-half (BH) public API.  A BH is a callback deferred to the next
 * iteration of the main loop, so device emulation never re-enters
 * block-driver code on the current stack. */
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
void qemu_bh_schedule(QEMUBH *bh);
void qemu_bh_schedule_idle(QEMUBH *bh);
void qemu_bh_cancel(QEMUBH *bh);
void qemu_bh_delete(QEMUBH *bh);
int qemu_bh_poll(void);
|
||||
|
60
vl.c
60
vl.c
@ -7578,6 +7578,8 @@ struct QEMUBH {
|
||||
QEMUBHFunc *cb;
|
||||
void *opaque;
|
||||
int scheduled;
|
||||
int idle;
|
||||
int deleted;
|
||||
QEMUBH *next;
|
||||
};
|
||||
|
||||
@ -7591,37 +7593,56 @@ QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
|
||||
return NULL;
|
||||
bh->cb = cb;
|
||||
bh->opaque = opaque;
|
||||
bh->next = first_bh;
|
||||
first_bh = bh;
|
||||
return bh;
|
||||
}
|
||||
|
||||
int qemu_bh_poll(void)
|
||||
{
|
||||
QEMUBH *bh, **pbh;
|
||||
QEMUBH *bh, **bhp;
|
||||
int ret;
|
||||
|
||||
ret = 0;
|
||||
for(;;) {
|
||||
pbh = &first_bh;
|
||||
bh = *pbh;
|
||||
if (!bh)
|
||||
break;
|
||||
ret = 1;
|
||||
*pbh = bh->next;
|
||||
bh->scheduled = 0;
|
||||
bh->cb(bh->opaque);
|
||||
for (bh = first_bh; bh; bh = bh->next) {
|
||||
if (!bh->deleted && bh->scheduled) {
|
||||
bh->scheduled = 0;
|
||||
if (!bh->idle)
|
||||
ret = 1;
|
||||
bh->idle = 0;
|
||||
bh->cb(bh->opaque);
|
||||
}
|
||||
}
|
||||
|
||||
/* remove deleted bhs */
|
||||
bhp = &first_bh;
|
||||
while (*bhp) {
|
||||
bh = *bhp;
|
||||
if (bh->deleted) {
|
||||
*bhp = bh->next;
|
||||
qemu_free(bh);
|
||||
} else
|
||||
bhp = &bh->next;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Queue @bh to run at the next qemu_bh_poll(), flagged as "idle": its
 * execution is not reported as progress by qemu_bh_poll(), so
 * qemu_aio_wait() keeps waiting for real completion notifications.
 * Idempotent while the BH is already scheduled. */
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (!bh->scheduled) {
        bh->scheduled = 1;
        bh->idle = 1;
    }
}
|
||||
|
||||
void qemu_bh_schedule(QEMUBH *bh)
|
||||
{
|
||||
CPUState *env = cpu_single_env;
|
||||
if (bh->scheduled)
|
||||
return;
|
||||
bh->scheduled = 1;
|
||||
bh->next = first_bh;
|
||||
first_bh = bh;
|
||||
|
||||
bh->idle = 0;
|
||||
/* stop the currently executing CPU to execute the BH ASAP */
|
||||
if (env) {
|
||||
cpu_interrupt(env, CPU_INTERRUPT_EXIT);
|
||||
@ -7630,20 +7651,13 @@ void qemu_bh_schedule(QEMUBH *bh)
|
||||
|
||||
/* Cancel a pending bottom half.
 *
 * Only clears the scheduled flag; the BH stays on the first_bh list
 * (qemu_bh_poll() skips unscheduled entries).  The previous
 * implementation unlinked the node here, which corrupted the list when
 * called from inside a BH callback while qemu_bh_poll() was iterating.
 * All unlinking is now centralized at a safe point in qemu_bh_poll(). */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
|
||||
|
||||
/* Mark a bottom half for deletion.
 *
 * Does NOT free @bh immediately: the old qemu_free() here left a
 * dangling pointer in the first_bh list and could free a node that
 * qemu_bh_poll() was still traversing.  Instead the BH is descheduled
 * and flagged deleted; qemu_bh_poll() unlinks and frees it at a safe
 * point.  The caller must not touch @bh after this call. */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
|
||||
|
||||
/***********************************************************/
|
||||
|
Loading…
Reference in New Issue
Block a user