mirror of https://github.com/qemu/qemu.git
async: add aio_bh_schedule_oneshot
qemu_bh_delete is already clearing bh->scheduled at the same time as it's
setting bh->deleted. Since it's not using any memory barriers, there is no
synchronization going on for bh->deleted, and this makes the bh->deleted
checks superfluous in aio_compute_timeout, aio_bh_poll and aio_ctx_check.

Just remove them, and put the (bh->scheduled && bh->deleted) combo to work
in a new function, aio_bh_schedule_oneshot. The new function removes the
need to save the QEMUBH pointer between the creation and the execution of
the bottom half.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
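To make the caller-side difference concrete, a minimal sketch follows. MyState, my_cb and the defer_* helpers are illustrative names and not part of this patch; only aio_bh_new, qemu_bh_schedule, qemu_bh_delete and the new aio_bh_schedule_oneshot are the actual QEMU APIs involved.

#include "qemu/osdep.h"
#include "block/aio.h"

/* Illustrative state struct; not part of the patch. */
typedef struct MyState {
    QEMUBH *bh;     /* only needed for the old pattern */
    int value;
} MyState;

static void my_cb(void *opaque)
{
    MyState *s = opaque;
    s->value++;     /* stand-in for the real deferred work */
}

/* Old pattern: the QEMUBH pointer must be kept between creation and
 * execution, and qemu_bh_delete() must eventually be called on it. */
static void defer_old(AioContext *ctx, MyState *s)
{
    s->bh = aio_bh_new(ctx, my_cb, s);
    qemu_bh_schedule(s->bh);
    /* ... later, e.g. inside my_cb: qemu_bh_delete(s->bh); */
}

/* New pattern: fire-and-forget.  The bottom half is created with both
 * scheduled and deleted already set, so aio_bh_poll() runs it once and
 * then frees it; no pointer needs to be stored. */
static void defer_new(AioContext *ctx, MyState *s)
{
    aio_bh_schedule_oneshot(ctx, my_cb, s);
}

Besides saving a pointer, the oneshot variant also spares the caller from keeping state alive just for the later qemu_bh_delete call.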
parent 818bbc86c9
commit 5b8bb3595a
async.c | 27
@@ -44,6 +44,25 @@ struct QEMUBH {
     bool deleted;
 };
 
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+    QEMUBH *bh;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    bh->scheduled = 1;
+    bh->deleted = 1;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
+}
+
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
@@ -86,7 +105,7 @@ int aio_bh_poll(AioContext *ctx)
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+        if (atomic_xchg(&bh->scheduled, 0)) {
             /* Idle BHs and the notify BH don't count as progress */
             if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
@@ -104,7 +123,7 @@ int aio_bh_poll(AioContext *ctx)
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
-            if (bh->deleted) {
+            if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
@@ -168,7 +187,7 @@ aio_compute_timeout(AioContext *ctx)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
@@ -216,7 +235,7 @@ aio_ctx_check(GSource *source)
     aio_notify_accept(ctx);
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             return true;
         }
     }
@@ -180,6 +180,12 @@ void aio_context_acquire(AioContext *ctx);
 /* Relinquish ownership of the AioContext. */
 void aio_context_release(AioContext *ctx);
 
+/**
+ * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
+ * only once and as soon as possible.
+ */
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+
 /**
  * aio_bh_new: Allocate a new bottom half structure.
  *
|
Loading…
Reference in New Issue
Block a user