block: replace g_new0 with g_new for bottom half allocation.
This saves about 15% of the clock cycles spent on allocation. Using the
slice allocator does not add a visible improvement; allocation is faster
than malloc, while freeing seems to be slower.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent e012b78cf5
commit ee82310f8a

 async.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
@@ -44,10 +44,12 @@ struct QEMUBH {
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
-    bh = g_new0(QEMUBH, 1);
-    bh->ctx = ctx;
-    bh->cb = cb;
-    bh->opaque = opaque;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
     qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
     /* Make sure that the members are ready before putting bh into list */
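For illustration, here is a minimal standalone sketch of the same pattern outside
QEMU; the Widget type and its fields are hypothetical, and only the g_new0()/g_new()
GLib macros and C99 compound-literal semantics are taken as given:

/*
 * Minimal sketch, assuming GLib; Widget and its fields are
 * hypothetical and not QEMU code.
 */
#include <glib.h>

typedef struct Widget {
    int id;
    void *data;
    struct Widget *next;
} Widget;

/* Before: g_new0() zeroes the whole allocation up front, then the
 * fields that matter are written a second time. */
Widget *widget_new_zeroed(int id, void *data)
{
    Widget *w = g_new0(Widget, 1);
    w->id = id;
    w->data = data;
    /* w->next is already NULL from the zeroing pass */
    return w;
}

/* After: g_new() skips the blanket zeroing; the compound literal
 * assigns the named fields and value-initializes the rest (so
 * w->next is still NULL), letting the compiler emit the stores
 * directly instead of clearing the whole object and then
 * overwriting most of it. */
Widget *widget_new(int id, void *data)
{
    Widget *w = g_new(Widget, 1);
    *w = (Widget){
        .id   = id,
        .data = data,
    };
    return w;
}

Note that the compound literal keeps the safety property of g_new0():
members not named in the initializer are still zeroed, which is why the
QEMU hunk above can drop g_new0() without leaving fields such as
bh->next uninitialized before the locked list insertion.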