mirror of https://github.com/qemu/qemu.git, synced 2024-11-23 10:53:37 +08:00
51483f6c84
Currently the QemuLockCnt data structure and associated functions are in the include/qemu/thread.h header. Move them to their own qemu/lockcnt.h. The main reason for doing this is that it means we can autogenerate the documentation comments into the docs/devel documentation.

The copyright/author in the new header is drawn from lockcnt.c, since the header changes were added in the same commit as lockcnt.c; since neither thread.h nor lockcnt.c state an explicit license, the standard default of GPL-2-or-later applies.

We include the new header (and the .c file, which was accidentally omitted previously) in the "RCU" part of MAINTAINERS, since that is where the lockcnt.rst documentation is categorized.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20240816132212.3602106-7-peter.maydell@linaro.org
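As a rough illustration of what the move means for callers (a sketch, not code from this commit; example_lock, example_init() and example_walk() are hypothetical), code using QemuLockCnt now includes the dedicated header directly instead of relying on qemu/thread.h, exactly as the include list of the file below does:

#include "qemu/osdep.h"
#include "qemu/lockcnt.h"   /* previously reached via "qemu/thread.h" */

static QemuLockCnt example_lock;   /* hypothetical counter protecting a list */

static void example_init(void)
{
    qemu_lockcnt_init(&example_lock);
}

static void example_walk(void)
{
    qemu_lockcnt_inc(&example_lock);    /* announce a reader of the list */
    /* ... visit the protected data structure ... */
    qemu_lockcnt_dec(&example_lock);    /* done; writers may now reclaim */
}

The lockcnt function names used here are the existing ones; only the header that declares them changes.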
158 lines · 3.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * epoll(7) file descriptor monitoring
 */

#include "qemu/osdep.h"
#include <sys/epoll.h>
#include "qemu/lockcnt.h"
#include "qemu/rcu_queue.h"
#include "aio-posix.h"

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

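/*
 * Stop using epoll for this AioContext: close the epoll instance and fall
 * back to the poll-based monitoring implementation.
 */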
void fdmon_epoll_disable(AioContext *ctx)
{
    if (ctx->epollfd >= 0) {
        close(ctx->epollfd);
        ctx->epollfd = -1;
    }

    /* Switch back */
    ctx->fdmon_ops = &fdmon_poll_ops;
}

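/* Translate GLib poll event bits (G_IO_*) into epoll event bits (EPOLL*) */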
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

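/*
 * Add, modify, or delete an fd in the epoll set, depending on whether
 * old_node and/or new_node are given.  If epoll_ctl() fails, give up on
 * epoll entirely and fall back to poll.
 */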
static void fdmon_epoll_update(AioContext *ctx,
                               AioHandler *old_node,
                               AioHandler *new_node)
{
    struct epoll_event event = {
        .data.ptr = new_node,
        .events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
    };
    int r;

    if (!new_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
    } else if (!old_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
    } else {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
    }

    if (r) {
        fdmon_epoll_disable(ctx);
    }
}

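/*
 * Wait for events.  A positive (nanosecond) timeout is first spent in
 * qemu_poll_ns() on the epoll fd itself, so epoll_wait() is only entered
 * with a non-positive timeout; it harvests up to 128 ready fds per call
 * and appends their handlers to ready_list.
 */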
static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
                            int64_t timeout)
{
    GPollFD pfd = {
        .fd = ctx->epollfd,
        .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
    };
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    if (timeout > 0) {
        ret = qemu_poll_ns(&pfd, 1, timeout);
        if (ret > 0) {
            timeout = 0;
        }
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                          (ev & EPOLLOUT ? G_IO_OUT : 0) |
                          (ev & EPOLLHUP ? G_IO_HUP : 0) |
                          (ev & EPOLLERR ? G_IO_ERR : 0);

            node = events[i].data.ptr;
            aio_add_ready_handler(ready_list, node, revents);
        }
    }
out:
    return ret;
}

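/* The FDMonOps callbacks installed on the AioContext while epoll is in use */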
static const FDMonOps fdmon_epoll_ops = {
    .update = fdmon_epoll_update,
    .wait = fdmon_epoll_wait,
    .need_wait = aio_poll_disabled,
};

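/*
 * Register every live handler with a non-zero event mask on the epoll
 * instance and switch ctx->fdmon_ops over to epoll.  Returns false if any
 * epoll_ctl() call fails.
 */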
static bool fdmon_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }

    ctx->fdmon_ops = &fdmon_epoll_ops;
    return true;
}

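/*
 * Switch from poll to epoll once at least EPOLL_ENABLE_THRESHOLD fds are
 * being monitored.  The handler list is locked via the list_lock lockcnt
 * while fds are added to epoll; if enabling fails, the epoll fd is closed
 * again.
 */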
bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
    bool ok;

    if (ctx->epollfd < 0) {
        return false;
    }

    if (npfd < EPOLL_ENABLE_THRESHOLD) {
        return false;
    }

    /* The list must not change while we add fds to epoll */
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return false;
    }

    ok = fdmon_epoll_try_enable(ctx);

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);

    if (!ok) {
        fdmon_epoll_disable(ctx);
    }
    return ok;
}

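/*
 * Create the epoll instance for this AioContext.  On failure a warning is
 * printed and ctx->epollfd stays -1, so later upgrade attempts simply bail
 * out.
 */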
void fdmon_epoll_setup(AioContext *ctx)
{
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
    }
}