monitor: Make MonitorQMP a child class of Monitor

Currently, struct Monitor mixes state that is only relevant for HMP,
state that is only relevant for QMP, and some actually shared state.
In particular, a MonitorQMP field is present in the state of any
monitor, even if it's not a QMP monitor and therefore doesn't use the
state.

As a first step towards a clean separation between QMP and HMP, let
MonitorQMP extend Monitor and create a MonitorQMP object only when the
monitor is actually a QMP monitor.

Some places accessed Monitor.qmp unconditionally, even for HMP monitors.
They can't keep doing this now, so during the conversion they are changed
either to become conditional on monitor_is_qmp() or to assert() that they
always get a QMP monitor.
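
The "child class" relationship is the usual C idiom: MonitorQMP embeds its
parent as a leading 'Monitor common' member, code that needs the base type
passes &mon->common, and code that knows it is dealing with a QMP monitor
converts back with container_of(). A minimal sketch of the idiom (the
trimmed-down structs and the helper names are illustrative only, not the
actual monitor.c definitions):

    #include <stdbool.h>
    #include <stddef.h>

    /* Stand-in for QEMU's container_of() so the sketch is self-contained */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    typedef struct Monitor {
        int flags;
        bool use_io_thread;
        /* ... state shared by HMP and QMP ... */
    } Monitor;

    typedef struct {
        Monitor common;   /* parent placed first, as in the patch below */
        bool capab_oob;   /* stand-in for the QMP-only state */
    } MonitorQMP;

    /* Upcast: callers that want a Monitor * simply get &qmp->common */
    Monitor *qmp_to_monitor(MonitorQMP *qmp)
    {
        return &qmp->common;
    }

    /*
     * Downcast: only valid when the Monitor is known to be a QMP monitor,
     * which is why the converted call sites check monitor_is_qmp() or
     * assert() it first.
     */
    MonitorQMP *monitor_to_qmp(Monitor *mon)
    {
        return container_of(mon, MonitorQMP, common);
    }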

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20190613153405.24769-4-kwolf@redhat.com>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
[Superfluous variable in monitor_data_destroy() eliminated]
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Kevin Wolf, 2019-06-13 17:33:53 +02:00 (committed by Markus Armbruster)
commit b8e31d6ccc, parent b6c7c2e4a3

 monitor.c | 219

--- a/monitor.c
+++ b/monitor.c
@@ -168,26 +168,6 @@ struct MonFdset {
     QLIST_ENTRY(MonFdset) next;
 };
 
-typedef struct {
-    JSONMessageParser parser;
-    /*
-     * When a client connects, we're in capabilities negotiation mode.
-     * @commands is &qmp_cap_negotiation_commands then. When command
-     * qmp_capabilities succeeds, we go into command mode, and
-     * @command becomes &qmp_commands.
-     */
-    QmpCommandList *commands;
-    bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
-    bool capab[QMP_CAPABILITY__MAX];         /* offered and accepted */
-    /*
-     * Protects qmp request/response queue.
-     * Take monitor_lock first when you need both.
-     */
-    QemuMutex qmp_queue_lock;
-    /* Input queue that holds all the parsed QMP requests */
-    GQueue *qmp_requests;
-} MonitorQMP;
-
 /*
  * To prevent flooding clients, events can be throttled. The
  * throttling is calculated globally, rather than per-Monitor
@@ -220,7 +200,6 @@ struct Monitor {
      */
     ReadLineState *rs;
-    MonitorQMP qmp;
     gchar *mon_cpu_path;
     mon_cmd_t *cmd_table;
     QTAILQ_ENTRY(Monitor) entry;
@@ -241,6 +220,27 @@ struct Monitor {
     int mux_out;
 };
 
+typedef struct {
+    Monitor common;
+    JSONMessageParser parser;
+    /*
+     * When a client connects, we're in capabilities negotiation mode.
+     * @commands is &qmp_cap_negotiation_commands then. When command
+     * qmp_capabilities succeeds, we go into command mode, and
+     * @command becomes &qmp_commands.
+     */
+    QmpCommandList *commands;
+    bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
+    bool capab[QMP_CAPABILITY__MAX];         /* offered and accepted */
+    /*
+     * Protects qmp request/response queue.
+     * Take monitor_lock first when you need both.
+     */
+    QemuMutex qmp_queue_lock;
+    /* Input queue that holds all the parsed QMP requests */
+    GQueue *qmp_requests;
+} MonitorQMP;
+
 /* Shared monitor I/O thread */
 IOThread *mon_iothread;
@@ -249,7 +249,7 @@ QEMUBH *qmp_dispatcher_bh;
 struct QMPRequest {
     /* Owner of the request */
-    Monitor *mon;
+    MonitorQMP *mon;
     /*
      * Request object to be handled or Error to be reported
      * (exactly one of them is non-null)
@@ -357,18 +357,18 @@ static void qmp_request_free(QMPRequest *req)
 }
 
 /* Caller must hold mon->qmp.qmp_queue_lock */
-static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
+static void monitor_qmp_cleanup_req_queue_locked(MonitorQMP *mon)
 {
-    while (!g_queue_is_empty(mon->qmp.qmp_requests)) {
-        qmp_request_free(g_queue_pop_head(mon->qmp.qmp_requests));
+    while (!g_queue_is_empty(mon->qmp_requests)) {
+        qmp_request_free(g_queue_pop_head(mon->qmp_requests));
     }
 }
 
-static void monitor_qmp_cleanup_queues(Monitor *mon)
+static void monitor_qmp_cleanup_queues(MonitorQMP *mon)
 {
-    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_lock(&mon->qmp_queue_lock);
     monitor_qmp_cleanup_req_queue_locked(mon);
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
 }
@@ -480,17 +480,17 @@ int monitor_printf(Monitor *mon, const char *fmt, ...)
     return ret;
 }
 
-static void qmp_send_response(Monitor *mon, const QDict *rsp)
+static void qmp_send_response(MonitorQMP *mon, const QDict *rsp)
 {
     const QObject *data = QOBJECT(rsp);
     QString *json;
 
-    json = mon->flags & MONITOR_USE_PRETTY ? qobject_to_json_pretty(data) :
-                                             qobject_to_json(data);
+    json = mon->common.flags & MONITOR_USE_PRETTY ?
+           qobject_to_json_pretty(data) : qobject_to_json(data);
     assert(json != NULL);
 
     qstring_append_chr(json, '\n');
-    monitor_puts(mon, qstring_get_str(json));
+    monitor_puts(&mon->common, qstring_get_str(json));
     qobject_unref(json);
 }
@@ -513,12 +513,17 @@ static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
 static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
 {
     Monitor *mon;
+    MonitorQMP *qmp_mon;
 
     trace_monitor_protocol_event_emit(event, qdict);
     QTAILQ_FOREACH(mon, &mon_list, entry) {
-        if (monitor_is_qmp(mon)
-            && mon->qmp.commands != &qmp_cap_negotiation_commands) {
-            qmp_send_response(mon, qdict);
+        if (!monitor_is_qmp(mon)) {
+            continue;
+        }
+
+        qmp_mon = container_of(mon, MonitorQMP, common);
+        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
+            qmp_send_response(qmp_mon, qdict);
         }
     }
 }
@@ -711,29 +716,32 @@ static void monitor_data_init(Monitor *mon, int flags, bool skip_flush,
         monitor_iothread_init();
     }
     qemu_mutex_init(&mon->mon_lock);
-    qemu_mutex_init(&mon->qmp.qmp_queue_lock);
     mon->outbuf = qstring_new();
     /* Use *mon_cmds by default. */
     mon->cmd_table = mon_cmds;
     mon->skip_flush = skip_flush;
     mon->use_io_thread = use_io_thread;
-    mon->qmp.qmp_requests = g_queue_new();
     mon->flags = flags;
 }
 
+static void monitor_data_destroy_qmp(MonitorQMP *mon)
+{
+    json_message_parser_destroy(&mon->parser);
+    qemu_mutex_destroy(&mon->qmp_queue_lock);
+    monitor_qmp_cleanup_req_queue_locked(mon);
+    g_queue_free(mon->qmp_requests);
+}
+
 static void monitor_data_destroy(Monitor *mon)
 {
     g_free(mon->mon_cpu_path);
     qemu_chr_fe_deinit(&mon->chr, false);
     if (monitor_is_qmp(mon)) {
-        json_message_parser_destroy(&mon->qmp.parser);
+        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
     }
     readline_free(mon->rs);
     qobject_unref(mon->outbuf);
     qemu_mutex_destroy(&mon->mon_lock);
-    qemu_mutex_destroy(&mon->qmp.qmp_queue_lock);
-    monitor_qmp_cleanup_req_queue_locked(mon);
-    g_queue_free(mon->qmp.qmp_requests);
 }
 
 char *qmp_human_monitor_command(const char *command_line, bool has_cpu_index,
@@ -1087,8 +1095,12 @@ static void query_commands_cb(QmpCommand *cmd, void *opaque)
 CommandInfoList *qmp_query_commands(Error **errp)
 {
     CommandInfoList *list = NULL;
+    MonitorQMP *mon;
 
-    qmp_for_each_command(cur_mon->qmp.commands, query_commands_cb, &list);
+    assert(monitor_is_qmp(cur_mon));
+    mon = container_of(cur_mon, MonitorQMP, common);
+
+    qmp_for_each_command(mon->commands, query_commands_cb, &list);
 
     return list;
 }
@@ -1155,16 +1167,16 @@ static void monitor_init_qmp_commands(void)
                          qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG);
 }
 
-static bool qmp_oob_enabled(Monitor *mon)
+static bool qmp_oob_enabled(MonitorQMP *mon)
 {
-    return mon->qmp.capab[QMP_CAPABILITY_OOB];
+    return mon->capab[QMP_CAPABILITY_OOB];
 }
 
-static void monitor_qmp_caps_reset(Monitor *mon)
+static void monitor_qmp_caps_reset(MonitorQMP *mon)
 {
-    memset(mon->qmp.capab_offered, 0, sizeof(mon->qmp.capab_offered));
-    memset(mon->qmp.capab, 0, sizeof(mon->qmp.capab));
-    mon->qmp.capab_offered[QMP_CAPABILITY_OOB] = mon->use_io_thread;
+    memset(mon->capab_offered, 0, sizeof(mon->capab_offered));
+    memset(mon->capab, 0, sizeof(mon->capab));
+    mon->capab_offered[QMP_CAPABILITY_OOB] = mon->common.use_io_thread;
 }
 
 /*
@@ -1172,7 +1184,7 @@ static void monitor_qmp_caps_reset(Monitor *mon)
  * On success, set mon->qmp.capab[], and return true.
  * On error, set @errp, and return false.
  */
-static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
+static bool qmp_caps_accept(MonitorQMP *mon, QMPCapabilityList *list,
                             Error **errp)
 {
     GString *unavailable = NULL;
@@ -1181,7 +1193,7 @@ static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
     memset(capab, 0, sizeof(capab));
 
     for (; list; list = list->next) {
-        if (!mon->qmp.capab_offered[list->value]) {
+        if (!mon->capab_offered[list->value]) {
             if (!unavailable) {
                 unavailable = g_string_new(QMPCapability_str(list->value));
             } else {
@@ -1198,25 +1210,30 @@ static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
         return false;
     }
 
-    memcpy(mon->qmp.capab, capab, sizeof(capab));
+    memcpy(mon->capab, capab, sizeof(capab));
     return true;
 }
 
 void qmp_qmp_capabilities(bool has_enable, QMPCapabilityList *enable,
                           Error **errp)
 {
-    if (cur_mon->qmp.commands == &qmp_commands) {
+    MonitorQMP *mon;
+
+    assert(monitor_is_qmp(cur_mon));
+    mon = container_of(cur_mon, MonitorQMP, common);
+
+    if (mon->commands == &qmp_commands) {
         error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
                   "Capabilities negotiation is already complete, command "
                   "ignored");
         return;
     }
 
-    if (!qmp_caps_accept(cur_mon, enable, errp)) {
+    if (!qmp_caps_accept(mon, enable, errp)) {
         return;
     }
 
-    cur_mon->qmp.commands = &qmp_commands;
+    mon->commands = &qmp_commands;
 }
 
 /* Set the current CPU defined by the user. Callers must hold BQL. */
@@ -4123,27 +4140,27 @@ static int monitor_can_read(void *opaque)
  * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
  * Nothing is emitted then.
  */
-static void monitor_qmp_respond(Monitor *mon, QDict *rsp)
+static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp)
 {
     if (rsp) {
         qmp_send_response(mon, rsp);
     }
 }
 
-static void monitor_qmp_dispatch(Monitor *mon, QObject *req)
+static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req)
 {
     Monitor *old_mon;
     QDict *rsp;
     QDict *error;
 
     old_mon = cur_mon;
-    cur_mon = mon;
+    cur_mon = &mon->common;
 
-    rsp = qmp_dispatch(mon->qmp.commands, req, qmp_oob_enabled(mon));
+    rsp = qmp_dispatch(mon->commands, req, qmp_oob_enabled(mon));
 
     cur_mon = old_mon;
 
-    if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
+    if (mon->commands == &qmp_cap_negotiation_commands) {
         error = qdict_get_qdict(rsp, "error");
         if (error
             && !g_strcmp0(qdict_get_try_str(error, "class"),
@@ -4168,24 +4185,30 @@ static void monitor_qmp_dispatch(Monitor *mon, QObject *req)
  * monitor to the end of mon_list queue.
  *
  * Note: if the function returned with non-NULL, then the caller will
- * be with mon->qmp.qmp_queue_lock held, and the caller is responsible
+ * be with qmp_mon->qmp_queue_lock held, and the caller is responsible
  * to release it.
  */
 static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
 {
     QMPRequest *req_obj = NULL;
     Monitor *mon;
+    MonitorQMP *qmp_mon;
 
     qemu_mutex_lock(&monitor_lock);
 
     QTAILQ_FOREACH(mon, &mon_list, entry) {
-        qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
-        req_obj = g_queue_pop_head(mon->qmp.qmp_requests);
+        if (!monitor_is_qmp(mon)) {
+            continue;
+        }
+
+        qmp_mon = container_of(mon, MonitorQMP, common);
+        qemu_mutex_lock(&qmp_mon->qmp_queue_lock);
+        req_obj = g_queue_pop_head(qmp_mon->qmp_requests);
         if (req_obj) {
             /* With the lock of corresponding queue held */
             break;
         }
-        qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+        qemu_mutex_unlock(&qmp_mon->qmp_queue_lock);
     }
 
     if (req_obj) {
@@ -4207,7 +4230,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
     QMPRequest *req_obj = monitor_qmp_requests_pop_any_with_lock();
     QDict *rsp;
     bool need_resume;
-    Monitor *mon;
+    MonitorQMP *mon;
 
     if (!req_obj) {
         return;
@@ -4216,8 +4239,8 @@ static void monitor_qmp_bh_dispatcher(void *data)
     mon = req_obj->mon;
     /* qmp_oob_enabled() might change after "qmp_capabilities" */
     need_resume = !qmp_oob_enabled(mon) ||
-        mon->qmp.qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
     if (req_obj->req) {
         QDict *qdict = qobject_to(QDict, req_obj->req);
         QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
@@ -4233,7 +4256,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
 
     if (need_resume) {
         /* Pairs with the monitor_suspend() in handle_qmp_command() */
-        monitor_resume(mon);
+        monitor_resume(&mon->common);
     }
     qmp_request_free(req_obj);
@@ -4243,7 +4266,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
 
 static void handle_qmp_command(void *opaque, QObject *req, Error *err)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
     QObject *id = NULL;
     QDict *qdict;
     QMPRequest *req_obj;
@@ -4275,7 +4298,7 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
     req_obj->err = err;
 
     /* Protect qmp_requests and fetching its length. */
-    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    qemu_mutex_lock(&mon->qmp_queue_lock);
 
     /*
      * Suspend the monitor when we can't queue more requests after
@@ -4284,8 +4307,8 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
      * command, for backward compatibility.
      */
     if (!qmp_oob_enabled(mon) ||
-        mon->qmp.qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
-        monitor_suspend(mon);
+        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1) {
+        monitor_suspend(&mon->common);
     }
 
     /*
@@ -4293,9 +4316,9 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
      * handled in time order. Ownership for req_obj, req,
      * etc. will be delivered to the handler side.
      */
-    assert(mon->qmp.qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
-    g_queue_push_tail(mon->qmp.qmp_requests, req_obj);
-    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+    assert(mon->qmp_requests->length < QMP_REQ_QUEUE_LEN_MAX);
+    g_queue_push_tail(mon->qmp_requests, req_obj);
+    qemu_mutex_unlock(&mon->qmp_queue_lock);
 
     /* Kick the dispatcher routine */
     qemu_bh_schedule(qmp_dispatcher_bh);
@@ -4303,9 +4326,9 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
 
 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
 
-    json_message_parser_feed(&mon->qmp.parser, (const char *) buf, size);
+    json_message_parser_feed(&mon->parser, (const char *) buf, size);
 }
 
 static void monitor_read(void *opaque, const uint8_t *buf, int size)
@@ -4391,7 +4414,7 @@ void monitor_resume(Monitor *mon)
     trace_monitor_suspend(mon, -1);
 }
 
-static QDict *qmp_greeting(Monitor *mon)
+static QDict *qmp_greeting(MonitorQMP *mon)
 {
     QList *cap_list = qlist_new();
     QObject *ver = NULL;
@@ -4400,7 +4423,7 @@ static QDict *qmp_greeting(Monitor *mon)
     qmp_marshal_query_version(NULL, &ver, NULL);
 
     for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
-        if (mon->qmp.capab_offered[cap]) {
+        if (mon->capab_offered[cap]) {
             qlist_append_str(cap_list, QMPCapability_str(cap));
         }
     }
@@ -4413,11 +4436,11 @@ static QDict *qmp_greeting(Monitor *mon)
 static void monitor_qmp_event(void *opaque, int event)
 {
     QDict *data;
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
 
     switch (event) {
     case CHR_EVENT_OPENED:
-        mon->qmp.commands = &qmp_cap_negotiation_commands;
+        mon->commands = &qmp_cap_negotiation_commands;
         monitor_qmp_caps_reset(mon);
         data = qmp_greeting(mon);
         qmp_send_response(mon, data);
@@ -4432,8 +4455,8 @@ static void monitor_qmp_event(void *opaque, int event)
          * is closed.
          */
         monitor_qmp_cleanup_queues(mon);
-        json_message_parser_destroy(&mon->qmp.parser);
-        json_message_parser_init(&mon->qmp.parser, handle_qmp_command,
+        json_message_parser_destroy(&mon->parser);
+        json_message_parser_init(&mon->parser, handle_qmp_command,
                                  mon, NULL);
         mon_refcount--;
         monitor_fdsets_cleanup();
@@ -4595,33 +4618,37 @@ static void monitor_list_append(Monitor *mon)
 static void monitor_qmp_setup_handlers_bh(void *opaque)
 {
-    Monitor *mon = opaque;
+    MonitorQMP *mon = opaque;
     GMainContext *context;
 
-    assert(mon->use_io_thread);
+    assert(mon->common.use_io_thread);
     context = iothread_get_g_main_context(mon_iothread);
     assert(context);
-    qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
-                             monitor_qmp_event, NULL, mon, context, true);
-    monitor_list_append(mon);
+    qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
+                             monitor_qmp_read, monitor_qmp_event,
+                             NULL, &mon->common, context, true);
+    monitor_list_append(&mon->common);
 }
 
 static void monitor_init_qmp(Chardev *chr, int flags)
 {
-    Monitor *mon = g_new0(Monitor, 1);
+    MonitorQMP *mon = g_new0(MonitorQMP, 1);
 
     /* Only HMP supports readline */
     assert(!(flags & MONITOR_USE_READLINE));
 
     /* Note: we run QMP monitor in I/O thread when @chr supports that */
-    monitor_data_init(mon, flags, false,
+    monitor_data_init(&mon->common, flags, false,
                       qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_GCONTEXT));
 
-    qemu_chr_fe_init(&mon->chr, chr, &error_abort);
-    qemu_chr_fe_set_echo(&mon->chr, true);
+    qemu_mutex_init(&mon->qmp_queue_lock);
+    mon->qmp_requests = g_queue_new();
 
-    json_message_parser_init(&mon->qmp.parser, handle_qmp_command, mon, NULL);
-    if (mon->use_io_thread) {
+    qemu_chr_fe_init(&mon->common.chr, chr, &error_abort);
+    qemu_chr_fe_set_echo(&mon->common.chr, true);
+
+    json_message_parser_init(&mon->parser, handle_qmp_command, mon, NULL);
+    if (mon->common.use_io_thread) {
         /*
         * Make sure the old iowatch is gone. It's possible when
         * e.g. the chardev is in client mode, with wait=on.
         */
@@ -4636,10 +4663,10 @@ static void monitor_init_qmp(Chardev *chr, int flags)
                                 monitor_qmp_setup_handlers_bh, mon);
         /* The bottom half will add @mon to @mon_list */
     } else {
-        qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
+        qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
                                  monitor_qmp_read, monitor_qmp_event,
-                                 NULL, mon, NULL, true);
-        monitor_list_append(mon);
+                                 NULL, &mon->common, NULL, true);
+        monitor_list_append(&mon->common);
     }
 }