mirror of
https://github.com/qemu/qemu.git
synced 2024-12-05 01:33:41 +08:00
b69c3c21a5
Devices may have component devices and buses. Device realization may fail. Realization is recursive: a device's realize() method realizes its components, and device_set_realized() realizes its buses (which should in turn realize the devices on that bus, except bus_set_realized() doesn't implement that, yet). When realization of a component or bus fails, we need to roll back: unrealize everything we realized so far. If any of these unrealizes failed, the device would be left in an inconsistent state. Must not happen. device_set_realized() lets it happen: it ignores errors in the roll back code starting at label child_realize_fail. Since realization is recursive, unrealization must be recursive, too. But how could a partly failed unrealize be rolled back? We'd have to re-realize, which can fail. This design is fundamentally broken. device_set_realized() does not roll back at all. Instead, it keeps unrealizing, ignoring further errors. It can screw up even for a device with no buses: if the lone dc->unrealize() fails, it still unregisters vmstate, and calls listeners' unrealize() callback. bus_set_realized() does not roll back either. Instead, it stops unrealizing. Fortunately, no unrealize method can fail, as we'll see below. To fix the design error, drop parameter @errp from all the unrealize methods. Any unrealize method that uses @errp now needs an update. This leads us to unrealize() methods that can fail. Merely passing it to another unrealize method cannot cause failure, though. Here are the ones that do other things with @errp: * virtio_serial_device_unrealize() Fails when qbus_set_hotplug_handler() fails, but still does all the other work. On failure, the device would stay realized with its resources completely gone. Oops. Can't happen, because qbus_set_hotplug_handler() can't actually fail here. Pass &error_abort to qbus_set_hotplug_handler() instead. 
* hw/ppc/spapr_drc.c's unrealize() Fails when object_property_del() fails, but all the other work is already done. On failure, the device would stay realized with its vmstate registration gone. Oops. Can't happen, because object_property_del() can't actually fail here. Pass &error_abort to object_property_del() instead. * spapr_phb_unrealize() Fails and bails out when remove_drcs() fails, but other work is already done. On failure, the device would stay realized with some of its resources gone. Oops. remove_drcs() fails only when chassis_from_bus()'s object_property_get_uint() fails, and it can't here. Pass &error_abort to remove_drcs() instead. Therefore, no unrealize method can fail before this patch. device_set_realized()'s recursive unrealization via bus uses object_property_set_bool(). Can't drop @errp there, so pass &error_abort. We similarly unrealize with object_property_set_bool() elsewhere, always ignoring errors. Pass &error_abort instead. Several unrealize methods no longer handle errors from other unrealize methods: virtio_9p_device_unrealize(), virtio_input_device_unrealize(), scsi_qdev_unrealize(), ... Much of the deleted error handling looks wrong anyway. One unrealize methods no longer ignore such errors: usb_ehci_pci_exit(). Several realize methods no longer ignore errors when rolling back: v9fs_device_realize_common(), pci_qdev_unrealize(), spapr_phb_realize(), usb_qdev_realize(), vfio_ccw_realize(), virtio_device_realize(). Signed-off-by: Markus Armbruster <armbru@redhat.com> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Message-Id: <20200505152926.18877-17-armbru@redhat.com>
457 lines
12 KiB
C
457 lines
12 KiB
C
/*
|
|
* QEMU CPU model
|
|
*
|
|
* Copyright (c) 2012-2014 SUSE LINUX Products GmbH
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version 2
|
|
* of the License, or (at your option) any later version.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, see
|
|
* <http://www.gnu.org/licenses/gpl-2.0.html>
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
|
#include "qapi/error.h"
|
|
#include "hw/core/cpu.h"
|
|
#include "sysemu/hw_accel.h"
|
|
#include "qemu/notify.h"
|
|
#include "qemu/log.h"
|
|
#include "qemu/main-loop.h"
|
|
#include "exec/log.h"
|
|
#include "qemu/error-report.h"
|
|
#include "qemu/qemu-print.h"
|
|
#include "sysemu/tcg.h"
|
|
#include "hw/boards.h"
|
|
#include "hw/qdev-properties.h"
|
|
#include "trace-root.h"
|
|
#include "qemu/plugin.h"
|
|
|
|
CPUInterruptHandler cpu_interrupt_handler;
|
|
|
|
CPUState *cpu_by_arch_id(int64_t id)
|
|
{
|
|
CPUState *cpu;
|
|
|
|
CPU_FOREACH(cpu) {
|
|
CPUClass *cc = CPU_GET_CLASS(cpu);
|
|
|
|
if (cc->get_arch_id(cpu) == id) {
|
|
return cpu;
|
|
}
|
|
}
|
|
return NULL;
|
|
}
|
|
|
|
bool cpu_exists(int64_t id)
|
|
{
|
|
return !!cpu_by_arch_id(id);
|
|
}
|
|
|
|
CPUState *cpu_create(const char *typename)
|
|
{
|
|
Error *err = NULL;
|
|
CPUState *cpu = CPU(object_new(typename));
|
|
object_property_set_bool(OBJECT(cpu), true, "realized", &err);
|
|
if (err != NULL) {
|
|
error_report_err(err);
|
|
object_unref(OBJECT(cpu));
|
|
exit(EXIT_FAILURE);
|
|
}
|
|
return cpu;
|
|
}
|
|
|
|
bool cpu_paging_enabled(const CPUState *cpu)
|
|
{
|
|
CPUClass *cc = CPU_GET_CLASS(cpu);
|
|
|
|
return cc->get_paging_enabled(cpu);
|
|
}
|
|
|
|
/* Default hook: CPUs that don't override this report paging disabled. */
static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}
|
|
|
|
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
|
|
Error **errp)
|
|
{
|
|
CPUClass *cc = CPU_GET_CLASS(cpu);
|
|
|
|
cc->get_memory_mapping(cpu, list, errp);
|
|
}
|
|
|
|
/* Default hook: memory-mapping introspection is unsupported. */
static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}
|
|
|
|
/* Resetting the IRQ comes from across the code base so we take the
|
|
* BQL here if we need to. cpu_interrupt assumes it is held.*/
|
|
void cpu_reset_interrupt(CPUState *cpu, int mask)
|
|
{
|
|
bool need_lock = !qemu_mutex_iothread_locked();
|
|
|
|
if (need_lock) {
|
|
qemu_mutex_lock_iothread();
|
|
}
|
|
cpu->interrupt_request &= ~mask;
|
|
if (need_lock) {
|
|
qemu_mutex_unlock_iothread();
|
|
}
|
|
}
|
|
|
|
/*
 * Request that @cpu leave its execution loop as soon as possible.
 * Safe to call from any thread; ordering between the two stores matters.
 */
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    /* -1 in the high half of icount_decr forces TCG out of its TB loop. */
    atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
|
|
|
|
/* Emit the QEMU-specific ELF32 note for @cpu via the class hook. */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    return klass->write_elf32_qemunote(f, cpu, opaque);
}
|
|
|
|
/* Default hook: no QEMU note to emit; 0 means success with nothing written. */
static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
|
|
|
|
/* Emit the per-CPU ELF32 core-dump note via the class hook. */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    return klass->write_elf32_note(f, cpu, cpuid, opaque);
}
|
|
|
|
/* Default hook: ELF32 core notes unsupported; -1 signals the error. */
static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
|
|
|
|
/* Emit the QEMU-specific ELF64 note for @cpu via the class hook. */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    return klass->write_elf64_qemunote(f, cpu, opaque);
}
|
|
|
|
/* Default hook: no QEMU note to emit; 0 means success with nothing written. */
static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
|
|
|
|
/* Emit the per-CPU ELF64 core-dump note via the class hook. */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    return klass->write_elf64_note(f, cpu, cpuid, opaque);
}
|
|
|
|
/* Default hook: ELF64 core notes unsupported; -1 signals the error. */
static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
|
|
|
|
|
|
/* Default gdbstub hook: no core registers; report 0 bytes read. */
static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
    return 0;
}
|
|
|
|
/* Default gdbstub hook: no core registers; report 0 bytes written. */
static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}
|
|
|
|
/*
 * Default hook: if no extra check is required, a QEMU watchpoint match
 * can be considered an architectural match.
 */
static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    return true;
}
|
|
|
|
/* Default hook: virtio endianness follows the target's build-time order. */
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}
|
|
|
|
/* Shared empty hook for optional per-CPU callbacks. */
static void cpu_common_noop(CPUState *cpu)
{
}
|
|
|
|
/* Default hook: interrupt was not handled during cpu_exec. */
static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}
|
|
|
|
/*
 * Fetch guest-crash details from the CPU class, if the class provides
 * a get_crash_info hook; NULL otherwise.
 */
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_crash_info ? cc->get_crash_info(cpu) : NULL;
}
|
|
|
|
/*
 * Dump CPU register state to @f when the class implements dump_state.
 * Synchronizes with the accelerator first so the dump is current.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cc->dump_state) {
        return;
    }
    cpu_synchronize_state(cpu);
    cc->dump_state(cpu, f, flags);
}
|
|
|
|
/* Dump CPU statistics when the class implements dump_statistics. */
void cpu_dump_statistics(CPUState *cpu, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cc->dump_statistics) {
        return;
    }
    cc->dump_statistics(cpu, flags);
}
|
|
|
|
/* Cold-reset @cpu through the qdev reset machinery, then trace it. */
void cpu_reset(CPUState *cpu)
{
    device_cold_reset(DEVICE(cpu));

    trace_guest_cpu_reset(cpu);
}
|
|
|
|
/*
 * DeviceClass::reset for the common CPU base class: restore the
 * architecture-independent CPUState fields to their power-on values.
 * Architecture subclasses layer their own reset on top of this.
 */
static void cpu_common_reset(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Optionally log the pre-reset register state for debugging. */
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    /* Clear both halves (low/high) of the icount decrementer atomically. */
    atomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    /* -1 means "no forced cflags for the next TB". */
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        /* Stale translations must not survive a reset. */
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}
|
|
|
|
/* Default hook: the base CPU class never reports pending work. */
static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}
|
|
|
|
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
|
|
{
|
|
CPUClass *cc = CPU_CLASS(object_class_by_name(typename));
|
|
|
|
assert(cpu_model && cc->class_by_name);
|
|
return cc->class_by_name(cpu_model);
|
|
}
|
|
|
|
static void cpu_common_parse_features(const char *typename, char *features,
|
|
Error **errp)
|
|
{
|
|
char *val;
|
|
static bool cpu_globals_initialized;
|
|
/* Single "key=value" string being parsed */
|
|
char *featurestr = features ? strtok(features, ",") : NULL;
|
|
|
|
/* should be called only once, catch invalid users */
|
|
assert(!cpu_globals_initialized);
|
|
cpu_globals_initialized = true;
|
|
|
|
while (featurestr) {
|
|
val = strchr(featurestr, '=');
|
|
if (val) {
|
|
GlobalProperty *prop = g_new0(typeof(*prop), 1);
|
|
*val = 0;
|
|
val++;
|
|
prop->driver = typename;
|
|
prop->property = g_strdup(featurestr);
|
|
prop->value = g_strdup(val);
|
|
qdev_prop_register_global(prop);
|
|
} else {
|
|
error_setg(errp, "Expected key=value format, found %s.",
|
|
featurestr);
|
|
return;
|
|
}
|
|
featurestr = strtok(NULL, ",");
|
|
}
|
|
}
|
|
|
|
/*
 * DeviceClass::realize for the common CPU base class.  Picks up the
 * board-level ignore_memory_transaction_failures flag and, for hotplugged
 * CPUs, synchronizes and resumes the new vCPU.
 */
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        /* A hotplugged CPU starts life stopped; push state and resume it. */
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}
|
|
|
|
/*
 * DeviceClass::unrealize for the common CPU base class: tear down
 * plugin hooks and the exec-layer state set up at realize time.
 */
static void cpu_common_unrealizefn(DeviceState *dev)
{
    CPUState *cpu = CPU(dev);

    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    qemu_plugin_vcpu_exit_hook(cpu);
    cpu_exec_unrealizefn(cpu);
}
|
|
|
|
/*
 * instance_init for TYPE_CPU: set architecture-independent defaults
 * before properties are applied and before realize.
 */
static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    /* Until gdbstub registers extra register sets, g-regs == core regs. */
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}
|
|
|
|
/* instance_finalize for TYPE_CPU: release resources from instance_init. */
static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

    qemu_mutex_destroy(&cpu->work_mutex);
}
|
|
|
|
/* Default hook: use the sequential cpu_index as the architectural ID. */
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
|
|
|
|
/* Default hook: watchpoint addresses need no architectural adjustment. */
static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}
|
|
|
|
/*
 * Default cpu_interrupt implementation: raise the request bits and,
 * when called from another thread, kick the target vCPU so it notices.
 */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
|
|
|
|
/* Global interrupt handler; accelerators may replace the generic default. */
CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
|
|
|
|
/*
 * class_init for TYPE_CPU: install the common default hooks and wire
 * the qdev realize/unrealize/reset machinery.  Architecture subclasses
 * override individual hooks as needed.
 */
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    /* Introspection and lifecycle defaults. */
    k->parse_features = cpu_common_parse_features;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    /* Core-dump note writers (ELF32/ELF64). */
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    /* gdbstub and debug defaults. */
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    /* Execution-loop hooks. */
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->reset = cpu_common_reset;
    device_class_set_props(dc, cpu_common_props);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}
|
|
|
|
/* Abstract QOM base type for all CPUs; subclassed per target. */
static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};
|
|
|
|
/* Register TYPE_CPU with the QOM type system at module-init time. */
static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)
|