QOM CPUState refactorings / X86CPU

Merge remote-tracking branch 'afaerber/tags/qom-cpu-for-anthony' into staging

* Conversion of global CPU list to QTAILQ - preparing for CPU hot-unplug
* Document X86CPU magic numbers for CPUID cache info

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQIcBAABAgAGBQJSJgdaAAoJEPou0S0+fgE/WqAQAJ6pcTymZO86NLKwcY4dD5Dr
Es2aTs4XFs9V3+gpbH9vOA71n9HanFQp1s4ZUskQ2BVQU8cZeRUKlGhKJfqcEbPF
H5wkxskqgV2Sw8+XWjQk80J/X/W6k10Fit64CUpQqxzd3HwXXzT/QHXzM8t6p79i
KdEAsjaQYqR8/qa7+pd437lLcTiRb51FqB5u3ClbCbIKjnnjswr/ZypKr+CUc9WY
1AzP9UKg0qSxz1yCkgzYHt3eWjfuGhsqn8KXVQfc+37xFRZp0uYQYkCahhwrPRUO
jTg0eJKxoyH76t+2jIsnNHfd6r5zaTmVThGnun/SzJTGj8AFNrz81EfT1niJdp2/
6RdykpWdqqeA3usKoSzBgTEAXGL50tCL0xiREk7hPwflxJqjbjFuVuttkazEcHZf
Q2OS0tUFhYi3yUojms/YJYFUaNUhA033wJSjKGbFfSDdtJdjnxmB2r+LhsH4ByfS
4SPU5zr4up1Yr1dnmIlNUA5W/KMgZseT3shasLhFmODR7wGvrQ7DuEHRs87UQbbM
pedvN92VmWzByEvLNkICJGuaVer+mHznig9f1eOkxXlK4RdNBmAf5QYMU+oxbkUG
fwXu0w7/aUJKpcYl6aYUmkhgn9dB3Oe/WTVLkvfg54MUFKpo4b72AR01+fWT91XO
r8DQQYwP94htozAC6F9n
=/bSY
-----END PGP SIGNATURE-----

# gpg: Signature made Tue 03 Sep 2013 10:59:22 AM CDT using RSA key ID 3E7E013F
# gpg: Can't check signature: public key not found
# By Andreas Färber (3) and Eduardo Habkost (1)
# Via Andreas Färber

* afaerber/tags/qom-cpu-for-anthony:
  target-i386: Use #defines instead of magic numbers for CPUID cache info
  cpu: Replace qemu_for_each_cpu()
  cpu: Use QTAILQ for CPU list
  a15mpcore: Use qemu_get_cpu() for generic timers
commit aaa6a40194
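In short: the global CPU list moves from an open-coded singly linked list (CPUState::next_cpu plus a global first_cpu pointer) to a QTAILQ, and callers iterate with CPU_FOREACH instead of hand-written loops or qemu_for_each_cpu() callbacks. A minimal standalone sketch of the new shape, assuming the QTAILQ macros from QEMU's qemu/queue.h; the count_cpus() helper is illustrative only, not part of this series:

    /* Sketch of the post-series CPU list (assumes qemu/queue.h). */
    #include "qemu/queue.h"

    typedef struct CPUState CPUState;
    struct CPUState {
        int cpu_index;
        QTAILQ_ENTRY(CPUState) node;   /* replaces the old CPUState *next_cpu */
    };

    QTAILQ_HEAD(CPUTailQ, CPUState);
    struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

    #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)

    /* Hypothetical helper: the iteration idiom the whole diff converges on. */
    static int count_cpus(void)
    {
        CPUState *cpu;
        int n = 0;

        CPU_FOREACH(cpu) { /* was: for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) */
            n++;
        }
        return n;
    }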
arch_init.c | 11
@@ -1196,15 +1196,14 @@ static void mig_sleep_cpu(void *opq)
    much time in the VM. The migration thread will try to catchup.
    Workload will experience a performance drop.
 */
-static void mig_throttle_cpu_down(CPUState *cpu, void *data)
-{
-    async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
-}
-
 static void mig_throttle_guest_down(void)
 {
+    CPUState *cpu;
+
     qemu_mutex_lock_iothread();
-    qemu_for_each_cpu(mig_throttle_cpu_down, NULL);
+    CPU_FOREACH(cpu) {
+        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
+    }
     qemu_mutex_unlock_iothread();
 }
cpus.c | 60
@@ -86,7 +86,7 @@ static bool all_cpu_threads_idle(void)
 {
     CPUState *cpu;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         if (!cpu_thread_is_idle(cpu)) {
             return false;
         }
@@ -416,7 +416,7 @@ void hw_error(const char *fmt, ...)
     fprintf(stderr, "qemu: hardware error: ");
     vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
         cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
     }
@@ -428,7 +428,7 @@ void cpu_synchronize_all_states(void)
 {
     CPUState *cpu;

-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         cpu_synchronize_state(cpu);
     }
 }
@@ -437,7 +437,7 @@ void cpu_synchronize_all_post_reset(void)
 {
     CPUState *cpu;

-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         cpu_synchronize_post_reset(cpu);
     }
 }
@@ -446,7 +446,7 @@ void cpu_synchronize_all_post_init(void)
 {
     CPUState *cpu;

-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         cpu_synchronize_post_init(cpu);
     }
 }
@@ -760,7 +760,7 @@ static void qemu_tcg_wait_io_event(void)
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         qemu_wait_io_event_common(cpu);
     }
 }
@@ -854,12 +854,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)

 static void tcg_exec_all(void);

-static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
-{
-    cpu->thread_id = qemu_get_thread_id();
-    cpu->created = true;
-}
-
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
@@ -868,15 +862,18 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);

     qemu_mutex_lock(&qemu_global_mutex);
-    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
+    CPU_FOREACH(cpu) {
+        cpu->thread_id = qemu_get_thread_id();
+        cpu->created = true;
+    }
     qemu_cond_signal(&qemu_cpu_cond);

     /* wait for initial kick-off after machine start */
-    while (first_cpu->stopped) {
+    while (QTAILQ_FIRST(&cpus)->stopped) {
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

         /* process any pending work */
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             qemu_wait_io_event_common(cpu);
         }
     }
@@ -991,13 +988,12 @@ void qemu_mutex_unlock_iothread(void)

 static int all_vcpus_paused(void)
 {
-    CPUState *cpu = first_cpu;
+    CPUState *cpu;

-    while (cpu) {
+    CPU_FOREACH(cpu) {
         if (!cpu->stopped) {
             return 0;
         }
-        cpu = cpu->next_cpu;
     }

     return 1;
@@ -1005,23 +1001,20 @@ static int all_vcpus_paused(void)

 void pause_all_vcpus(void)
 {
-    CPUState *cpu = first_cpu;
+    CPUState *cpu;

     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
-    while (cpu) {
+    CPU_FOREACH(cpu) {
         cpu->stop = true;
         qemu_cpu_kick(cpu);
-        cpu = cpu->next_cpu;
     }

     if (qemu_in_vcpu_thread()) {
         cpu_stop_current();
         if (!kvm_enabled()) {
-            cpu = first_cpu;
-            while (cpu) {
+            CPU_FOREACH(cpu) {
                 cpu->stop = false;
                 cpu->stopped = true;
-                cpu = cpu->next_cpu;
             }
             return;
         }
@@ -1029,10 +1022,8 @@ void pause_all_vcpus(void)

     while (!all_vcpus_paused()) {
         qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
-        cpu = first_cpu;
-        while (cpu) {
+        CPU_FOREACH(cpu) {
             qemu_cpu_kick(cpu);
-            cpu = cpu->next_cpu;
         }
     }
 }
@@ -1046,12 +1037,11 @@ void cpu_resume(CPUState *cpu)

 void resume_all_vcpus(void)
 {
-    CPUState *cpu = first_cpu;
+    CPUState *cpu;

     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
-    while (cpu) {
+    CPU_FOREACH(cpu) {
         cpu_resume(cpu);
-        cpu = cpu->next_cpu;
     }
 }

@@ -1215,7 +1205,7 @@ static void tcg_exec_all(void)
     if (next_cpu == NULL) {
         next_cpu = first_cpu;
     }
-    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
+    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
         CPUState *cpu = next_cpu;
         CPUArchState *env = cpu->env_ptr;

@@ -1240,7 +1230,7 @@ void set_numa_modes(void)
     CPUState *cpu;
     int i;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         for (i = 0; i < nb_numa_nodes; i++) {
             if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                 cpu->numa_node = i;
@@ -1262,7 +1252,7 @@ CpuInfoList *qmp_query_cpus(Error **errp)
     CpuInfoList *head = NULL, *cur_item = NULL;
     CPUState *cpu;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         CpuInfoList *info;
 #if defined(TARGET_I386)
         X86CPU *x86_cpu = X86_CPU(cpu);
@@ -1391,7 +1381,7 @@ void qmp_inject_nmi(Error **errp)
 #if defined(TARGET_I386)
     CPUState *cs;

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         X86CPU *cpu = X86_CPU(cs);
         CPUX86State *env = &cpu->env;

@@ -1405,7 +1395,7 @@ void qmp_inject_nmi(Error **errp)
     CPUState *cs;
     S390CPU *cpu;

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         cpu = S390_CPU(cs);
         if (cpu->env.cpu_num == monitor_get_cpu_index()) {
             if (s390_cpu_restart(S390_CPU(cs)) == -1) {
cputlb.c | 2
@@ -189,7 +189,7 @@ void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
     CPUState *cpu;
     CPUArchState *env;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         int mmu_idx;

         env = cpu->env_ptr;
dump.c | 10
@@ -277,7 +277,7 @@ static int write_elf64_notes(DumpState *s)
     int ret;
     int id;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         id = cpu_index(cpu);
         ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
         if (ret < 0) {
@@ -286,7 +286,7 @@ static int write_elf64_notes(DumpState *s)
         }
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
         if (ret < 0) {
             dump_error(s, "dump: failed to write CPU status.\n");
@@ -327,7 +327,7 @@ static int write_elf32_notes(DumpState *s)
     int ret;
     int id;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         id = cpu_index(cpu);
         ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
         if (ret < 0) {
@@ -336,7 +336,7 @@ static int write_elf32_notes(DumpState *s)
         }
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
         if (ret < 0) {
             dump_error(s, "dump: failed to write CPU status.\n");
@@ -734,7 +734,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
      */
     cpu_synchronize_all_states();
     nr_cpus = 0;
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         nr_cpus++;
     }
exec.c | 37
@@ -69,7 +69,7 @@ static MemoryRegion io_mem_unassigned;

 #endif

-CPUState *first_cpu;
+struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
 DEFINE_TLS(CPUState *, current_cpu);
@@ -350,45 +350,30 @@ const VMStateDescription vmstate_cpu_common = {
 #endif

 CPUState *qemu_get_cpu(int index)
 {
-    CPUState *cpu = first_cpu;
+    CPUState *cpu;

-    while (cpu) {
-        if (cpu->cpu_index == index) {
-            break;
-        }
-        cpu = cpu->next_cpu;
-    }
-
-    return cpu;
-}
-
-void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
-{
-    CPUState *cpu;
-
-    cpu = first_cpu;
-    while (cpu) {
-        func(cpu, data);
-        cpu = cpu->next_cpu;
+    CPU_FOREACH(cpu) {
+        if (cpu->cpu_index == index) {
+            return cpu;
+        }
     }
+
+    return NULL;
 }

 void cpu_exec_init(CPUArchState *env)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    CPUState **pcpu;
+    CPUState *some_cpu;
     int cpu_index;

 #if defined(CONFIG_USER_ONLY)
     cpu_list_lock();
 #endif
-    cpu->next_cpu = NULL;
-    pcpu = &first_cpu;
     cpu_index = 0;
-    while (*pcpu != NULL) {
-        pcpu = &(*pcpu)->next_cpu;
+    CPU_FOREACH(some_cpu) {
         cpu_index++;
     }
     cpu->cpu_index = cpu_index;
@@ -398,7 +383,7 @@ void cpu_exec_init(CPUArchState *env)
 #ifndef CONFIG_USER_ONLY
     cpu->thread_id = qemu_get_thread_id();
 #endif
-    *pcpu = cpu;
+    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
 #if defined(CONFIG_USER_ONLY)
     cpu_list_unlock();
 #endif
@@ -1762,7 +1747,7 @@ static void tcg_commit(MemoryListener *listener)
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     /* XXX: slow ! */
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         CPUArchState *env = cpu->env_ptr;

         tlb_flush(env, 1);
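The rewritten qemu_get_cpu() above returns the CPUState whose cpu_index matches, or NULL when no such CPU exists; the a15mpcore patch further down relies on exactly this contract. A hedged usage sketch (the caller is hypothetical, not part of this commit):

    /* Hypothetical caller: look up vCPU 2 by index. */
    CPUState *cpu = qemu_get_cpu(2);
    if (cpu != NULL) {
        fprintf(stderr, "CPU #%d present\n", cpu->cpu_index);
    }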
gdbstub.c | 14
@@ -648,7 +648,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     switch (type) {
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             env = cpu->env_ptr;
             err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
             if (err)
@@ -659,7 +659,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_WRITE:
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             env = cpu->env_ptr;
             err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
                                         NULL);
@@ -686,7 +686,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     switch (type) {
     case GDB_BREAKPOINT_SW:
     case GDB_BREAKPOINT_HW:
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             env = cpu->env_ptr;
             err = cpu_breakpoint_remove(env, addr, BP_GDB);
             if (err)
@@ -697,7 +697,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
     case GDB_WATCHPOINT_WRITE:
     case GDB_WATCHPOINT_READ:
     case GDB_WATCHPOINT_ACCESS:
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             env = cpu->env_ptr;
             err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
             if (err)
@@ -720,7 +720,7 @@ static void gdb_breakpoint_remove_all(void)
         return;
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         env = cpu->env_ptr;
         cpu_breakpoint_remove_all(env, BP_GDB);
 #ifndef CONFIG_USER_ONLY
@@ -744,7 +744,7 @@ static CPUState *find_cpu(uint32_t thread_id)
 {
     CPUState *cpu;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         if (cpu_index(cpu) == thread_id) {
             return cpu;
         }
@@ -1070,7 +1070,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
         if (s->query_cpu) {
             snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
             put_packet(s, buf);
-            s->query_cpu = s->query_cpu->next_cpu;
+            s->query_cpu = CPU_NEXT(s->query_cpu);
         } else
             put_packet(s, "l");
         break;
@@ -667,22 +667,14 @@ static void piix4_cpu_added_req(Notifier *n, void *opaque)
     piix4_cpu_hotplug_req(s, CPU(opaque), PLUG);
 }

-static void piix4_init_cpu_status(CPUState *cpu, void *data)
-{
-    CPUStatus *g = (CPUStatus *)data;
-    CPUClass *k = CPU_GET_CLASS(cpu);
-    int64_t id = k->get_arch_id(cpu);
-
-    g_assert((id / 8) < PIIX4_PROC_LEN);
-    g->sts[id / 8] |= (1 << (id % 8));
-}
-
 static int piix4_device_hotplug(DeviceState *qdev, PCIDevice *dev,
                                 PCIHotplugState state);

 static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
                                            PCIBus *bus, PIIX4PMState *s)
 {
+    CPUState *cpu;
+
     memory_region_init_io(&s->io_gpe, OBJECT(s), &piix4_gpe_ops, s,
                           "acpi-gpe0", GPE_LEN);
     memory_region_add_subregion(parent, GPE_BASE, &s->io_gpe);
@@ -693,7 +685,13 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
                           &s->io_pci);
     pci_bus_hotplug(bus, piix4_device_hotplug, DEVICE(s));

-    qemu_for_each_cpu(piix4_init_cpu_status, &s->gpe_cpu);
+    CPU_FOREACH(cpu) {
+        CPUClass *cc = CPU_GET_CLASS(cpu);
+        int64_t id = cc->get_arch_id(cpu);
+
+        g_assert((id / 8) < PIIX4_PROC_LEN);
+        s->gpe_cpu.sts[id / 8] |= (1 << (id % 8));
+    }
     memory_region_init_io(&s->io_cpu, OBJECT(s), &cpu_hotplug_ops, s,
                           "acpi-cpu-hotplug", PIIX4_PROC_LEN);
     memory_region_add_subregion(parent, PIIX4_PROC_BASE, &s->io_cpu);
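The now-inlined loop above fills the ACPI GPE CPU-status bitmap: one bit per possible CPU, keyed by the CPU's arch_id (byte id / 8, bit id % 8). A worked example with an illustrative value:

    /* Illustrative only: arch_id 10 lands in byte 1, bit 2 of the bitmap. */
    uint8_t sts[32] = { 0 };        /* stands in for s->gpe_cpu.sts */
    int64_t id = 10;                /* example arch_id */
    sts[id / 8] |= 1 << (id % 8);   /* sts[1] becomes 0x04 */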
@@ -468,7 +468,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
     }
     info->is_linux = is_linux;

-    for (; cs; cs = cs->next_cpu) {
+    for (; cs; cs = CPU_NEXT(cs)) {
         cpu = ARM_CPU(cs);
         cpu->env.boot_info = info;
         qemu_register_reset(do_cpu_reset, cpu);
@@ -50,7 +50,6 @@ static int a15mp_priv_init(SysBusDevice *dev)
     SysBusDevice *busdev;
     const char *gictype = "arm_gic";
     int i;
-    CPUState *cpu;

     if (kvm_irqchip_in_kernel()) {
         gictype = "kvm-arm-gic";
@@ -72,8 +71,8 @@ static int a15mp_priv_init(SysBusDevice *dev)
     /* Wire the outputs from each CPU's generic timer to the
      * appropriate GIC PPI inputs
      */
-    for (i = 0, cpu = first_cpu; i < s->num_cpu; i++, cpu = cpu->next_cpu) {
-        DeviceState *cpudev = DEVICE(cpu);
+    for (i = 0; i < s->num_cpu; i++) {
+        DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
         int ppibase = s->num_irq - 32 + i * 32;
         /* physical timer; we wire it up to the non-secure timer's ID,
          * since a real A15 always has TrustZone but QEMU doesn't.
@@ -59,7 +59,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
     if (!cap_clock_ctrl) {
         return;
     }
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0);
         if (ret) {
             if (ret != -EINVAL) {
@@ -498,7 +498,7 @@ static void vapic_enable_tpr_reporting(bool enable)
     X86CPU *cpu;
     CPUX86State *env;

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         cpu = X86_CPU(cs);
         env = &cpu->env;
         info.apic = env->apic_state;
@@ -191,13 +191,12 @@ static void pic_irq_request(void *opaque, int irq, int level)

     DPRINTF("pic_irqs: %s irq %d\n", level? "raise" : "lower", irq);
     if (env->apic_state) {
-        while (cs) {
+        CPU_FOREACH(cs) {
             cpu = X86_CPU(cs);
             env = &cpu->env;
             if (apic_accept_pic_intr(env->apic_state)) {
                 apic_deliver_pic_intr(env->apic_state, level);
             }
-            cs = cs->next_cpu;
         }
     } else {
         if (level) {
@@ -540,7 +540,7 @@ static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params,
         return NULL;
     }

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         if (kvm_openpic_connect_vcpu(dev, cs)) {
             fprintf(stderr, "%s: failed to connect vcpu to irqchip\n",
                     __func__);
@@ -443,7 +443,7 @@ void ppce500_set_mpic_proxy(bool enabled)
 {
     CPUState *cs;

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);

         cpu->env.mpic_proxy = enabled;
@@ -187,7 +187,7 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)

     assert(spapr->cpu_model);

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         uint32_t associativity[] = {cpu_to_be32(0x5),
                                     cpu_to_be32(0x0),
                                     cpu_to_be32(0x0),
@@ -351,7 +351,7 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
     /* This is needed during FDT finalization */
     spapr->cpu_model = g_strdup(modelname);

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
         CPUPPCState *env = &cpu->env;
         PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
@@ -679,7 +679,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,

     switch (mflags) {
     case H_SET_MODE_ENDIAN_BIG:
-        for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+        CPU_FOREACH(cs) {
             PowerPCCPU *cp = POWERPC_CPU(cs);
             CPUPPCState *env = &cp->env;
             env->spr[SPR_LPCR] &= ~LPCR_ILE;
@@ -688,7 +688,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
         break;

     case H_SET_MODE_ENDIAN_LITTLE:
-        for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+        CPU_FOREACH(cs) {
             PowerPCCPU *cp = POWERPC_CPU(cs);
             CPUPPCState *env = &cp->env;
             env->spr[SPR_LPCR] |= LPCR_ILE;
@@ -23,6 +23,7 @@
 #include <signal.h>
 #include "hw/qdev-core.h"
 #include "exec/hwaddr.h"
+#include "qemu/queue.h"
 #include "qemu/thread.h"
 #include "qemu/tls.h"
 #include "qemu/typedefs.h"
@@ -190,7 +191,7 @@ struct CPUState {
     struct GDBRegisterState *gdb_regs;
     int gdb_num_regs;
     int gdb_num_g_regs;
-    CPUState *next_cpu;
+    QTAILQ_ENTRY(CPUState) node;

     int kvm_fd;
     bool kvm_vcpu_dirty;
@@ -202,7 +203,13 @@ struct CPUState {
     uint32_t halted; /* used by alpha, cris, ppc TCG */
 };

-extern CPUState *first_cpu;
+QTAILQ_HEAD(CPUTailQ, CPUState);
+extern struct CPUTailQ cpus;
+#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
+#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
+#define CPU_FOREACH_SAFE(cpu, next_cpu) \
+    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
+#define first_cpu QTAILQ_FIRST(&cpus)

 DECLARE_TLS(CPUState *, current_cpu);
 #define current_cpu tls_var(current_cpu)
@@ -395,15 +402,6 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
  */
 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

-/**
- * qemu_for_each_cpu:
- * @func: The function to be executed.
- * @data: Data to pass to the function.
- *
- * Executes @func for each CPU.
- */
-void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data);
-
 /**
  * qemu_get_cpu:
  * @index: The CPUState@cpu_index value of the CPU to obtain.
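The CPU_FOREACH_SAFE variant added here is what makes hot-unplug practical: QTAILQ_FOREACH_SAFE caches the next element before running the loop body, so the current CPU can be unlinked mid-iteration. A sketch of the intended use (unplug_requested() is a hypothetical predicate, not a QEMU API):

    CPUState *cpu, *next_cpu;

    CPU_FOREACH_SAFE(cpu, next_cpu) {
        if (unplug_requested(cpu)) {         /* hypothetical predicate */
            QTAILQ_REMOVE(&cpus, cpu, node); /* safe: next element already saved */
        }
    }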
@@ -1925,7 +1925,7 @@ int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
         }
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         err = kvm_update_guest_debug(cpu, 0);
         if (err) {
             return err;
@@ -1965,7 +1965,7 @@ int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
         }
     }

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         err = kvm_update_guest_debug(cpu, 0);
         if (err) {
             return err;
@@ -1982,7 +1982,7 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
-            for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+            CPU_FOREACH(cpu) {
                 if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                     break;
                 }
@@ -1993,7 +1993,7 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
     }
     kvm_arch_remove_all_hw_breakpoints();

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         kvm_update_guest_debug(cpu, 0);
     }
 }
@@ -2668,7 +2668,7 @@ static int fill_note_info(struct elf_note_info *info,

     /* read and fill status of all threads */
     cpu_list_lock();
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         if (cpu == thread_cpu) {
             continue;
         }
@@ -117,10 +117,14 @@ void fork_end(int child)
 {
     mmap_fork_end(child);
     if (child) {
+        CPUState *cpu, *next_cpu;
         /* Child processes created by fork() only have a single thread.
            Discard information about the parent threads. */
-        first_cpu = thread_cpu;
-        first_cpu->next_cpu = NULL;
+        CPU_FOREACH_SAFE(cpu, next_cpu) {
+            if (cpu != thread_cpu) {
+                QTAILQ_REMOVE(&cpus, thread_cpu, node);
+            }
+        }
         pending_cpus = 0;
         pthread_mutex_init(&exclusive_lock, NULL);
         pthread_mutex_init(&cpu_list_mutex, NULL);
@@ -154,7 +158,7 @@ static inline void start_exclusive(void)

     pending_cpus = 1;
     /* Make all other cpus stop executing. */
-    for (other_cpu = first_cpu; other_cpu; other_cpu = other_cpu->next_cpu) {
+    CPU_FOREACH(other_cpu) {
         if (other_cpu->running) {
             pending_cpus++;
             cpu_exit(other_cpu);
@@ -5113,25 +5113,12 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
        Do thread termination if we have more then one thread. */
     /* FIXME: This probably breaks if a signal arrives. We should probably
        be disabling signals. */
-    if (first_cpu->next_cpu) {
+    if (CPU_NEXT(first_cpu)) {
         TaskState *ts;
-        CPUState **lastp;
-        CPUState *p;

         cpu_list_lock();
-        lastp = &first_cpu;
-        p = first_cpu;
-        while (p && p != cpu) {
-            lastp = &p->next_cpu;
-            p = p->next_cpu;
-        }
-        /* If we didn't find the CPU for this thread then something is
-           horribly wrong. */
-        if (!p) {
-            abort();
-        }
-        /* Remove the CPU from the list. */
-        *lastp = p->next_cpu;
+        QTAILQ_REMOVE(&cpus, cpu, node);
         cpu_list_unlock();
         ts = ((CPUArchState *)cpu_env)->opaque;
         if (ts->child_tidptr) {
@@ -270,7 +270,7 @@ static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
 {
     CPUState *cpu;

-    for (cpu = start_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         if (cpu_paging_enabled(cpu)) {
             return cpu;
         }
@@ -289,7 +289,8 @@ void qemu_get_guest_memory_mapping(MemoryMappingList *list,

     first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
     if (first_paging_enabled_cpu) {
-        for (cpu = first_paging_enabled_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        for (cpu = first_paging_enabled_cpu; cpu != NULL;
+             cpu = CPU_NEXT(cpu)) {
             Error *err = NULL;
             cpu_get_memory_mapping(cpu, list, &err);
             if (err) {
@@ -2003,7 +2003,7 @@ static void do_info_numa(Monitor *mon, const QDict *qdict)
     monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
     for (i = 0; i < nb_numa_nodes; i++) {
         monitor_printf(mon, "node %d cpus:", i);
-        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+        CPU_FOREACH(cpu) {
             if (cpu->numa_node == i) {
                 monitor_printf(mon, " %d", cpu->cpu_index);
             }
qom/cpu.c | 30
@@ -25,30 +25,18 @@
 #include "qemu/log.h"
 #include "sysemu/sysemu.h"

-typedef struct CPUExistsArgs {
-    int64_t id;
-    bool found;
-} CPUExistsArgs;
-
-static void cpu_exist_cb(CPUState *cpu, void *data)
-{
-    CPUClass *klass = CPU_GET_CLASS(cpu);
-    CPUExistsArgs *arg = data;
-
-    if (klass->get_arch_id(cpu) == arg->id) {
-        arg->found = true;
-    }
-}
-
 bool cpu_exists(int64_t id)
 {
-    CPUExistsArgs data = {
-        .id = id,
-        .found = false,
-    };
+    CPUState *cpu;

-    qemu_for_each_cpu(cpu_exist_cb, &data);
-    return data.found;
+    CPU_FOREACH(cpu) {
+        CPUClass *cc = CPU_GET_CLASS(cpu);
+
+        if (cc->get_arch_id(cpu) == id) {
+            return true;
+        }
+    }
+    return false;
 }

 bool cpu_paging_enabled(const CPUState *cpu)
@@ -48,6 +48,118 @@
 #include "hw/i386/apic_internal.h"
 #endif

+
+/* Cache topology CPUID constants: */
+
+/* CPUID Leaf 2 Descriptors */
+
+#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
+#define CPUID_2_L1I_32KB_8WAY_64B 0x30
+#define CPUID_2_L2_2MB_8WAY_64B   0x7d
+
+
+/* CPUID Leaf 4 constants: */
+
+/* EAX: */
+#define CPUID_4_TYPE_DCACHE  1
+#define CPUID_4_TYPE_ICACHE  2
+#define CPUID_4_TYPE_UNIFIED 3
+
+#define CPUID_4_LEVEL(l)          ((l) << 5)
+
+#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
+#define CPUID_4_FULLY_ASSOC     (1 << 9)
+
+/* EDX: */
+#define CPUID_4_NO_INVD_SHARING (1 << 0)
+#define CPUID_4_INCLUSIVE       (1 << 1)
+#define CPUID_4_COMPLEX_IDX     (1 << 2)
+
+#define ASSOC_FULL 0xFF
+
+/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
+#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
+                          a ==   2 ? 0x2 : \
+                          a ==   4 ? 0x4 : \
+                          a ==   8 ? 0x6 : \
+                          a ==  16 ? 0x8 : \
+                          a ==  32 ? 0xA : \
+                          a ==  48 ? 0xB : \
+                          a ==  64 ? 0xC : \
+                          a ==  96 ? 0xD : \
+                          a == 128 ? 0xE : \
+                          a == ASSOC_FULL ? 0xF : \
+                          0 /* invalid value */)
+
+
+/* Definitions of the hardcoded cache entries we expose: */
+
+/* L1 data cache: */
+#define L1D_LINE_SIZE         64
+#define L1D_ASSOCIATIVITY      8
+#define L1D_SETS              64
+#define L1D_PARTITIONS         1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1D_LINES_PER_TAG      1
+#define L1D_SIZE_KB_AMD       64
+#define L1D_ASSOCIATIVITY_AMD  2
+
+/* L1 instruction cache: */
+#define L1I_LINE_SIZE         64
+#define L1I_ASSOCIATIVITY      8
+#define L1I_SETS              64
+#define L1I_PARTITIONS         1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1I_LINES_PER_TAG      1
+#define L1I_SIZE_KB_AMD       64
+#define L1I_ASSOCIATIVITY_AMD  2
+
+/* Level 2 unified cache: */
+#define L2_LINE_SIZE          64
+#define L2_ASSOCIATIVITY      16
+#define L2_SETS             4096
+#define L2_PARTITIONS          1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
+/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
+#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
+#define L2_LINES_PER_TAG       1
+#define L2_SIZE_KB_AMD       512
+
+/* No L3 cache: */
+#define L3_SIZE_KB             0 /* disabled */
+#define L3_ASSOCIATIVITY       0 /* disabled */
+#define L3_LINES_PER_TAG       0 /* disabled */
+#define L3_LINE_SIZE           0 /* disabled */
+
+/* TLB definitions: */
+
+#define L1_DTLB_2M_ASSOC       1
+#define L1_DTLB_2M_ENTRIES   255
+#define L1_DTLB_4K_ASSOC       1
+#define L1_DTLB_4K_ENTRIES   255
+
+#define L1_ITLB_2M_ASSOC       1
+#define L1_ITLB_2M_ENTRIES   255
+#define L1_ITLB_4K_ASSOC       1
+#define L1_ITLB_4K_ENTRIES   255
+
+#define L2_DTLB_2M_ASSOC       0 /* disabled */
+#define L2_DTLB_2M_ENTRIES     0 /* disabled */
+#define L2_DTLB_4K_ASSOC       4
+#define L2_DTLB_4K_ENTRIES   512
+
+#define L2_ITLB_2M_ASSOC       0 /* disabled */
+#define L2_ITLB_2M_ENTRIES     0 /* disabled */
+#define L2_ITLB_4K_ASSOC       4
+#define L2_ITLB_4K_ENTRIES   512
+
+
+
 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                      uint32_t vendor2, uint32_t vendor3)
 {
@@ -1950,10 +2062,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         break;
     case 2:
         /* cache info: needed for Pentium Pro compatibility */
-        *eax = 1;
+        *eax = 1; /* Number of CPUID[EAX=2] calls required */
         *ebx = 0;
         *ecx = 0;
-        *edx = 0x2c307d;
+        *edx = (L1D_DESCRIPTOR << 16) | \
+               (L1I_DESCRIPTOR <<  8) | \
+               (L2_DESCRIPTOR);
         break;
     case 4:
         /* cache info: needed for Core compatibility */
@@ -1964,25 +2078,37 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         }
         switch (count) {
             case 0: /* L1 dcache info */
-                *eax |= 0x0000121;
-                *ebx = 0x1c0003f;
-                *ecx = 0x000003f;
-                *edx = 0x0000001;
+                *eax |= CPUID_4_TYPE_DCACHE | \
+                        CPUID_4_LEVEL(1) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                *ebx = (L1D_LINE_SIZE - 1) | \
+                       ((L1D_PARTITIONS - 1) << 12) | \
+                       ((L1D_ASSOCIATIVITY - 1) << 22);
+                *ecx = L1D_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
                 break;
             case 1: /* L1 icache info */
-                *eax |= 0x0000122;
-                *ebx = 0x1c0003f;
-                *ecx = 0x000003f;
-                *edx = 0x0000001;
+                *eax |= CPUID_4_TYPE_ICACHE | \
+                        CPUID_4_LEVEL(1) | \
+                        CPUID_4_SELF_INIT_LEVEL;
+                *ebx = (L1I_LINE_SIZE - 1) | \
+                       ((L1I_PARTITIONS - 1) << 12) | \
+                       ((L1I_ASSOCIATIVITY - 1) << 22);
+                *ecx = L1I_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
                 break;
            case 2: /* L2 cache info */
-                *eax |= 0x0000143;
+                *eax |= CPUID_4_TYPE_UNIFIED | \
+                        CPUID_4_LEVEL(2) | \
+                        CPUID_4_SELF_INIT_LEVEL;
                 if (cs->nr_threads > 1) {
                     *eax |= (cs->nr_threads - 1) << 14;
                 }
-                *ebx = 0x3c0003f;
-                *ecx = 0x0000fff;
-                *edx = 0x0000001;
+                *ebx = (L2_LINE_SIZE - 1) | \
+                       ((L2_PARTITIONS - 1) << 12) | \
+                       ((L2_ASSOCIATIVITY - 1) << 22);
+                *ecx = L2_SETS - 1;
+                *edx = CPUID_4_NO_INVD_SHARING;
                 break;
             default: /* end of info */
                 *eax = 0;
@@ -2102,17 +2228,31 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         break;
     case 0x80000005:
         /* cache info (L1 cache) */
-        *eax = 0x01ff01ff;
-        *ebx = 0x01ff01ff;
-        *ecx = 0x40020140;
-        *edx = 0x40020140;
+        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
+               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
+        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
+               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
+        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
+               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
+        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
+               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
         break;
     case 0x80000006:
         /* cache info (L2 cache) */
-        *eax = 0;
-        *ebx = 0x42004200;
-        *ecx = 0x02008140;
-        *edx = 0;
+        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
+               (L2_DTLB_2M_ENTRIES << 16) | \
+               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
+               (L2_ITLB_2M_ENTRIES);
+        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
+               (L2_DTLB_4K_ENTRIES << 16) | \
+               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
+               (L2_ITLB_4K_ENTRIES);
+        *ecx = (L2_SIZE_KB_AMD << 16) | \
+               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
+               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
+        *edx = ((L3_SIZE_KB/512) << 18) | \
+               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
+               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
         break;
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
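A quick standalone check (not QEMU code) that the symbolic expressions introduced above reproduce the magic numbers they replace; the #define values are copied from the hunk:

    #include <assert.h>

    #define L1D_LINE_SIZE      64
    #define L1D_ASSOCIATIVITY   8
    #define L1D_SETS           64
    #define L1D_PARTITIONS      1
    #define L2_LINE_SIZE       64
    #define L2_ASSOCIATIVITY   16
    #define L2_SETS          4096
    #define L2_PARTITIONS       1

    int main(void)
    {
        /* CPUID leaf 2: old *edx = 0x2c307d */
        assert(((0x2c << 16) | (0x30 << 8) | 0x7d) == 0x2c307d);
        /* CPUID leaf 4, L1D: old *ebx = 0x1c0003f, *ecx = 0x000003f */
        assert(((L1D_LINE_SIZE - 1) | ((L1D_PARTITIONS - 1) << 12) |
                ((L1D_ASSOCIATIVITY - 1) << 22)) == 0x1c0003f);
        assert((L1D_SETS - 1) == 0x3f);
        /* CPUID leaf 4, L2: old *ebx = 0x3c0003f, *ecx = 0x0000fff */
        assert(((L2_LINE_SIZE - 1) | ((L2_PARTITIONS - 1) << 12) |
                ((L2_ASSOCIATIVITY - 1) << 22)) == 0x3c0003f);
        assert((L2_SETS - 1) == 0xfff);
        return 0;
    }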
@@ -1231,8 +1231,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
     params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
     params.addr = 0;
     params.misc = 0;
-    for (other_cs = first_cpu; other_cs != NULL;
-         other_cs = other_cs->next_cpu) {
+    CPU_FOREACH(other_cs) {
         if (other_cs == cs) {
             continue;
         }
@@ -610,7 +610,7 @@ void helper_mwait(CPUX86State *env, int next_eip_addend)
     cpu = x86_env_get_cpu(env);
     cs = CPU(cpu);
     /* XXX: not complete but not completely erroneous */
-    if (cs->cpu_index != 0 || cs->next_cpu != NULL) {
+    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
         /* more than one CPU: do not sleep because another CPU may
            wake this one */
     } else {
@@ -1699,15 +1699,14 @@ target_ulong helper_dvpe(CPUMIPSState *env)
     CPUState *other_cs = first_cpu;
     target_ulong prev = env->mvp->CP0_MVPControl;

-    do {
+    CPU_FOREACH(other_cs) {
         MIPSCPU *other_cpu = MIPS_CPU(other_cs);
         /* Turn off all VPEs except the one executing the dvpe. */
         if (&other_cpu->env != env) {
             other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
             mips_vpe_sleep(other_cpu);
         }
-        other_cs = other_cs->next_cpu;
-    } while (other_cs);
+    }
     return prev;
 }

@@ -1716,7 +1715,7 @@ target_ulong helper_evpe(CPUMIPSState *env)
     CPUState *other_cs = first_cpu;
     target_ulong prev = env->mvp->CP0_MVPControl;

-    do {
+    CPU_FOREACH(other_cs) {
         MIPSCPU *other_cpu = MIPS_CPU(other_cs);

         if (&other_cpu->env != env
@@ -1726,8 +1725,7 @@ target_ulong helper_evpe(CPUMIPSState *env)
             other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
             mips_vpe_wake(other_cpu); /* And wake it up. */
         }
-        other_cs = other_cs->next_cpu;
-    } while (other_cs);
+    }
     return prev;
 }
 #endif /* !CONFIG_USER_ONLY */
@@ -1002,7 +1002,7 @@ void helper_msgsnd(target_ulong rb)
         return;
     }

-    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
+    CPU_FOREACH(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);
         CPUPPCState *cenv = &cpu->env;
@@ -183,12 +183,12 @@ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
 #ifndef CONFIG_USER_ONLY
 static void cpu_reset_all(void)
 {
-    CPUState *cpu;
+    CPUState *cs;
     S390CPUClass *scc;

-    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
-        scc = S390_CPU_GET_CLASS(CPU(cpu));
-        scc->cpu_reset(CPU(cpu));
+    CPU_FOREACH(cs) {
+        scc = S390_CPU_GET_CLASS(cs);
+        scc->cpu_reset(cs);
     }
 }
@@ -696,7 +696,7 @@ void tb_flush(CPUArchState *env1)
     }
     tcg_ctx.tb_ctx.nb_tbs = 0;

-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         CPUArchState *env = cpu->env_ptr;

         memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
@@ -850,7 +850,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)

     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
-    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+    CPU_FOREACH(cpu) {
         CPUArchState *env = cpu->env_ptr;

         if (env->tb_jmp_cache[h] == tb) {