spapr: Don't clamp RMA to 16GiB on new machine types
In spapr_machine_init() we clamp the size of the RMA to 16GiB and the
comment saying why doesn't make a whole lot of sense.  In fact, this was
done because the real mode handling code elsewhere limited the RMA in
TCG mode to the maximum value configurable in LPCR[RMLS], 16GiB.

But,
 * Actually LPCR[RMLS] has been able to encode a 256GiB size for a very
   long time, we just didn't implement it properly in the softmmu
 * LPCR[RMLS] shouldn't really be relevant anyway, it only was because
   we used to abuse the RMOR based translation mode in order to handle
   the fact that we're not modelling the hypervisor parts of the cpu

We've now removed those limitations in the modelling, so the 16GiB clamp
no longer serves a function.  However, we can't just remove the limit
universally: that would break migration to earlier qemu versions, where
the 16GiB RMLS limit still applies, no matter how bad the reasons for it
are.

So, we replace the 16GiB clamp with a clamp to a limit defined in the
machine type class.  We set it to 16 GiB for machine types 4.2 and
earlier, but set it to 0, meaning unlimited, for the new 5.0 machine
type.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
commit 1052ab67f4
parent 8897ea5a9f
@@ -2701,12 +2701,14 @@ static void spapr_machine_init(MachineState *machine)
 
     spapr->rma_size = node0_size;
 
-    /* Actually we don't support unbounded RMA anymore since we added
-     * proper emulation of HV mode. The max we can get is 16G which
-     * also happens to be what we configure for PAPR mode so make sure
-     * we don't do anything bigger than that
-     */
-    spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
+    /*
+     * Clamp the RMA size based on machine type. This is for
+     * migration compatibility with older qemu versions, which limited
+     * the RMA size for complicated and mostly bad reasons.
+     */
+    if (smc->rma_limit) {
+        spapr->rma_size = MIN(spapr->rma_size, smc->rma_limit);
+    }
 
     if (spapr->rma_size > node0_size) {
         error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
@@ -4598,6 +4600,7 @@ static void spapr_machine_4_2_class_options(MachineClass *mc)
     compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
     smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
     smc->default_caps.caps[SPAPR_CAP_FWNMI_MCE] = SPAPR_CAP_OFF;
+    smc->rma_limit = 16 * GiB;
     mc->nvdimm_supported = false;
 }
 
@@ -126,6 +126,7 @@ struct SpaprMachineClass {
     bool pre_4_1_migration; /* don't migrate hpt-max-page-size */
     bool linux_pci_probe;
     bool smp_threads_vsmt; /* set VSMT to smp_threads by default */
+    hwaddr rma_limit; /* clamp the RMA to this size */
 
     void (*phb_placement)(SpaprMachineState *spapr, uint32_t index,
                           uint64_t *buid, hwaddr *pio,
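For readers who want to see the two behaviours side by side, here is a minimal,
self-contained sketch (illustration only, not QEMU code; the clamp_rma() helper
and the sizes are invented). It mirrors the new logic: a zero rma_limit means
"no clamp", which is what the 5.0 machine type gets by leaving the class field
at its zero default, while the 4.2-and-older class options set 16 GiB. Note
also that the removed constant 0x400000000ull is 2^34 bytes, i.e. exactly the
old hard-coded 16 GiB clamp.

/*
 * Illustration only (not QEMU code): how the new rma_limit clamp behaves
 * for old and new machine types.  clamp_rma() and the sizes are invented
 * stand-ins for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define GiB (1024ULL * 1024 * 1024)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint64_t clamp_rma(uint64_t rma_size, uint64_t rma_limit)
{
    /* rma_limit == 0 means "no limit", mirroring the patched code path */
    if (rma_limit) {
        rma_size = MIN(rma_size, rma_limit);
    }
    return rma_size;
}

int main(void)
{
    uint64_t node0_size = 256 * GiB;  /* hypothetical NUMA node 0 size */

    /* pseries-4.2 and older: the machine class sets rma_limit = 16 * GiB */
    printf("4.2 RMA: %llu GiB\n",
           (unsigned long long)(clamp_rma(node0_size, 16 * GiB) / GiB));

    /* pseries-5.0: rma_limit is left at 0, so no clamp is applied here */
    printf("5.0 RMA: %llu GiB\n",
           (unsigned long long)(clamp_rma(node0_size, 0) / GiB));

    return 0;
}

With these inputs the 4.2 case prints 16 GiB while the 5.0 case prints 256 GiB,
matching the commit message: 16 GiB for machine types 4.2 and earlier, unlimited
for the new 5.0 machine type.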