/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * drmem.h: Power specific logical memory block representation
 *
 * Copyright 2017 IBM Corporation
 */

#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H

#include <linux/sched.h>

struct drmem_lmb {
        u64     base_addr;
        u32     drc_index;
        u32     aa_index;
        u32     flags;
};

struct drmem_lmb_info {
        struct drmem_lmb        *lmbs;
        int                     n_lmbs;
        u64                     lmb_size;
};

extern struct drmem_lmb_info *drmem_info;

static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
                                               const struct drmem_lmb *start)
{
        /*
         * DLPAR code paths can take several milliseconds per element
         * when interacting with firmware. Ensure that we don't
         * unfairly monopolize the CPU.
         */
        if (((++lmb - start) % 16) == 0)
                cond_resched();

        return lmb;
}

#define for_each_drmem_lmb_in_range(lmb, start, end)            \
        for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))

#define for_each_drmem_lmb(lmb)                                 \
        for_each_drmem_lmb_in_range((lmb),                      \
                &drmem_info->lmbs[0],                           \
                &drmem_info->lmbs[drmem_info->n_lmbs])

powerpc/pseries: Avoid NULL pointer dereference when drmem is unavailable

In guests without hotpluggable memory the drmem structure is only
zero-initialized, so trying to manipulate DLPAR parameters results in a
crash:

$ echo "memory add count 1" > /sys/kernel/dlpar
Oops: Kernel access of bad area, sig: 11 [#1]
LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
...
NIP: c0000000000ff294 LR: c0000000000ff248 CTR: 0000000000000000
REGS: c0000000fb9d3880 TRAP: 0300 Tainted: G E (5.5.0-rc6-2-default)
MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 28242428 XER: 20000000
CFAR: c0000000009a6c10 DAR: 0000000000000010 DSISR: 40000000 IRQMASK: 0
...
NIP dlpar_memory+0x6e4/0xd00
LR dlpar_memory+0x698/0xd00
Call Trace:
  dlpar_memory+0x698/0xd00 (unreliable)
  handle_dlpar_errorlog+0xc0/0x190
  dlpar_store+0x198/0x4a0
  kobj_attr_store+0x30/0x50
  sysfs_kf_write+0x64/0x90
  kernfs_fop_write+0x1b0/0x290
  __vfs_write+0x3c/0x70
  vfs_write+0xd0/0x260
  ksys_write+0xdc/0x130
  system_call+0x5c/0x68

Taking a closer look at the code, for_each_drmem_lmb is a macro that,
before this change, expanded into `for (lmb = &drmem_info->lmbs[0];
lmb <= &drmem_info->lmbs[drmem_info->n_lmbs - 1]; lmb++)`. When
drmem_info->lmbs is NULL, that loop would iterate through the whole
address range if it weren't stopped by the NULL pointer dereference on
the next line.

This patch aligns the for_each_drmem_lmb and for_each_drmem_lmb_in_range
macro behavior with common C semantics, where the end marker does not
belong to the scanned range, and alters the semantics of get_lmb_range().
As a side effect, the wraparound observed in the crash is prevented.

Fixes: 6c6ea53725b3 ("powerpc/mm: Separate ibm, dynamic-memory data from DT format")
Cc: stable@vger.kernel.org # v4.16+
Signed-off-by: Libor Pechacek <lpechacek@suse.cz>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200131132829.10281-1-msuchanek@suse.de
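
To illustrate the iterator semantics described in the commit message above,
here is a minimal, hypothetical usage sketch (count_assigned_lmbs() is an
invented name, not part of this header); it assumes a pSeries kernel context
where drmem_info has been populated from the device tree.

/*
 * Hypothetical sketch, not part of drmem.h: count the LMBs that firmware
 * has marked as assigned.  Because the end marker is exclusive, the loop
 * body is never entered when drmem_info->lmbs is NULL and n_lmbs is 0.
 */
static unsigned int count_assigned_lmbs(void)
{
        struct drmem_lmb *lmb;
        unsigned int count = 0;

        for_each_drmem_lmb(lmb) {
                if (lmb->flags & DRCONF_MEM_ASSIGNED)
                        count++;
        }

        return count;
}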

/*
 * The of_drconf_cell_v1 struct defines the layout of the LMB data
 * specified in the ibm,dynamic-memory device tree property.
 * The property itself is a 32-bit value specifying the number of
 * LMBs followed by an array of of_drconf_cell_v1 entries, one
 * per LMB.
 */
struct of_drconf_cell_v1 {
        __be64  base_addr;
        __be32  drc_index;
        __be32  reserved;
        __be32  aa_index;
        __be32  flags;
};
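
As a worked example of the v1 layout described above, the following is a
minimal, hypothetical decoding sketch (read_one_v1_cell() is an invented
name, and the two-cell address width is an assumption; the in-kernel parser
derives it from the root node's #address-cells instead of hard-coding it).

/*
 * Hypothetical sketch, not part of drmem.h: decode one v1 cell from the
 * flattened ibm,dynamic-memory property into a drmem_lmb.
 */
static const __be32 *read_one_v1_cell(struct drmem_lmb *lmb, const __be32 *p)
{
        lmb->base_addr = of_read_number(p, 2);  /* __be64 base_addr */
        p += 2;
        lmb->drc_index = of_read_number(p++, 1);
        p++;                                    /* skip reserved field */
        lmb->aa_index = of_read_number(p++, 1);
        lmb->flags = of_read_number(p++, 1);

        return p;
}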

/*
 * Version 2 of the ibm,dynamic-memory property is defined as a
 * 32-bit value specifying the number of LMB sets followed by an
 * array of of_drconf_cell_v2 entries, one per LMB set.
 */
struct of_drconf_cell_v2 {
        u32     seq_lmbs;
        u64     base_addr;
        u32     drc_index;
        u32     aa_index;
        u32     flags;
} __packed;
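
Each v2 entry compresses a run of LMBs into one set. Below is a minimal,
hypothetical sketch of expanding such a set into individual drmem_lmb
entries (expand_v2_set() is an invented helper; it assumes the set's members
have consecutive base addresses and DRC indexes and share aa_index/flags).

/*
 * Hypothetical sketch, not part of drmem.h: expand one v2 set into
 * seq_lmbs individual drmem_lmb entries.
 */
static void expand_v2_set(const struct of_drconf_cell_v2 *set,
                          struct drmem_lmb *out, u64 lmb_size)
{
        u32 i;

        for (i = 0; i < set->seq_lmbs; i++) {
                out[i].base_addr = set->base_addr + i * lmb_size;
                out[i].drc_index = set->drc_index + i;
                out[i].aa_index  = set->aa_index;
                out[i].flags     = set->flags;
        }
}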

#define DRCONF_MEM_ASSIGNED     0x00000008
#define DRCONF_MEM_AI_INVALID   0x00000040
#define DRCONF_MEM_RESERVED     0x00000080
#define DRCONF_MEM_HOTREMOVABLE 0x00000100

powerpc/kernel: Enables memory hot-remove after reboot on pseries guests

When provisioning guests, it is desirable to resize their memory on demand.
Currently this is possible by creating a guest with a small base memory,
hot-plugging the rest, and using the 'movable_node' kernel command-line
parameter, which puts all hot-plugged memory in ZONE_MOVABLE and so allows
it to be removed whenever needed.

But there is an issue regarding guest reboot: if memory is hot-plugged and
the guest is then rebooted, all hot-plugged memory goes to ZONE_NORMAL,
which offers no hot-removal guarantee. This usually prevents the memory
from being hot-removed from the guest.

It is possible to use device-tree information to fix that behavior, since
it stores flags for each LMB range in ibm,dynamic-memory-vN. It involves
marking each such memblock as hotpluggable memory, which mm/memblock.c
puts in ZONE_MOVABLE during boot when 'movable_node' is passed.

To carry this information, the new flag DRCONF_MEM_HOTREMOVABLE was
proposed and accepted into the Power Architecture documentation. The flag
should be:

- true (b=1) if the hypervisor may want to hot-remove the LMB later, and
- false (b=0) if it does not care.

During boot, the guest kernel reads the device tree and
early_init_drmem_lmb() is called for every added LMB. Checking for this new
flag there and marking the memblock as hotpluggable is enough to get the
desired behavior.

This should cause no change when the 'movable_node' parameter is not passed
on the kernel command line.

Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
Reviewed-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200402195156.626430-1-leonardo@linux.ibm.com
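
A minimal, hypothetical sketch of how early boot code can act on
DRCONF_MEM_HOTREMOVABLE follows (note_hotremovable_lmb() is an invented
name and <linux/memblock.h> is assumed to be included by the caller); the
real handling lives in the powerpc early device-tree code, this only
illustrates the flag's intent.

/*
 * Hypothetical sketch, not part of drmem.h: mark a hot-removable LMB so
 * that mm/memblock.c can place it in ZONE_MOVABLE when 'movable_node'
 * is on the kernel command line.
 */
static void __init note_hotremovable_lmb(const struct drmem_lmb *lmb, u64 size)
{
        /* Only LMBs assigned to this partition contribute memory. */
        if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
                return;

        if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
                memblock_mark_hotplug(lmb->base_addr, size);
}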

static inline u64 drmem_lmb_size(void)
{
        return drmem_info->lmb_size;
}

#define DRMEM_LMB_RESERVED      0x80000000

static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
{
        lmb->flags |= DRMEM_LMB_RESERVED;
}

static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
{
        lmb->flags &= ~DRMEM_LMB_RESERVED;
}

static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
{
        return lmb->flags & DRMEM_LMB_RESERVED;
}
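
The reservation bit is purely a software marker. Below is a minimal,
hypothetical sketch of how a multi-LMB operation might use these helpers
(claim_lmbs() is an invented helper; serialization against concurrent DLPAR
requests is assumed to be handled by the caller).

/*
 * Hypothetical sketch, not part of drmem.h: reserve lmbs_wanted LMBs for
 * a multi-LMB operation.
 */
static int claim_lmbs(unsigned int lmbs_wanted)
{
        struct drmem_lmb *lmb;
        unsigned int available = 0, claimed = 0;

        /* First pass: is the request satisfiable at all? */
        for_each_drmem_lmb(lmb) {
                if (!drmem_lmb_reserved(lmb))
                        available++;
        }
        if (available < lmbs_wanted)
                return -ENOSPC;

        /* Second pass: claim exactly lmbs_wanted unreserved LMBs. */
        for_each_drmem_lmb(lmb) {
                if (claimed == lmbs_wanted)
                        break;
                if (!drmem_lmb_reserved(lmb)) {
                        drmem_mark_lmb_reserved(lmb);
                        claimed++;
                }
        }

        return 0;
}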

u64 drmem_lmb_memory_max(void);
int walk_drmem_lmbs(struct device_node *dn, void *data,
                    int (*func)(struct drmem_lmb *, const __be32 **, void *));
int drmem_update_dt(void);
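
walk_drmem_lmbs() invokes the supplied callback for each LMB it finds in the
given device node. The following is a minimal, hypothetical caller sketch
(count_reserved_cb() and count_reserved_lmbs() are invented names; the
const __be32 ** property cursor is simply ignored here).

/*
 * Hypothetical sketch, not part of drmem.h: a walk_drmem_lmbs() callback
 * that tallies reserved LMBs.
 */
static int count_reserved_cb(struct drmem_lmb *lmb, const __be32 **usm,
                             void *data)
{
        unsigned int *count = data;

        if (lmb->flags & DRCONF_MEM_RESERVED)
                (*count)++;

        return 0;
}

static unsigned int count_reserved_lmbs(struct device_node *dn)
{
        unsigned int count = 0;

        walk_drmem_lmbs(dn, &count, count_reserved_cb);
        return count;
}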

#ifdef CONFIG_PPC_PSERIES
int __init
walk_drmem_lmbs_early(unsigned long node, void *data,
                      int (*func)(struct drmem_lmb *, const __be32 **, void *));

pseries/drmem: update LMBs after LPM

After an LPM (Live Partition Migration), the device tree node
ibm,dynamic-reconfiguration-memory may be updated by the hypervisor when
the NUMA topology of the LPAR's memory changes.

This is handled by the kernel, but the memory's node is not updated,
because there is no way to move a memory block between NUMA nodes from the
Linux kernel's point of view.

If a memory block is later added or removed, drmem_update_dt() is called
and overwrites the DT node ibm,dynamic-reconfiguration-memory to match the
added or removed LMB. But the LMB's associativity has not been updated
after the DT node update, so the node is overwritten with Linux's topology
instead of the hypervisor's.

Introduce a hook that is called when the ibm,dynamic-reconfiguration-memory
node is updated, to force an update of the LMBs' associativity. However,
ignore the call to that hook when the update has been triggered by
drmem_update_dt(), because in that case the LMB tree has already been used
to set the DT property and does not need to be updated back. Since
drmem_update_dt() is called under the protection of the
device_hotplug_lock and the hook is called in the same context, a simple
boolean variable is enough to detect that call.

Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Nathan Lynch <nathanl@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210517090606.56930-1-ldufour@linux.ibm.com

void drmem_update_lmbs(struct property *prop);
#endif

static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
{
        lmb->aa_index = 0xffffffff;
}

#endif /* _ASM_POWERPC_LMB_H */