linux/arch/powerpc/platforms/pseries/suspend.c
Michal Hocko 0ee931c4e3 mm: treewide: remove GFP_TEMPORARY allocation flag
GFP_TEMPORARY was introduced by commit e12ba74d8f ("Group short-lived
and reclaimable kernel allocations") along with __GFP_RECLAIMABLE.  Its
primary motivation was to let users mark an allocation as short-lived so
that the allocator could try to place such allocations close together
and prevent long-term fragmentation.  As much as this sounds like a
reasonable semantic, it is much less clear when to actually use the
high-level GFP_TEMPORARY allocation flag.  How long is temporary?  Can
the context holding that memory sleep?  Can it take locks?  There seems
to be no good answer to those questions.

The current implementation of GFP_TEMPORARY is basically GFP_KERNEL |
__GFP_RECLAIMABLE, which is itself tricky because essentially none of
the existing callers provide a way to reclaim the allocated memory.  So
the flag is rather misleading and hard to evaluate for any benefit.
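
For illustration, a minimal sketch of what the tree-wide conversion
looks like at a typical call site (buf and len are hypothetical names;
the comment restates the equivalence described above rather than quoting
the literal include/linux/gfp.h definition):

	/* before: GFP_TEMPORARY, effectively GFP_KERNEL | __GFP_RECLAIMABLE */
	buf = kmalloc(len, GFP_TEMPORARY);

	/* after: the caller simply uses GFP_KERNEL */
	buf = kmalloc(len, GFP_KERNEL);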

I have checked some random users and none of them added the flag with a
specific justification.  I suspect most of them simply copied it from
other existing users, and others just thought it might be a good idea
without measuring anything.  This suggests that GFP_TEMPORARY mostly
invites cargo-cult usage without any reasoning behind it.

I believe our gfp flags are already quite complex, and those with
high-level semantics in particular should be clearly defined to prevent
confusion and abuse.  Therefore I propose dropping GFP_TEMPORARY and
converting all existing users to plain GFP_KERNEL.  Please note that
SLAB users with shrinkers will still get the __GFP_RECLAIMABLE heuristic
and so will be placed properly with respect to memory fragmentation
prevention.
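
For example, a cache created with SLAB_RECLAIM_ACCOUNT (the usual setup
for such shrinker-backed caches; the cache and object names below are
hypothetical) still has its backing pages allocated with
__GFP_RECLAIMABLE regardless of the gfp flags passed at allocation time:

	/* slab pages for this cache stay grouped with reclaimable memory */
	cache = kmem_cache_create("example_cache", sizeof(struct example_obj),
				  0, SLAB_RECLAIM_ACCOUNT, NULL);
	obj = kmem_cache_alloc(cache, GFP_KERNEL);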

I can see reasons we might want some gfp flag to reflect short-term
allocations, but I propose starting from a clear semantic definition and
only then adding users with proper justification.

This was brought up before LSF this year by Matthew [1] and it turned
out that GFP_TEMPORARY really doesn't have a clear semantic.  It seems
to be a heuristic without any measured advantage for most (if not all)
of its current users.  The follow-up discussion revealed that opinions
on what counts as a temporary allocation differ a lot between
developers.  So rather than trying to tweak existing users into a
semantic they never expected, I propose simply removing the flag and
starting from scratch if we really need a semantic for short-term
allocations.

[1] http://lkml.kernel.org/r/20170118054945.GD18349@bombadil.infradead.org

[akpm@linux-foundation.org: fix typo]
[akpm@linux-foundation.org: coding-style fixes]
[sfr@canb.auug.org.au: drm/i915: fix up]
  Link: http://lkml.kernel.org/r/20170816144703.378d4f4d@canb.auug.org.au
Link: http://lkml.kernel.org/r/20170728091904.14627-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Neil Brown <neilb@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2017-09-13 18:53:16 -07:00

/*
 * Copyright (C) 2010 Brian King IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
#include "../../kernel/cacheinfo.h"
static u64 stream_id;
static struct device suspend_dev;
static DECLARE_COMPLETION(suspend_work);
static struct rtas_suspend_me_data suspend_data;
static atomic_t suspending;
/**
* pseries_suspend_begin - First phase of hibernation
*
* Check to ensure we are in a valid state to hibernate
*
* Return value:
* 0 on success / other on failure
**/
static int pseries_suspend_begin(suspend_state_t state)
{
long vasi_state, rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
/* Make sure the state is valid */
rc = plpar_hcall(H_VASI_STATE, retbuf, stream_id);
vasi_state = retbuf[0];
if (rc) {
pr_err("pseries_suspend_begin: vasi_state returned %ld\n",rc);
return rc;
} else if (vasi_state == H_VASI_ENABLED) {
return -EAGAIN;
} else if (vasi_state != H_VASI_SUSPENDING) {
pr_err("pseries_suspend_begin: vasi_state returned state %ld\n",
vasi_state);
return -EIO;
}
return 0;
}

/**
 * pseries_suspend_cpu - Suspend a single CPU
 *
 * Makes the H_JOIN call to suspend the CPU
 *
 **/
static int pseries_suspend_cpu(void)
{
	if (atomic_read(&suspending))
		return rtas_suspend_cpu(&suspend_data);
	return 0;
}

/**
 * pseries_suspend_enable_irqs
 *
 * Post suspend configuration updates
 *
 **/
static void pseries_suspend_enable_irqs(void)
{
	/*
	 * Update configuration which can be modified based on device tree
	 * changes during resume.
	 */
	cacheinfo_cpu_offline(smp_processor_id());
	post_mobility_fixup();
	cacheinfo_cpu_online(smp_processor_id());
}

/**
 * pseries_suspend_enter - Final phase of hibernation
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int pseries_suspend_enter(suspend_state_t state)
{
	int rc = rtas_suspend_last_cpu(&suspend_data);

	atomic_set(&suspending, 0);
	atomic_set(&suspend_data.done, 1);
	return rc;
}

/**
 * pseries_prepare_late - Prepare to suspend all other CPUs
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int pseries_prepare_late(void)
{
	atomic_set(&suspending, 1);
	atomic_set(&suspend_data.working, 0);
	atomic_set(&suspend_data.done, 0);
	atomic_set(&suspend_data.error, 0);
	suspend_data.complete = &suspend_work;
	reinit_completion(&suspend_work);
	return 0;
}

/**
 * store_hibernate - Initiate partition hibernation
 * @dev: subsys root device
 * @attr: device attribute struct
 * @buf: buffer
 * @count: buffer size
 *
 * Write the stream ID received from the HMC to this file
 * to trigger hibernating the partition
 *
 * Return value:
 * 	number of bytes printed to buffer / other on failure
 **/
static ssize_t store_hibernate(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	cpumask_var_t offline_mask;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
		return -ENOMEM;

	stream_id = simple_strtoul(buf, NULL, 16);

	do {
		rc = pseries_suspend_begin(PM_SUSPEND_MEM);
		if (rc == -EAGAIN)
			ssleep(1);
	} while (rc == -EAGAIN);

	if (!rc) {
		/* All present CPUs must be online */
		cpumask_andnot(offline_mask, cpu_present_mask,
			       cpu_online_mask);
		rc = rtas_online_cpus_mask(offline_mask);
		if (rc) {
			pr_err("%s: Could not bring present CPUs online.\n",
			       __func__);
			goto out;
		}

		stop_topology_update();
		rc = pm_suspend(PM_SUSPEND_MEM);
		start_topology_update();

		/* Take down CPUs not online prior to suspend */
		if (!rtas_offline_cpus_mask(offline_mask))
			pr_warn("%s: Could not restore CPUs to offline "
				"state.\n", __func__);
	}

	stream_id = 0;

	if (!rc)
		rc = count;
out:
	free_cpumask_var(offline_mask);
	return rc;
}

#define USER_DT_UPDATE	0
#define KERN_DT_UPDATE	1

/**
 * show_hibernate - Report device tree update responsibility
 * @dev: subsys root device
 * @attr: device attribute struct
 * @buf: buffer
 *
 * Report whether a device tree update is performed by the kernel after a
 * resume, or if drmgr must coordinate the update from user space.
 *
 * Return value:
 * 	0 if drmgr is to initiate update, and 1 otherwise
 **/
static ssize_t show_hibernate(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", KERN_DT_UPDATE);
}

static DEVICE_ATTR(hibernate, S_IWUSR | S_IRUGO,
		   show_hibernate, store_hibernate);

static struct bus_type suspend_subsys = {
	.name = "power",
	.dev_name = "power",
};

static const struct platform_suspend_ops pseries_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.begin		= pseries_suspend_begin,
	.prepare_late	= pseries_prepare_late,
	.enter		= pseries_suspend_enter,
};

/**
 * pseries_suspend_sysfs_register - Register with sysfs
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int pseries_suspend_sysfs_register(struct device *dev)
{
	int rc;

	if ((rc = subsys_system_register(&suspend_subsys, NULL)))
		return rc;

	dev->id = 0;
	dev->bus = &suspend_subsys;

	if ((rc = device_create_file(suspend_subsys.dev_root, &dev_attr_hibernate)))
		goto subsys_unregister;

	return 0;

subsys_unregister:
	bus_unregister(&suspend_subsys);
	return rc;
}

/**
 * pseries_suspend_init - initcall for pSeries suspend
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int __init pseries_suspend_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	suspend_data.token = rtas_token("ibm,suspend-me");
	if (suspend_data.token == RTAS_UNKNOWN_SERVICE)
		return 0;

	if ((rc = pseries_suspend_sysfs_register(&suspend_dev)))
		return rc;

	ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
	ppc_md.suspend_enable_irqs = pseries_suspend_enable_irqs;
	suspend_set_ops(&pseries_suspend_ops);

	return 0;
}
machine_device_initcall(pseries, pseries_suspend_init);