Merge branches 'pm-core' and 'pm-sleep'
* pm-core:
  PM: runtime: Clarify documentation when callbacks are unassigned
  PM: runtime: Allow unassigned ->runtime_suspend|resume callbacks
  PM: runtime: Improve path in rpm_idle() when no callback
  PM: runtime: document common mistake with pm_runtime_get_sync()

* pm-sleep:
  PM: hibernate: remove leading spaces before tabs
  PM: sleep: remove trailing spaces and tabs
  PM: hibernate: fix spelling mistakes
  PM: wakeirq: Set IRQF_NO_AUTOEN when requesting the IRQ
commit afe94fb82c
Documentation/power/runtime_pm.rst

@@ -378,7 +378,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:

   `int pm_runtime_get_sync(struct device *dev);`
     - increment the device's usage counter, run pm_runtime_resume(dev) and
-      return its result
+      return its result;
+      note that it does not drop the device's usage counter on errors, so
+      consider using pm_runtime_resume_and_get() instead of it, especially
+      if its return value is checked by the caller, as this is likely to
+      result in cleaner code.

   `int pm_runtime_get_if_in_use(struct device *dev);`
     - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
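To make the documented pitfall concrete, here is a minimal sketch (the foo_* helpers are hypothetical illustrations, not part of this commit):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Error-prone pattern: pm_runtime_get_sync() increments the usage
 * counter even when the resume fails, so the error path has to drop
 * the reference by hand or it leaks.
 */
static int foo_start_fragile(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return ret;
	}

	/* ... use the device ... */

	pm_runtime_put(dev);
	return 0;
}

/*
 * Recommended pattern: pm_runtime_resume_and_get() drops the usage
 * counter itself on failure, so the caller's error path stays plain.
 */
static int foo_start(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;

	/* ... use the device ... */

	pm_runtime_put(dev);
	return 0;
}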
@@ -827,6 +831,15 @@ or driver about runtime power changes. Instead, the driver for the device's
 parent must take responsibility for telling the device's driver when the
 parent's power state changes.

+Note that, in some cases it may not be desirable for subsystems/drivers to call
+pm_runtime_no_callbacks() for their devices. This could be because a subset of
+the runtime PM callbacks needs to be implemented, a platform dependent PM
+domain could get attached to the device or that the device is power managed
+through a supplier device link. For these reasons and to avoid boilerplate code
+in subsystems/drivers, the PM core allows runtime PM callbacks to be
+unassigned. More precisely, if a callback pointer is NULL, the PM core will act
+as though there was a callback and it returned 0.
+
 9. Autosuspend, or automatically-delayed suspends
 =================================================
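As a sketch of what the unassigned-callback support enables (the foo_probe() driver below is hypothetical, not from this commit), a device that is power managed through a PM domain or a supplier device link can now use runtime PM without stub callbacks or a pm_runtime_no_callbacks() call:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/*
 * No dev_pm_ops are registered here; the PM core treats the NULL
 * ->runtime_suspend() and ->runtime_resume() pointers as callbacks
 * that exist and return 0.
 */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... bring up the hardware ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}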
drivers/base/power/runtime.c

@@ -345,7 +345,7 @@ static void rpm_suspend_suppliers(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-	int retval, idx;
+	int retval = 0, idx;
 	bool use_links = dev->power.links_count > 0;

 	if (dev->power.irq_safe) {
@@ -373,7 +373,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 		}
 	}

-	retval = cb(dev);
+	if (cb)
+		retval = cb(dev);

 	if (dev->power.irq_safe) {
 		spin_lock(&dev->power.lock);
@@ -446,7 +447,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	/* Pending requests need to be canceled. */
 	dev->power.request = RPM_REQ_NONE;

-	if (dev->power.no_callbacks)
+	callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+	/* If no callback assume success. */
+	if (!callback || dev->power.no_callbacks)
 		goto out;

 	/* Carry out an asynchronous or a synchronous idle notification. */
@@ -462,10 +466,7 @@ static int rpm_idle(struct device *dev, int rpmflags)

 	dev->power.idle_notification = true;

-	callback = RPM_GET_CALLBACK(dev, runtime_idle);
-
-	if (callback)
-		retval = __rpm_callback(callback, dev);
+	retval = __rpm_callback(callback, dev);

 	dev->power.idle_notification = false;
 	wake_up_all(&dev->power.wait_queue);
@@ -484,9 +485,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 {
 	int retval;

-	if (!cb)
-		return -ENOSYS;
-
 	if (dev->power.memalloc_noio) {
 		unsigned int noio_flag;

drivers/base/power/wakeirq.c

@@ -182,7 +182,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)

 	wirq->dev = dev;
 	wirq->irq = irq;
-	irq_set_status_flags(irq, IRQ_NOAUTOEN);

 	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
 	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
@@ -192,7 +191,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	 * so we use a threaded irq.
 	 */
 	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
-				   IRQF_ONESHOT, wirq->name, wirq);
+				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
+				   wirq->name, wirq);
 	if (err)
 		goto err_free_name;

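For context, IRQF_NO_AUTOEN tells request_threaded_irq() to leave the interrupt disabled after registering it, which replaces the separate irq_set_status_flags(irq, IRQ_NOAUTOEN) call and closes the window between those two steps. A minimal sketch of the general pattern (the foo_* names are hypothetical):

#include <linux/interrupt.h>

static irqreturn_t foo_wake_handler(int irq, void *data)
{
	/* ... process the wakeup event ... */
	return IRQ_HANDLED;
}

/*
 * The IRQ is requested in the disabled state and only fires after an
 * explicit enable_irq(), e.g. from the device's suspend path.
 */
static int foo_setup_wake_irq(int irq, void *data)
{
	return request_threaded_irq(irq, NULL, foo_wake_handler,
				    IRQF_ONESHOT | IRQF_NO_AUTOEN,
				    "foo-wakeirq", data);
}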
include/linux/pm_runtime.h

@@ -380,6 +380,9 @@ static inline int pm_runtime_get(struct device *dev)
  * The possible return values of this function are the same as for
  * pm_runtime_resume() and the runtime PM usage counter of @dev remains
  * incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
  */
 static inline int pm_runtime_get_sync(struct device *dev)
 {
kernel/power/Kconfig

@@ -98,20 +98,20 @@ config PM_STD_PARTITION
 	default ""
 	help
 	  The default resume partition is the partition that the suspend-
-	  to-disk implementation will look for a suspended disk image. 
+	  to-disk implementation will look for a suspended disk image.

-	  The partition specified here will be different for almost every user. 
+	  The partition specified here will be different for almost every user.
 	  It should be a valid swap partition (at least for now) that is turned
-	  on before suspending. 
+	  on before suspending.

 	  The partition specified can be overridden by specifying:

-		resume=/dev/<other device> 
+		resume=/dev/<other device>

-	  which will set the resume partition to the device specified. 
+	  which will set the resume partition to the device specified.

 	  Note there is currently not a way to specify which device to save the
-	  suspended image to. It will simply pick the first available swap 
+	  suspended image to. It will simply pick the first available swap
 	  device.

 config PM_SLEEP
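As a usage sketch (the device path below is hypothetical, not from this commit), the compiled-in default can be overridden from the bootloader by appending the parameter to the kernel command line:

# e.g. in /etc/default/grub, assuming the swap partition is /dev/sda2
GRUB_CMDLINE_LINUX="resume=/dev/sda2"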
kernel/power/process.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * drivers/power/process.c - Functions for starting/stopping processes on 
+ * drivers/power/process.c - Functions for starting/stopping processes on
  *     suspend transitions.
  *
  * Originally from swsusp.
kernel/power/snapshot.c

@@ -331,7 +331,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  *
  * Memory bitmap is a structure consisting of many linked lists of
  * objects. The main list's elements are of type struct zone_bitmap
- * and each of them corresonds to one zone. For each zone bitmap
+ * and each of them corresponds to one zone. For each zone bitmap
  * object there is a list of objects of type struct bm_block that
  * represent each blocks of bitmap in which information is stored.
  *
@@ -1146,7 +1146,7 @@ int create_basic_memory_bitmaps(void)
  Free_second_object:
 	kfree(bm2);
  Free_first_bitmap:
-	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
+	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
  Free_first_object:
 	kfree(bm1);
 	return -ENOMEM;
@@ -1500,7 +1500,7 @@ static struct memory_bitmap copy_bm;
 /**
  * swsusp_free - Free pages allocated for hibernation image.
  *
- * Image pages are alocated before snapshot creation, so they need to be
+ * Image pages are allocated before snapshot creation, so they need to be
  * released after resume.
  */
 void swsusp_free(void)
@@ -2326,7 +2326,7 @@ static struct memory_bitmap *safe_highmem_bm;
  * (@nr_highmem_p points to the variable containing the number of highmem image
  * pages). The pages that are "safe" (ie. will not be overwritten when the
  * hibernation image is restored entirely) have the corresponding bits set in
- * @bm (it must be unitialized).
+ * @bm (it must be uninitialized).
  *
  * NOTE: This function should not be called if there are no highmem image pages.
  */
@@ -2483,7 +2483,7 @@ static inline void free_highmem_data(void) {}

 /**
  * prepare_image - Make room for loading hibernation image.
- * @new_bm: Unitialized memory bitmap structure.
+ * @new_bm: Uninitialized memory bitmap structure.
  * @bm: Memory bitmap with unsafe pages marked.
  *
  * Use @bm to mark the pages that will be overwritten in the process of
kernel/power/swap.c

@@ -1125,7 +1125,7 @@ struct dec_data {
 };

 /**
- * Deompression function that runs in its own thread.
+ * Decompression function that runs in its own thread.
  */
 static int lzo_decompress_threadfn(void *data)
 {