// SPDX-License-Identifier: GPL-2.0
/*
 * Alarmtimer interface
 *
 * This interface provides a timer which is similar to hrtimers,
 * but triggers an RTC alarm if the box is suspended.
 *
 * This interface is influenced by the Android RTC Alarm timer
 * interface.
 *
 * Copyright (C) 2010 IBM Corporation
 *
 * Author: John Stultz <john.stultz@linaro.org>
 */
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/timerqueue.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/alarmtimer.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/posix-timers.h>
#include <linux/workqueue.h>
#include <linux/freezer.h>
#include <linux/compat.h>
#include <linux/module.h>

#include "posix-timers.h"

#define CREATE_TRACE_POINTS
#include <trace/events/alarmtimer.h>

/**
 * struct alarm_base - Alarm timer bases
 * @lock:		Lock for synchronized access to the base
 * @timerqueue:		Timerqueue head managing the list of events
 * @gettime:		Function to read the time correlating to the base
 * @base_clockid:	clockid for the base
 */
static struct alarm_base {
	spinlock_t		lock;
	struct timerqueue_head	timerqueue;
	ktime_t			(*gettime)(void);
	clockid_t		base_clockid;
} alarm_bases[ALARM_NUMTYPE];

#if defined(CONFIG_POSIX_TIMERS) || defined(CONFIG_RTC_CLASS)
/* freezer information to handle clock_nanosleep triggered wakeups */
static enum alarmtimer_type freezer_alarmtype;
static ktime_t freezer_expires;
static ktime_t freezer_delta;
static DEFINE_SPINLOCK(freezer_delta_lock);
#endif

#ifdef CONFIG_RTC_CLASS
static struct wakeup_source *ws;

/* rtc timer and device for setting alarm wakeups at suspend */
static struct rtc_timer rtctimer;
static struct rtc_device *rtcdev;
static DEFINE_SPINLOCK(rtcdev_lock);

/**
 * alarmtimer_get_rtcdev - Return selected rtcdevice
 *
 * This function returns the rtc device to use for wakealarms.
 * If one has not already been chosen, it checks to see if a
 * functional rtc device is available.
 */
struct rtc_device *alarmtimer_get_rtcdev(void)
{
	unsigned long flags;
	struct rtc_device *ret;

	spin_lock_irqsave(&rtcdev_lock, flags);
	ret = rtcdev;
	spin_unlock_irqrestore(&rtcdev_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(alarmtimer_get_rtcdev);

static int alarmtimer_rtc_add_device(struct device *dev,
				     struct class_interface *class_intf)
{
	unsigned long flags;
	struct rtc_device *rtc = to_rtc_device(dev);
	struct wakeup_source *__ws;

	if (rtcdev)
		return -EBUSY;

	if (!rtc->ops->set_alarm)
		return -1;
	if (!device_may_wakeup(rtc->dev.parent))
		return -1;

	__ws = wakeup_source_register(dev, "alarmtimer");

	spin_lock_irqsave(&rtcdev_lock, flags);
	if (!rtcdev) {
		if (!try_module_get(rtc->owner)) {
			spin_unlock_irqrestore(&rtcdev_lock, flags);
			return -1;
		}

		rtcdev = rtc;
		/* hold a reference so it doesn't go away */
		get_device(dev);
		ws = __ws;
		__ws = NULL;
	}
	spin_unlock_irqrestore(&rtcdev_lock, flags);

	wakeup_source_unregister(__ws);

	return 0;
}

static inline void alarmtimer_rtc_timer_init(void)
{
	rtc_timer_init(&rtctimer, NULL, NULL);
}

static struct class_interface alarmtimer_rtc_interface = {
	.add_dev = &alarmtimer_rtc_add_device,
};

static int alarmtimer_rtc_interface_setup(void)
{
	alarmtimer_rtc_interface.class = rtc_class;
	return class_interface_register(&alarmtimer_rtc_interface);
}
static void alarmtimer_rtc_interface_remove(void)
{
	class_interface_unregister(&alarmtimer_rtc_interface);
}
#else
struct rtc_device *alarmtimer_get_rtcdev(void)
{
	return NULL;
}
#define rtcdev (NULL)
static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
static inline void alarmtimer_rtc_interface_remove(void) { }
static inline void alarmtimer_rtc_timer_init(void) { }
#endif

/**
 * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
 * @base: pointer to the base where the timer is being run
 * @alarm: pointer to alarm being enqueued.
 *
 * Adds the alarm to the alarm_base timerqueue.
 *
 * Must hold base->lock when calling.
 */
static void alarmtimer_enqueue(struct alarm_base *base, struct alarm *alarm)
{
	if (alarm->state & ALARMTIMER_STATE_ENQUEUED)
		timerqueue_del(&base->timerqueue, &alarm->node);

	timerqueue_add(&base->timerqueue, &alarm->node);
	alarm->state |= ALARMTIMER_STATE_ENQUEUED;
}

/**
 * alarmtimer_dequeue - Removes an alarm timer from an alarm_base timerqueue
 * @base: pointer to the base where the timer is running
 * @alarm: pointer to alarm being removed
 *
 * Removes the alarm from the alarm_base timerqueue.
 *
 * Must hold base->lock when calling.
 */
static void alarmtimer_dequeue(struct alarm_base *base, struct alarm *alarm)
{
	if (!(alarm->state & ALARMTIMER_STATE_ENQUEUED))
		return;

	timerqueue_del(&base->timerqueue, &alarm->node);
	alarm->state &= ~ALARMTIMER_STATE_ENQUEUED;
}


/**
 * alarmtimer_fired - Handles alarm hrtimer being fired.
 * @timer: pointer to hrtimer being run
 *
 * When an alarm timer fires, this runs through the timerqueue to
 * see which alarms expired, and runs those. If there are more alarm
 * timers queued for the future, we set the hrtimer to fire when
 * the next future alarm timer expires.
 */
static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
{
	struct alarm *alarm = container_of(timer, struct alarm, timer);
	struct alarm_base *base = &alarm_bases[alarm->type];
	unsigned long flags;
	int ret = HRTIMER_NORESTART;
	int restart = ALARMTIMER_NORESTART;

	spin_lock_irqsave(&base->lock, flags);
	alarmtimer_dequeue(base, alarm);
	spin_unlock_irqrestore(&base->lock, flags);

	if (alarm->function)
		restart = alarm->function(alarm, base->gettime());

	spin_lock_irqsave(&base->lock, flags);
	if (restart != ALARMTIMER_NORESTART) {
		hrtimer_set_expires(&alarm->timer, alarm->node.expires);
		alarmtimer_enqueue(base, alarm);
		ret = HRTIMER_RESTART;
	}
	spin_unlock_irqrestore(&base->lock, flags);

	trace_alarmtimer_fired(alarm, base->gettime());
	return ret;

}

ktime_t alarm_expires_remaining(const struct alarm *alarm)
{
	struct alarm_base *base = &alarm_bases[alarm->type];
	return ktime_sub(alarm->node.expires, base->gettime());
}
EXPORT_SYMBOL_GPL(alarm_expires_remaining);

#ifdef CONFIG_RTC_CLASS
/**
 * alarmtimer_suspend - Suspend time callback
 * @dev: unused
 *
 * When we are going into suspend, we look through the bases
 * to see which is the soonest timer to expire. We then
 * set an rtc timer to fire that far into the future, which
 * will wake us from suspend.
 */
static int alarmtimer_suspend(struct device *dev)
{
	ktime_t min, now, expires;
	int i, ret, type;
	struct rtc_device *rtc;
	unsigned long flags;
	struct rtc_time tm;

	spin_lock_irqsave(&freezer_delta_lock, flags);
	min = freezer_delta;
	expires = freezer_expires;
	type = freezer_alarmtype;
	freezer_delta = 0;
	spin_unlock_irqrestore(&freezer_delta_lock, flags);

	rtc = alarmtimer_get_rtcdev();
	/* If we have no rtcdev, just return */
	if (!rtc)
		return 0;

	/* Find the soonest timer to expire */
	for (i = 0; i < ALARM_NUMTYPE; i++) {
		struct alarm_base *base = &alarm_bases[i];
		struct timerqueue_node *next;
		ktime_t delta;

		spin_lock_irqsave(&base->lock, flags);
		next = timerqueue_getnext(&base->timerqueue);
		spin_unlock_irqrestore(&base->lock, flags);
		if (!next)
			continue;
		delta = ktime_sub(next->expires, base->gettime());
		if (!min || (delta < min)) {
			expires = next->expires;
			min = delta;
			type = i;
		}
	}
	if (min == 0)
		return 0;

	if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
		__pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
		return -EBUSY;
	}

	trace_alarmtimer_suspend(expires, type);

	/* Setup an rtc timer to fire that far in the future */
	rtc_timer_cancel(rtc, &rtctimer);
	rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	now = ktime_add(now, min);

	/* Set alarm, if in the past reject suspend briefly to handle */
	ret = rtc_timer_start(rtc, &rtctimer, now, 0);
	if (ret < 0)
		__pm_wakeup_event(ws, MSEC_PER_SEC);
	return ret;
}

static int alarmtimer_resume(struct device *dev)
{
	struct rtc_device *rtc;

	rtc = alarmtimer_get_rtcdev();
	if (rtc)
		rtc_timer_cancel(rtc, &rtctimer);
	return 0;
}

#else
static int alarmtimer_suspend(struct device *dev)
{
	return 0;
}

static int alarmtimer_resume(struct device *dev)
{
	return 0;
}
#endif

static void
__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
	     enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
{
	timerqueue_init(&alarm->node);
	alarm->timer.function = alarmtimer_fired;
	alarm->function = function;
	alarm->type = type;
	alarm->state = ALARMTIMER_STATE_INACTIVE;
}

/**
 * alarm_init - Initialize an alarm structure
 * @alarm: ptr to alarm to be initialized
 * @type: the type of the alarm
 * @function: callback that is run when the alarm fires
 */
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
		enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
{
	hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
		     HRTIMER_MODE_ABS);
	__alarm_init(alarm, type, function);
}
EXPORT_SYMBOL_GPL(alarm_init);

/**
 * alarm_start - Sets an absolute alarm to fire
 * @alarm: ptr to alarm to set
 * @start: time to run the alarm
 */
void alarm_start(struct alarm *alarm, ktime_t start)
{
	struct alarm_base *base = &alarm_bases[alarm->type];
	unsigned long flags;

	spin_lock_irqsave(&base->lock, flags);
	alarm->node.expires = start;
	alarmtimer_enqueue(base, alarm);
	hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
	spin_unlock_irqrestore(&base->lock, flags);

	trace_alarmtimer_start(alarm, base->gettime());
}
EXPORT_SYMBOL_GPL(alarm_start);

/**
 * alarm_start_relative - Sets a relative alarm to fire
 * @alarm: ptr to alarm to set
 * @start: time relative to now to run the alarm
 */
void alarm_start_relative(struct alarm *alarm, ktime_t start)
{
	struct alarm_base *base = &alarm_bases[alarm->type];

	start = ktime_add_safe(start, base->gettime());
	alarm_start(alarm, start);
}
EXPORT_SYMBOL_GPL(alarm_start_relative);
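
/*
 * Illustrative sketch of a minimal in-kernel user of the alarm API above.
 * The wake_poke_* names are hypothetical; the pattern is simply alarm_init()
 * once, then alarm_start_relative() to request a callback that still fires
 * (via the RTC) even if the system suspends in between.
 *
 *	static struct alarm wake_poke_alarm;
 *
 *	static enum alarmtimer_restart wake_poke_fired(struct alarm *a, ktime_t now)
 *	{
 *		pr_info("alarmtimer example fired\n");
 *		return ALARMTIMER_NORESTART;	// one-shot, do not requeue
 *	}
 *
 *	static void wake_poke_arm(void)
 *	{
 *		alarm_init(&wake_poke_alarm, ALARM_BOOTTIME, wake_poke_fired);
 *		alarm_start_relative(&wake_poke_alarm, ktime_set(5, 0));
 *	}
 */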

void alarm_restart(struct alarm *alarm)
{
	struct alarm_base *base = &alarm_bases[alarm->type];
	unsigned long flags;

	spin_lock_irqsave(&base->lock, flags);
	hrtimer_set_expires(&alarm->timer, alarm->node.expires);
	hrtimer_restart(&alarm->timer);
	alarmtimer_enqueue(base, alarm);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(alarm_restart);

/**
 * alarm_try_to_cancel - Tries to cancel an alarm timer
 * @alarm: ptr to alarm to be canceled
 *
 * Returns 1 if the timer was canceled, 0 if it was not running,
 * and -1 if the callback was running
 */
int alarm_try_to_cancel(struct alarm *alarm)
{
	struct alarm_base *base = &alarm_bases[alarm->type];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&base->lock, flags);
	ret = hrtimer_try_to_cancel(&alarm->timer);
	if (ret >= 0)
		alarmtimer_dequeue(base, alarm);
	spin_unlock_irqrestore(&base->lock, flags);

	trace_alarmtimer_cancel(alarm, base->gettime());
	return ret;
}
EXPORT_SYMBOL_GPL(alarm_try_to_cancel);


/**
 * alarm_cancel - Spins trying to cancel an alarm timer until it is done
 * @alarm: ptr to alarm to be canceled
 *
 * Returns 1 if the timer was canceled, 0 if it was not active.
 */
int alarm_cancel(struct alarm *alarm)
{
	for (;;) {
		int ret = alarm_try_to_cancel(alarm);
		if (ret >= 0)
			return ret;
		hrtimer_cancel_wait_running(&alarm->timer);
	}
}
EXPORT_SYMBOL_GPL(alarm_cancel);


u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
{
	u64 overrun = 1;
	ktime_t delta;

	delta = ktime_sub(now, alarm->node.expires);

	if (delta < 0)
		return 0;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		overrun = ktime_divns(delta, incr);

		alarm->node.expires = ktime_add_ns(alarm->node.expires,
						   incr*overrun);

		if (alarm->node.expires > now)
			return overrun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		overrun++;
	}

	alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
	return overrun;
}
EXPORT_SYMBOL_GPL(alarm_forward);

u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
{
	struct alarm_base *base = &alarm_bases[alarm->type];

	return alarm_forward(alarm, base->gettime(), interval);
}
EXPORT_SYMBOL_GPL(alarm_forward_now);
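
/*
 * Worked example (illustrative only) of the overrun accounting above: with
 * node.expires at t=10s, now at t=35s and a 10s interval, delta is 25s, the
 * division yields overrun = 2 and expires advances to 30s; since 30s is not
 * yet past "now", the +1 correction applies and the final ktime_add_safe()
 * leaves expires at 40s, so the function returns an overrun count of 3.
 */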

#ifdef CONFIG_POSIX_TIMERS

static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
{
	struct alarm_base *base;
	unsigned long flags;
	ktime_t delta;

	switch(type) {
	case ALARM_REALTIME:
		base = &alarm_bases[ALARM_REALTIME];
		type = ALARM_REALTIME_FREEZER;
		break;
	case ALARM_BOOTTIME:
		base = &alarm_bases[ALARM_BOOTTIME];
		type = ALARM_BOOTTIME_FREEZER;
		break;
	default:
		WARN_ONCE(1, "Invalid alarm type: %d\n", type);
		return;
	}

	delta = ktime_sub(absexp, base->gettime());

	spin_lock_irqsave(&freezer_delta_lock, flags);
	if (!freezer_delta || (delta < freezer_delta)) {
		freezer_delta = delta;
		freezer_expires = absexp;
		freezer_alarmtype = type;
	}
	spin_unlock_irqrestore(&freezer_delta_lock, flags);
}

/**
 * clock2alarm - helper that converts from clockid to alarmtypes
 * @clockid: clockid.
 */
static enum alarmtimer_type clock2alarm(clockid_t clockid)
{
	if (clockid == CLOCK_REALTIME_ALARM)
		return ALARM_REALTIME;
	if (clockid == CLOCK_BOOTTIME_ALARM)
		return ALARM_BOOTTIME;
	return -1;
}

/**
 * alarm_handle_timer - Callback for posix timers
 * @alarm: alarm that fired
 *
 * Posix timer callback for expired alarm timers.
 */
static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
						  ktime_t now)
{
	struct k_itimer *ptr = container_of(alarm, struct k_itimer,
					    it.alarm.alarmtimer);
	enum alarmtimer_restart result = ALARMTIMER_NORESTART;
	unsigned long flags;
	int si_private = 0;

	spin_lock_irqsave(&ptr->it_lock, flags);

	ptr->it_active = 0;
	if (ptr->it_interval)
		si_private = ++ptr->it_requeue_pending;

	if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
		/*
		 * Handle ignored signals and rearm the timer. This will go
		 * away once we handle ignored signals properly.
		 */
		ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
		++ptr->it_requeue_pending;
		ptr->it_active = 1;
		result = ALARMTIMER_RESTART;
	}
	spin_unlock_irqrestore(&ptr->it_lock, flags);

	return result;
}

/**
 * alarm_timer_rearm - Posix timer callback for rearming timer
 * @timr:	Pointer to the posixtimer data struct
 */
static void alarm_timer_rearm(struct k_itimer *timr)
{
	struct alarm *alarm = &timr->it.alarm.alarmtimer;

	timr->it_overrun += alarm_forward_now(alarm, timr->it_interval);
	alarm_start(alarm, alarm->node.expires);
}

/**
 * alarm_timer_forward - Posix timer callback for forwarding timer
 * @timr:	Pointer to the posixtimer data struct
 * @now:	Current time to forward the timer against
 */
static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
{
	struct alarm *alarm = &timr->it.alarm.alarmtimer;

	return alarm_forward(alarm, timr->it_interval, now);
}

/**
 * alarm_timer_remaining - Posix timer callback to retrieve remaining time
 * @timr:	Pointer to the posixtimer data struct
 * @now:	Current time to calculate against
 */
static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct alarm *alarm = &timr->it.alarm.alarmtimer;

	return ktime_sub(alarm->node.expires, now);
}

/**
 * alarm_timer_try_to_cancel - Posix timer callback to cancel a timer
 * @timr:	Pointer to the posixtimer data struct
 */
static int alarm_timer_try_to_cancel(struct k_itimer *timr)
{
	return alarm_try_to_cancel(&timr->it.alarm.alarmtimer);
}

/**
 * alarm_timer_wait_running - Posix timer callback to wait for a timer
 * @timr:	Pointer to the posixtimer data struct
 *
 * Called from the core code when timer cancel detected that the callback
 * is running. @timr is unlocked and rcu read lock is held to prevent it
 * from being freed.
 */
static void alarm_timer_wait_running(struct k_itimer *timr)
{
	hrtimer_cancel_wait_running(&timr->it.alarm.alarmtimer.timer);
}

/**
 * alarm_timer_arm - Posix timer callback to arm a timer
 * @timr:	Pointer to the posixtimer data struct
 * @expires:	The new expiry time
 * @absolute:	Expiry value is absolute time
 * @sigev_none:	Posix timer does not deliver signals
 */
static void alarm_timer_arm(struct k_itimer *timr, ktime_t expires,
			    bool absolute, bool sigev_none)
{
	struct alarm *alarm = &timr->it.alarm.alarmtimer;
	struct alarm_base *base = &alarm_bases[alarm->type];

	if (!absolute)
		expires = ktime_add_safe(expires, base->gettime());
	if (sigev_none)
		alarm->node.expires = expires;
	else
		alarm_start(&timr->it.alarm.alarmtimer, expires);
}

/**
 * alarm_clock_getres - posix getres interface
 * @which_clock: clockid
 * @tp: timespec to fill
 *
 * Returns the granularity of underlying alarm base clock
 */
static int alarm_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	if (!alarmtimer_get_rtcdev())
		return -EINVAL;

	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}

/**
 * alarm_clock_get - posix clock_get interface
 * @which_clock: clockid
 * @tp: timespec to fill.
 *
 * Provides the underlying alarm base time.
 */
static int alarm_clock_get(clockid_t which_clock, struct timespec64 *tp)
{
	struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];

	if (!alarmtimer_get_rtcdev())
		return -EINVAL;

	*tp = ktime_to_timespec64(base->gettime());
	return 0;
}

/**
 * alarm_timer_create - posix timer_create interface
 * @new_timer: k_itimer pointer to manage
 *
 * Initializes the k_itimer structure.
 */
static int alarm_timer_create(struct k_itimer *new_timer)
{
	enum alarmtimer_type type;

	if (!alarmtimer_get_rtcdev())
		return -EOPNOTSUPP;

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;

	type = clock2alarm(new_timer->it_clock);
	alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
	return 0;
}

/**
 * alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep
 * @alarm: ptr to alarm that fired
 *
 * Wakes up the task that set the alarmtimer
 */
static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
							ktime_t now)
{
	struct task_struct *task = (struct task_struct *)alarm->data;

	alarm->data = NULL;
	if (task)
		wake_up_process(task);
	return ALARMTIMER_NORESTART;
}

/**
 * alarmtimer_do_nsleep - Internal alarmtimer nsleep implementation
 * @alarm: ptr to alarmtimer
 * @absexp: absolute expiration time
 *
 * Sets the alarm timer and sleeps until it is fired or interrupted.
 */
static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
				enum alarmtimer_type type)
{
	struct restart_block *restart;
	alarm->data = (void *)current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		alarm_start(alarm, absexp);
		if (likely(alarm->data))
			schedule();

		alarm_cancel(alarm);
	} while (alarm->data && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	destroy_hrtimer_on_stack(&alarm->timer);

	if (!alarm->data)
		return 0;

	if (freezing(current))
		alarmtimer_freezerset(absexp, type);
	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		struct timespec64 rmt;
		ktime_t rem;

		rem = ktime_sub(absexp, alarm_bases[type].gettime());

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}

static void
alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
		    enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
{
	hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
			      HRTIMER_MODE_ABS);
	__alarm_init(alarm, type, function);
}

/**
 * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
 * @restart: ptr to restart block
 *
 * Handles restarted clock_nanosleep calls
 */
static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
{
	enum alarmtimer_type type = restart->nanosleep.clockid;
	ktime_t exp = restart->nanosleep.expires;
	struct alarm alarm;

	alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);

	return alarmtimer_do_nsleep(&alarm, exp, type);
}

/**
 * alarm_timer_nsleep - alarmtimer nanosleep
 * @which_clock: clockid
 * @flags: determines abstime or relative
 * @tsreq: requested sleep time (abs or rel)
 *
 * Handles clock_nanosleep calls against _ALARM clockids
 */
static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *tsreq)
{
	enum alarmtimer_type type = clock2alarm(which_clock);
	struct restart_block *restart = &current->restart_block;
	struct alarm alarm;
	ktime_t exp;
	int ret = 0;

	if (!alarmtimer_get_rtcdev())
		return -EOPNOTSUPP;

	if (flags & ~TIMER_ABSTIME)
		return -EINVAL;

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;

	alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);

	exp = timespec64_to_ktime(*tsreq);
	/* Convert (if necessary) to absolute time */
	if (flags != TIMER_ABSTIME) {
		ktime_t now = alarm_bases[type].gettime();

		exp = ktime_add_safe(now, exp);
	}

	ret = alarmtimer_do_nsleep(&alarm, exp, type);
	if (ret != -ERESTART_RESTARTBLOCK)
		return ret;

	/* abs timers don't set remaining time or restart */
	if (flags == TIMER_ABSTIME)
		return -ERESTARTNOHAND;

	restart->fn = alarm_timer_nsleep_restart;
	restart->nanosleep.clockid = type;
	restart->nanosleep.expires = exp;
	return ret;
}

const struct k_clock alarm_clock = {
	.clock_getres		= alarm_clock_getres,
	.clock_get		= alarm_clock_get,
	.timer_create		= alarm_timer_create,
	.timer_set		= common_timer_set,
	.timer_del		= common_timer_del,
	.timer_get		= common_timer_get,
	.timer_arm		= alarm_timer_arm,
	.timer_rearm		= alarm_timer_rearm,
	.timer_forward		= alarm_timer_forward,
	.timer_remaining	= alarm_timer_remaining,
	.timer_try_to_cancel	= alarm_timer_try_to_cancel,
	.timer_wait_running	= alarm_timer_wait_running,
	.nsleep			= alarm_timer_nsleep,
};
#endif /* CONFIG_POSIX_TIMERS */
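
/*
 * Illustrative sketch of how the _ALARM clockids served by alarm_clock are
 * typically consumed from userspace. The helper name arm_wakeup_in() is
 * hypothetical and error handling is omitted; CAP_WAKE_ALARM is required,
 * matching the capability checks above.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	static void arm_wakeup_in(time_t secs)
 *	{
 *		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *					.sigev_signo = SIGALRM };
 *		struct itimerspec its = { .it_value.tv_sec = secs };
 *		timer_t tid;
 *
 *		timer_create(CLOCK_BOOTTIME_ALARM, &sev, &tid);
 *		timer_settime(tid, 0, &its, NULL);	// fires even across suspend
 *	}
 */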


/* Suspend hook structures */
static const struct dev_pm_ops alarmtimer_pm_ops = {
	.suspend = alarmtimer_suspend,
	.resume = alarmtimer_resume,
};

static struct platform_driver alarmtimer_driver = {
	.driver = {
		.name = "alarmtimer",
		.pm = &alarmtimer_pm_ops,
	}
};

/**
 * alarmtimer_init - Initialize alarm timer code
 *
 * This function initializes the alarm bases and registers
 * the posix clock ids.
 */
static int __init alarmtimer_init(void)
{
	struct platform_device *pdev;
	int error = 0;
	int i;

	alarmtimer_rtc_timer_init();

	/* Initialize alarm bases */
	alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
	alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
	alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
	alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
	for (i = 0; i < ALARM_NUMTYPE; i++) {
		timerqueue_init_head(&alarm_bases[i].timerqueue);
		spin_lock_init(&alarm_bases[i].lock);
	}

	error = alarmtimer_rtc_interface_setup();
	if (error)
		return error;

	error = platform_driver_register(&alarmtimer_driver);
	if (error)
		goto out_if;

	pdev = platform_device_register_simple("alarmtimer", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		error = PTR_ERR(pdev);
		goto out_drv;
	}
	return 0;

out_drv:
	platform_driver_unregister(&alarmtimer_driver);
out_if:
	alarmtimer_rtc_interface_remove();
	return error;
}
device_initcall(alarmtimer_init);