mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-15 16:53:54 +08:00
ab6a2d70d1
This patch removes class_device from the programming interface that the RTC framework exposes to the rest of the kernel. Now an rtc_device is passed, which is more type-safe and streamlines all the relevant code.

Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Acked-By: Alessandro Zummo <a.zummo@towertech.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
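For in-kernel callers the visible change is the call signature: the interface functions now take the rtc_device itself rather than its embedded class_device. A hedged before/after sketch of a typical call site (variable names are illustrative; the pre-patch form is inferred from the description above):

    struct rtc_time tm;
    int err;

    /* before this patch (sketch): interface calls took a struct class_device * */
    err = rtc_read_time(class_dev, &tm);

    /* after: the rtc_device itself is passed, which is type-safe */
    err = rtc_read_time(rtc, &tm);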
271 lines
5.9 KiB
C
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/rtc.h>

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->class_dev.dev, tm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);
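
/*
 * Editor's illustrative sketch, not part of the original file: a kernel
 * caller that already holds an rtc_device (for example obtained via
 * rtc_class_open() further down) reads the time through this interface.
 * Assumes <linux/kernel.h> for printk(); all names are hypothetical.
 */
static void example_show_time(struct rtc_device *rtc)
{
	struct rtc_time tm;

	if (rtc_read_time(rtc, &tm) == 0)
		printk(KERN_INFO "rtc: %04d-%02d-%02d %02d:%02d:%02d\n",
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			tm.tm_hour, tm.tm_min, tm.tm_sec);
}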

int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_time)
		err = -EINVAL;
	else
		err = rtc->ops->set_time(rtc->class_dev.dev, tm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
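
/*
 * Editor's illustrative sketch, not part of the original file: programming
 * the RTC from a broken-down time. Note the rtc_time conventions: tm_year
 * counts from 1900 and tm_mon is 0-based; rtc_set_time() validates the
 * fields with rtc_valid_tm() before calling into the driver.
 */
static int example_set_time(struct rtc_device *rtc)
{
	struct rtc_time tm = {
		.tm_year = 107,		/* 2007 */
		.tm_mon  = 0,		/* January */
		.tm_mday = 1,
		.tm_hour = 12,
	};

	return rtc_set_time(rtc, &tm);	/* 0 on success, -errno otherwise */
}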

int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_mmss)
		err = rtc->ops->set_mmss(rtc->class_dev.dev, secs);
	else if (rtc->ops->read_time && rtc->ops->set_time) {
		struct rtc_time new, old;

		err = rtc->ops->read_time(rtc->class_dev.dev, &old);
		if (err == 0) {
			rtc_time_to_tm(secs, &new);

			/*
			 * avoid writing when we're going to change the day of
			 * the month. We will retry in the next minute. This
			 * basically means that the RTC must not drift by more
			 * than 1 minute in 11 minutes.
			 */
			if (!((old.tm_hour == 23 && old.tm_min == 59) ||
			      (new.tm_hour == 23 && new.tm_min == 59)))
				err = rtc->ops->set_time(rtc->class_dev.dev,
							 &new);
		}
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);
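
/*
 * Editor's illustrative sketch, not part of the original file: the typical
 * user of rtc_set_mmss() is periodic "system clock -> RTC" synchronization
 * on an NTP-disciplined system, since only minutes and seconds are nudged.
 * Assumes get_seconds() from <linux/time.h>.
 */
static int example_sync_rtc_from_system(struct rtc_device *rtc)
{
	/* may be deferred near a day rollover; see the comment above */
	return rtc_set_mmss(rtc, get_seconds());
}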

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		err = rtc->ops->read_alarm(rtc->class_dev.dev, alarm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
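
/*
 * Editor's illustrative sketch, not part of the original file: querying the
 * currently programmed alarm. struct rtc_wkalrm carries .enabled, .pending
 * and the alarm .time (a struct rtc_time).
 */
static void example_show_alarm(struct rtc_device *rtc)
{
	struct rtc_wkalrm alrm;

	if (rtc_read_alarm(rtc, &alrm) == 0)
		printk(KERN_INFO "alarm %s at %02d:%02d:%02d\n",
			alrm.enabled ? "on" : "off",
			alrm.time.tm_hour, alrm.time.tm_min, alrm.time.tm_sec);
}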

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return -EBUSY;

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->class_dev.dev, alarm);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
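
/*
 * Editor's illustrative sketch, not part of the original file: arming an
 * alarm at a given broken-down time; which fields are honoured is up to the
 * underlying driver.
 */
static int example_arm_alarm(struct rtc_device *rtc, struct rtc_time *when)
{
	struct rtc_wkalrm alrm = {
		.enabled = 1,
		.time	 = *when,
	};

	return rtc_set_alarm(rtc, &alrm);
}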

/**
 * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: in_interrupt(), irqs blocked
 */
void rtc_update_irq(struct rtc_device *rtc,
		unsigned long num, unsigned long events)
{
	spin_lock(&rtc->irq_lock);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
	spin_unlock(&rtc->irq_lock);

	spin_lock(&rtc->irq_task_lock);
	if (rtc->irq_task)
		rtc->irq_task->func(rtc->irq_task->private_data);
	spin_unlock(&rtc->irq_task_lock);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
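
/*
 * Editor's illustrative sketch, not part of the original file: an RTC chip
 * driver's interrupt handler decodes its status register and then reports
 * the event(s) to the framework, here a single alarm interrupt. Assumes
 * <linux/interrupt.h>; dev_id is assumed to have been set to the rtc_device
 * when the irq was requested.
 */
static irqreturn_t example_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_device *rtc = dev_id;

	/* ... read and clear the chip's interrupt status here ... */

	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
	return IRQ_HANDLED;
}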

struct rtc_device *rtc_class_open(char *name)
{
	struct class_device *class_dev_tmp;
	struct rtc_device *rtc = NULL;

	down(&rtc_class->sem);
	list_for_each_entry(class_dev_tmp, &rtc_class->children, node) {
		if (strncmp(class_dev_tmp->class_id, name, BUS_ID_SIZE) == 0) {
			class_dev_tmp = class_device_get(class_dev_tmp);
			if (class_dev_tmp)
				rtc = to_rtc_device(class_dev_tmp);
			break;
		}
	}

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			class_device_put(class_dev_tmp);
			rtc = NULL;
		}
	}
	up(&rtc_class->sem);

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	class_device_put(&rtc->class_dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
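
/*
 * Editor's illustrative sketch, not part of the original file: the intended
 * pairing of rtc_class_open()/rtc_class_close(). A successful open holds a
 * reference on both the class_device and the driver module, so every open
 * must be balanced by a close. The "rtc0" name is only an example.
 */
static void example_open_use_close(void)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");

	if (rtc == NULL)
		return;		/* no such device, or its module is going away */

	printk(KERN_INFO "found RTC %s\n", rtc->class_dev.class_id);

	rtc_class_close(rtc);
}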

int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
	int retval = -EBUSY;

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
{
	int err = 0;
	unsigned long flags;

	if (rtc->ops->irq_set_state == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0)
		err = rtc->ops->irq_set_state(rtc->class_dev.dev, enabled);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
	int err = 0;
	unsigned long flags;

	if (rtc->ops->irq_set_freq == NULL)
		return -ENXIO;

	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != task)
		err = -ENXIO;
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	if (err == 0) {
		err = rtc->ops->irq_set_freq(rtc->class_dev.dev, freq);
		if (err == 0)
			rtc->irq_freq = freq;
	}
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
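
/*
 * Editor's illustrative sketch, not part of the original file: one in-kernel
 * user at a time may bind an rtc_task to the device for periodic interrupts;
 * frequency and enable/disable are then gated on that task. The callback is
 * invoked from rtc_update_irq() with irq_task_lock held.
 */
static void example_tick(void *private_data)
{
	/* count ticks, wake a waiter, etc. */
}

static struct rtc_task example_task = {
	.func		= example_tick,
	.private_data	= NULL,
};

static int example_start_periodic(struct rtc_device *rtc)
{
	int err = rtc_irq_register(rtc, &example_task);	/* -EBUSY if taken */

	if (err)
		return err;

	err = rtc_irq_set_freq(rtc, &example_task, 8);	/* 8 Hz, driver permitting */
	if (err == 0)
		err = rtc_irq_set_state(rtc, &example_task, 1);
	if (err)
		rtc_irq_unregister(rtc, &example_task);

	return err;
}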