
sysfs: only allow one scheduled removal callback per kobj

The only way for a sysfs attribute to remove itself (without
deadlock) is to use the sysfs_schedule_callback() interface.
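As an illustration, here is a minimal sketch of the intended usage; the attribute, callback, and function names are hypothetical, and only sysfs_schedule_callback() and its signature are taken from the patch below:

	/*
	 * Hypothetical driver sketch: a "remove" attribute must not call
	 * device_unregister() from its own ->store method (the attribute
	 * would end up waiting for itself to go away), so it defers the
	 * teardown through sysfs_schedule_callback().
	 */
	static void example_remove_callback(void *data)
	{
		struct device *dev = data;

		/* Runs later from a workqueue, outside the sysfs locks. */
		device_unregister(dev);
	}

	static ssize_t example_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
	{
		int rc;

		rc = sysfs_schedule_callback(&dev->kobj, example_remove_callback,
					     dev, THIS_MODULE);
		return rc ? rc : count;
	}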

Vegard Nossum discovered that a poorly written sysfs ->store
callback can schedule remove callbacks on the same device
over and over, e.g.

	$ while true ; do echo 1 > /sys/devices/.../remove ; done

If the 'remove' attribute uses the sysfs_schedule_callback API
and does not protect itself from concurrent accesses, its
callback handler will be queued multiple times, and the later
invocations will operate on an already-freed kobject, a
use-after-free that leads to crashes and memory corruption.

Instead of requiring all callers of sysfs_schedule_callback to
implement their own synchronization, provide the protection in
the infrastructure.

Now, sysfs_schedule_callback allows only one scheduled callback
per kobject; subsequent calls with the same kobject return
-EAGAIN until the pending callback has completed.
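
When the driver propagates that error, the new behavior is visible
from user space. A hedged demonstration (the sysfs path is a
placeholder, and the attribute is assumed to return the result of
sysfs_schedule_callback(), as the ccwgroup hunk below does):

	/* Two back-to-back writes to a deferred "remove" attribute: the
	 * second one, issued while the first callback is still queued,
	 * should now fail with EAGAIN instead of queueing a duplicate.
	 */
	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/devices/.../remove"; /* placeholder */
		int i;

		for (i = 0; i < 2; i++) {
			int fd = open(path, O_WRONLY);

			if (fd < 0)
				return 1;
			if (write(fd, "1", 1) < 0)
				printf("write %d failed: %s\n", i, strerror(errno));
			close(fd);
		}
		return 0;
	}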

This is a short term fix. The long term fix is to allow sysfs
attributes to remove themselves directly, without any of this
callback hokey pokey.

[cornelia.huck@de.ibm.com: s390 ccwgroup bits]

Reported-by: vegard.nossum@gmail.com
Signed-off-by: Alex Chiang <achiang@hp.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Alex Chiang, 2009-03-13 12:07:36 -06:00 (committed by Greg Kroah-Hartman)
parent ffa6a7054d
commit 669420644c
2 changed files with 26 additions and 5 deletions

drivers/s390/cio/ccwgroup.c

@@ -104,8 +104,9 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const
 	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
 out:
 	if (rc) {
-		/* Release onoff "lock" when ungrouping failed. */
-		atomic_set(&gdev->onoff, 0);
+		if (rc != -EAGAIN)
+			/* Release onoff "lock" when ungrouping failed. */
+			atomic_set(&gdev->onoff, 0);
 		return rc;
 	}
 	return count;
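
To see why the -EAGAIN check above matters, here is a hedged sketch
of the surrounding store handler; only the error-handling lines from
the hunk are verbatim, the rest is an assumed simplification of the
ccwgroup driver:

	/* Assumed simplification of ccwgroup_ungroup_store(): the atomic
	 * onoff flag serializes online/offline/ungroup processing. On
	 * -EAGAIN a removal callback is already queued for this kobject
	 * and will tear the device down, so the flag must stay set to
	 * keep other operations from racing with that teardown.
	 */
	static ssize_t ungroup_store_sketch(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
	{
		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
		int rc;

		/* Take the onoff "lock"; only one request at a time. */
		if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
			return -EAGAIN;

		rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
		if (rc) {
			if (rc != -EAGAIN)
				/* Release onoff "lock" when ungrouping failed. */
				atomic_set(&gdev->onoff, 0);
			return rc;
		}
		return count;
	}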

fs/sysfs/file.c

@@ -659,13 +659,16 @@ void sysfs_remove_file_from_group(struct kobject *kobj,
 EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
 
 struct sysfs_schedule_callback_struct {
-	struct kobject		*kobj;
+	struct list_head	workq_list;
+	struct kobject		*kobj;
 	void			(*func)(void *);
 	void			*data;
 	struct module		*owner;
 	struct work_struct	work;
 };
 
+static DEFINE_MUTEX(sysfs_workq_mutex);
+static LIST_HEAD(sysfs_workq);
 static void sysfs_schedule_callback_work(struct work_struct *work)
 {
 	struct sysfs_schedule_callback_struct *ss = container_of(work,
@@ -674,6 +677,9 @@ static void sysfs_schedule_callback_work(struct work_struct *work)
 	(ss->func)(ss->data);
 	kobject_put(ss->kobj);
 	module_put(ss->owner);
+	mutex_lock(&sysfs_workq_mutex);
+	list_del(&ss->workq_list);
+	mutex_unlock(&sysfs_workq_mutex);
 	kfree(ss);
 }
 
@@ -695,15 +701,25 @@ static void sysfs_schedule_callback_work(struct work_struct *work)
  * until @func returns.
  *
  * Returns 0 if the request was submitted, -ENOMEM if storage could not
- * be allocated, -ENODEV if a reference to @owner isn't available.
+ * be allocated, -ENODEV if a reference to @owner isn't available,
+ * -EAGAIN if a callback has already been scheduled for @kobj.
  */
 int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
 		void *data, struct module *owner)
 {
-	struct sysfs_schedule_callback_struct *ss;
+	struct sysfs_schedule_callback_struct *ss, *tmp;
 
 	if (!try_module_get(owner))
 		return -ENODEV;
+
+	mutex_lock(&sysfs_workq_mutex);
+	list_for_each_entry_safe(ss, tmp, &sysfs_workq, workq_list)
+		if (ss->kobj == kobj) {
+			mutex_unlock(&sysfs_workq_mutex);
+			return -EAGAIN;
+		}
+	mutex_unlock(&sysfs_workq_mutex);
+
 	ss = kmalloc(sizeof(*ss), GFP_KERNEL);
 	if (!ss) {
 		module_put(owner);
@@ -715,6 +731,10 @@ int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
 	ss->data = data;
 	ss->owner = owner;
 	INIT_WORK(&ss->work, sysfs_schedule_callback_work);
+	INIT_LIST_HEAD(&ss->workq_list);
+	mutex_lock(&sysfs_workq_mutex);
+	list_add_tail(&ss->workq_list, &sysfs_workq);
+	mutex_unlock(&sysfs_workq_mutex);
 	schedule_work(&ss->work);
 	return 0;
 }