mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-30 16:13:54 +08:00
0d167518e0
Merge block/IO core bits from Jens Axboe: "This is a bit bigger on the core side than usual, but that is purely because we decided to hold off on parts of Tejun's submission on 3.4 to give it a bit more time to simmer. As a consequence, it's seen a long cycle in for-next. It contains: - Bug fix from Dan, wrong locking type. - Relax splice gifting restriction from Eric. - A ton of updates from Tejun, primarily for blkcg. This improves the code a lot, making the API nicer and cleaner, and also includes fixes for how we handle and tie policies and re-activate on switches. The changes also include generic bug fixes. - A simple fix from Vivek, along with a fix for doing proper delayed allocation of the blkcg stats." Fix up annoying conflict just due to different merge resolution in Documentation/feature-removal-schedule.txt * 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits) blkcg: tg_stats_alloc_lock is an irq lock vmsplice: relax alignement requirements for SPLICE_F_GIFT blkcg: use radix tree to index blkgs from blkcg blkcg: fix blkcg->css ref leak in __blkg_lookup_create() block: fix elvpriv allocation failure handling block: collapse blk_alloc_request() into get_request() blkcg: collapse blkcg_policy_ops into blkcg_policy blkcg: embed struct blkg_policy_data in policy specific data blkcg: mass rename of blkcg API blkcg: style cleanups for blk-cgroup.h blkcg: remove blkio_group->path[] blkcg: blkg_rwstat_read() was missing inline blkcg: shoot down blkgs if all policies are deactivated blkcg: drop stuff unused after per-queue policy activation update blkcg: implement per-queue policy activation blkcg: add request_queue->root_blkg blkcg: make request_queue bypassing on allocation blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing blkcg: make blkg_conf_prep() take @pol and return with queue lock held blkcg: remove static policy ID enums ...
242 lines
5.0 KiB
C
242 lines
5.0 KiB
C
/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done like this:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 */
|
|
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
|
|
|
|
/*
 * Set @task's io priority to @ioprio, allocating an io_context on demand.
 *
 * Permission model: the caller's uid or euid must match the target's uid,
 * or the caller must have CAP_SYS_NICE; the LSM gets a veto after that.
 * Called from the ioprio_set() syscall below (under rcu_read_lock(),
 * hence GFP_ATOMIC) and exported for in-kernel users.
 *
 * Returns 0 on success, -EPERM on a credential mismatch, or the LSM's
 * error code.
 */
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	/* Target's credentials are RCU-protected; pin them for the check. */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	/*
	 * GFP_ATOMIC because callers may hold rcu_read_lock().
	 * NOTE(review): if the io_context allocation fails, err is still 0
	 * here, so the syscall reports success without applying the prio —
	 * confirm this best-effort behavior is intended.
	 */
	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
|
|
|
|
/*
 * sys_ioprio_set(which, who, ioprio)
 *
 * Set io priority, analogous to setpriority(2): @which selects how @who
 * is interpreted (single task, process group, or all tasks of a user),
 * and @ioprio is the composite IOPRIO_PRIO_VALUE(class, data) to apply.
 *
 * Returns 0 on success; -EINVAL for a malformed class/data or unknown
 * @which; -EPERM if the caller may not change the target (or lacks
 * CAP_SYS_ADMIN for the RT class); -ESRCH if no matching task was found.
 */
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	/* Validate the (class, data) pair before touching any task. */
	switch (class) {
	case IOPRIO_CLASS_RT:
		/* Real-time io class is privileged. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* fall through, rt has prio field too */
	case IOPRIO_CLASS_BE:
		/* RT and BE both carry a prio level 0..IOPRIO_BE_NR-1. */
		if (data >= IOPRIO_BE_NR || data < 0)
			return -EINVAL;

		break;
	case IOPRIO_CLASS_IDLE:
		break;
	case IOPRIO_CLASS_NONE:
		/* NONE must not carry prio data. */
		if (data)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	ret = -ESRCH;
	/*
	 * RCU protects the pid/task/thread lookups and iterations below;
	 * set_task_ioprio() does its own locking for the cred check.
	 */
	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		/* who == 0 means the calling task itself. */
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = set_task_ioprio(p, ioprio);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			ret = set_task_ioprio(p, ioprio);
			/* Stop at first failure; earlier tasks keep the new prio. */
			if (ret)
				break;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		uid = make_kuid(current_user_ns(), who);
		if (!uid_valid(uid))
			break;
		if (!who)
			user = current_user();
		else
			user = find_user(uid);

		if (!user)
			break;

		do_each_thread(g, p) {
			/*
			 * NOTE(review): with who == 0 this compares against
			 * make_kuid(ns, 0) rather than current_user()->uid
			 * as ioprio_get() does — confirm that targeting
			 * uid 0 in that case is intended.
			 */
			if (!uid_eq(task_uid(p), uid))
				continue;
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				goto free_uid;
		} while_each_thread(g, p);
free_uid:
		/* find_user() took a reference; current_user() did not. */
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}
|
|
|
|
static int get_task_ioprio(struct task_struct *p)
|
|
{
|
|
int ret;
|
|
|
|
ret = security_task_getioprio(p);
|
|
if (ret)
|
|
goto out;
|
|
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
|
|
if (p->io_context)
|
|
ret = p->io_context->ioprio;
|
|
out:
|
|
return ret;
|
|
}
|
|
|
|
int ioprio_best(unsigned short aprio, unsigned short bprio)
|
|
{
|
|
unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
|
|
unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
|
|
|
|
if (aclass == IOPRIO_CLASS_NONE)
|
|
aclass = IOPRIO_CLASS_BE;
|
|
if (bclass == IOPRIO_CLASS_NONE)
|
|
bclass = IOPRIO_CLASS_BE;
|
|
|
|
if (aclass == bclass)
|
|
return min(aprio, bprio);
|
|
if (aclass > bclass)
|
|
return bprio;
|
|
else
|
|
return aprio;
|
|
}
|
|
|
|
/*
 * sys_ioprio_get(which, who)
 *
 * Query io priority, analogous to getpriority(2). When @which/@who
 * match several tasks (PGRP or USER), the single "best" priority among
 * them is returned via ioprio_best().
 *
 * Returns the composite ioprio value, -ESRCH if no matching task was
 * found (or none could be queried), or -EINVAL for an unknown @which.
 */
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	/* RCU protects the pid/task/thread lookups and iterations below. */
	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		/* who == 0 means the calling task itself. */
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = get_task_ioprio(p);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			tmpio = get_task_ioprio(p);
			/* Skip tasks the LSM refuses to let us query. */
			if (tmpio < 0)
				continue;
			/* First readable task seeds ret; then keep the best. */
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		uid = make_kuid(current_user_ns(), who);
		/*
		 * NOTE(review): unlike ioprio_set() there is no uid_valid()
		 * check here; presumably find_user() on an invalid kuid
		 * returns NULL and we fall out with -ESRCH — confirm.
		 */
		if (!who)
			user = current_user();
		else
			user = find_user(uid);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (!uid_eq(task_uid(p), user->uid))
				continue;
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_thread(g, p);

		/* find_user() took a reference; current_user() did not. */
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}
|