/*
* linux/ipc/util.c
* Copyright (C) 1992 Krishna Balasubramanian
*
* Sep 1997 - Call suser() last after "normal" permission checks so we
* get BSD style process accounting right.
* Occurs in several places in the IPC code.
* Chris Evans, <chris@ferret.lmh.ox.ac.uk>
* Nov 1999 - ipc helper functions, unified SMP locking
* Manfred Spraul <manfred@colorfullife.com>
* Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
* Mingming Cao <cmm@us.ibm.com>
* Mar 2006 - support for audit of ipc object properties
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
* Jun 2006 - namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*/
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>
#include <linux/rwsem.h>
#include <linux/memory.h>
#include <linux/ipc_namespace.h>
#include <asm/unistd.h>
#include "util.h"
struct ipc_proc_iface {
const char *path;
const char *header;
int ids;
int (*show)(struct seq_file *, void *);
};
static void ipc_memory_notifier(struct work_struct *work)
{
ipcns_notify(IPCNS_MEMCHANGED);
}
static int ipc_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
switch (action) {
case MEM_ONLINE: /* memory successfully brought online */
case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
/*
* This is done by invoking the ipcns notifier chain with the
* IPC_MEMCHANGED event.
* In order not to keep the lock on the hotplug memory chain
* for too long, queue a work item that will, when woken up,
* activate the ipcns notification chain.
* No need to keep several ipc work items on the queue.
*/
if (!work_pending(&ipc_memory_wq))
schedule_work(&ipc_memory_wq);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
case MEM_CANCEL_OFFLINE:
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block ipc_memory_nb = {
.notifier_call = ipc_memory_callback,
.priority = IPC_CALLBACK_PRI,
};
/**
* ipc_init - initialise IPC subsystem
*
* The various system5 IPC resources (semaphores, messages and shared
* memory) are initialised.
* A callback routine is registered into the memory hotplug notifier
* chain: since msgmni scales to lowmem this callback routine will be
* called upon successful memory add / remove to recompute msgmni.
*/
static int __init ipc_init(void)
{
sem_init();
msg_init();
shm_init();
register_hotmemory_notifier(&ipc_memory_nb);
register_ipcns_notifier(&init_ipc_ns);
return 0;
}
__initcall(ipc_init);
/**
* ipc_init_ids - initialise IPC identifiers
* @ids: Identifier set
*
* Set up the sequence range to use for the ipc identifier range (limited
* below IPCMNI) then initialise the ids idr.
*/
void ipc_init_ids(struct ipc_ids *ids)
{
init_rwsem(&ids->rw_mutex);
ids->in_use = 0;
ids->seq = 0;
ids->next_id = -1;
{
int seq_limit = INT_MAX/SEQ_MULTIPLIER;
if (seq_limit > USHRT_MAX)
ids->seq_max = USHRT_MAX;
else
ids->seq_max = seq_limit;
}
idr_init(&ids->ipcs_idr);
}
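/*
 * Worked example (illustrative, assuming the usual SEQ_MULTIPLIER of
 * IPCMNI == 32768): seq_limit = INT_MAX / 32768 = 65535, which does not
 * exceed USHRT_MAX, so ids->seq_max becomes 65535 and the per-namespace
 * sequence counter cycles through 65536 values before repeating.
 */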
#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;
/**
* ipc_init_proc_interface - Create a proc interface for sysvipc types using a seq_file interface.
* @path: Path in procfs
* @header: Banner to be printed at the beginning of the file.
* @ids: ipc id table to iterate.
* @show: show routine.
*/
void __init ipc_init_proc_interface(const char *path, const char *header,
int ids, int (*show)(struct seq_file *, void *))
{
struct proc_dir_entry *pde;
struct ipc_proc_iface *iface;
iface = kmalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
return;
iface->path = path;
iface->header = header;
iface->ids = ids;
iface->show = show;
pde = proc_create_data(path,
S_IRUGO, /* world readable */
NULL, /* parent dir */
&sysvipc_proc_fops,
iface);
if (!pde) {
kfree(iface);
}
}
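/*
 * Typical usage (a sketch modelled on the callers in msg.c, sem.c and
 * shm.c; the header string is abbreviated here):
 *
 *	ipc_init_proc_interface("sysvipc/msg",
 *				"       key      msqid ...\n",
 *				IPC_MSG_IDS, sysvipc_msg_proc_show);
 *
 * This registers /proc/sysvipc/msg; each row of the file is produced by
 * the given show routine for one live id in the selected ipc_ids table.
 */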
#endif
/**
* ipc_findkey - find a key in an ipc identifier set
* @ids: Identifier set
* @key: The key to find
*
* Requires ipc_ids.rw_mutex locked.
* Returns the LOCKED pointer to the ipc structure if found or NULL
* if not.
* If key is found ipc points to the owning ipc structure
*/
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
struct kern_ipc_perm *ipc;
int next_id;
int total;
for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
ipc = idr_find(&ids->ipcs_idr, next_id);
if (ipc == NULL)
continue;
if (ipc->key != key) {
total++;
continue;
}
ipc_lock_by_ptr(ipc);
return ipc;
}
return NULL;
}
/**
* ipc_get_maxid - get the last assigned id
* @ids: IPC identifier set
*
* Called with ipc_ids.rw_mutex held.
*/
int ipc_get_maxid(struct ipc_ids *ids)
{
struct kern_ipc_perm *ipc;
int max_id = -1;
int total, id;
if (ids->in_use == 0)
return -1;
if (ids->in_use == IPCMNI)
return IPCMNI - 1;
/* Look for the last assigned id */
total = 0;
for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
ipc = idr_find(&ids->ipcs_idr, id);
if (ipc != NULL) {
max_id = id;
total++;
}
}
return max_id;
}
/**
* ipc_addid - add an IPC identifier
* @ids: IPC identifier set
* @new: new IPC permission set
* @size: limit for the number of used ids
*
* Add an entry 'new' to the IPC ids idr. The permissions object is
* initialised and the first free entry is set up and the id assigned
* is returned. The 'new' entry is returned in a locked state on success.
* On failure the entry is not locked and a negative err-code is returned.
*
* Called with ipc_ids.rw_mutex held as a writer.
*/
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = 0;
rcu_read_lock();
spin_lock(&new->lock);
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > ids->seq_max)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
}
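/*
 * Worked example (illustrative, assuming the usual SEQ_MULTIPLIER of
 * 32768): if the idr hands out slot 5 while the sequence counter is 3,
 * the user-visible identifier is ipc_buildid(5, 3) = 3 * 32768 + 5 =
 * 98309. ipcid_to_idx() and ipcid_to_seqx() later recover 5 and 3 from
 * it, which is how reuse of a slot by a new object can be detected.
 */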
/**
* ipcget_new - create a new ipc object
* @ns: namespace
* @ids: IPC identifier set
* @ops: the actual creation routine to call
* @params: its parameters
*
* This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is IPC_PRIVATE.
*/
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
int err;
down_write(&ids->rw_mutex);
err = ops->getnew(ns, params);
up_write(&ids->rw_mutex);
return err;
}
/**
* ipc_check_perms - check security and permissions for an IPC
* @ns: IPC namespace
* @ipcp: ipc permission set
* @ops: the actual security routine to call
* @params: its parameters
*
* This routine is called by sys_msgget(), sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE and that key already exists in the
* ids IDR.
*
* On success, the IPC id is returned.
*
* It is called with ipc_ids.rw_mutex and ipcp->lock held.
*/
static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
struct ipc_ops *ops,
struct ipc_params *params)
{
int err;
if (ipcperms(ns, ipcp, params->flg))
err = -EACCES;
else {
err = ops->associate(ipcp, params->flg);
if (!err)
err = ipcp->id;
}
return err;
}
/**
* ipcget_public - get an ipc object or create a new one
* @ns: namespace
* @ids: IPC identifier set
* @ops: the actual creation routine to call
* @params: its parameters
*
* This routine is called by sys_msgget, sys_semget() and sys_shmget()
* when the key is not IPC_PRIVATE.
* It adds a new entry if the key is not found and does some permission
* / security checks if the key is found.
*
* On success, the ipc id is returned.
*/
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
struct kern_ipc_perm *ipcp;
int flg = params->flg;
int err;
/*
* Take the lock as a writer since we are potentially going to add
* a new entry + read locks are not "upgradable"
*/
down_write(&ids->rw_mutex);
ipcp = ipc_findkey(ids, params->key);
if (ipcp == NULL) {
/* key not used */
if (!(flg & IPC_CREAT))
err = -ENOENT;
else
err = ops->getnew(ns, params);
} else {
/* ipc object has been locked by ipc_findkey() */
if (flg & IPC_CREAT && flg & IPC_EXCL)
err = -EEXIST;
else {
err = 0;
if (ops->more_checks)
err = ops->more_checks(ipcp, params);
if (!err)
/*
* ipc_check_perms returns the IPC id on
* success
*/
err = ipc_check_perms(ns, ipcp, ops, params);
}
ipc_unlock(ipcp);
}
up_write(&ids->rw_mutex);
return err;
}
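/*
 * Summary of the flag handling above:
 *
 *	key not found, no IPC_CREAT		-> -ENOENT
 *	key not found, IPC_CREAT		-> ops->getnew() creates it
 *	key found, IPC_CREAT | IPC_EXCL		-> -EEXIST
 *	key found, otherwise			-> more_checks() and
 *						   ipc_check_perms(), which
 *						   returns the id on success
 */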
/**
* ipc_rmid - remove an IPC identifier
* @ids: IPC identifier set
* @ipcp: ipc perm structure containing the identifier to remove
*
* ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
* before this function is called, and remain locked on the exit.
*/
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
int lid = ipcid_to_idx(ipcp->id);
idr_remove(&ids->ipcs_idr, lid);
ids->in_use--;
ipcp->deleted = 1;
return;
}
/**
* ipc_alloc - allocate ipc space
* @size: size desired
*
* Allocate memory from the appropriate pools and return a pointer to it.
* NULL is returned if the allocation fails
*/
void *ipc_alloc(int size)
{
void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
out = kmalloc(size, GFP_KERNEL);
return out;
}
/**
* ipc_free - free ipc space
* @ptr: pointer returned by ipc_alloc
* @size: size of block
*
* Free a block created with ipc_alloc(). The caller must know the size
* used in the allocation call.
*/
void ipc_free(void* ptr, int size)
{
if(size > PAGE_SIZE)
vfree(ptr);
else
kfree(ptr);
}
struct ipc_rcu {
struct rcu_head rcu;
atomic_t refcount;
/* "void *" makes sure alignment of following data is sane. */
void *data[0];
};
/**
* ipc_rcu_alloc - allocate ipc and rcu space
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
* Returns the pointer to the object or NULL upon failure.
*/
void *ipc_rcu_alloc(int size)
{
/*
* We prepend the allocation with the rcu struct
*/
struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
if (unlikely(!out))
return NULL;
atomic_set(&out->refcount, 1);
return out->data;
}
int ipc_rcu_getref(void *ptr)
{
return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
}
/**
* ipc_schedule_free - free ipc + rcu space
* @head: RCU callback structure for queued work
*/
static void ipc_schedule_free(struct rcu_head *head)
{
vfree(container_of(head, struct ipc_rcu, rcu));
}
void ipc_rcu_putref(void *ptr)
{
struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
if (!atomic_dec_and_test(&p->refcount))
return;
if (is_vmalloc_addr(ptr)) {
call_rcu(&p->rcu, ipc_schedule_free);
} else {
kfree_rcu(p, rcu);
}
}
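/*
 * Sketch of the intended getref/putref pairing (modelled on the semctl
 * code in sem.c; "sma" stands for some sem_array being operated on).
 * A caller that must sleep while the object is unlocked pins the memory
 * first so that a concurrent IPC_RMID cannot free it underneath:
 *
 *	ipc_rcu_getref(sma);
 *	ipc_unlock(&sma->sem_perm);
 *	...sleeping allocation...
 *	ipc_lock_by_ptr(&sma->sem_perm);
 *	ipc_rcu_putref(sma);
 *
 * After relocking, the caller still has to check sem_perm.deleted
 * before touching the object.
 */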
/**
* ipcperms - check IPC permissions
* @ns: IPC namespace
* @ipcp: IPC permission set
* @flag: desired permission set.
*
* Check user, group, other permissions for access
* to ipc resources. Return 0 if allowed.
*
* @flag will most probably be 0 or S_...UGO from <linux/stat.h>
*/
int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
kuid_t euid = current_euid();
int requested_mode, granted_mode;
audit_ipc_obj(ipcp);
requested_mode = (flag >> 6) | (flag >> 3) | flag;
granted_mode = ipcp->mode;
if (uid_eq(euid, ipcp->cuid) ||
uid_eq(euid, ipcp->uid))
granted_mode >>= 6;
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
!ns_capable(ns->user_ns, CAP_IPC_OWNER))
return -1;
return security_ipc_permission(ipcp, flag);
}
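/*
 * Worked example (illustrative): for flag == 0666 (S_IRUGO | S_IWUGO),
 *
 *	requested_mode = (0666 >> 6) | (0666 >> 3) | 0666
 *
 * whose low three bits are 06 (read + write). With ipcp->mode == 0644,
 * the owner sees granted_mode 06 after the >>= 6 shift and is allowed,
 * while a plain group member sees 04 after >>= 3 and is refused write
 * access unless it has CAP_IPC_OWNER in the owning user namespace.
 */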
/*
* Functions to convert between the kern_ipc_perm structure and the
* old/new ipc_perm structures
*/
/**
* kernel_to_ipc64_perm - convert kernel ipc permissions to user
* @in: kernel permissions
* @out: new style IPC permissions
*
* Turn the kernel object @in into a set of permissions descriptions
* for returning to userspace (@out).
*/
void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid);
out->gid = from_kgid_munged(current_user_ns(), in->gid);
out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
/**
* ipc64_perm_to_ipc_perm - convert new ipc permissions to old
* @in: new style IPC permissions
* @out: old style IPC permissions
*
* Turn the new style permissions object @in into a compatibility
* object and store it into the @out pointer.
*/
void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
out->key = in->key;
SET_UID(out->uid, in->uid);
SET_GID(out->gid, in->gid);
SET_UID(out->cuid, in->cuid);
SET_GID(out->cgid, in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
/**
* ipc_obtain_object
* @ids: ipc identifier set
* @id: ipc id to look for
*
* Look for an id in the ipc ids idr and return associated ipc object.
*
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
int lid = ipcid_to_idx(id);
out = idr_find(&ids->ipcs_idr, lid);
if (!out)
return ERR_PTR(-EINVAL);
return out;
}
/**
* ipc_lock - Lock an ipc structure without rw_mutex held
* @ids: IPC identifier set
* @id: ipc id to look for
*
* Look for an id in the ipc ids idr and lock the associated ipc object.
*
* The ipc object is locked on successful exit.
*/
struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
rcu_read_lock();
out = ipc_obtain_object(ids, id);
if (IS_ERR(out))
goto err1;
spin_lock(&out->lock);
/* ipc_rmid() may have already freed the ID while ipc_lock
* was spinning: here verify that the structure is still valid
*/
if (!out->deleted)
return out;
spin_unlock(&out->lock);
out = ERR_PTR(-EINVAL);
err1:
rcu_read_unlock();
return out;
}
/**
* ipc_obtain_object_check
* @ids: ipc identifier set
* @id: ipc id to look for
*
* Similar to ipc_obtain_object() but also verifies that the sequence
* number encoded in @id still matches the object (see ipc_checkid()).
*
* Call inside the RCU critical section.
* The ipc object is *not* locked on exit.
*/
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
if (IS_ERR(out))
goto out;
if (ipc_checkid(out, id))
return ERR_PTR(-EIDRM);
out:
return out;
}
struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
out = ipc_lock(ids, id);
if (IS_ERR(out))
return out;
if (ipc_checkid(out, id)) {
ipc_unlock(out);
return ERR_PTR(-EIDRM);
}
return out;
}
/**
* ipcget - Common sys_*get() code
* @ns : namespace
* @ids : IPC identifier set
* @ops : operations to be called on ipc object creation, permission checks
* and further checks
* @params : the parameters needed by the previous operations.
*
* Common routine called by sys_msgget(), sys_semget() and sys_shmget().
*/
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
if (params->key == IPC_PRIVATE)
return ipcget_new(ns, ids, ops, params);
else
return ipcget_public(ns, ids, ops, params);
}
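/*
 * Usage sketch (modelled on sys_semget(); the other sys_*get() callers
 * are analogous):
 *
 *	struct ipc_ops sem_ops;
 *	struct ipc_params sem_params;
 *
 *	sem_ops.getnew = newary;
 *	sem_ops.associate = sem_security;
 *	sem_ops.more_checks = sem_more_checks;
 *	sem_params.key = key;
 *	sem_params.flg = semflg;
 *	sem_params.u.nsems = nsems;
 *	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 */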
/**
* ipc_update_perm - update the permissions of an IPC.
* @in: the permission given as input.
* @out: the permission of the ipc to set.
*/
int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
kuid_t uid = make_kuid(current_user_ns(), in->uid);
kgid_t gid = make_kgid(current_user_ns(), in->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
out->uid = uid;
out->gid = gid;
out->mode = (out->mode & ~S_IRWXUGO)
| (in->mode & S_IRWXUGO);
return 0;
}
/**
* ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
* @ns: the ipc namespace
* @ids: the table of ids where to look for the ipc
* @id: the id of the ipc to retrieve
* @cmd: the cmd to check
* @perm: the permission to set
* @extra_perm: one extra permission parameter used by msq
*
* This function does some common audit and permissions check for some IPC_XXX
* cmd and is called from semctl_down, shmctl_down and msgctl_down.
* It must be called without any lock held and
* - retrieves the ipc with the given id in the given table.
* - performs some audit and permission check, depending on the given cmd
* - returns the ipc with both ipc and rw_mutex locks held in case of success
* or an err-code without any lock held otherwise.
*/
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
struct kern_ipc_perm *ipcp;
ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
if (IS_ERR(ipcp))
goto out;
spin_lock(&ipcp->lock);
out:
return ipcp;
}
struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
kuid_t euid;
int err = -EPERM;
struct kern_ipc_perm *ipcp;
down_write(&ids->rw_mutex);
rcu_read_lock();
ipcp = ipc_obtain_object_check(ids, id);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_up;
}
audit_ipc_obj(ipcp);
if (cmd == IPC_SET)
audit_ipc_set_perm(extra_perm, perm->uid,
perm->gid, perm->mode);
euid = current_euid();
if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
ns_capable(ns->user_ns, CAP_SYS_ADMIN))
return ipcp;
out_up:
/*
* Unsuccessful lookup, unlock and return
* the corresponding error.
*/
rcu_read_unlock();
up_write(&ids->rw_mutex);
return ERR_PTR(err);
}
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/**
* ipc_parse_version - IPC call version
* @cmd: pointer to command
*
* Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
* The @cmd value is turned from an encoding command and version into
* just the command code.
*/
int ipc_parse_version (int *cmd)
{
if (*cmd & IPC_64) {
*cmd ^= IPC_64;
return IPC_64;
} else {
return IPC_OLD;
}
}
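/*
 * Illustrative caller behaviour: for a semctl(id, 0, IPC_STAT | IPC_64)
 * style request, ipc_parse_version(&cmd) strips the IPC_64 bit from cmd
 * (leaving plain IPC_STAT) and returns IPC_64, telling the ctl code to
 * copy the new-style ipc64_perm layout back to user space.
 */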
#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */
#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
struct ipc_namespace *ns;
struct ipc_proc_iface *iface;
};
/*
* This routine locks the first ipc structure found at or after position pos.
*/
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
loff_t *new_pos)
{
struct kern_ipc_perm *ipc;
int total, id;
total = 0;
for (id = 0; id < pos && total < ids->in_use; id++) {
ipc = idr_find(&ids->ipcs_idr, id);
if (ipc != NULL)
total++;
}
if (total >= ids->in_use)
return NULL;
for ( ; pos < IPCMNI; pos++) {
ipc = idr_find(&ids->ipcs_idr, pos);
if (ipc != NULL) {
*new_pos = pos + 1;
ipc_lock_by_ptr(ipc);
return ipc;
}
}
/* Out of range - return NULL to terminate iteration */
return NULL;
}
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct kern_ipc_perm *ipc = it;
/* If we had an ipc id locked before, unlock it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
}
/*
* File positions: pos 0 -> header, pos n -> ipc id = n - 1.
* SeqFile iterator: the iterator value is a locked ipc pointer or SEQ_START_TOKEN.
*/
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
ids = &iter->ns->ids[iface->ids];
/*
* Take the lock - this will be released by the corresponding
* call to stop().
*/
down_read(&ids->rw_mutex);
/* pos < 0 is invalid */
if (*pos < 0)
return NULL;
/* pos == 0 means header */
if (*pos == 0)
return SEQ_START_TOKEN;
/* Find the (pos-1)th ipc */
return sysvipc_find_ipc(ids, *pos - 1, pos);
}
static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
struct kern_ipc_perm *ipc = it;
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
/* If we had a locked structure, release it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
ids = &iter->ns->ids[iface->ids];
/* Release the lock we took in start() */
up_read(&ids->rw_mutex);
}
static int sysvipc_proc_show(struct seq_file *s, void *it)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
if (it == SEQ_START_TOKEN)
return seq_puts(s, iface->header);
return iface->show(s, it);
}
static const struct seq_operations sysvipc_proc_seqops = {
.start = sysvipc_proc_start,
.stop = sysvipc_proc_stop,
.next = sysvipc_proc_next,
.show = sysvipc_proc_show,
};
static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
int ret;
struct seq_file *seq;
struct ipc_proc_iter *iter;
ret = -ENOMEM;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
goto out;
ret = seq_open(file, &sysvipc_proc_seqops);
if (ret)
goto out_kfree;
seq = file->private_data;
seq->private = iter;
iter->iface = PDE_DATA(inode);
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
return ret;
out_kfree:
kfree(iter);
goto out;
}
static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct ipc_proc_iter *iter = seq->private;
put_ipc_ns(iter->ns);
return seq_release_private(inode, file);
}
static const struct file_operations sysvipc_proc_fops = {
.open = sysvipc_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */