mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-25 12:04:46 +08:00
f36f8c75ae
Add support for per-user_namespace registers of persistent per-UID kerberos
caches held within the kernel.

This allows the kerberos cache to be retained beyond the life of all of a
user's processes so that the user's cron jobs can work.

The kerberos cache is envisioned as a keyring/key tree looking something like:

    struct user_namespace
      \___ .krb_cache keyring           - The register
            \___ _krb.0 keyring         - Root's Kerberos cache
            \___ _krb.5000 keyring      - User 5000's Kerberos cache
            \___ _krb.5001 keyring      - User 5001's Kerberos cache
                  \___ tkt785 big_key   - A ccache blob
                  \___ tkt12345 big_key - Another ccache blob

Or possibly:

    struct user_namespace
      \___ .krb_cache keyring           - The register
            \___ _krb.0 keyring         - Root's Kerberos cache
            \___ _krb.5000 keyring      - User 5000's Kerberos cache
            \___ _krb.5001 keyring      - User 5001's Kerberos cache
                  \___ tkt785 keyring   - A ccache
                        \___ krbtgt/REDHAT.COM@REDHAT.COM  big_key
                        \___ http/REDHAT.COM@REDHAT.COM    user
                        \___ afs/REDHAT.COM@REDHAT.COM     user
                        \___ nfs/REDHAT.COM@REDHAT.COM     user
                  \___ tkt12345 keyring - Another ccache
                        \___ krbtgt/KERNEL.ORG@KERNEL.ORG  big_key
                        \___ http/KERNEL.ORG@KERNEL.ORG    big_key

What goes into a particular Kerberos cache is entirely up to userspace.
Kernel support is limited to giving you the Kerberos cache keyring that
you want.

The user asks for their Kerberos cache by:

    krb_cache = keyctl_get_krbcache(uid, dest_keyring);

The uid is -1 or the caller's own UID for the caller's own cache, or the
UID of some other user's cache (which requires CAP_SETUID). This permits
rpc.gssd or the like to manipulate the cache.

The cache returned is a keyring named "_krb.<uid>" that the possessor can
read, search, clear, invalidate, unlink from and add links to. Active LSMs
get a chance to rule on whether the caller is permitted to make a link.

Each uid's cache keyring is created when first accessed and is given a
timeout that is extended each time this function is called so that the
keyring goes away after a while. The timeout is configurable by sysctl
but defaults to three days.

Each user_namespace struct gets a lazily-created keyring that serves as
the register. The cache keyrings are added to it. This means that standard
key search and garbage collection facilities are available.

The user_namespace struct's register goes away when it does and anything
left in it is then automatically gc'd.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Simo Sorce <simo@redhat.com>
cc: Serge E. Hallyn <serge.hallyn@ubuntu.com>
cc: Eric W. Biederman <ebiederm@xmission.com>
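As a rough sketch of the call described above, the following hypothetical
userspace snippet wraps the raw keyctl syscall. The KEYCTL_GET_KRBCACHE
operation number and the wrapper's exact shape are assumptions for
illustration; only the call's form comes from the commit message.

/* Hypothetical sketch: fetch the caller's "_krb.<uid>" cache keyring and
 * link it into the session keyring.  The operation number below is assumed,
 * not taken from this commit. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <keyutils.h>

#define KEYCTL_GET_KRBCACHE 29	/* assumed keyctl operation number */

static key_serial_t keyctl_get_krbcache(uid_t uid, key_serial_t dest_keyring)
{
	/* Returns the "_krb.<uid>" keyring, creating it and linking it into
	 * the per-user_namespace register if it does not yet exist. */
	return syscall(__NR_keyctl, KEYCTL_GET_KRBCACHE, uid, dest_keyring);
}

int main(void)
{
	/* uid == -1 asks for the caller's own cache; naming another user's
	 * UID requires CAP_SETUID (e.g. for rpc.gssd). */
	key_serial_t krb_cache =
		keyctl_get_krbcache(-1, KEY_SPEC_SESSION_KEYRING);

	if (krb_cache < 0) {
		perror("keyctl_get_krbcache");
		return 1;
	}
	printf("Kerberos cache keyring: %d\n", krb_cache);
	return 0;
}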
227 lines
5.3 KiB
C
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.proc_inum = PROC_USER_INIT_INO,
#ifdef CONFIG_KEYS_KERBEROS_CACHE
	.krb_cache_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
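/*
 * Map a UID to a hash bucket: fold the bits above UIDHASH_BITS back into
 * the low bits, then mask to the table size.
 */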
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
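	/*
	 * atomic_dec_and_lock() takes uidhash_lock only when the refcount
	 * drops to zero; IRQs are disabled above because the lock can also
	 * be taken from softirq context.
	 */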
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
module_init(uid_cache_init);