commit c84ca912b0

Merge tag 'keys-namespace-20190627' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull keyring namespacing from David Howells:
 "These patches help make keys and keyrings more namespace aware.

  Firstly, some miscellaneous patches to make the process easier:

   - Simplify key index_key handling so that the word-sized chunks
     assoc_array requires don't have to be shifted about, making it easier
     to add more bits into the key.

   - Cache the hash value in the key so that we don't have to recalculate
     it for every key we examine during a search (it involves a bunch of
     multiplications).

   - Allow keyring_search() to search non-recursively.

  Then the main patches:

   - Make keyring names per-user_namespace from the point of view of
     KEYCTL_JOIN_SESSION_KEYRING so that they're not accessible
     cross-user_namespace.  keyctl_capabilities() shows
     KEYCTL_CAPS1_NS_KEYRING_NAME for this.

   - Move the user and user-session keyrings to the user_namespace rather
     than the user_struct.  This prevents them propagating directly across
     user_namespace boundaries (ie. the KEY_SPEC_* flags will only pick
     from the current user_namespace).

   - Make it possible to include the target namespace in which the key
     shall operate in the index_key.  This allows multiple keys with the
     same description, but different target domains, to be held in the
     same keyring.  keyctl_capabilities() shows KEYCTL_CAPS1_NS_KEY_TAG
     for this.

   - Make keys implicitly invalidated by removal of a domain tag, causing
     them to be garbage collected.

   - Institute a network namespace domain tag that allows keys to be
     differentiated by the network namespace in which they operate.  New
     keys of a type marked 'KEY_TYPE_NET_DOMAIN' are assigned the network
     domain in force when they are created.

   - Make it so that the desired network namespace can be handed down into
     the request_key() mechanism.  This allows AFS, NFS, etc. to request
     keys specific to the network namespace of the superblock.

     This also means that keys in the DNS record cache are thenceforth
     namespaced, provided network filesystems pass the appropriate network
     namespace down into dns_query().  For DNS, AFS and NFS are good,
     whilst CIFS and Ceph are not.

  Other cache keyrings, such as idmapper keyrings, also need to set the
  domain tag - for which they need access to the network namespace of the
  superblock"

* tag 'keys-namespace-20190627' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  keys: Pass the network namespace into request_key mechanism
  keys: Network namespace domain tag
  keys: Garbage collect keys for which the domain has been removed
  keys: Include target namespace in match criteria
  keys: Move the user and user-session keyrings to the user_namespace
  keys: Namespace keyring names
  keys: Add a 'recurse' flag for keyring searches
  keys: Cache the hash value to avoid lots of recalculation
  keys: Simplify key description management
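As a rough illustration of the keyctl_capabilities() probe mentioned above, the sketch below (not taken from this merge) checks whether a running kernel advertises the two new capability bits. It assumes <linux/keyctl.h> is recent enough to define KEYCTL_CAPABILITIES, KEYCTL_CAPS1_NS_KEYRING_NAME and KEYCTL_CAPS1_NS_KEY_TAG, and calls keyctl(2) through the raw syscall rather than libkeyutils:

/*
 * Minimal userspace sketch: probe the capability bits reported by
 * keyctl_capabilities().  Assumes a kernel/uapi header new enough to
 * define KEYCTL_CAPABILITIES and the KEYCTL_CAPS1_NS_* flags; on older
 * kernels the keyctl() call simply fails.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	unsigned char caps[2] = { 0, 0 };	/* bytes the kernel does not fill stay 0 */
	long n;

	n = syscall(__NR_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps));
	if (n < 0) {
		perror("keyctl(KEYCTL_CAPABILITIES)");
		return 1;
	}

	printf("namespaced keyring names (KEYCTL_CAPS1_NS_KEYRING_NAME): %s\n",
	       (caps[1] & KEYCTL_CAPS1_NS_KEYRING_NAME) ? "yes" : "no");
	printf("key domain tags (KEYCTL_CAPS1_NS_KEY_TAG): %s\n",
	       (caps[1] & KEYCTL_CAPS1_NS_KEY_TAG) ? "yes" : "no");
	return 0;
}

On kernels carrying this series both bits should read back as "yes"; on older kernels the call fails or the bits stay clear.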
kernel/user.c (230 lines, 5.5 KiB, C)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

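/*
 * Find a user_struct on the given hash chain and, if found, take a
 * reference that the caller must later drop with free_uid().
 */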
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

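/*
 * Drop a reference on @up.  When the last reference goes away, the entry is
 * removed from the hash and freed; both happen under uidhash_lock with
 * interrupts disabled.  A NULL @up is silently ignored.
 */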
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}

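/*
 * Find the user_struct for @uid, creating and inserting a new one if it does
 * not exist yet.  Returns the structure with an extra reference held, or
 * NULL if the allocation failed.
 */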
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

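/*
 * Early-boot initialisation: create the user_struct slab cache, set up the
 * UID hash table and insert root_user.
 */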
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);