v3.5.6
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
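/*
 * Editor's sketch (not part of the original file): the hash folds the
 * bits above UIDHASH_BITS back into the low bits, so UIDs that are
 * congruent modulo UIDHASH_SZ still land in different buckets.  With
 * the default UIDHASH_BITS == 7:
 *
 *	__uidhashfn(1000) == ((1000 >> 7) + 1000) & 127 == 111
 *	__uidhashfn(1128) == ((1128 >> 7) + 1128) & 127 == 112
 */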

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
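/*
 * Editor's note (not in the original): the pattern the comment above
 * rules out is taking this lock with spin_lock_bh()/spin_unlock_bh()
 * while a caller already has interrupts disabled, since the
 * local_bh_enable() in spin_unlock_bh() may then run softirq callbacks
 * with IRQs off.  Instead the file disables interrupts outright:
 * spin_lock_irq()/spin_lock_irqsave(), or local_irq_save() ahead of
 * atomic_dec_and_lock() in free_uid().
 */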
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
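/*
 * Editor's sketch (not part of the original file): how a caller pairs
 * alloc_uid() with free_uid() when switching credentials, in the
 * spirit of set_user() in kernel/sys.c.  The function name
 * switch_cred_user() is hypothetical.
 */
static int switch_cred_user(struct cred *new, kuid_t uid)
{
	struct user_struct *new_user;

	new_user = alloc_uid(uid);	/* takes a reference */
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);		/* drop the reference to the old user */
	new->user = new_user;		/* the cred now owns the new reference */
	return 0;
}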
 
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
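/*
 * Editor's note (not in the original): the v6.13.7 version below
 * registers this initializer with subsys_initcall() instead of
 * module_init(), so it runs earlier in boot, before ordinary
 * device-level initcalls that may need the UID cache.
 */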
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.gid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.projid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.ns.count = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
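/*
 * Editor's note (not in the original): compared with v3.5.6 above,
 * each map initializer gained a level of braces because the extent
 * storage in struct uid_gid_map moved into an anonymous aggregate (a
 * union was introduced when the idmap extent limit was raised beyond
 * five entries), a projid_map was added, and the bare kref became the
 * refcount inside the embedded ns_common (.ns.count).
 */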

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(IS_ENABLED(CONFIG_BASE_SMALL) ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}
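/*
 * Editor's sketch (not part of the original file): fs/eventpoll.c
 * charges each new epoll watch against this per-user counter, roughly
 *
 *	if (percpu_counter_compare(&ep->user->epoll_watches,
 *				   max_user_watches) >= 0)
 *		return -ENOSPC;
 *	percpu_counter_inc(&ep->user->epoll_watches);
 *
 * (simplified; see ep_insert() for the authoritative logic).
 */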

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);
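/*
 * Editor's note (not in the original): refcount_dec_and_lock_irqsave()
 * takes uidhash_lock, with interrupts disabled and the previous IRQ
 * state saved in flags, only when the count actually drops to zero, so
 * the common path is a plain atomic decrement.  Compare the v3.5.6
 * free_uid() above, which open-coded this with local_irq_save() plus
 * atomic_dec_and_lock().
 */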

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
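		/*
		 * Editor's note (not in the original): interval HZ with
		 * burst 100 allows roughly 100 events per second per
		 * user, gated elsewhere via __ratelimit(&up->ratelimit);
		 * RATELIMIT_MSG_ON_RELEASE defers the "callbacks
		 * suppressed" message until the state is released.
		 * root_user above uses interval 0, which disables
		 * limiting entirely.
		 */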

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);