v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.ns.count = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
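
Each of the three maps above holds a single identity extent: ids first through
first + count - 1 translate to lower_first + (id - first), and with
count = 4294967295U the whole 32-bit range is covered except (uid_t)-1, which
stays reserved as the invalid id. A minimal userspace sketch of that
arithmetic (the struct and map_down() helper below are illustrative
stand-ins, not the kernel's own types):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one id-mapping extent. */
struct extent {
	uint32_t first;       /* start of the range inside the namespace */
	uint32_t lower_first; /* start of the matching range in the parent */
	uint32_t count;       /* length of both ranges */
};

/* Translate an in-namespace id to the parent's id space; -1 if unmapped. */
static int64_t map_down(const struct extent *e, uint32_t id)
{
	if (id >= e->first && id - e->first < e->count)
		return (int64_t)e->lower_first + (id - e->first);
	return -1;
}

int main(void)
{
	/* The identity extent from init_user_ns: covers ids 0..4294967294. */
	struct extent ident = { 0, 0, 4294967295U };

	printf("%lld\n", (long long)map_down(&ident, 1000));        /* 1000 */
	printf("%lld\n", (long long)map_down(&ident, 4294967295U)); /* -1   */
	return 0;
}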

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
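
With CONFIG_BASE_SMALL unset, UIDHASH_BITS is 7, so the table has 128 buckets
and the mask is 127. As a worked example (a standalone sketch with the kernel
macros copied verbatim), uid 1000 hashes to bucket
((1000 >> 7) + 1000) & 127 == 1007 & 127 == 111:

#include <stdio.h>

#define UIDHASH_BITS	7	/* assuming CONFIG_BASE_SMALL is unset */
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)

int main(void)
{
	/* Folding the high bits back into the low bits spreads uids that
	 * differ only above bit 7 across different buckets. */
	printf("uid 1000 -> bucket %u\n", __uidhashfn(1000u)); /* 111 */
	return 0;
}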

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}
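
/*
 * With CONFIG_EPOLL disabled, the two helpers above compile to no-ops,
 * so the callers below (free_user(), alloc_uid(), uid_cache_init())
 * need no #ifdef CONFIG_EPOLL of their own.
 */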

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);
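
The reference discipline is strict: every successful find_user() must be
balanced by a free_uid(), because uid_hash_find() bumped the refcount under
the hash lock. A minimal hypothetical caller (illustrative only, not part of
this file) would look like:

/* Hypothetical helper: check whether a uid currently has a cached entry. */
static bool uid_is_cached(kuid_t uid)
{
	struct user_struct *up = find_user(uid);

	if (!up)
		return false;
	free_uid(up);	/* drop the reference find_user() took */
	return true;
}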

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
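
/*
 * The GFP_KERNEL allocation in alloc_uid() may sleep, so it cannot be
 * done under uidhash_lock; hence the hash is re-checked once the lock
 * is retaken. If another task inserted the same uid in the meantime,
 * the loser frees its copy and returns the winner's entry, whose
 * refcount uid_hash_find() has already raised.
 */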

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);
v4.17
 
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

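/*
 * Contrast with the v6.2 free_uid() above: here interrupts are disabled
 * by hand with local_irq_save() before atomic_dec_and_lock(), exactly
 * the situation the uidhash_lock comment warns about. Later kernels
 * fold both steps into refcount_dec_and_lock_irqsave().
 */
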
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
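
/*
 * In the race path above, new->uid_keyring and new->session_keyring are
 * still NULL (the object came from kmem_cache_zalloc() and nothing has
 * set them); key_put() accepts a NULL key, so the unconditional puts
 * are safe no-ops here.
 */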

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);