v3.15
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.proc_inum = PROC_USER_INIT_INO,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

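/*
 * Illustrative sketch, not part of the original file: the single extent
 * above identity-maps the full 32-bit ID space (4294967295U is UINT_MAX,
 * i.e. 2^32 - 1).  A lookup through such a one-extent map is conceptually
 * the function below; the real lookups (map_id_down() and friends in
 * kernel/user_namespace.c) walk all nr_extents entries.
 */
static inline kuid_t sketch_map_uid(const struct uid_gid_map *map, u32 uid)
{
	const struct uid_gid_extent *e = &map->extent[0];

	if (uid >= e->first && uid - e->first < e->count)
		return KUIDT_INIT(e->lower_first + (uid - e->first));
	return INVALID_UID;	/* id not covered by the map */
}
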
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

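/*
 * Worked example (illustrative): with the default UIDHASH_BITS of 7,
 * UIDHASH_SZ is 128 and UIDHASH_MASK is 127, so uid 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 111.  Folding the
 * high bits back in with the addition spreads runs of consecutive
 * UIDs across buckets at the cost of one shift, one add and one mask.
 */
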
static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

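/*
 * Usage sketch (illustrative, not from this file): even a caller that
 * only asks "is this UID cached?" must balance the reference that
 * find_user() took:
 *
 *	struct user_struct *up = find_user(uid);
 *	if (up) {
 *		...				read up->processes etc.
 *		free_uid(up);			drop the reference again
 *	}
 */
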
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:	/* despite the name, no lock is held on this path */
	return NULL;
}

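/*
 * Usage sketch: this is essentially how set_user() in kernel/sys.c
 * switches identity for setuid() (reduced to its core here; the
 * RLIMIT_NPROC check is omitted):
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	free_uid(new->user);		drop the old reference
 *	new->user = new_user;		install the new one
 */
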
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);
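
/*
 * Note: subsys_initcall() places uid_cache_init() at an earlier
 * initcall level than the plain module_init() still used in the
 * v3.5.6 copy below, so the cache and the root_user hash entry are
 * ready before ordinary driver initcalls can reach alloc_uid().
 */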
 
v3.5.6
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);

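/*
 * Note: this older version refcounts the namespace through an embedded
 * kref, where the v3.15 copy above uses a bare atomic .count and has
 * grown per-namespace extras (.projid_map, .proc_inum and the
 * persistent-keyring semaphore).
 */
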
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

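/*
 * Note: in this kernel hlist_for_each_entry() still takes a separate
 * struct hlist_node cursor ("h" above); the extra argument was dropped
 * in v3.9, which is why the v3.15 copy of this function has no such
 * variable.
 */
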
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:	/* despite the name, no lock is held on this path */
	return NULL;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);